path: root/src/core
author     Liam <byteslice@airmail.cc>  2023-11-30 20:05:02 +0100
committer  Liam <byteslice@airmail.cc>  2023-12-11 00:23:42 +0100
commit     bd599343505d202dc3d8226ca80d5f1af0247bf2 (patch)
tree       ad5dba92452f2b8909a1ac958237d2bbdfa54838 /src/core
parent     Merge pull request #12322 from liamwhite/savedata-absurdity (diff)
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/CMakeLists.txt                        8
-rw-r--r--  src/core/arm/nce/arm_nce.cpp                 102
-rw-r--r--  src/core/arm/nce/arm_nce.h                    13
-rw-r--r--  src/core/arm/nce/arm_nce.s                    60
-rw-r--r--  src/core/arm/nce/arm_nce_asm_definitions.h     3
-rw-r--r--  src/core/arm/nce/interpreter_visitor.cpp     825
-rw-r--r--  src/core/arm/nce/interpreter_visitor.h       103
-rw-r--r--  src/core/arm/nce/visitor_base.h             2777
8 files changed, 3848 insertions, 43 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 7b9ed856f..bffe68bd3 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -947,15 +947,19 @@ if (HAS_NCE)
set(CMAKE_ASM_FLAGS "${CFLAGS} -x assembler-with-cpp")
target_sources(core PRIVATE
+ arm/nce/arm_nce_asm_definitions.h
arm/nce/arm_nce.cpp
arm/nce/arm_nce.h
arm/nce/arm_nce.s
arm/nce/guest_context.h
+ arm/nce/instructions.h
+ arm/nce/interpreter_visitor.cpp
+ arm/nce/interpreter_visitor.h
arm/nce/patcher.cpp
arm/nce/patcher.h
- arm/nce/instructions.h
+ arm/nce/visitor_base.h
)
- target_link_libraries(core PRIVATE merry::oaknut)
+ target_link_libraries(core PRIVATE merry::mcl merry::oaknut)
endif()
if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
diff --git a/src/core/arm/nce/arm_nce.cpp b/src/core/arm/nce/arm_nce.cpp
index b42a32a0b..858c9f0a7 100644
--- a/src/core/arm/nce/arm_nce.cpp
+++ b/src/core/arm/nce/arm_nce.cpp
@@ -6,7 +6,7 @@
#include "common/signal_chain.h"
#include "core/arm/nce/arm_nce.h"
-#include "core/arm/nce/guest_context.h"
+#include "core/arm/nce/interpreter_visitor.h"
#include "core/arm/nce/patcher.h"
#include "core/core.h"
#include "core/memory.h"
@@ -21,7 +21,8 @@ namespace Core {
namespace {
-struct sigaction g_orig_action;
+struct sigaction g_orig_bus_action;
+struct sigaction g_orig_segv_action;
// Verify assembly offsets.
using NativeExecutionParameters = Kernel::KThread::NativeExecutionParameters;
@@ -37,6 +38,9 @@ fpsimd_context* GetFloatingPointState(mcontext_t& host_ctx) {
return reinterpret_cast<fpsimd_context*>(header);
}
+using namespace Common::Literals;
+constexpr u32 StackSize = 32_KiB;
+
} // namespace
void* ArmNce::RestoreGuestContext(void* raw_context) {
@@ -104,19 +108,10 @@ void ArmNce::SaveGuestContext(GuestContext* guest_ctx, void* raw_context) {
host_ctx.regs[0] = guest_ctx->esr_el1.exchange(0);
}
-bool ArmNce::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) {
+bool ArmNce::HandleFailedGuestFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) {
auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
auto* info = static_cast<siginfo_t*>(raw_info);
- // Try to handle an invalid access.
- // TODO: handle accesses which split a page?
- const Common::ProcessAddress addr =
- (reinterpret_cast<u64>(info->si_addr) & ~Memory::YUZU_PAGEMASK);
- if (guest_ctx->system->ApplicationMemory().InvalidateNCE(addr, Memory::YUZU_PAGESIZE)) {
- // We handled the access successfully and are returning to guest code.
- return true;
- }
-
// We can't handle the access, so determine why we crashed.
const bool is_prefetch_abort = host_ctx.pc == reinterpret_cast<u64>(info->si_addr);
@@ -143,8 +138,44 @@ bool ArmNce::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* raw
return false;
}
-void ArmNce::HandleHostFault(int sig, void* raw_info, void* raw_context) {
- return g_orig_action.sa_sigaction(sig, static_cast<siginfo_t*>(raw_info), raw_context);
+bool ArmNce::HandleGuestAlignmentFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) {
+ auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
+ auto* fpctx = GetFloatingPointState(host_ctx);
+ auto& memory = guest_ctx->system->ApplicationMemory();
+
+ // Match and execute an instruction.
+ auto next_pc = MatchAndExecuteOneInstruction(memory, &host_ctx, fpctx);
+ if (next_pc) {
+ host_ctx.pc = *next_pc;
+ return true;
+ }
+
+ // We couldn't handle the access.
+ return HandleFailedGuestFault(guest_ctx, raw_info, raw_context);
+}
+
+bool ArmNce::HandleGuestAccessFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) {
+ auto* info = static_cast<siginfo_t*>(raw_info);
+
+ // Try to handle an invalid access.
+ // TODO: handle accesses which split a page?
+ const Common::ProcessAddress addr =
+ (reinterpret_cast<u64>(info->si_addr) & ~Memory::YUZU_PAGEMASK);
+ if (guest_ctx->system->ApplicationMemory().InvalidateNCE(addr, Memory::YUZU_PAGESIZE)) {
+ // We handled the access successfully and are returning to guest code.
+ return true;
+ }
+
+ // We couldn't handle the access.
+ return HandleFailedGuestFault(guest_ctx, raw_info, raw_context);
+}
+
+void ArmNce::HandleHostAlignmentFault(int sig, void* raw_info, void* raw_context) {
+ return g_orig_bus_action.sa_sigaction(sig, static_cast<siginfo_t*>(raw_info), raw_context);
+}
+
+void ArmNce::HandleHostAccessFault(int sig, void* raw_info, void* raw_context) {
+ return g_orig_segv_action.sa_sigaction(sig, static_cast<siginfo_t*>(raw_info), raw_context);
}
void ArmNce::LockThread(Kernel::KThread* thread) {
@@ -220,6 +251,9 @@ void ArmNce::SetSvcArguments(std::span<const uint64_t, 8> args) {
ArmNce::ArmNce(System& system, bool uses_wall_clock, std::size_t core_index)
: ArmInterface{uses_wall_clock}, m_system{system}, m_core_index{core_index} {
m_guest_ctx.system = &m_system;
+
+ // Allocate signal stack.
+ m_stack = std::make_unique<u8[]>(StackSize);
}
ArmNce::~ArmNce() = default;
@@ -227,16 +261,23 @@ ArmNce::~ArmNce() = default;
void ArmNce::Initialize() {
m_thread_id = gettid();
- // Setup our signals
- static std::once_flag signals;
- std::call_once(signals, [] {
+ // Configure signal stack.
+ stack_t ss{};
+ ss.ss_sp = m_stack.get();
+ ss.ss_size = StackSize;
+ sigaltstack(&ss, nullptr);
+
+ // Set up signals.
+ static std::once_flag flag;
+ std::call_once(flag, [] {
using HandlerType = decltype(sigaction::sa_sigaction);
sigset_t signal_mask;
sigemptyset(&signal_mask);
sigaddset(&signal_mask, ReturnToRunCodeByExceptionLevelChangeSignal);
sigaddset(&signal_mask, BreakFromRunCodeSignal);
- sigaddset(&signal_mask, GuestFaultSignal);
+ sigaddset(&signal_mask, GuestAlignmentFaultSignal);
+ sigaddset(&signal_mask, GuestAccessFaultSignal);
struct sigaction return_to_run_code_action {};
return_to_run_code_action.sa_flags = SA_SIGINFO | SA_ONSTACK;
@@ -253,18 +294,19 @@ void ArmNce::Initialize() {
break_from_run_code_action.sa_mask = signal_mask;
Common::SigAction(BreakFromRunCodeSignal, &break_from_run_code_action, nullptr);
- struct sigaction fault_action {};
- fault_action.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART;
- fault_action.sa_sigaction = reinterpret_cast<HandlerType>(&ArmNce::GuestFaultSignalHandler);
- fault_action.sa_mask = signal_mask;
- Common::SigAction(GuestFaultSignal, &fault_action, &g_orig_action);
-
- // Simplify call for g_orig_action.
- // These fields occupy the same space in memory, so this should be a no-op in practice.
- if (!(g_orig_action.sa_flags & SA_SIGINFO)) {
- g_orig_action.sa_sigaction =
- reinterpret_cast<decltype(g_orig_action.sa_sigaction)>(g_orig_action.sa_handler);
- }
+ struct sigaction alignment_fault_action {};
+ alignment_fault_action.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ alignment_fault_action.sa_sigaction =
+ reinterpret_cast<HandlerType>(&ArmNce::GuestAlignmentFaultSignalHandler);
+ alignment_fault_action.sa_mask = signal_mask;
+ Common::SigAction(GuestAlignmentFaultSignal, &alignment_fault_action, nullptr);
+
+ struct sigaction access_fault_action {};
+ access_fault_action.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART;
+ access_fault_action.sa_sigaction =
+ reinterpret_cast<HandlerType>(&ArmNce::GuestAccessFaultSignalHandler);
+ access_fault_action.sa_mask = signal_mask;
+ Common::SigAction(GuestAccessFaultSignal, &access_fault_action, &g_orig_segv_action);
});
}
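
The arm_nce.cpp changes come down to two patterns: each ArmNce instance now registers a private 32 KiB signal stack via sigaltstack() before installing its handlers, and the former single fault handler is split so SIGBUS (alignment) faults are routed to the new interpreter while SIGSEGV (access) faults keep the page-invalidation path. Below is a minimal standalone sketch of that setup, assuming only POSIX signal APIs; the handler names and empty bodies are illustrative, not yuzu's.

#include <csignal>
#include <cstddef>
#include <memory>

constexpr std::size_t kStackSize = 32 * 1024;

// Illustrative stand-ins for GuestAlignmentFaultSignalHandler / GuestAccessFaultSignalHandler.
static void AlignmentFaultHandler(int, siginfo_t*, void*) {
    // Real code: decode the instruction at the faulting pc, emulate it, advance pc.
}

static void AccessFaultHandler(int, siginfo_t*, void*) {
    // Real code: invalidate the touched page and return to retry the access.
}

int main() {
    // Per-thread alternate stack, so handlers can run even if the guest sp is unusable.
    auto stack = std::make_unique<unsigned char[]>(kStackSize);
    stack_t ss{};
    ss.ss_sp = stack.get();
    ss.ss_size = kStackSize;
    sigaltstack(&ss, nullptr);

    struct sigaction bus_action{};
    bus_action.sa_flags = SA_SIGINFO | SA_ONSTACK;
    bus_action.sa_sigaction = AlignmentFaultHandler;
    sigaction(SIGBUS, &bus_action, nullptr);

    struct sigaction segv_action{};
    segv_action.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART; // SA_RESTART only here, as in the hunk above
    segv_action.sa_sigaction = AccessFaultHandler;
    sigaction(SIGSEGV, &segv_action, nullptr);
    return 0;
}

Note the asymmetry kept from the patch: only the access-fault action carries SA_RESTART, and neither stub here chains to a saved previous action the way g_orig_segv_action does.
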
diff --git a/src/core/arm/nce/arm_nce.h b/src/core/arm/nce/arm_nce.h
index f55c10d1d..be9b304c4 100644
--- a/src/core/arm/nce/arm_nce.h
+++ b/src/core/arm/nce/arm_nce.h
@@ -61,7 +61,8 @@ private:
static void ReturnToRunCodeByExceptionLevelChangeSignalHandler(int sig, void* info,
void* raw_context);
static void BreakFromRunCodeSignalHandler(int sig, void* info, void* raw_context);
- static void GuestFaultSignalHandler(int sig, void* info, void* raw_context);
+ static void GuestAlignmentFaultSignalHandler(int sig, void* info, void* raw_context);
+ static void GuestAccessFaultSignalHandler(int sig, void* info, void* raw_context);
static void LockThreadParameters(void* tpidr);
static void UnlockThreadParameters(void* tpidr);
@@ -70,8 +71,11 @@ private:
// C++ implementation functions for assembly definitions.
static void* RestoreGuestContext(void* raw_context);
static void SaveGuestContext(GuestContext* ctx, void* raw_context);
- static bool HandleGuestFault(GuestContext* ctx, void* info, void* raw_context);
- static void HandleHostFault(int sig, void* info, void* raw_context);
+ static bool HandleFailedGuestFault(GuestContext* ctx, void* info, void* raw_context);
+ static bool HandleGuestAlignmentFault(GuestContext* ctx, void* info, void* raw_context);
+ static bool HandleGuestAccessFault(GuestContext* ctx, void* info, void* raw_context);
+ static void HandleHostAlignmentFault(int sig, void* info, void* raw_context);
+ static void HandleHostAccessFault(int sig, void* info, void* raw_context);
public:
Core::System& m_system;
@@ -83,6 +87,9 @@ public:
// Core context.
GuestContext m_guest_ctx{};
Kernel::KThread* m_running_thread{};
+
+ // Stack for signal processing.
+ std::unique_ptr<u8[]> m_stack{};
};
} // namespace Core
diff --git a/src/core/arm/nce/arm_nce.s b/src/core/arm/nce/arm_nce.s
index 4aeda4740..c68c05949 100644
--- a/src/core/arm/nce/arm_nce.s
+++ b/src/core/arm/nce/arm_nce.s
@@ -130,11 +130,11 @@ _ZN4Core6ArmNce29BreakFromRunCodeSignalHandlerEiPvS1_:
ret
-/* static void Core::ArmNce::GuestFaultSignalHandler(int sig, void* info, void* raw_context) */
-.section .text._ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_, "ax", %progbits
-.global _ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_
-.type _ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_, %function
-_ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_:
+/* static void Core::ArmNce::GuestAlignmentFaultSignalHandler(int sig, void* info, void* raw_context) */
+.section .text._ZN4Core6ArmNce32GuestAlignmentFaultSignalHandlerEiPvS1_, "ax", %progbits
+.global _ZN4Core6ArmNce32GuestAlignmentFaultSignalHandlerEiPvS1_
+.type _ZN4Core6ArmNce32GuestAlignmentFaultSignalHandlerEiPvS1_, %function
+_ZN4Core6ArmNce32GuestAlignmentFaultSignalHandlerEiPvS1_:
/* Check to see if we have the correct TLS magic. */
mrs x8, tpidr_el0
ldr w9, [x8, #(TpidrEl0TlsMagic)]
@@ -146,7 +146,7 @@ _ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_:
/* Incorrect TLS magic, so this is a host fault. */
/* Tail call the handler. */
- b _ZN4Core6ArmNce15HandleHostFaultEiPvS1_
+ b _ZN4Core6ArmNce24HandleHostAlignmentFaultEiPvS1_
1:
/* Correct TLS magic, so this is a guest fault. */
@@ -163,7 +163,53 @@ _ZN4Core6ArmNce23GuestFaultSignalHandlerEiPvS1_:
msr tpidr_el0, x3
/* Call the handler. */
- bl _ZN4Core6ArmNce16HandleGuestFaultEPNS_12GuestContextEPvS3_
+ bl _ZN4Core6ArmNce25HandleGuestAlignmentFaultEPNS_12GuestContextEPvS3_
+
+ /* If the handler returned false, we want to preserve the host tpidr_el0. */
+ cbz x0, 2f
+
+ /* Otherwise, restore guest tpidr_el0. */
+ msr tpidr_el0, x19
+
+2:
+ ldr x19, [sp, #0x10]
+ ldp x29, x30, [sp], #0x20
+ ret
+
+/* static void Core::ArmNce::GuestAccessFaultSignalHandler(int sig, void* info, void* raw_context) */
+.section .text._ZN4Core6ArmNce29GuestAccessFaultSignalHandlerEiPvS1_, "ax", %progbits
+.global _ZN4Core6ArmNce29GuestAccessFaultSignalHandlerEiPvS1_
+.type _ZN4Core6ArmNce29GuestAccessFaultSignalHandlerEiPvS1_, %function
+_ZN4Core6ArmNce29GuestAccessFaultSignalHandlerEiPvS1_:
+ /* Check to see if we have the correct TLS magic. */
+ mrs x8, tpidr_el0
+ ldr w9, [x8, #(TpidrEl0TlsMagic)]
+
+ LOAD_IMMEDIATE_32(w10, TlsMagic)
+
+ cmp w9, w10
+ b.eq 1f
+
+ /* Incorrect TLS magic, so this is a host fault. */
+ /* Tail call the handler. */
+ b _ZN4Core6ArmNce21HandleHostAccessFaultEiPvS1_
+
+1:
+ /* Correct TLS magic, so this is a guest fault. */
+ stp x29, x30, [sp, #-0x20]!
+ str x19, [sp, #0x10]
+ mov x29, sp
+
+ /* Save the old tpidr_el0. */
+ mov x19, x8
+
+ /* Restore host tpidr_el0. */
+ ldr x0, [x8, #(TpidrEl0NativeContext)]
+ ldr x3, [x0, #(GuestContextHostContext + HostContextTpidrEl0)]
+ msr tpidr_el0, x3
+
+ /* Call the handler. */
+ bl _ZN4Core6ArmNce22HandleGuestAccessFaultEPNS_12GuestContextEPvS3_
/* If the handler returned false, we want to preserve the host tpidr_el0. */
cbz x0, 2f
diff --git a/src/core/arm/nce/arm_nce_asm_definitions.h b/src/core/arm/nce/arm_nce_asm_definitions.h
index 8a9b285b5..8ea4383f7 100644
--- a/src/core/arm/nce/arm_nce_asm_definitions.h
+++ b/src/core/arm/nce/arm_nce_asm_definitions.h
@@ -10,7 +10,8 @@
#define ReturnToRunCodeByExceptionLevelChangeSignal SIGUSR2
#define BreakFromRunCodeSignal SIGURG
-#define GuestFaultSignal SIGSEGV
+#define GuestAccessFaultSignal SIGSEGV
+#define GuestAlignmentFaultSignal SIGBUS
#define GuestContextSp 0xF8
#define GuestContextHostContext 0x320
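
These definitions are consumed by the assembly dispatchers above: each handler first reads tpidr_el0 and compares the TLS magic word to decide whether the fault came from guest or host code, forwarding host faults to the saved handler. A rough C++ rendering of that dispatch, under the assumption of an aarch64 compiler with GCC-style inline asm; the offset, magic value, and handler bodies are placeholders, not the real constants from this header or guest_context.h.

#include <cstddef>
#include <cstdint>
#include <cstring>

constexpr std::uint32_t kTlsMagic = 0;     // placeholder for TlsMagic
constexpr std::size_t kTlsMagicOffset = 0; // placeholder for TpidrEl0TlsMagic

static void HandleHostFault(int, void*, void*) { /* forward to the saved sigaction */ }
static bool HandleGuestFault(void*, void*, void*) { return true; }

void FaultDispatch(int sig, void* info, void* raw_context) {
    // Read the thread pointer; guest threads stash a magic word in their TLS block.
    unsigned char* tls;
    __asm__ volatile("mrs %0, tpidr_el0" : "=r"(tls));

    std::uint32_t magic;
    std::memcpy(&magic, tls + kTlsMagicOffset, sizeof(magic));

    if (magic != kTlsMagic) {
        // Wrong magic: this is a host fault, so tail-call the original handler.
        HandleHostFault(sig, info, raw_context);
        return;
    }

    // Correct magic: the assembly also swaps tpidr_el0 to the host value around this
    // call and restores the guest value only if the handler returns true.
    HandleGuestFault(/* guest context pointer from TLS */ nullptr, info, raw_context);
}
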
diff --git a/src/core/arm/nce/interpreter_visitor.cpp b/src/core/arm/nce/interpreter_visitor.cpp
new file mode 100644
index 000000000..8e81c66a5
--- /dev/null
+++ b/src/core/arm/nce/interpreter_visitor.cpp
@@ -0,0 +1,825 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2023 merryhime <https://mary.rs>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/bit_cast.h"
+#include "core/arm/nce/interpreter_visitor.h"
+
+#include <dynarmic/frontend/A64/decoder/a64.h>
+
+namespace Core {
+
+template <u32 BitSize>
+u64 SignExtendToLong(u64 value) {
+ u64 mask = 1ULL << (BitSize - 1);
+ value &= (1ULL << BitSize) - 1;
+ return (value ^ mask) - mask;
+}
+
+static u64 SignExtendToLong(u64 value, u64 bitsize) {
+ switch (bitsize) {
+ case 8:
+ return SignExtendToLong<8>(value);
+ case 16:
+ return SignExtendToLong<16>(value);
+ case 32:
+ return SignExtendToLong<32>(value);
+ default:
+ return value;
+ }
+}
+
+template <u64 BitSize>
+u32 SignExtendToWord(u32 value) {
+ u32 mask = 1ULL << (BitSize - 1);
+ value &= (1ULL << BitSize) - 1;
+ return (value ^ mask) - mask;
+}
+
+static u32 SignExtendToWord(u32 value, u64 bitsize) {
+ switch (bitsize) {
+ case 8:
+ return SignExtendToWord<8>(value);
+ case 16:
+ return SignExtendToWord<16>(value);
+ default:
+ return value;
+ }
+}
+
+static u64 SignExtend(u64 value, u64 bitsize, u64 regsize) {
+ if (regsize == 64) {
+ return SignExtendToLong(value, bitsize);
+ } else {
+ return SignExtendToWord(static_cast<u32>(value), bitsize);
+ }
+}
+
+static u128 VectorGetElement(u128 value, u64 bitsize) {
+ switch (bitsize) {
+ case 8:
+ return {value[0] & ((1ULL << 8) - 1), 0};
+ case 16:
+ return {value[0] & ((1ULL << 16) - 1), 0};
+ case 32:
+ return {value[0] & ((1ULL << 32) - 1), 0};
+ case 64:
+ return {value[0], 0};
+ default:
+ return value;
+ }
+}
+
+u64 InterpreterVisitor::ExtendReg(size_t bitsize, Reg reg, Imm<3> option, u8 shift) {
+ ASSERT(shift <= 4);
+ ASSERT(bitsize == 32 || bitsize == 64);
+ u64 val = this->GetReg(reg);
+ size_t len;
+ u64 extended;
+ bool signed_extend;
+
+ switch (option.ZeroExtend()) {
+ case 0b000: { // UXTB
+ val &= ((1ULL << 8) - 1);
+ len = 8;
+ signed_extend = false;
+ break;
+ }
+ case 0b001: { // UXTH
+ val &= ((1ULL << 16) - 1);
+ len = 16;
+ signed_extend = false;
+ break;
+ }
+ case 0b010: { // UXTW
+ val &= ((1ULL << 32) - 1);
+ len = 32;
+ signed_extend = false;
+ break;
+ }
+ case 0b011: { // UXTX
+ len = 64;
+ signed_extend = false;
+ break;
+ }
+ case 0b100: { // SXTB
+ val &= ((1ULL << 8) - 1);
+ len = 8;
+ signed_extend = true;
+ break;
+ }
+ case 0b101: { // SXTH
+ val &= ((1ULL << 16) - 1);
+ len = 16;
+ signed_extend = true;
+ break;
+ }
+ case 0b110: { // SXTW
+ val &= ((1ULL << 32) - 1);
+ len = 32;
+ signed_extend = true;
+ break;
+ }
+ case 0b111: { // SXTX
+ len = 64;
+ signed_extend = true;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ if (len < bitsize && signed_extend) {
+ extended = SignExtend(val, len, bitsize);
+ } else {
+ extended = val;
+ }
+
+ return extended << shift;
+}
+
+u128 InterpreterVisitor::GetVec(Vec v) {
+ return m_fpsimd_regs[static_cast<u32>(v)];
+}
+
+u64 InterpreterVisitor::GetReg(Reg r) {
+ return m_regs[static_cast<u32>(r)];
+}
+
+u64 InterpreterVisitor::GetSp() {
+ return m_sp;
+}
+
+u64 InterpreterVisitor::GetPc() {
+ return m_pc;
+}
+
+void InterpreterVisitor::SetVec(Vec v, u128 value) {
+ m_fpsimd_regs[static_cast<u32>(v)] = value;
+}
+
+void InterpreterVisitor::SetReg(Reg r, u64 value) {
+ m_regs[static_cast<u32>(r)] = value;
+}
+
+void InterpreterVisitor::SetSp(u64 value) {
+ m_sp = value;
+}
+
+bool InterpreterVisitor::Ordered(size_t size, bool L, bool o0, Reg Rn, Reg Rt) {
+ const auto memop = L ? MemOp::Load : MemOp::Store;
+ const size_t elsize = 8 << size;
+ const size_t datasize = elsize;
+
+ // Operation
+ const size_t dbytes = datasize / 8;
+
+ u64 address;
+ if (Rn == Reg::SP) {
+ address = this->GetSp();
+ } else {
+ address = this->GetReg(Rn);
+ }
+
+ switch (memop) {
+ case MemOp::Store: {
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ u64 value = this->GetReg(Rt);
+ m_memory.WriteBlock(address, &value, dbytes);
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ break;
+ }
+ case MemOp::Load: {
+ u64 value = 0;
+ m_memory.ReadBlock(address, &value, dbytes);
+ this->SetReg(Rt, value);
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ return true;
+}
+
+bool InterpreterVisitor::STLLR(Imm<2> sz, Reg Rn, Reg Rt) {
+ const size_t size = sz.ZeroExtend<size_t>();
+ const bool L = 0;
+ const bool o0 = 0;
+ return this->Ordered(size, L, o0, Rn, Rt);
+}
+
+bool InterpreterVisitor::STLR(Imm<2> sz, Reg Rn, Reg Rt) {
+ const size_t size = sz.ZeroExtend<size_t>();
+ const bool L = 0;
+ const bool o0 = 1;
+ return this->Ordered(size, L, o0, Rn, Rt);
+}
+
+bool InterpreterVisitor::LDLAR(Imm<2> sz, Reg Rn, Reg Rt) {
+ const size_t size = sz.ZeroExtend<size_t>();
+ const bool L = 1;
+ const bool o0 = 0;
+ return this->Ordered(size, L, o0, Rn, Rt);
+}
+
+bool InterpreterVisitor::LDAR(Imm<2> sz, Reg Rn, Reg Rt) {
+ const size_t size = sz.ZeroExtend<size_t>();
+ const bool L = 1;
+ const bool o0 = 1;
+ return this->Ordered(size, L, o0, Rn, Rt);
+}
+
+bool InterpreterVisitor::LDR_lit_gen(bool opc_0, Imm<19> imm19, Reg Rt) {
+ const size_t size = opc_0 == 0 ? 4 : 8;
+ const s64 offset = Dynarmic::concatenate(imm19, Imm<2>{0}).SignExtend<s64>();
+ const u64 address = this->GetPc() + offset;
+
+ u64 data = 0;
+ m_memory.ReadBlock(address, &data, size);
+
+ this->SetReg(Rt, data);
+ return true;
+}
+
+bool InterpreterVisitor::LDR_lit_fpsimd(Imm<2> opc, Imm<19> imm19, Vec Vt) {
+ if (opc == 0b11) {
+ // Unallocated encoding
+ return false;
+ }
+
+ const u64 size = 4 << opc.ZeroExtend();
+ const u64 offset = imm19.SignExtend<u64>() << 2;
+ const u64 address = this->GetPc() + offset;
+
+ u128 data{};
+ m_memory.ReadBlock(address, &data, size);
+ this->SetVec(Vt, data);
+ return true;
+}
+
+bool InterpreterVisitor::STP_LDP_gen(Imm<2> opc, bool not_postindex, bool wback, Imm<1> L,
+ Imm<7> imm7, Reg Rt2, Reg Rn, Reg Rt) {
+ if ((L == 0 && opc.Bit<0>() == 1) || opc == 0b11) {
+ // Unallocated encoding
+ return false;
+ }
+
+ const auto memop = L == 1 ? MemOp::Load : MemOp::Store;
+ if (memop == MemOp::Load && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31) {
+ // Unpredictable instruction
+ return false;
+ }
+ if (memop == MemOp::Store && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31) {
+ // Unpredictable instruction
+ return false;
+ }
+ if (memop == MemOp::Load && Rt == Rt2) {
+ // Unpredictable instruction
+ return false;
+ }
+
+ u64 address;
+ if (Rn == Reg::SP) {
+ address = this->GetSp();
+ } else {
+ address = this->GetReg(Rn);
+ }
+
+ const bool postindex = !not_postindex;
+ const bool signed_ = opc.Bit<0>() != 0;
+ const size_t scale = 2 + opc.Bit<1>();
+ const size_t datasize = 8 << scale;
+ const u64 offset = imm7.SignExtend<u64>() << scale;
+
+ if (!postindex) {
+ address += offset;
+ }
+
+ const size_t dbytes = datasize / 8;
+ switch (memop) {
+ case MemOp::Store: {
+ u64 data1 = this->GetReg(Rt);
+ u64 data2 = this->GetReg(Rt2);
+ m_memory.WriteBlock(address, &data1, dbytes);
+ m_memory.WriteBlock(address + dbytes, &data2, dbytes);
+ break;
+ }
+ case MemOp::Load: {
+ u64 data1 = 0, data2 = 0;
+ m_memory.ReadBlock(address, &data1, dbytes);
+ m_memory.ReadBlock(address + dbytes, &data2, dbytes);
+ if (signed_) {
+ this->SetReg(Rt, SignExtend(data1, datasize, 64));
+ this->SetReg(Rt2, SignExtend(data2, datasize, 64));
+ } else {
+ this->SetReg(Rt, data1);
+ this->SetReg(Rt2, data2);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ if (wback) {
+ if (postindex) {
+ address += offset;
+ }
+
+ if (Rn == Reg::SP) {
+ this->SetSp(address);
+ } else {
+ this->SetReg(Rn, address);
+ }
+ }
+
+ return true;
+}
+
+bool InterpreterVisitor::STP_LDP_fpsimd(Imm<2> opc, bool not_postindex, bool wback, Imm<1> L,
+ Imm<7> imm7, Vec Vt2, Reg Rn, Vec Vt) {
+ if (opc == 0b11) {
+ // Unallocated encoding
+ return false;
+ }
+
+ const auto memop = L == 1 ? MemOp::Load : MemOp::Store;
+ if (memop == MemOp::Load && Vt == Vt2) {
+ // Unpredictable instruction
+ return false;
+ }
+
+ u64 address;
+ if (Rn == Reg::SP) {
+ address = this->GetSp();
+ } else {
+ address = this->GetReg(Rn);
+ }
+
+ const bool postindex = !not_postindex;
+ const size_t scale = 2 + opc.ZeroExtend<size_t>();
+ const size_t datasize = 8 << scale;
+ const u64 offset = imm7.SignExtend<u64>() << scale;
+ const size_t dbytes = datasize / 8;
+
+ if (!postindex) {
+ address += offset;
+ }
+
+ switch (memop) {
+ case MemOp::Store: {
+ u128 data1 = VectorGetElement(this->GetVec(Vt), datasize);
+ u128 data2 = VectorGetElement(this->GetVec(Vt2), datasize);
+ m_memory.WriteBlock(address, &data1, dbytes);
+ m_memory.WriteBlock(address + dbytes, &data2, dbytes);
+ break;
+ }
+ case MemOp::Load: {
+ u128 data1{}, data2{};
+ m_memory.ReadBlock(address, &data1, dbytes);
+ m_memory.ReadBlock(address + dbytes, &data2, dbytes);
+ this->SetVec(Vt, data1);
+ this->SetVec(Vt2, data2);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ if (wback) {
+ if (postindex) {
+ address += offset;
+ }
+
+ if (Rn == Reg::SP) {
+ this->SetSp(address);
+ } else {
+ this->SetReg(Rn, address);
+ }
+ }
+
+ return true;
+}
+
+bool InterpreterVisitor::RegisterImmediate(bool wback, bool postindex, size_t scale, u64 offset,
+ Imm<2> size, Imm<2> opc, Reg Rn, Reg Rt) {
+ MemOp memop;
+ bool signed_ = false;
+ size_t regsize = 0;
+
+ if (opc.Bit<1>() == 0) {
+ memop = opc.Bit<0>() ? MemOp::Load : MemOp::Store;
+ regsize = size == 0b11 ? 64 : 32;
+ signed_ = false;
+ } else if (size == 0b11) {
+ memop = MemOp::Prefetch;
+ ASSERT(!opc.Bit<0>());
+ } else {
+ memop = MemOp::Load;
+ ASSERT(!(size == 0b10 && opc.Bit<0>() == 1));
+ regsize = opc.Bit<0>() ? 32 : 64;
+ signed_ = true;
+ }
+
+ if (memop == MemOp::Load && wback && Rn == Rt && Rn != Reg::R31) {
+ // Unpredictable instruction
+ return false;
+ }
+ if (memop == MemOp::Store && wback && Rn == Rt && Rn != Reg::R31) {
+ // Unpredictable instruction
+ return false;
+ }
+
+ u64 address;
+ if (Rn == Reg::SP) {
+ address = this->GetSp();
+ } else {
+ address = this->GetReg(Rn);
+ }
+ if (!postindex) {
+ address += offset;
+ }
+
+ const size_t datasize = 8 << scale;
+ switch (memop) {
+ case MemOp::Store: {
+ u64 data = this->GetReg(Rt);
+ m_memory.WriteBlock(address, &data, datasize / 8);
+ break;
+ }
+ case MemOp::Load: {
+ u64 data = 0;
+ m_memory.ReadBlock(address, &data, datasize / 8);
+ if (signed_) {
+ this->SetReg(Rt, SignExtend(data, datasize, regsize));
+ } else {
+ this->SetReg(Rt, data);
+ }
+ break;
+ }
+ case MemOp::Prefetch:
+ // this->Prefetch(address, Rt)
+ break;
+ }
+
+ if (wback) {
+ if (postindex) {
+ address += offset;
+ }
+
+ if (Rn == Reg::SP) {
+ this->SetSp(address);
+ } else {
+ this->SetReg(Rn, address);
+ }
+ }
+
+ return true;
+}
+
+bool InterpreterVisitor::STRx_LDRx_imm_1(Imm<2> size, Imm<2> opc, Imm<9> imm9, bool not_postindex,
+ Reg Rn, Reg Rt) {
+ const bool wback = true;
+ const bool postindex = !not_postindex;
+ const size_t scale = size.ZeroExtend<size_t>();
+ const u64 offset = imm9.SignExtend<u64>();
+
+ return this->RegisterImmediate(wback, postindex, scale, offset, size, opc, Rn, Rt);
+}
+
+bool InterpreterVisitor::STRx_LDRx_imm_2(Imm<2> size, Imm<2> opc, Imm<12> imm12, Reg Rn, Reg Rt) {
+ const bool wback = false;
+ const bool postindex = false;
+ const size_t scale = size.ZeroExtend<size_t>();
+ const u64 offset = imm12.ZeroExtend<u64>() << scale;
+
+ return this->RegisterImmediate(wback, postindex, scale, offset, size, opc, Rn, Rt);
+}
+
+bool InterpreterVisitor::STURx_LDURx(Imm<2> size, Imm<2> opc, Imm<9> imm9, Reg Rn, Reg Rt) {
+ const bool wback = false;
+ const bool postindex = false;
+ const size_t scale = size.ZeroExtend<size_t>();
+ const u64 offset = imm9.SignExtend<u64>();
+
+ return this->RegisterImmediate(wback, postindex, scale, offset, size, opc, Rn, Rt);
+}
+
+bool InterpreterVisitor::SIMDImmediate(bool wback, bool postindex, size_t scale, u64 offset,
+ MemOp memop, Reg Rn, Vec Vt) {
+ const size_t datasize = 8 << scale;
+
+ u64 address;
+ if (Rn == Reg::SP) {
+ address = this->GetSp();
+ } else {
+ address = this->GetReg(Rn);
+ }
+
+ if (!postindex) {
+ address += offset;
+ }
+
+ switch (memop) {
+ case MemOp::Store: {
+ u128 data = VectorGetElement(this->GetVec(Vt), datasize);
+ m_memory.WriteBlock(address, &data, datasize / 8);
+ break;
+ }
+ case MemOp::Load: {
+ u128 data{};
+ m_memory.ReadBlock(address, &data, datasize / 8);
+ this->SetVec(Vt, data);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ if (wback) {
+ if (postindex) {
+ address += offset;
+ }
+
+ if (Rn == Reg::SP) {
+ this->SetSp(address);
+ } else {
+ this->SetReg(Rn, address);
+ }
+ }
+
+ return true;
+}
+
+bool InterpreterVisitor::STR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9,
+ bool not_postindex, Reg Rn, Vec Vt) {
+ const size_t scale = Dynarmic::concatenate(opc_1, size).ZeroExtend<size_t>();
+ if (scale > 4) {
+ // Unallocated encoding
+ return false;
+ }
+
+ const bool wback = true;
+ const bool postindex = !not_postindex;
+ const u64 offset = imm9.SignExtend<u64>();
+
+ return this->SIMDImmediate(wback, postindex, scale, offset, MemOp::Store, Rn, Vt);
+}
+
+bool InterpreterVisitor::STR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn,
+ Vec Vt) {
+ const size_t scale = Dynarmic::concatenate(opc_1, size).ZeroExtend<size_t>();
+ if (scale > 4) {
+ // Unallocated encoding
+ return false;
+ }
+
+ const bool wback = false;
+ const bool postindex = false;
+ const u64 offset = imm12.ZeroExtend<u64>() << scale;
+
+ return this->SIMDImmediate(wback, postindex, scale, offset, MemOp::Store, Rn, Vt);
+}
+
+bool InterpreterVisitor::LDR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9,
+ bool not_postindex, Reg Rn, Vec Vt) {
+ const size_t scale = Dynarmic::concatenate(opc_1, size).ZeroExtend<size_t>();
+ if (scale > 4) {
+ // Unallocated encoding
+ return false;
+ }
+
+ const bool wback = true;
+ const bool postindex = !not_postindex;
+ const u64 offset = imm9.SignExtend<u64>();
+
+ return this->SIMDImmediate(wback, postindex, scale, offset, MemOp::Load, Rn, Vt);
+}
+
+bool InterpreterVisitor::LDR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn,
+ Vec Vt) {
+ const size_t scale = Dynarmic::concatenate(opc_1, size).ZeroExtend<size_t>();
+ if (scale > 4) {
+ // Unallocated encoding
+ return false;
+ }
+
+ const bool wback = false;
+ const bool postindex = false;
+ const u64 offset = imm12.ZeroExtend<u64>() << scale;
+
+ return this->SIMDImmediate(wback, postindex, scale, offset, MemOp::Load, Rn, Vt);
+}
+
+bool InterpreterVisitor::STUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
+ const size_t scale = Dynarmic::concatenate(opc_1, size).ZeroExtend<size_t>();
+ if (scale > 4) {
+ // Unallocated encoding
+ return false;
+ }
+
+ const bool wback = false;
+ const bool postindex = false;
+ const u64 offset = imm9.SignExtend<u64>();
+
+ return this->SIMDImmediate(wback, postindex, scale, offset, MemOp::Store, Rn, Vt);
+}
+
+bool InterpreterVisitor::LDUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
+ const size_t scale = Dynarmic::concatenate(opc_1, size).ZeroExtend<size_t>();
+ if (scale > 4) {
+ // Unallocated encoding
+ return false;
+ }
+
+ const bool wback = false;
+ const bool postindex = false;
+ const u64 offset = imm9.SignExtend<u64>();
+
+ return this->SIMDImmediate(wback, postindex, scale, offset, MemOp::Load, Rn, Vt);
+}
+
+bool InterpreterVisitor::RegisterOffset(size_t scale, u8 shift, Imm<2> size, Imm<1> opc_1,
+ Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Reg Rt) {
+ MemOp memop;
+ size_t regsize = 64;
+ bool signed_ = false;
+
+ if (opc_1 == 0) {
+ memop = opc_0 == 1 ? MemOp::Load : MemOp::Store;
+ regsize = size == 0b11 ? 64 : 32;
+ signed_ = false;
+ } else if (size == 0b11) {
+ memop = MemOp::Prefetch;
+ if (opc_0 == 1) {
+ // Unallocated encoding
+ return false;
+ }
+ } else {
+ memop = MemOp::Load;
+ if (size == 0b10 && opc_0 == 1) {
+ // Unallocated encoding
+ return false;
+ }
+ regsize = opc_0 == 1 ? 32 : 64;
+ signed_ = true;
+ }
+
+ const size_t datasize = 8 << scale;
+
+ // Operation
+ const u64 offset = this->ExtendReg(64, Rm, option, shift);
+
+ u64 address;
+ if (Rn == Reg::SP) {
+ address = this->GetSp();
+ } else {
+ address = this->GetReg(Rn);
+ }
+ address += offset;
+
+ switch (memop) {
+ case MemOp::Store: {
+ u64 data = this->GetReg(Rt);
+ m_memory.WriteBlock(address, &data, datasize / 8);
+ break;
+ }
+ case MemOp::Load: {
+ u64 data = 0;
+ m_memory.ReadBlock(address, &data, datasize / 8);
+ if (signed_) {
+ this->SetReg(Rt, SignExtend(data, datasize, regsize));
+ } else {
+ this->SetReg(Rt, data);
+ }
+ break;
+ }
+ case MemOp::Prefetch:
+ break;
+ }
+
+ return true;
+}
+
+bool InterpreterVisitor::STRx_reg(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S, Reg Rn,
+ Reg Rt) {
+ const Imm<1> opc_0{0};
+ const size_t scale = size.ZeroExtend<size_t>();
+ const u8 shift = S ? static_cast<u8>(scale) : 0;
+ if (!option.Bit<1>()) {
+ // Unallocated encoding
+ return false;
+ }
+ return this->RegisterOffset(scale, shift, size, opc_1, opc_0, Rm, option, Rn, Rt);
+}
+
+bool InterpreterVisitor::LDRx_reg(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S, Reg Rn,
+ Reg Rt) {
+ const Imm<1> opc_0{1};
+ const size_t scale = size.ZeroExtend<size_t>();
+ const u8 shift = S ? static_cast<u8>(scale) : 0;
+ if (!option.Bit<1>()) {
+ // Unallocated encoding
+ return false;
+ }
+ return this->RegisterOffset(scale, shift, size, opc_1, opc_0, Rm, option, Rn, Rt);
+}
+
+bool InterpreterVisitor::SIMDOffset(size_t scale, u8 shift, Imm<1> opc_0, Reg Rm, Imm<3> option,
+ Reg Rn, Vec Vt) {
+ const auto memop = opc_0 == 1 ? MemOp::Load : MemOp::Store;
+ const size_t datasize = 8 << scale;
+
+ // Operation
+ const u64 offset = this->ExtendReg(64, Rm, option, shift);
+
+ u64 address;
+ if (Rn == Reg::SP) {
+ address = this->GetSp();
+ } else {
+ address = this->GetReg(Rn);
+ }
+ address += offset;
+
+ switch (memop) {
+ case MemOp::Store: {
+ u128 data = VectorGetElement(this->GetVec(Vt), datasize);
+ m_memory.WriteBlock(address, &data, datasize / 8);
+ break;
+ }
+ case MemOp::Load: {
+ u128 data{};
+ m_memory.ReadBlock(address, &data, datasize / 8);
+ this->SetVec(Vt, data);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ return true;
+}
+
+bool InterpreterVisitor::STR_reg_fpsimd(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S,
+ Reg Rn, Vec Vt) {
+ const Imm<1> opc_0{0};
+ const size_t scale = Dynarmic::concatenate(opc_1, size).ZeroExtend<size_t>();
+ if (scale > 4) {
+ // Unallocated encoding
+ return false;
+ }
+ const u8 shift = S ? static_cast<u8>(scale) : 0;
+ if (!option.Bit<1>()) {
+ // Unallocated encoding
+ return false;
+ }
+ return this->SIMDOffset(scale, shift, opc_0, Rm, option, Rn, Vt);
+}
+
+bool InterpreterVisitor::LDR_reg_fpsimd(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S,
+ Reg Rn, Vec Vt) {
+ const Imm<1> opc_0{1};
+ const size_t scale = Dynarmic::concatenate(opc_1, size).ZeroExtend<size_t>();
+ if (scale > 4) {
+ // Unallocated encoding
+ return false;
+ }
+ const u8 shift = S ? static_cast<u8>(scale) : 0;
+ if (!option.Bit<1>()) {
+ // Unallocated encoding
+ return false;
+ }
+ return this->SIMDOffset(scale, shift, opc_0, Rm, option, Rn, Vt);
+}
+
+std::optional<u64> MatchAndExecuteOneInstruction(Core::Memory::Memory& memory, mcontext_t* context,
+ fpsimd_context* fpsimd_context) {
+ // Construct the interpreter.
+ std::span<u64, 31> regs(reinterpret_cast<u64*>(context->regs), 31);
+ std::span<u128, 32> vregs(reinterpret_cast<u128*>(fpsimd_context->vregs), 32);
+ u64& sp = *reinterpret_cast<u64*>(&context->sp);
+ const u64& pc = *reinterpret_cast<u64*>(&context->pc);
+
+ InterpreterVisitor visitor(memory, regs, vregs, sp, pc);
+
+ // Read the instruction at the program counter.
+ u32 instruction = memory.Read32(pc);
+ bool was_executed = false;
+
+ // Interpret the instruction.
+ if (auto decoder = Dynarmic::A64::Decode<VisitorBase>(instruction)) {
+ was_executed = decoder->get().call(visitor, instruction);
+ } else {
+ LOG_ERROR(Core_ARM, "Unallocated encoding: {:#x}", instruction);
+ }
+
+ if (was_executed) {
+ return pc + 4;
+ }
+
+ return std::nullopt;
+}
+
+} // namespace Core
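
One detail worth spelling out from the helpers at the top of this file: the (value ^ mask) - mask idiom first masks the input down to BitSize bits, then XORs and subtracts the sign bit, which leaves non-negative values untouched and fills the upper bits for negative ones. A standalone recheck of that arithmetic (a reimplementation for illustration, not part of the patch):

#include <cassert>
#include <cstdint>

template <std::uint32_t BitSize>
std::uint64_t SignExtendToLong(std::uint64_t value) {
    const std::uint64_t mask = 1ULL << (BitSize - 1);
    value &= (1ULL << BitSize) - 1;
    return (value ^ mask) - mask;
}

int main() {
    assert(SignExtendToLong<8>(0x80) == 0xFFFFFFFFFFFFFF80ULL); // -128 as a signed byte
    assert(SignExtendToLong<8>(0x7F) == 0x7FULL);               // +127 stays positive
    assert(SignExtendToLong<16>(0xFFFF) == ~0ULL);              // -1 as a signed halfword
    return 0;
}
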
diff --git a/src/core/arm/nce/interpreter_visitor.h b/src/core/arm/nce/interpreter_visitor.h
new file mode 100644
index 000000000..f90d876ab
--- /dev/null
+++ b/src/core/arm/nce/interpreter_visitor.h
@@ -0,0 +1,103 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2023 merryhime <https://mary.rs>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <signal.h>
+#include <unistd.h>
+
+#include "core/arm/nce/visitor_base.h"
+
+namespace Core {
+
+namespace Memory {
+class Memory;
+}
+
+class InterpreterVisitor final : public VisitorBase {
+public:
+ explicit InterpreterVisitor(Core::Memory::Memory& memory, std::span<u64, 31> regs,
+ std::span<u128, 32> fpsimd_regs, u64& sp, const u64& pc)
+ : m_memory(memory), m_regs(regs), m_fpsimd_regs(fpsimd_regs), m_sp(sp), m_pc(pc) {}
+ ~InterpreterVisitor() override = default;
+
+ enum class MemOp {
+ Load,
+ Store,
+ Prefetch,
+ };
+
+ u128 GetVec(Vec v);
+ u64 GetReg(Reg r);
+ u64 GetSp();
+ u64 GetPc();
+
+ void SetVec(Vec v, u128 value);
+ void SetReg(Reg r, u64 value);
+ void SetSp(u64 value);
+
+ u64 ExtendReg(size_t bitsize, Reg reg, Imm<3> option, u8 shift);
+
+ // Loads and stores - Load/Store Exclusive
+ bool Ordered(size_t size, bool L, bool o0, Reg Rn, Reg Rt);
+ bool STLLR(Imm<2> size, Reg Rn, Reg Rt) override;
+ bool STLR(Imm<2> size, Reg Rn, Reg Rt) override;
+ bool LDLAR(Imm<2> size, Reg Rn, Reg Rt) override;
+ bool LDAR(Imm<2> size, Reg Rn, Reg Rt) override;
+
+ // Loads and stores - Load register (literal)
+ bool LDR_lit_gen(bool opc_0, Imm<19> imm19, Reg Rt) override;
+ bool LDR_lit_fpsimd(Imm<2> opc, Imm<19> imm19, Vec Vt) override;
+
+ // Loads and stores - Load/Store register pair
+ bool STP_LDP_gen(Imm<2> opc, bool not_postindex, bool wback, Imm<1> L, Imm<7> imm7, Reg Rt2,
+ Reg Rn, Reg Rt) override;
+ bool STP_LDP_fpsimd(Imm<2> opc, bool not_postindex, bool wback, Imm<1> L, Imm<7> imm7, Vec Vt2,
+ Reg Rn, Vec Vt) override;
+
+ // Loads and stores - Load/Store register (immediate)
+ bool RegisterImmediate(bool wback, bool postindex, size_t scale, u64 offset, Imm<2> size,
+ Imm<2> opc, Reg Rn, Reg Rt);
+ bool STRx_LDRx_imm_1(Imm<2> size, Imm<2> opc, Imm<9> imm9, bool not_postindex, Reg Rn,
+ Reg Rt) override;
+ bool STRx_LDRx_imm_2(Imm<2> size, Imm<2> opc, Imm<12> imm12, Reg Rn, Reg Rt) override;
+ bool STURx_LDURx(Imm<2> size, Imm<2> opc, Imm<9> imm9, Reg Rn, Reg Rt) override;
+
+ bool SIMDImmediate(bool wback, bool postindex, size_t scale, u64 offset, MemOp memop, Reg Rn,
+ Vec Vt);
+ bool STR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, bool not_postindex, Reg Rn,
+ Vec Vt) override;
+ bool STR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) override;
+ bool LDR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, bool not_postindex, Reg Rn,
+ Vec Vt) override;
+ bool LDR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) override;
+ bool STUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) override;
+ bool LDUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) override;
+
+ // Loads and stores - Load/Store register (register offset)
+ bool RegisterOffset(size_t scale, u8 shift, Imm<2> size, Imm<1> opc_1, Imm<1> opc_0, Reg Rm,
+ Imm<3> option, Reg Rn, Reg Rt);
+ bool STRx_reg(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S, Reg Rn,
+ Reg Rt) override;
+ bool LDRx_reg(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S, Reg Rn,
+ Reg Rt) override;
+
+ bool SIMDOffset(size_t scale, u8 shift, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Vec Vt);
+ bool STR_reg_fpsimd(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S, Reg Rn,
+ Vec Vt) override;
+ bool LDR_reg_fpsimd(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S, Reg Rn,
+ Vec Vt) override;
+
+private:
+ Core::Memory::Memory& m_memory;
+ std::span<u64, 31> m_regs;
+ std::span<u128, 32> m_fpsimd_regs;
+ u64& m_sp;
+ const u64& m_pc;
+};
+
+std::optional<u64> MatchAndExecuteOneInstruction(Core::Memory::Memory& memory, mcontext_t* context,
+ fpsimd_context* fpsimd_context);
+
+} // namespace Core
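
For orientation, the entry point declared here is driven from the SIGBUS path in arm_nce.cpp: the handler passes the signal's mcontext and FP/SIMD state in, and on success writes the returned address back into the saved pc. A hedged caller sketch follows; GetFloatingPointState is assumed to be the helper from arm_nce.cpp's anonymous namespace and is only forward-declared here.

#include <ucontext.h>

#include "core/arm/nce/interpreter_visitor.h"

// Assumed helper (defined in arm_nce.cpp): locates the fpsimd_context in the signal frame.
fpsimd_context* GetFloatingPointState(mcontext_t& host_ctx);

bool TryInterpretFaultingInstruction(Core::Memory::Memory& memory, void* raw_context) {
    auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
    fpsimd_context* fpctx = GetFloatingPointState(host_ctx);

    // Decode and execute exactly one instruction at host_ctx.pc against guest memory.
    if (const auto next_pc = Core::MatchAndExecuteOneInstruction(memory, &host_ctx, fpctx)) {
        host_ctx.pc = *next_pc; // Resume the guest just past the emulated instruction.
        return true;
    }
    return false; // Caller falls back to the fatal-fault path.
}
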
diff --git a/src/core/arm/nce/visitor_base.h b/src/core/arm/nce/visitor_base.h
new file mode 100644
index 000000000..8fb032912
--- /dev/null
+++ b/src/core/arm/nce/visitor_base.h
@@ -0,0 +1,2777 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2023 merryhime <https://mary.rs>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <dynarmic/frontend/A64/a64_types.h>
+#include <dynarmic/frontend/imm.h>
+
+namespace Core {
+
+class VisitorBase {
+public:
+ using instruction_return_type = bool;
+
+ template <size_t BitSize>
+ using Imm = Dynarmic::Imm<BitSize>;
+ using Reg = Dynarmic::A64::Reg;
+ using Vec = Dynarmic::A64::Vec;
+ using Cond = Dynarmic::A64::Cond;
+
+ virtual ~VisitorBase() {}
+
+ virtual bool UnallocatedEncoding() {
+ return false;
+ }
+
+ // Data processing - Immediate - PC relative addressing
+ virtual bool ADR(Imm<2> immlo, Imm<19> immhi, Reg Rd) {
+ return false;
+ }
+ virtual bool ADRP(Imm<2> immlo, Imm<19> immhi, Reg Rd) {
+ return false;
+ }
+
+ // Data processing - Immediate - Add/Sub (with tag)
+ virtual bool ADDG(Imm<6> offset_imm, Imm<4> tag_offset, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SUBG(Imm<6> offset_imm, Imm<4> tag_offset, Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Data processing - Immediate - Add/Sub
+ virtual bool ADD_imm(bool sf, Imm<2> shift, Imm<12> imm12, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool ADDS_imm(bool sf, Imm<2> shift, Imm<12> imm12, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SUB_imm(bool sf, Imm<2> shift, Imm<12> imm12, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SUBS_imm(bool sf, Imm<2> shift, Imm<12> imm12, Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Data processing - Immediate - Logical
+ virtual bool AND_imm(bool sf, bool N, Imm<6> immr, Imm<6> imms, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool ORR_imm(bool sf, bool N, Imm<6> immr, Imm<6> imms, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool EOR_imm(bool sf, bool N, Imm<6> immr, Imm<6> imms, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool ANDS_imm(bool sf, bool N, Imm<6> immr, Imm<6> imms, Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Data processing - Immediate - Move Wide
+ virtual bool MOVN(bool sf, Imm<2> hw, Imm<16> imm16, Reg Rd) {
+ return false;
+ }
+ virtual bool MOVZ(bool sf, Imm<2> hw, Imm<16> imm16, Reg Rd) {
+ return false;
+ }
+ virtual bool MOVK(bool sf, Imm<2> hw, Imm<16> imm16, Reg Rd) {
+ return false;
+ }
+
+ // Data processing - Immediate - Bitfield
+ virtual bool SBFM(bool sf, bool N, Imm<6> immr, Imm<6> imms, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool BFM(bool sf, bool N, Imm<6> immr, Imm<6> imms, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool UBFM(bool sf, bool N, Imm<6> immr, Imm<6> imms, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool ASR_1(Imm<5> immr, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool ASR_2(Imm<6> immr, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SXTB_1(Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SXTB_2(Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SXTH_1(Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SXTH_2(Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SXTW(Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Data processing - Immediate - Extract
+ virtual bool EXTR(bool sf, bool N, Reg Rm, Imm<6> imms, Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Conditional branch
+ virtual bool B_cond(Imm<19> imm19, Cond cond) {
+ return false;
+ }
+
+ // Exception generation
+ virtual bool SVC(Imm<16> imm16) {
+ return false;
+ }
+ virtual bool HVC(Imm<16> imm16) {
+ return false;
+ }
+ virtual bool SMC(Imm<16> imm16) {
+ return false;
+ }
+ virtual bool BRK(Imm<16> imm16) {
+ return false;
+ }
+ virtual bool HLT(Imm<16> imm16) {
+ return false;
+ }
+ virtual bool DCPS1(Imm<16> imm16) {
+ return false;
+ }
+ virtual bool DCPS2(Imm<16> imm16) {
+ return false;
+ }
+ virtual bool DCPS3(Imm<16> imm16) {
+ return false;
+ }
+
+ // System
+ virtual bool MSR_imm(Imm<3> op1, Imm<4> CRm, Imm<3> op2) {
+ return false;
+ }
+ virtual bool HINT(Imm<4> CRm, Imm<3> op2) {
+ return false;
+ }
+ virtual bool NOP() {
+ return false;
+ }
+ virtual bool YIELD() {
+ return false;
+ }
+ virtual bool WFE() {
+ return false;
+ }
+ virtual bool WFI() {
+ return false;
+ }
+ virtual bool SEV() {
+ return false;
+ }
+ virtual bool SEVL() {
+ return false;
+ }
+ virtual bool XPAC_1(bool D, Reg Rd) {
+ return false;
+ }
+ virtual bool XPAC_2() {
+ return false;
+ }
+ virtual bool PACIA_1(bool Z, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool PACIA_2() {
+ return false;
+ }
+ virtual bool PACIB_1(bool Z, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool PACIB_2() {
+ return false;
+ }
+ virtual bool AUTIA_1(bool Z, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool AUTIA_2() {
+ return false;
+ }
+ virtual bool AUTIB_1(bool Z, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool AUTIB_2() {
+ return false;
+ }
+ virtual bool BTI(Imm<2> upper_op2) {
+ return false;
+ }
+ virtual bool ESB() {
+ return false;
+ }
+ virtual bool PSB() {
+ return false;
+ }
+ virtual bool TSB() {
+ return false;
+ }
+ virtual bool CSDB() {
+ return false;
+ }
+ virtual bool CLREX(Imm<4> CRm) {
+ return false;
+ }
+ virtual bool DSB(Imm<4> CRm) {
+ return false;
+ }
+ virtual bool SSBB() {
+ return false;
+ }
+ virtual bool PSSBB() {
+ return false;
+ }
+ virtual bool DMB(Imm<4> CRm) {
+ return false;
+ }
+ virtual bool ISB(Imm<4> CRm) {
+ return false;
+ }
+ virtual bool SYS(Imm<3> op1, Imm<4> CRn, Imm<4> CRm, Imm<3> op2, Reg Rt) {
+ return false;
+ }
+ virtual bool SB() {
+ return false;
+ }
+ virtual bool MSR_reg(Imm<1> o0, Imm<3> op1, Imm<4> CRn, Imm<4> CRm, Imm<3> op2, Reg Rt) {
+ return false;
+ }
+ virtual bool SYSL(Imm<3> op1, Imm<4> CRn, Imm<4> CRm, Imm<3> op2, Reg Rt) {
+ return false;
+ }
+ virtual bool MRS(Imm<1> o0, Imm<3> op1, Imm<4> CRn, Imm<4> CRm, Imm<3> op2, Reg Rt) {
+ return false;
+ }
+
+ // System - Flag manipulation instructions
+ virtual bool CFINV() {
+ return false;
+ }
+ virtual bool RMIF(Imm<6> lsb, Reg Rn, Imm<4> mask) {
+ return false;
+ }
+ virtual bool SETF8(Reg Rn) {
+ return false;
+ }
+ virtual bool SETF16(Reg Rn) {
+ return false;
+ }
+
+ // System - Flag format instructions
+ virtual bool XAFlag() {
+ return false;
+ }
+ virtual bool AXFlag() {
+ return false;
+ }
+
+ // SYS: Data Cache
+ virtual bool DC_IVAC(Reg Rt) {
+ return false;
+ }
+ virtual bool DC_ISW(Reg Rt) {
+ return false;
+ }
+ virtual bool DC_CSW(Reg Rt) {
+ return false;
+ }
+ virtual bool DC_CISW(Reg Rt) {
+ return false;
+ }
+ virtual bool DC_ZVA(Reg Rt) {
+ return false;
+ }
+ virtual bool DC_CVAC(Reg Rt) {
+ return false;
+ }
+ virtual bool DC_CVAU(Reg Rt) {
+ return false;
+ }
+ virtual bool DC_CVAP(Reg Rt) {
+ return false;
+ }
+ virtual bool DC_CIVAC(Reg Rt) {
+ return false;
+ }
+
+ // SYS: Instruction Cache
+ virtual bool IC_IALLU() {
+ return false;
+ }
+ virtual bool IC_IALLUIS() {
+ return false;
+ }
+ virtual bool IC_IVAU(Reg Rt) {
+ return false;
+ }
+
+ // Unconditional branch (Register)
+ virtual bool BR(Reg Rn) {
+ return false;
+ }
+ virtual bool BRA(bool Z, bool M, Reg Rn, Reg Rm) {
+ return false;
+ }
+ virtual bool BLR(Reg Rn) {
+ return false;
+ }
+ virtual bool BLRA(bool Z, bool M, Reg Rn, Reg Rm) {
+ return false;
+ }
+ virtual bool RET(Reg Rn) {
+ return false;
+ }
+ virtual bool RETA(bool M) {
+ return false;
+ }
+ virtual bool ERET() {
+ return false;
+ }
+ virtual bool ERETA(bool M) {
+ return false;
+ }
+ virtual bool DRPS() {
+ return false;
+ }
+
+ // Unconditional branch (immediate)
+ virtual bool B_uncond(Imm<26> imm26) {
+ return false;
+ }
+ virtual bool BL(Imm<26> imm26) {
+ return false;
+ }
+
+ // Compare and branch (immediate)
+ virtual bool CBZ(bool sf, Imm<19> imm19, Reg Rt) {
+ return false;
+ }
+ virtual bool CBNZ(bool sf, Imm<19> imm19, Reg Rt) {
+ return false;
+ }
+ virtual bool TBZ(Imm<1> b5, Imm<5> b40, Imm<14> imm14, Reg Rt) {
+ return false;
+ }
+ virtual bool TBNZ(Imm<1> b5, Imm<5> b40, Imm<14> imm14, Reg Rt) {
+ return false;
+ }
+
+ // Loads and stores - Advanced SIMD Load/Store multiple structures
+ virtual bool STx_mult_1(bool Q, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool STx_mult_2(bool Q, Reg Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LDx_mult_1(bool Q, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LDx_mult_2(bool Q, Reg Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+
+ // Loads and stores - Advanced SIMD Load/Store single structures
+ virtual bool ST1_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool ST1_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn,
+ Vec Vt) {
+ return false;
+ }
+ virtual bool ST3_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool ST3_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn,
+ Vec Vt) {
+ return false;
+ }
+ virtual bool ST2_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool ST2_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn,
+ Vec Vt) {
+ return false;
+ }
+ virtual bool ST4_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool ST4_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn,
+ Vec Vt) {
+ return false;
+ }
+ virtual bool LD1_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LD1_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn,
+ Vec Vt) {
+ return false;
+ }
+ virtual bool LD3_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LD3_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn,
+ Vec Vt) {
+ return false;
+ }
+ virtual bool LD1R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LD1R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LD3R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LD3R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LD2_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LD2_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn,
+ Vec Vt) {
+ return false;
+ }
+ virtual bool LD4_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LD4_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn,
+ Vec Vt) {
+ return false;
+ }
+ virtual bool LD2R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LD2R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LD4R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LD4R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
+ return false;
+ }
+
+ // Loads and stores - Load/Store Exclusive
+ virtual bool STXR(Imm<2> size, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STLXR(Imm<2> size, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STXP(Imm<1> size, Reg Rs, Reg Rt2, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STLXP(Imm<1> size, Reg Rs, Reg Rt2, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDXR(Imm<2> size, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDAXR(Imm<2> size, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDXP(Imm<1> size, Reg Rt2, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDAXP(Imm<1> size, Reg Rt2, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STLLR(Imm<2> size, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STLR(Imm<2> size, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDLAR(Imm<2> size, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDAR(Imm<2> size, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool CASP(bool sz, bool L, Reg Rs, bool o0, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool CASB(bool L, Reg Rs, bool o0, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool CASH(bool L, Reg Rs, bool o0, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool CAS(bool sz, bool L, Reg Rs, bool o0, Reg Rn, Reg Rt) {
+ return false;
+ }
+
+ // Loads and stores - Load register (literal)
+ virtual bool LDR_lit_gen(bool opc_0, Imm<19> imm19, Reg Rt) {
+ return false;
+ }
+ virtual bool LDR_lit_fpsimd(Imm<2> opc, Imm<19> imm19, Vec Vt) {
+ return false;
+ }
+ virtual bool LDRSW_lit(Imm<19> imm19, Reg Rt) {
+ return false;
+ }
+ virtual bool PRFM_lit(Imm<19> imm19, Imm<5> prfop) {
+ return false;
+ }
+
+ // Loads and stores - Load/Store no-allocate pair
+ virtual bool STNP_LDNP_gen(Imm<1> upper_opc, Imm<1> L, Imm<7> imm7, Reg Rt2, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STNP_LDNP_fpsimd(Imm<2> opc, Imm<1> L, Imm<7> imm7, Vec Vt2, Reg Rn, Vec Vt) {
+ return false;
+ }
+
+ // Loads and stores - Load/Store register pair
+ virtual bool STP_LDP_gen(Imm<2> opc, bool not_postindex, bool wback, Imm<1> L, Imm<7> imm7,
+ Reg Rt2, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STP_LDP_fpsimd(Imm<2> opc, bool not_postindex, bool wback, Imm<1> L, Imm<7> imm7,
+ Vec Vt2, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool STGP_1(Imm<7> offset_imm, Reg Rt2, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STGP_2(Imm<7> offset_imm, Reg Rt2, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STGP_3(Imm<7> offset_imm, Reg Rt2, Reg Rn, Reg Rt) {
+ return false;
+ }
+
+ // Loads and stores - Load/Store register (immediate)
+ virtual bool STRx_LDRx_imm_1(Imm<2> size, Imm<2> opc, Imm<9> imm9, bool not_postindex, Reg Rn,
+ Reg Rt) {
+ return false;
+ }
+ virtual bool STRx_LDRx_imm_2(Imm<2> size, Imm<2> opc, Imm<12> imm12, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STURx_LDURx(Imm<2> size, Imm<2> opc, Imm<9> imm9, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool PRFM_imm(Imm<12> imm12, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool PRFM_unscaled_imm(Imm<9> imm9, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, bool not_postindex,
+ Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool STR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LDR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, bool not_postindex,
+ Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LDR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool STUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
+ return false;
+ }
+ virtual bool LDUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
+ return false;
+ }
+
+ // Loads and stores - Load/Store register (unprivileged)
+ virtual bool STTRB(Imm<9> imm9, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDTRB(Imm<9> imm9, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDTRSB(Imm<2> opc, Imm<9> imm9, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STTRH(Imm<9> imm9, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDTRH(Imm<9> imm9, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDTRSH(Imm<2> opc, Imm<9> imm9, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STTR(Imm<2> size, Imm<9> imm9, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDTR(Imm<2> size, Imm<9> imm9, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDTRSW(Imm<9> imm9, Reg Rn, Reg Rt) {
+ return false;
+ }
+
+ // Loads and stores - Atomic memory options
+ virtual bool LDADDB(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDCLRB(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDEORB(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDSETB(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDSMAXB(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDSMINB(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDUMAXB(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDUMINB(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool SWPB(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDAPRB(Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDADDH(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDCLRH(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDEORH(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDSETH(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDSMAXH(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDSMINH(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDUMAXH(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDUMINH(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool SWPH(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDAPRH(Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDADD(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDCLR(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDEOR(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDSET(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDSMAX(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDSMIN(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDUMAX(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDUMIN(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool SWP(bool A, bool R, Reg Rs, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool LDAPR(Reg Rn, Reg Rt) {
+ return false;
+ }
+
+ // Loads and stores - Load/Store register (register offset)
+ virtual bool STRx_reg(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S, Reg Rn,
+ Reg Rt) {
+ return false;
+ }
+ virtual bool LDRx_reg(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S, Reg Rn,
+ Reg Rt) {
+ return false;
+ }
+ virtual bool STR_reg_fpsimd(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S, Reg Rn,
+ Vec Vt) {
+ return false;
+ }
+ virtual bool LDR_reg_fpsimd(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S, Reg Rn,
+ Vec Vt) {
+ return false;
+ }
+
+ // Loads and stores - Load/Store memory tags
+ virtual bool STG_1(Imm<9> imm9, Reg Rn) {
+ return false;
+ }
+ virtual bool STG_2(Imm<9> imm9, Reg Rn) {
+ return false;
+ }
+ virtual bool STG_3(Imm<9> imm9, Reg Rn) {
+ return false;
+ }
+ virtual bool LDG(Imm<9> offset_imm, Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STZG_1(Imm<9> offset_imm, Reg Rn) {
+ return false;
+ }
+ virtual bool STZG_2(Imm<9> offset_imm, Reg Rn) {
+ return false;
+ }
+ virtual bool STZG_3(Imm<9> offset_imm, Reg Rn) {
+ return false;
+ }
+ virtual bool ST2G_1(Imm<9> offset_imm, Reg Rn) {
+ return false;
+ }
+ virtual bool ST2G_2(Imm<9> offset_imm, Reg Rn) {
+ return false;
+ }
+ virtual bool ST2G_3(Imm<9> offset_imm, Reg Rn) {
+ return false;
+ }
+ virtual bool STGV(Reg Rn, Reg Rt) {
+ return false;
+ }
+ virtual bool STZ2G_1(Imm<9> offset_imm, Reg Rn) {
+ return false;
+ }
+ virtual bool STZ2G_2(Imm<9> offset_imm, Reg Rn) {
+ return false;
+ }
+ virtual bool STZ2G_3(Imm<9> offset_imm, Reg Rn) {
+ return false;
+ }
+ virtual bool LDGV(Reg Rn, Reg Rt) {
+ return false;
+ }
+
+ // Loads and stores - Load/Store register (pointer authentication)
+ virtual bool LDRA(bool M, bool S, Imm<9> imm9, bool W, Reg Rn, Reg Rt) {
+ return false;
+ }
+
+ // Data Processing - Register - 2 source
+ virtual bool UDIV(bool sf, Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SDIV(bool sf, Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool LSLV(bool sf, Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool LSRV(bool sf, Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool ASRV(bool sf, Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool RORV(bool sf, Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool CRC32(bool sf, Reg Rm, Imm<2> sz, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool CRC32C(bool sf, Reg Rm, Imm<2> sz, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool PACGA(Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SUBP(Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool IRG(Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool GMI(Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SUBPS(Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Data Processing - Register - 1 source
+ virtual bool RBIT_int(bool sf, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool REV16_int(bool sf, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool REV(bool sf, bool opc_0, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool CLZ_int(bool sf, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool CLS_int(bool sf, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool REV32_int(Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool PACDA(bool Z, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool PACDB(bool Z, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool AUTDA(bool Z, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool AUTDB(bool Z, Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Data Processing - Register - Logical (shifted register)
+ virtual bool AND_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool BIC_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool ORR_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool ORN_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool EOR_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool EON(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool ANDS_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool BICS(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Data Processing - Register - Add/Sub (shifted register)
+ virtual bool ADD_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool ADDS_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SUB_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SUBS_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Data Processing - Register - Add/Sub (extended register)
+ virtual bool ADD_ext(bool sf, Reg Rm, Imm<3> option, Imm<3> imm3, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool ADDS_ext(bool sf, Reg Rm, Imm<3> option, Imm<3> imm3, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SUB_ext(bool sf, Reg Rm, Imm<3> option, Imm<3> imm3, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SUBS_ext(bool sf, Reg Rm, Imm<3> option, Imm<3> imm3, Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Data Processing - Register - Add/Sub (with carry)
+ virtual bool ADC(bool sf, Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool ADCS(bool sf, Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SBC(bool sf, Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SBCS(bool sf, Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Data Processing - Register - Conditional compare
+ virtual bool CCMN_reg(bool sf, Reg Rm, Cond cond, Reg Rn, Imm<4> nzcv) {
+ return false;
+ }
+ virtual bool CCMP_reg(bool sf, Reg Rm, Cond cond, Reg Rn, Imm<4> nzcv) {
+ return false;
+ }
+ virtual bool CCMN_imm(bool sf, Imm<5> imm5, Cond cond, Reg Rn, Imm<4> nzcv) {
+ return false;
+ }
+ virtual bool CCMP_imm(bool sf, Imm<5> imm5, Cond cond, Reg Rn, Imm<4> nzcv) {
+ return false;
+ }
+
+ // Data Processing - Register - Conditional select
+ virtual bool CSEL(bool sf, Reg Rm, Cond cond, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool CSINC(bool sf, Reg Rm, Cond cond, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool CSINV(bool sf, Reg Rm, Cond cond, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool CSNEG(bool sf, Reg Rm, Cond cond, Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Data Processing - Register - 3 source
+ virtual bool MADD(bool sf, Reg Rm, Reg Ra, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool MSUB(bool sf, Reg Rm, Reg Ra, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SMADDL(Reg Rm, Reg Ra, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SMSUBL(Reg Rm, Reg Ra, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool SMULH(Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool UMADDL(Reg Rm, Reg Ra, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool UMSUBL(Reg Rm, Reg Ra, Reg Rn, Reg Rd) {
+ return false;
+ }
+ virtual bool UMULH(Reg Rm, Reg Rn, Reg Rd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - AES
+ virtual bool AESE(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool AESD(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool AESMC(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool AESIMC(Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SHA
+ virtual bool SHA1C(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHA1P(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHA1M(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHA1SU0(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHA256H(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHA256H2(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHA256SU1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHA1H(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHA1SU1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHA256SU0(Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Scalar copy
+ virtual bool DUP_elt_1(Imm<5> imm5, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Scalar three same
+ virtual bool FMULX_vec_1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMULX_vec_2(bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMEQ_reg_1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMEQ_reg_2(bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRECPS_1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRECPS_2(bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRSQRTS_1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRSQRTS_2(bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGE_reg_1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGE_reg_2(bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FACGE_1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FACGE_2(bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FABD_1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FABD_2(bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGT_reg_1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGT_reg_2(bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FACGT_1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FACGT_2(bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Scalar two register misc FP16
+ virtual bool FCVTNS_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTMS_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTAS_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SCVTF_int_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGT_zero_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMEQ_zero_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMLT_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTPS_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZS_int_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRECPE_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRECPX_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTNU_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTMU_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTAU_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UCVTF_int_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGE_zero_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMLE_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTPU_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZU_int_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRSQRTE_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Scalar two register misc
+ virtual bool FCVTNS_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTMS_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTAS_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SCVTF_int_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGT_zero_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMEQ_zero_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMLT_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTPS_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZS_int_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRECPE_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRECPX_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTNU_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTMU_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTAU_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UCVTF_int_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGE_zero_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMLE_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTPU_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZU_int_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRSQRTE_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Two register misc FP16
+ virtual bool FCVTNS_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTMS_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTAS_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SCVTF_int_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGT_zero_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMEQ_zero_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMLT_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTPS_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZS_int_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRECPE_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTNU_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTMU_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTAU_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UCVTF_int_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGE_zero_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMLE_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTPU_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZU_int_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRSQRTE_3(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Two register misc
+ virtual bool FCVTNS_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTMS_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTAS_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SCVTF_int_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGT_zero_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMEQ_zero_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMLT_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTPS_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZS_int_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRECPE_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTNU_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTMU_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTAU_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UCVTF_int_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGE_zero_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMLE_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTPU_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZU_int_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRSQRTE_4(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Three same extra
+ virtual bool SQRDMLAH_vec_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRDMLAH_vec_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRDMLSH_vec_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRDMLSH_vec_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Scalar two-register misc
+ virtual bool SUQADD_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQABS_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMGT_zero_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMEQ_zero_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMLT_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool ABS_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQXTN_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool USQADD_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQNEG_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMGE_zero_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMLE_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool NEG_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQXTUN_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQXTN_1(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTXN_1(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Scalar pairwise
+ virtual bool ADDP_pair(Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXNMP_pair_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXNMP_pair_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FADDP_pair_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FADDP_pair_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXP_pair_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXP_pair_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINNMP_pair_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINNMP_pair_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINP_pair_1(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINP_pair_2(bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Scalar three different
+ virtual bool SQDMLAL_vec_1(Imm<2> size, Reg Rm, Reg Rn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMLSL_vec_1(Imm<2> size, Reg Rm, Reg Rn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMULL_vec_1(Imm<2> size, Reg Rm, Reg Rn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Scalar three same
+ virtual bool SQADD_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQSUB_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMGT_reg_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMGE_reg_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SSHL_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQSHL_reg_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SRSHL_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRSHL_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool ADD_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMTST_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMULH_vec_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQADD_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQSUB_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMHI_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMHS_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool USHL_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQSHL_reg_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool URSHL_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQRSHL_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SUB_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMEQ_reg_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRDMULH_vec_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Scalar shift by immediate
+ virtual bool SSHR_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SSRA_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SRSHR_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SRSRA_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHL_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQSHL_imm_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQSHRN_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRSHRN_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SCVTF_fix_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZS_fix_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool USHR_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool USRA_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool URSHR_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool URSRA_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SRI_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SLI_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQSHLU_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQSHL_imm_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQSHRUN_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRSHRUN_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQSHRN_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQRSHRN_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UCVTF_fix_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZU_fix_1(Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Scalar x indexed element
+ virtual bool SQDMLAL_elt_1(Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMLSL_elt_1(Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMULL_elt_1(Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMULH_elt_1(Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool SQRDMULH_elt_1(Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool FMLA_elt_1(Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLA_elt_2(bool sz, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLS_elt_1(Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLS_elt_2(bool sz, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMUL_elt_1(Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMUL_elt_2(bool sz, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRDMLAH_elt_1(Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool SQRDMLSH_elt_1(Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool FMULX_elt_1(Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMULX_elt_2(bool sz, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Table Lookup
+ virtual bool TBL(bool Q, Vec Vm, Imm<2> len, size_t Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool TBX(bool Q, Vec Vm, Imm<2> len, size_t Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Permute
+ virtual bool UZP1(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool TRN1(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool ZIP1(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UZP2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool TRN2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool ZIP2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Extract
+ virtual bool EXT(bool Q, Vec Vm, Imm<4> imm4, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Copy
+ virtual bool DUP_elt_2(bool Q, Imm<5> imm5, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool DUP_gen(bool Q, Imm<5> imm5, Reg Rn, Vec Vd) {
+ return false;
+ }
+ virtual bool SMOV(bool Q, Imm<5> imm5, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool UMOV(bool Q, Imm<5> imm5, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool INS_gen(Imm<5> imm5, Reg Rn, Vec Vd) {
+ return false;
+ }
+ virtual bool INS_elt(Imm<5> imm5, Imm<4> imm4, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Three same FP16
+ virtual bool FMULX_vec_3(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMEQ_reg_3(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRECPS_3(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRSQRTS_3(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGE_reg_3(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FACGE_3(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FABD_3(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGT_reg_3(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FACGT_3(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXNM_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLA_vec_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FADD_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAX_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINNM_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLS_vec_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FSUB_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMIN_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXNMP_vec_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FADDP_vec_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMUL_vec_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXP_vec_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FDIV_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINNMP_vec_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINP_vec_1(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Three same extra
+ virtual bool SDOT_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UDOT_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMLA_vec(bool Q, Imm<2> size, Vec Vm, Imm<2> rot, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCADD_vec(bool Q, Imm<2> size, Vec Vm, Imm<1> rot, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Two register misc
+ virtual bool REV64_asimd(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool REV16_asimd(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SADDLP(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CLS_asimd(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CNT(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SADALP(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool XTN(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTN(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTL(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool URECPE(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool REV32_asimd(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UADDLP(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CLZ_asimd(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UADALP(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHLL(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool NOT(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool RBIT_asimd(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool URSQRTE(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SUQADD_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQABS_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMGT_zero_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMEQ_zero_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMLT_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool ABS_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQXTN_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool USQADD_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQNEG_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMGE_zero_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMLE_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool NEG_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQXTUN_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQXTN_2(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTXN_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTN_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTN_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTM_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTM_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FABS_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FABS_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTP_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTP_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTZ_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTZ_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTA_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTA_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTX_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTX_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FNEG_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FNEG_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTI_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTI_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FSQRT_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FSQRT_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINT32X_1(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINT64X_1(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINT32Z_1(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINT64Z_1(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD across lanes
+ virtual bool SADDLV(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SMAXV(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SMINV(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool ADDV(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXNMV_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXNMV_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXV_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXV_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINNMV_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINNMV_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINV_1(bool Q, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINV_2(bool Q, bool sz, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UADDLV(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UMAXV(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UMINV(bool Q, Imm<2> size, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD three different
+ virtual bool SADDL(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SADDW(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SSUBL(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SSUBW(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool ADDHN(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SABAL(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SUBHN(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SABDL(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SMLAL_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SMLSL_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SMULL_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool PMULL(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UADDL(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UADDW(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool USUBL(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool USUBW(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool RADDHN(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UABAL(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool RSUBHN(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UABDL(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UMLAL_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UMLSL_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UMULL_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMLAL_vec_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMLSL_vec_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMULL_vec_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD three same
+ virtual bool SHADD(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SRHADD(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHSUB(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SMAX(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SMIN(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SABD(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SABA(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool MLA_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool MUL_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SMAXP(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SMINP(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool ADDP_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLAL_vec_1(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLAL_vec_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool AND_asimd(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool BIC_asimd_reg(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLSL_vec_1(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLSL_vec_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool ORR_asimd_reg(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool ORN_asimd(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UHADD(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool URHADD(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UHSUB(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UMAX(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UMIN(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UABD(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UABA(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool MLS_vec(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool PMUL(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UMAXP(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UMINP(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool EOR_asimd(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool BSL(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool BIT(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool BIF(bool Q, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXNM_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLA_vec_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FADD_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAX_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINNM_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLS_vec_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FSUB_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMIN_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXNMP_vec_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FADDP_vec_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMUL_vec_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXP_vec_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FDIV_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINNMP_vec_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINP_vec_2(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMULX_vec_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMEQ_reg_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRECPS_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRSQRTS_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGE_reg_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FACGE_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FABD_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCMGT_reg_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FACGT_4(bool Q, bool sz, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQADD_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQSUB_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMGT_reg_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMGE_reg_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SSHL_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQSHL_reg_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SRSHL_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRSHL_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool ADD_vector(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMTST_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMULH_vec_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQADD_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQSUB_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMHI_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMHS_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool USHL_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQSHL_reg_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool URSHL_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQRSHL_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SUB_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool CMEQ_reg_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRDMULH_vec_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD modified immediate
+ virtual bool MOVI(bool Q, bool op, Imm<1> a, Imm<1> b, Imm<1> c, Imm<4> cmode, Imm<1> d,
+ Imm<1> e, Imm<1> f, Imm<1> g, Imm<1> h, Vec Vd) {
+ return false;
+ }
+ virtual bool FMOV_2(bool Q, bool op, Imm<1> a, Imm<1> b, Imm<1> c, Imm<1> d, Imm<1> e, Imm<1> f,
+ Imm<1> g, Imm<1> h, Vec Vd) {
+ return false;
+ }
+ virtual bool FMOV_3(bool Q, Imm<1> a, Imm<1> b, Imm<1> c, Imm<1> d, Imm<1> e, Imm<1> f,
+ Imm<1> g, Imm<1> h, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD Shift by immediate
+ virtual bool SSHR_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SSRA_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SRSHR_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SRSRA_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHL_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQSHL_imm_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHRN(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool RSHRN(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQSHRN_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRSHRN_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SSHLL(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SCVTF_fix_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZS_fix_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool USHR_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool USRA_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool URSHR_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool URSRA_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SRI_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SLI_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQSHLU_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQSHL_imm_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQSHRUN_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRSHRUN_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQSHRN_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UQRSHRN_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool USHLL(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UCVTF_fix_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZU_fix_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SIMD vector x indexed element
+ virtual bool SMLAL_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMLAL_elt_2(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H,
+ Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SMLSL_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMLSL_elt_2(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H,
+ Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool MUL_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool SMULL_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMULL_elt_2(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H,
+ Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQDMULH_elt_2(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H,
+ Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SQRDMULH_elt_2(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H,
+ Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SDOT_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool FMLA_elt_3(bool Q, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLA_elt_4(bool Q, bool sz, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool FMLS_elt_3(bool Q, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMLS_elt_4(bool Q, bool sz, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool FMUL_elt_3(bool Q, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMUL_elt_4(bool Q, bool sz, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool FMLAL_elt_1(bool Q, bool sz, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool FMLAL_elt_2(bool Q, bool sz, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool FMLSL_elt_1(bool Q, bool sz, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool FMLSL_elt_2(bool Q, bool sz, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool MLA_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool UMLAL_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool MLS_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool UMLSL_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool UMULL_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool SQRDMLAH_elt_2(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H,
+ Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool UDOT_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool SQRDMLSH_elt_2(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H,
+ Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMULX_elt_3(bool Q, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMULX_elt_4(bool Q, bool sz, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<1> H, Vec Vn,
+ Vec Vd) {
+ return false;
+ }
+ virtual bool FCMLA_elt(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vmlo, Imm<2> rot,
+ Imm<1> H, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Cryptographic three register
+ virtual bool SM3TT1A(Vec Vm, Imm<2> imm2, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SM3TT1B(Vec Vm, Imm<2> imm2, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SM3TT2A(Vec Vm, Imm<2> imm2, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SM3TT2B(Vec Vm, Imm<2> imm2, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SHA512 three register
+ virtual bool SHA512H(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHA512H2(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SHA512SU1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool RAX1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool XAR(Vec Vm, Imm<6> imm6, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SM3PARTW1(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SM3PARTW2(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SM4EKEY(Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Cryptographic four register
+ virtual bool EOR3(Vec Vm, Vec Va, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool BCAX(Vec Vm, Vec Va, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SM3SS1(Vec Vm, Vec Va, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - SHA512 two register
+ virtual bool SHA512SU0(Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool SM4E(Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Conversion between floating point and fixed point
+ virtual bool SCVTF_float_fix(bool sf, Imm<2> type, Imm<6> scale, Reg Rn, Vec Vd) {
+ return false;
+ }
+ virtual bool UCVTF_float_fix(bool sf, Imm<2> type, Imm<6> scale, Reg Rn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTZS_float_fix(bool sf, Imm<2> type, Imm<6> scale, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool FCVTZU_float_fix(bool sf, Imm<2> type, Imm<6> scale, Vec Vn, Reg Rd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Conversion between floating point and integer
+ virtual bool FCVTNS_float(bool sf, Imm<2> type, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool FCVTNU_float(bool sf, Imm<2> type, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool SCVTF_float_int(bool sf, Imm<2> type, Reg Rn, Vec Vd) {
+ return false;
+ }
+ virtual bool UCVTF_float_int(bool sf, Imm<2> type, Reg Rn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVTAS_float(bool sf, Imm<2> type, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool FCVTAU_float(bool sf, Imm<2> type, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool FMOV_float_gen(bool sf, Imm<2> type, Imm<1> rmode_0, Imm<1> opc_0, size_t n,
+ size_t d) {
+ return false;
+ }
+ virtual bool FCVTPS_float(bool sf, Imm<2> type, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool FCVTPU_float(bool sf, Imm<2> type, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool FCVTMS_float(bool sf, Imm<2> type, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool FCVTMU_float(bool sf, Imm<2> type, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool FCVTZS_float_int(bool sf, Imm<2> type, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool FCVTZU_float_int(bool sf, Imm<2> type, Vec Vn, Reg Rd) {
+ return false;
+ }
+ virtual bool FJCVTZS(Vec Vn, Reg Rd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Floating point data processing
+ virtual bool FMOV_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FABS_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FNEG_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FSQRT_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FCVT_float(Imm<2> type, Imm<2> opc, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTN_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTP_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTM_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTZ_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTA_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTX_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINTI_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINT32X_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINT64X_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINT32Z_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FRINT64Z_float(Imm<2> type, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Floating point compare
+ virtual bool FCMP_float(Imm<2> type, Vec Vm, Vec Vn, bool cmp_with_zero) {
+ return false;
+ }
+ virtual bool FCMPE_float(Imm<2> type, Vec Vm, Vec Vn, bool cmp_with_zero) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Floating point immediate
+ virtual bool FMOV_float_imm(Imm<2> type, Imm<8> imm8, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Floating point conditional compare
+ virtual bool FCCMP_float(Imm<2> type, Vec Vm, Cond cond, Vec Vn, Imm<4> nzcv) {
+ return false;
+ }
+ virtual bool FCCMPE_float(Imm<2> type, Vec Vm, Cond cond, Vec Vn, Imm<4> nzcv) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Floating point data processing two register
+ virtual bool FMUL_float(Imm<2> type, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FDIV_float(Imm<2> type, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FADD_float(Imm<2> type, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FSUB_float(Imm<2> type, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAX_float(Imm<2> type, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMIN_float(Imm<2> type, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMAXNM_float(Imm<2> type, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMINNM_float(Imm<2> type, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FNMUL_float(Imm<2> type, Vec Vm, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Floating point conditional select
+ virtual bool FCSEL_float(Imm<2> type, Vec Vm, Cond cond, Vec Vn, Vec Vd) {
+ return false;
+ }
+
+ // Data Processing - FP and SIMD - Floating point data processing three register
+ virtual bool FMADD_float(Imm<2> type, Vec Vm, Vec Va, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FMSUB_float(Imm<2> type, Vec Vm, Vec Va, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FNMADD_float(Imm<2> type, Vec Vm, Vec Va, Vec Vn, Vec Vd) {
+ return false;
+ }
+ virtual bool FNMSUB_float(Imm<2> type, Vec Vm, Vec Va, Vec Vn, Vec Vd) {
+ return false;
+ }
+};
+
+} // namespace Core
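
Every handler above is a default stub that returns false, meaning "this encoding is not handled"; a concrete visitor derives from this class, overrides only the encodings it can service, and returns true from those. A minimal sketch of such an override follows, assuming the base class declared in this header is named VisitorBase and that Reg converts to a register index; the ExampleVisitor class, its register array, and the Get/Set helpers are hypothetical illustrations, not part of this patch.

// Hypothetical sketch: a visitor that services UDIV and leaves every other
// encoding to the base-class stubs (which report "not handled").
class ExampleVisitor : public VisitorBase {
public:
    // m_regs stands in for whatever guest register file the real visitor reads and writes.
    explicit ExampleVisitor(std::array<u64, 32>& regs) : m_regs{regs} {}

    bool UDIV(bool sf, Reg Rm, Reg Rn, Reg Rd) override {
        // AArch64 UDIV: sf selects the 64-bit (X) or 32-bit (W) form; division by zero yields 0.
        const u64 n = Get(Rn, sf);
        const u64 m = Get(Rm, sf);
        Set(Rd, sf, m == 0 ? 0 : n / m);
        return true; // handled
    }

private:
    // Register 31 is the zero register for these operands; that detail is elided in this sketch.
    u64 Get(Reg r, bool sf) const {
        const u64 value = m_regs[static_cast<size_t>(r)];
        return sf ? value : (value & 0xFFFFFFFFULL); // the 32-bit form reads W registers
    }
    void Set(Reg r, bool sf, u64 value) {
        m_regs[static_cast<size_t>(r)] = sf ? value : (value & 0xFFFFFFFFULL);
    }

    std::array<u64, 32>& m_regs; // requires <array>; u64 is the codebase's fixed-width alias
};

The decoder side, matching a 32-bit instruction word to one of these handlers, is not shown here; it presumably lives in the interpreter_visitor sources added by this patch.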