author     Lioncash <mathew1800@gmail.com>   2015-12-06 21:11:09 +0100
committer  Lioncash <mathew1800@gmail.com>   2015-12-06 21:14:51 +0100
commit     56e22e6aac513e65acfec208a5d5ba2170eceb7a (patch)
tree       27fa840b335e0898bd7220638e5e6b9ffeef79f9 /src/core/arm
parent     arm_interface: Make GetNumInstructions const (diff)
Diffstat (limited to 'src/core/arm')
-rw-r--r--  src/core/arm/dyncom/arm_dyncom_run.h         |  4
-rw-r--r--  src/core/arm/dyncom/arm_dyncom_thumb.h       |  2
-rw-r--r--  src/core/arm/skyeye_common/vfp/vfp_helper.h  | 32
3 files changed, 19 insertions, 19 deletions
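
All three files follow the same one-line pattern: the redundant "static" qualifier is dropped from inline functions defined in these headers. As a hedged illustration of why that is safe (the names below are hypothetical, not taken from the commit): a namespace-scope inline function in C++ has external linkage, so the linker folds the identical definitions from every translation unit into one entity, whereas static inline forces each translation unit that includes the header to carry its own internal copy.

#include <cstdint>
using u32 = std::uint32_t;

// One shared definition across all translation units that include the header.
inline u32 WordAlign(u32 value) {
    return value & ~0x3u;
}

// static inline would instead give every including .cpp its own private copy.
static inline u32 WordAlignPerTU(u32 value) {
    return value & ~0x3u;
}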
diff --git a/src/core/arm/dyncom/arm_dyncom_run.h b/src/core/arm/dyncom/arm_dyncom_run.h
index 85c8d798e..8eb694fee 100644
--- a/src/core/arm/dyncom/arm_dyncom_run.h
+++ b/src/core/arm/dyncom/arm_dyncom_run.h
@@ -30,7 +30,7 @@
* @return If the PC is being read, then the word-aligned PC value is returned.
* If the PC is not being read, then the value stored in the register is returned.
*/
-static inline u32 CHECK_READ_REG15_WA(const ARMul_State* cpu, int Rn) {
+inline u32 CHECK_READ_REG15_WA(const ARMul_State* cpu, int Rn) {
return (Rn == 15) ? ((cpu->Reg[15] & ~0x3) + cpu->GetInstructionSize() * 2) : cpu->Reg[Rn];
}
@@ -43,6 +43,6 @@ static inline u32 CHECK_READ_REG15_WA(const ARMul_State* cpu, int Rn) {
* @return If the PC is being read, then the incremented PC value is returned.
* If the PC is not being read, then the value stored in the register is returned.
*/
-static inline u32 CHECK_READ_REG15(const ARMul_State* cpu, int Rn) {
+inline u32 CHECK_READ_REG15(const ARMul_State* cpu, int Rn) {
return (Rn == 15) ? ((cpu->Reg[15] & ~0x1) + cpu->GetInstructionSize() * 2) : cpu->Reg[Rn];
}
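
The two helpers above differ only in the alignment mask applied before the pipeline offset is added. A minimal worked example of that arithmetic, with values chosen purely for illustration:

// Suppose Reg[15] = 0x1002 while executing Thumb code, so GetInstructionSize() == 2:
//   CHECK_READ_REG15_WA: (0x1002 & ~0x3) + 2 * 2 = 0x1000 + 4 = 0x1004  (word-aligned read)
//   CHECK_READ_REG15:    (0x1002 & ~0x1) + 2 * 2 = 0x1002 + 4 = 0x1006  (halfword-aligned read)
// In ARM mode GetInstructionSize() == 4 and Reg[15] is already word-aligned,
// so both helpers return the familiar "current instruction + 8".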
diff --git a/src/core/arm/dyncom/arm_dyncom_thumb.h b/src/core/arm/dyncom/arm_dyncom_thumb.h
index 447974363..c1be3c735 100644
--- a/src/core/arm/dyncom/arm_dyncom_thumb.h
+++ b/src/core/arm/dyncom/arm_dyncom_thumb.h
@@ -38,7 +38,7 @@ enum class ThumbDecodeStatus {
// Translates a Thumb mode instruction into its ARM equivalent.
ThumbDecodeStatus TranslateThumbInstruction(u32 addr, u32 instr, u32* ainstr, u32* inst_size);
-static inline u32 GetThumbInstruction(u32 instr, u32 address) {
+inline u32 GetThumbInstruction(u32 instr, u32 address) {
// Normally you would need to handle instruction endianness,
// however, it is fixed to little-endian on the MPCore, so
// there's no need to check for this beforehand.
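
The body of GetThumbInstruction falls outside this hunk; the comment implies it simply selects the 16-bit halfword addressed by address out of the little-endian 32-bit word that was fetched. A hypothetical sketch of that selection (illustrative only, not the function's actual body):

// Pick the halfword indicated by bit 1 of the address from a little-endian 32-bit fetch.
inline u32 SelectThumbHalfword(u32 fetched_word, u32 address) {
    return (address & 0x2) ? (fetched_word >> 16) : (fetched_word & 0xFFFF);
}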
diff --git a/src/core/arm/skyeye_common/vfp/vfp_helper.h b/src/core/arm/skyeye_common/vfp/vfp_helper.h
index 3358ff1d9..210972917 100644
--- a/src/core/arm/skyeye_common/vfp/vfp_helper.h
+++ b/src/core/arm/skyeye_common/vfp/vfp_helper.h
@@ -85,7 +85,7 @@ enum : u32 {
#define vfp_single(inst) (((inst) & 0x0000f00) == 0xa00)
-static inline u32 vfp_shiftright32jamming(u32 val, unsigned int shift)
+inline u32 vfp_shiftright32jamming(u32 val, unsigned int shift)
{
if (shift) {
if (shift < 32)
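
The "jamming" shifts are sticky right shifts: any nonzero bits shifted out are ORed back into bit 0 of the result, so later rounding code can still tell that precision was lost. A worked illustration under that reading (not code from the commit):

// val = 0x21, shift = 4:  0x21 >> 4 = 0x2, discarded bits = 0x1 (nonzero)  -> result 0x3
// val = 0x20, shift = 4:  0x20 >> 4 = 0x2, discarded bits = 0x0            -> result 0x2
// For shift >= 32 the value collapses to (val != 0), a pure sticky bit.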
@@ -96,7 +96,7 @@ static inline u32 vfp_shiftright32jamming(u32 val, unsigned int shift)
return val;
}
-static inline u64 vfp_shiftright64jamming(u64 val, unsigned int shift)
+inline u64 vfp_shiftright64jamming(u64 val, unsigned int shift)
{
if (shift) {
if (shift < 64)
@@ -107,7 +107,7 @@ static inline u64 vfp_shiftright64jamming(u64 val, unsigned int shift)
return val;
}
-static inline u32 vfp_hi64to32jamming(u64 val)
+inline u32 vfp_hi64to32jamming(u64 val)
{
u32 v;
u32 highval = val >> 32;
@@ -121,7 +121,7 @@ static inline u32 vfp_hi64to32jamming(u64 val)
return v;
}
-static inline void add128(u64* resh, u64* resl, u64 nh, u64 nl, u64 mh, u64 ml)
+inline void add128(u64* resh, u64* resl, u64 nh, u64 nl, u64 mh, u64 ml)
{
*resl = nl + ml;
*resh = nh + mh;
@@ -129,7 +129,7 @@ static inline void add128(u64* resh, u64* resl, u64 nh, u64 nl, u64 mh, u64 ml)
*resh += 1;
}
-static inline void sub128(u64* resh, u64* resl, u64 nh, u64 nl, u64 mh, u64 ml)
+inline void sub128(u64* resh, u64* resl, u64 nh, u64 nl, u64 mh, u64 ml)
{
*resl = nl - ml;
*resh = nh - mh;
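
add128 and sub128 build 128-bit addition and subtraction out of two 64-bit halves; the carry/borrow test that falls between these hunks is the usual unsigned-wraparound check. A self-contained sketch of the same idea, given a hypothetical name so it is not mistaken for the commit's code:

#include <cstdint>

// 128-bit addition from 64-bit halves: a carry out of the low half shows up
// as unsigned wraparound (result smaller than an operand) and is then
// propagated into the high half.
inline void Add128(std::uint64_t* resh, std::uint64_t* resl,
                   std::uint64_t nh, std::uint64_t nl,
                   std::uint64_t mh, std::uint64_t ml) {
    *resl = nl + ml;
    *resh = nh + mh;
    if (*resl < nl) // low half wrapped around
        *resh += 1;
}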
@@ -137,7 +137,7 @@ static inline void sub128(u64* resh, u64* resl, u64 nh, u64 nl, u64 mh, u64 ml)
*resh -= 1;
}
-static inline void mul64to128(u64* resh, u64* resl, u64 n, u64 m)
+inline void mul64to128(u64* resh, u64* resl, u64 n, u64 m)
{
u32 nh, nl, mh, ml;
u64 rh, rma, rmb, rl;
@@ -164,20 +164,20 @@ static inline void mul64to128(u64* resh, u64* resl, u64 n, u64 m)
*resh = rh;
}
-static inline void shift64left(u64* resh, u64* resl, u64 n)
+inline void shift64left(u64* resh, u64* resl, u64 n)
{
*resh = n >> 63;
*resl = n << 1;
}
-static inline u64 vfp_hi64multiply64(u64 n, u64 m)
+inline u64 vfp_hi64multiply64(u64 n, u64 m)
{
u64 rh, rl;
mul64to128(&rh, &rl, n, m);
return rh | (rl != 0);
}
-static inline u64 vfp_estimate_div128to64(u64 nh, u64 nl, u64 m)
+inline u64 vfp_estimate_div128to64(u64 nh, u64 nl, u64 m)
{
u64 mh, ml, remh, reml, termh, terml, z;
@@ -249,7 +249,7 @@ enum : u32 {
VFP_SNAN = (VFP_NAN|VFP_NAN_SIGNAL)
};
-static inline int vfp_single_type(const vfp_single* s)
+inline int vfp_single_type(const vfp_single* s)
{
int type = VFP_NUMBER;
if (s->exponent == 255) {
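
vfp_single_type classifies a float from its exponent and mantissa fields; the branches that follow this hunk implement the standard IEEE-754 cases, summarised here for reference against the VFP_* flags declared above:

// exponent == 255, mantissa == 0  -> infinity
// exponent == 255, mantissa != 0  -> NaN (quiet or signalling, per the top mantissa bit)
// exponent == 0,   mantissa == 0  -> zero
// exponent == 0,   mantissa != 0  -> denormal
// anything else                   -> normal number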
@@ -271,7 +271,7 @@ static inline int vfp_single_type(const vfp_single* s)
// Unpack a single-precision float. Note that this returns the magnitude
// of the single-precision float mantissa with the 1. if necessary,
// aligned to bit 30.
-static inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
+inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
{
s->sign = vfp_single_packed_sign(val) >> 16,
s->exponent = vfp_single_packed_exponent(val);
@@ -293,7 +293,7 @@ static inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
// Re-pack a single-precision float. This assumes that the float is
// already normalised such that the MSB is bit 30, _not_ bit 31.
-static inline s32 vfp_single_pack(const vfp_single* s)
+inline s32 vfp_single_pack(const vfp_single* s)
{
u32 val = (s->sign << 16) +
(s->exponent << VFP_SINGLE_MANTISSA_BITS) +
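
The unpack/pack pair converts between the packed IEEE-754 single layout and the exploded vfp_single form, whose significand (with the implicit leading 1 made explicit for normal numbers) is kept aligned to bit 30. A worked example of that layout, added only for illustration:

// Packed single 0x40490FDB (about 3.1415927):
//   sign     = bit 31      = 0
//   exponent = bits 30..23 = 0x80
//   mantissa = bits 22..0  = 0x490FDB
// Unpacked significand: (0x490FDB << 7) | 0x40000000 = 0x6487ED80,
// i.e. the implicit 1 sits at bit 30 and the 23 mantissa bits fill bits 29..7.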
@@ -335,7 +335,7 @@ struct vfp_double {
#define vfp_double_packed_exponent(v) (((v) >> VFP_DOUBLE_MANTISSA_BITS) & ((1 << VFP_DOUBLE_EXPONENT_BITS) - 1))
#define vfp_double_packed_mantissa(v) ((v) & ((1ULL << VFP_DOUBLE_MANTISSA_BITS) - 1))
-static inline int vfp_double_type(const vfp_double* s)
+inline int vfp_double_type(const vfp_double* s)
{
int type = VFP_NUMBER;
if (s->exponent == 2047) {
@@ -357,7 +357,7 @@ static inline int vfp_double_type(const vfp_double* s)
// Unpack a double-precision float. Note that this returns the magnitude
// of the double-precision float mantissa with the 1. if necessary,
// aligned to bit 62.
-static inline void vfp_double_unpack(vfp_double* s, s64 val, u32* fpscr)
+inline void vfp_double_unpack(vfp_double* s, s64 val, u32* fpscr)
{
s->sign = vfp_double_packed_sign(val) >> 48;
s->exponent = vfp_double_packed_exponent(val);
@@ -379,7 +379,7 @@ static inline void vfp_double_unpack(vfp_double* s, s64 val, u32* fpscr)
// Re-pack a double-precision float. This assumes that the float is
// already normalised such that the MSB is bit 62, _not_ bit 63.
-static inline s64 vfp_double_pack(const vfp_double* s)
+inline s64 vfp_double_pack(const vfp_double* s)
{
u64 val = ((u64)s->sign << 48) +
((u64)s->exponent << VFP_DOUBLE_MANTISSA_BITS) +
@@ -415,7 +415,7 @@ struct op {
u32 flags;
};
-static inline u32 fls(u32 x)
+inline u32 fls(u32 x)
{
int r = 32;
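
The listing is cut off inside fls ("find last set"), which reports the 1-based position of the highest set bit and 0 for a zero input. A hedged, self-contained sketch of that behaviour (not necessarily the helper's exact body):

#include <cstdint>

// Find last set: 1-based index of the most significant 1 bit, 0 if x == 0.
inline std::uint32_t Fls(std::uint32_t x) {
    std::uint32_t r = 32;
    if (x == 0)
        return 0;
    if (!(x & 0xFFFF0000u)) { x <<= 16; r -= 16; }
    if (!(x & 0xFF000000u)) { x <<= 8;  r -= 8;  }
    if (!(x & 0xF0000000u)) { x <<= 4;  r -= 4;  }
    if (!(x & 0xC0000000u)) { x <<= 2;  r -= 2;  }
    if (!(x & 0x80000000u)) { r -= 1; }
    return r;
}
// e.g. Fls(1) == 1, Fls(0x80000000u) == 32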