Diffstat
-rw-r--r--   src/core/arm/skyeye_common/vfp/vfp_helper.h    20
-rw-r--r--   src/core/arm/skyeye_common/vfp/vfpdouble.cpp  101
-rw-r--r--   src/core/arm/skyeye_common/vfp/vfpsingle.cpp  100
3 files changed, 130 insertions, 91 deletions
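The whole commit is one mechanical refactor: vfp_single_unpack and vfp_double_unpack used to write the FPSCR_IDC cumulative flag straight into FPSCR through a pointer, and the vfp_*_normaliseround helpers took a pre-accumulated exceptions argument. Both now take FPSCR by value and return the flags they raise, so each caller ORs every step's result into one local accumulator and returns it once; the old `exceptions = ...` assignments become `exceptions |= ...` so no earlier flag is overwritten. A minimal C++ sketch of the before/after contract (stand-in names, not the emulator's actual code; the bit positions follow the ARM FPSCR layout):

    #include <cstdint>
    #include <cstdio>

    using u32 = std::uint32_t;

    constexpr u32 FPSCR_FZ  = 1u << 24; // flush-to-zero mode bit
    constexpr u32 FPSCR_IDC = 1u << 7;  // input-denormal cumulative flag

    // Old contract: the helper silently mutates FPSCR through a pointer.
    static void unpack_old(u32* fpscr, bool denormal) {
        if ((*fpscr & FPSCR_FZ) != 0 && denormal)
            *fpscr |= FPSCR_IDC; // side effect hidden inside the helper
    }

    // New contract: FPSCR is read-only input; raised flags come back in the
    // return value, mirroring what vfp_single_unpack/vfp_double_unpack now do.
    static u32 unpack_new(u32 fpscr, bool denormal) {
        u32 exceptions = 0;
        if ((fpscr & FPSCR_FZ) != 0 && denormal)
            exceptions |= FPSCR_IDC;
        return exceptions;
    }

    int main() {
        u32 fpscr = FPSCR_FZ;
        u32 exceptions = 0;
        exceptions |= unpack_new(fpscr, true); // each step ORs into one accumulator
        // ... multiply/add/normalise-round steps would OR in their flags too ...
        std::printf("raised: %#x\n", exceptions);

        u32 fpscr_old = FPSCR_FZ;
        unpack_old(&fpscr_old, true); // old style: flag already merged into FPSCR
        return 0;
    }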
diff --git a/src/core/arm/skyeye_common/vfp/vfp_helper.h b/src/core/arm/skyeye_common/vfp/vfp_helper.h
index 210972917..68714800c 100644
--- a/src/core/arm/skyeye_common/vfp/vfp_helper.h
+++ b/src/core/arm/skyeye_common/vfp/vfp_helper.h
@@ -271,8 +271,9 @@ inline int vfp_single_type(const vfp_single* s)
 // Unpack a single-precision float. Note that this returns the magnitude
 // of the single-precision float mantissa with the 1. if necessary,
 // aligned to bit 30.
-inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
+inline u32 vfp_single_unpack(vfp_single* s, s32 val, u32 fpscr)
 {
+    u32 exceptions = 0;
     s->sign = vfp_single_packed_sign(val) >> 16,
     s->exponent = vfp_single_packed_exponent(val);
@@ -283,12 +284,13 @@ inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
     // If flush-to-zero mode is enabled, turn the denormal into zero.
     // On a VFPv2 architecture, the sign of the zero is always positive.
-    if ((*fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_single_type(s) & VFP_DENORMAL) != 0) {
+    if ((fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_single_type(s) & VFP_DENORMAL) != 0) {
         s->sign = 0;
         s->exponent = 0;
         s->significand = 0;
-        *fpscr |= FPSCR_IDC;
+        exceptions |= FPSCR_IDC;
     }
+    return exceptions;
 }
 
 // Re-pack a single-precision float. This assumes that the float is
@@ -302,7 +304,7 @@ inline s32 vfp_single_pack(const vfp_single* s)
 }
 
-u32 vfp_single_normaliseround(ARMul_State* state, int sd, vfp_single* vs, u32 fpscr, u32 exceptions, const char* func);
+u32 vfp_single_normaliseround(ARMul_State* state, int sd, vfp_single* vs, u32 fpscr, const char* func);
 
 // Double-precision
 struct vfp_double {
@@ -357,8 +359,9 @@ inline int vfp_double_type(const vfp_double* s)
 // Unpack a double-precision float. Note that this returns the magnitude
 // of the double-precision float mantissa with the 1. if necessary,
 // aligned to bit 62.
-inline void vfp_double_unpack(vfp_double* s, s64 val, u32* fpscr)
+inline u32 vfp_double_unpack(vfp_double* s, s64 val, u32 fpscr)
 {
+    u32 exceptions = 0;
     s->sign = vfp_double_packed_sign(val) >> 48;
     s->exponent = vfp_double_packed_exponent(val);
@@ -369,12 +372,13 @@ inline void vfp_double_unpack(vfp_double* s, s64 val, u32* fpscr)
     // If flush-to-zero mode is enabled, turn the denormal into zero.
     // On a VFPv2 architecture, the sign of the zero is always positive.
-    if ((*fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_double_type(s) & VFP_DENORMAL) != 0) {
+    if ((fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_double_type(s) & VFP_DENORMAL) != 0) {
         s->sign = 0;
         s->exponent = 0;
         s->significand = 0;
-        *fpscr |= FPSCR_IDC;
+        exceptions |= FPSCR_IDC;
     }
+    return exceptions;
 }
 
 // Re-pack a double-precision float. This assumes that the float is
@@ -447,4 +451,4 @@ inline u32 fls(u32 x)
 u32 vfp_double_multiply(vfp_double* vdd, vfp_double* vdn, vfp_double* vdm, u32 fpscr);
 u32 vfp_double_add(vfp_double* vdd, vfp_double* vdn, vfp_double *vdm, u32 fpscr);
-u32 vfp_double_normaliseround(ARMul_State* state, int dd, vfp_double* vd, u32 fpscr, u32 exceptions, const char* func);
+u32 vfp_double_normaliseround(ARMul_State* state, int dd, vfp_double* vd, u32 fpscr, const char* func);
diff --git a/src/core/arm/skyeye_common/vfp/vfpdouble.cpp b/src/core/arm/skyeye_common/vfp/vfpdouble.cpp
index 45914d479..580e60c85 100644
--- a/src/core/arm/skyeye_common/vfp/vfpdouble.cpp
+++ b/src/core/arm/skyeye_common/vfp/vfpdouble.cpp
@@ -85,11 +85,12 @@ static void vfp_double_normalise_denormal(struct vfp_double *vd)
     vfp_double_dump("normalise_denormal: out", vd);
 }
 
-u32 vfp_double_normaliseround(ARMul_State* state, int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func)
+u32 vfp_double_normaliseround(ARMul_State* state, int dd, struct vfp_double *vd, u32 fpscr, const char *func)
 {
     u64 significand, incr;
     int exponent, shift, underflow;
     u32 rmode;
+    u32 exceptions = 0;
 
     vfp_double_dump("pack: in", vd);
@@ -291,8 +292,9 @@ static u32 vfp_double_fsqrt(ARMul_State* state, int dd, int unused, int dm, u32
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
     vfp_double vdm, vdd, *vdp;
     int ret, tm;
+    u32 exceptions = 0;
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
 
     tm = vfp_double_type(&vdm);
     if (tm & (VFP_NAN|VFP_INFINITY)) {
@@ -369,7 +371,8 @@ sqrt_invalid:
     }
     vdd.significand = vfp_shiftright64jamming(vdd.significand, 1);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fsqrt");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fsqrt");
+    return exceptions;
 }
 
 /*
@@ -475,7 +478,7 @@ static u32 vfp_double_fcvts(ARMul_State* state, int sd, int unused, int dm, u32
     u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
 
     tm = vfp_double_type(&vdm);
@@ -504,7 +507,8 @@ static u32 vfp_double_fcvts(ARMul_State* state, int sd, int unused, int dm, u32
     else
         vsd.exponent = vdm.exponent - (1023 - 127);
 
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fcvts");
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fcvts");
+    return exceptions;
 
 pack_nan:
     vfp_put_float(state, vfp_single_pack(&vsd), sd);
@@ -514,6 +518,7 @@
 static u32 vfp_double_fuito(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
 {
     struct vfp_double vdm;
+    u32 exceptions = 0;
     u32 m = vfp_get_float(state, dm);
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
@@ -521,12 +526,14 @@ static u32 vfp_double_fuito(ARMul_State* state, int dd, int unused, int dm, u32
     vdm.exponent = 1023 + 63 - 1;
     vdm.significand = (u64)m;
 
-    return vfp_double_normaliseround(state, dd, &vdm, fpscr, 0, "fuito");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdm, fpscr, "fuito");
+    return exceptions;
 }
 
 static u32 vfp_double_fsito(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
 {
     struct vfp_double vdm;
+    u32 exceptions = 0;
     u32 m = vfp_get_float(state, dm);
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
@@ -534,7 +541,8 @@ static u32 vfp_double_fsito(ARMul_State* state, int dd, int unused, int dm, u32
     vdm.exponent = 1023 + 63 - 1;
     vdm.significand = vdm.sign ? (~m + 1) : m;
 
-    return vfp_double_normaliseround(state, dd, &vdm, fpscr, 0, "fsito");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdm, fpscr, "fsito");
+    return exceptions;
 }
 
 static u32 vfp_double_ftoui(ARMul_State* state, int sd, int unused, int dm, u32 fpscr)
@@ -545,7 +553,7 @@
     int tm;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
 
     /*
      * Do we have a denormalised number?
      */
@@ -626,7 +634,7 @@ static u32 vfp_double_ftosi(ARMul_State* state, int sd, int unused, int dm, u32
     int tm;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     vfp_double_dump("VDM", &vdm);
 
     /*
@@ -892,21 +900,21 @@ static u32
 vfp_double_multiply_accumulate(ARMul_State* state, int dd, int dn, int dm, u32 fpscr, u32 negate, const char *func)
 {
     struct vfp_double vdd, vdp, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
-    exceptions = vfp_double_multiply(&vdp, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_multiply(&vdp, &vdn, &vdm, fpscr);
     if (negate & NEG_MULTIPLY)
         vdp.sign = vfp_sign_negate(vdp.sign);
 
-    vfp_double_unpack(&vdn, vfp_get_double(state, dd), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dd), fpscr);
     if (vdn.exponent == 0 && vdn.significand != 0)
         vfp_double_normalise_denormal(&vdn);
@@ -915,7 +923,8 @@ vfp_double_multiply_accumulate(ARMul_State* state, int dd, int dn, int dm, u32 f
 
     exceptions |= vfp_double_add(&vdd, &vdn, &vdp, fpscr);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, func);
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, func);
+    return exceptions;
 }
 
 /*
@@ -964,19 +973,21 @@ static u32 vfp_double_fnmsc(ARMul_State* state, int dd, int dn, int dm, u32 fpsc
 static u32 vfp_double_fmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
 {
     struct vfp_double vdd, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
-    exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fmul");
+    exceptions |= vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
+
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fmul");
+    return exceptions;
 }
 
 /*
@@ -985,21 +996,22 @@ static u32 vfp_double_fmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
 static u32 vfp_double_fnmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
 {
     struct vfp_double vdd, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
-    exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
     vdd.sign = vfp_sign_negate(vdd.sign);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fnmul");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fnmul");
+    return exceptions;
 }
 
 /*
@@ -1008,20 +1020,21 @@ static u32 vfp_double_fnmul(ARMul_State* state, int dd, int dn, int dm, u32 fpsc
 static u32 vfp_double_fadd(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
 {
     struct vfp_double vdd, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
-    exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_add(&vdd, &vdn, &vdm, fpscr);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fadd");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fadd");
+    return exceptions;
 }
 
 /*
@@ -1030,14 +1043,14 @@ static u32 vfp_double_fadd(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
 static u32 vfp_double_fsub(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
 {
     struct vfp_double vdd, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
@@ -1046,9 +1059,10 @@ static u32 vfp_double_fsub(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
      */
     vdm.sign = vfp_sign_negate(vdm.sign);
 
-    exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_add(&vdd, &vdn, &vdm, fpscr);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fsub");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fsub");
+    return exceptions;
 }
 
 /*
@@ -1061,8 +1075,8 @@ static u32 vfp_double_fdiv(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
     int tm, tn;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
 
     vdd.sign = vdn.sign ^ vdm.sign;
@@ -1131,16 +1145,18 @@ static u32 vfp_double_fdiv(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
         }
         vdd.significand |= (reml != 0);
     }
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fdiv");
+
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fdiv");
+    return exceptions;
 
 vdn_nan:
-    exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
 pack:
     vfp_put_double(state, vfp_double_pack(&vdd), dd);
     return exceptions;
 
 vdm_nan:
-    exceptions = vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
+    exceptions |= vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
     goto pack;
 
 zero:
@@ -1149,7 +1165,7 @@ zero:
     goto pack;
 
 divzero:
-    exceptions = FPSCR_DZC;
+    exceptions |= FPSCR_DZC;
 infinity:
     vdd.exponent = 2047;
     vdd.significand = 0;
@@ -1157,7 +1173,8 @@ infinity:
 
 invalid:
     vfp_put_double(state, vfp_double_pack(&vfp_double_default_qnan), dd);
-    return FPSCR_IOC;
+    exceptions |= FPSCR_IOC;
+    return exceptions;
 }
 
 static struct op fops[] = {
diff --git a/src/core/arm/skyeye_common/vfp/vfpsingle.cpp b/src/core/arm/skyeye_common/vfp/vfpsingle.cpp
index e47ad2760..23e0cdf26 100644
--- a/src/core/arm/skyeye_common/vfp/vfpsingle.cpp
+++ b/src/core/arm/skyeye_common/vfp/vfpsingle.cpp
@@ -89,10 +89,11 @@ static void vfp_single_normalise_denormal(struct vfp_single *vs)
 }
 
-u32 vfp_single_normaliseround(ARMul_State* state, int sd, struct vfp_single *vs, u32 fpscr, u32 exceptions, const char *func)
+u32 vfp_single_normaliseround(ARMul_State* state, int sd, struct vfp_single *vs, u32 fpscr, const char *func)
 {
     u32 significand, incr, rmode;
     int exponent, shift, underflow;
+    u32 exceptions = 0;
 
     vfp_single_dump("pack: in", vs);
@@ -334,8 +335,9 @@ static u32 vfp_single_fsqrt(ARMul_State* state, int sd, int unused, s32 m, u32 f
 {
     struct vfp_single vsm, vsd, *vsp;
     int ret, tm;
+    u32 exceptions = 0;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     tm = vfp_single_type(&vsm);
     if (tm & (VFP_NAN|VFP_INFINITY)) {
         vsp = &vsd;
@@ -408,7 +410,8 @@ sqrt_invalid:
     }
     vsd.significand = vfp_shiftright32jamming(vsd.significand, 1);
 
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fsqrt");
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fsqrt");
+    return exceptions;
 }
 
 /*
@@ -503,7 +506,7 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
     int tm;
     u32 exceptions = 0;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
 
     tm = vfp_single_type(&vsm);
 
@@ -511,7 +514,7 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
      * If we have a signalling NaN, signal invalid operation.
      */
     if (tm == VFP_SNAN)
-        exceptions = FPSCR_IOC;
+        exceptions |= FPSCR_IOC;
 
     if (tm & VFP_DENORMAL)
         vfp_single_normalise_denormal(&vsm);
@@ -532,7 +535,8 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
     else
         vdd.exponent = vsm.exponent + (1023 - 127);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fcvtd");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fcvtd");
+    return exceptions;
 
 pack_nan:
     vfp_put_double(state, vfp_double_pack(&vdd), dd);
@@ -542,23 +546,27 @@ pack_nan:
 static u32 vfp_single_fuito(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
 {
     struct vfp_single vs;
+    u32 exceptions = 0;
 
     vs.sign = 0;
     vs.exponent = 127 + 31 - 1;
     vs.significand = (u32)m;
 
-    return vfp_single_normaliseround(state, sd, &vs, fpscr, 0, "fuito");
+    exceptions |= vfp_single_normaliseround(state, sd, &vs, fpscr, "fuito");
+    return exceptions;
 }
 
 static u32 vfp_single_fsito(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
 {
     struct vfp_single vs;
+    u32 exceptions = 0;
 
     vs.sign = (m & 0x80000000) >> 16;
     vs.exponent = 127 + 31 - 1;
     vs.significand = vs.sign ? -m : m;
 
-    return vfp_single_normaliseround(state, sd, &vs, fpscr, 0, "fsito");
+    exceptions |= vfp_single_normaliseround(state, sd, &vs, fpscr, "fsito");
+    return exceptions;
 }
 
 static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
@@ -568,7 +576,7 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
     int rmode = fpscr & FPSCR_RMODE_MASK;
     int tm;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     vfp_single_dump("VSM", &vsm);
 
     /*
@@ -583,7 +591,7 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
     if (vsm.exponent >= 127 + 32) {
         d = vsm.sign ? 0 : 0xffffffff;
-        exceptions = FPSCR_IOC;
+        exceptions |= FPSCR_IOC;
     } else if (vsm.exponent >= 127) {
         int shift = 127 + 31 - vsm.exponent;
         u32 rem, incr = 0;
@@ -648,7 +656,7 @@ static u32 vfp_single_ftosi(ARMul_State* state, int sd, int unused, s32 m, u32 f
     int rmode = fpscr & FPSCR_RMODE_MASK;
     int tm;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     vfp_single_dump("VSM", &vsm);
 
     /*
@@ -774,7 +782,7 @@ vfp_single_fadd_nonnumber(struct vfp_single *vsd, struct vfp_single *vsn,
         /*
          * different signs -> invalid
         */
-        exceptions = FPSCR_IOC;
+        exceptions |= FPSCR_IOC;
         vsp = &vfp_single_default_qnan;
     } else {
        /*
@@ -921,27 +929,27 @@ static u32
 vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr, u32 negate, const char *func)
 {
     vfp_single vsd, vsp, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 v;
 
     v = vfp_get_float(state, sn);
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, v);
-    vfp_single_unpack(&vsn, v, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, v, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
     if (negate & NEG_MULTIPLY)
         vsp.sign = vfp_sign_negate(vsp.sign);
 
     v = vfp_get_float(state, sd);
     LOG_TRACE(Core_ARM11, "s%u = %08x", sd, v);
-    vfp_single_unpack(&vsn, v, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, v, fpscr);
     if (vsn.exponent == 0 && vsn.significand != 0)
         vfp_single_normalise_denormal(&vsn);
@@ -950,7 +958,8 @@ vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fp
 
     exceptions |= vfp_single_add(&vsd, &vsn, &vsp, fpscr);
 
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, func);
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, func);
+    return exceptions;
 }
 
 /*
@@ -962,8 +971,10 @@ vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fp
  */
 static u32 vfp_single_fmac(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
+    u32 exceptions = 0;
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, sd);
-    return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, 0, "fmac");
+    exceptions |= vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, 0, "fmac");
+    return exceptions;
 }
 
 /*
@@ -1000,21 +1011,23 @@ static u32 vfp_single_fnmsc(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
 static u32 vfp_single_fmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
     struct vfp_single vsd, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 n = vfp_get_float(state, sn);
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
 
-    vfp_single_unpack(&vsn, n, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fmul");
+    exceptions |= vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
+
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fmul");
+    return exceptions;
 }
 
 /*
@@ -1023,22 +1036,24 @@ static u32 vfp_single_fmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 static u32 vfp_single_fnmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
     struct vfp_single vsd, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 n = vfp_get_float(state, sn);
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
 
-    vfp_single_unpack(&vsn, n, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
     vsd.sign = vfp_sign_negate(vsd.sign);
 
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fnmul");
+
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fnmul");
+    return exceptions;
 }
 
 /*
@@ -1047,7 +1062,7 @@ static u32 vfp_single_fnmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
 static u32 vfp_single_fadd(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
     struct vfp_single vsd, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 n = vfp_get_float(state, sn);
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
 
@@ -1055,17 +1070,18 @@ static u32 vfp_single_fadd(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
     /*
      * Unpack and normalise denormals.
     */
-    vfp_single_unpack(&vsn, n, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_add(&vsd, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_add(&vsd, &vsn, &vsm, fpscr);
 
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fadd");
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fadd");
+    return exceptions;
 }
 
 /*
@@ -1095,8 +1111,8 @@ static u32 vfp_single_fdiv(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
 
-    vfp_single_unpack(&vsn, n, &fpscr);
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
 
     vsd.sign = vsn.sign ^ vsm.sign;
@@ -1162,16 +1178,17 @@ static u32 vfp_single_fdiv(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
     if ((vsd.significand & 0x3f) == 0)
         vsd.significand |= ((u64)vsm.significand * vsd.significand != (u64)vsn.significand << 32);
 
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fdiv");
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fdiv");
+    return exceptions;
 
 vsn_nan:
-    exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
+    exceptions |= vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
 pack:
     vfp_put_float(state, vfp_single_pack(&vsd), sd);
     return exceptions;
 
 vsm_nan:
-    exceptions = vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
+    exceptions |= vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
     goto pack;
 
 zero:
@@ -1180,7 +1197,7 @@ zero:
     goto pack;
 
 divzero:
-    exceptions = FPSCR_DZC;
+    exceptions |= FPSCR_DZC;
 infinity:
     vsd.exponent = 255;
     vsd.significand = 0;
@@ -1188,7 +1205,8 @@ infinity:
 
 invalid:
     vfp_put_float(state, vfp_single_pack(&vfp_single_default_qnan), sd);
-    return FPSCR_IOC;
+    exceptions |= FPSCR_IOC;
+    return exceptions;
 }
 
 static struct op fops[] = {
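With every operation now reporting its complete exception mask through its return value, flags can be folded into the emulated FPSCR at a single point instead of from inside each helper. A hypothetical dispatch sketch (the names below are invented for illustration and do not match citra's actual dispatch code; only the IOC/DZC bit positions follow the ARM FPSCR layout):

    #include <cstdint>

    using u32 = std::uint32_t;

    constexpr u32 FPSCR_IOC = 1u << 0; // invalid-operation cumulative flag
    constexpr u32 FPSCR_DZC = 1u << 1; // division-by-zero cumulative flag

    // Stand-in for one fops[] entry such as vfp_single_fdiv: it reads FPSCR
    // by value and reports everything it raised, touching no emulator state.
    static u32 do_vfp_op(u32 /*fpscr*/) {
        return FPSCR_DZC;
    }

    int main() {
        u32 fpscr = 0;
        // Single merge point: a flag raised early (e.g. IDC during unpack)
        // can no longer be lost to a later `exceptions = ...` overwrite,
        // which is the bug class the `=` to `|=` changes above remove.
        fpscr |= do_vfp_op(fpscr);
        return fpscr == FPSCR_DZC ? 0 : 1;
    }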