Fix __floatsitf() for negative input

Negative numbers were handled properly initially, but the handling was
broken while addressing review comments, so none of them actually
worked.  Issues:
 * Wrong negation.
 * Wrong exponent calculation.

llvm-svn: 243746
commit a2ce083d2d (parent e82f2947fd)
Author: Sergey Dmitrouk
Date:   2015-07-31 13:32:09 +00:00

2 changed files with 5 additions and 5 deletions
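
Both bugs are easy to demonstrate in isolation. The following is a minimal
standalone sketch (not the in-tree code; it reuses the aAbs/aWidth names
from the diff below and assumes a 32-bit int), comparing the old and new
computations for a = -1:

    #include <stdio.h>

    int main(void) {
        int a = -1;
        const int aWidth = 32;                       /* assumed int width */
        /* Old negation: adding 0x80000000 only flips the sign bit of the
           two's-complement pattern; it does not yield abs(a). */
        unsigned oldAbs = (unsigned)a + 0x80000000u; /* 0x7fffffff, not 1 */
        /* New negation: two's-complement negate in unsigned arithmetic,
           well defined even for a = INT_MIN. */
        unsigned newAbs = ~(unsigned)a + 1U;         /* 1 */
        /* Old exponent: clz of the sign-extended bits is 0 for every
           negative input, so the exponent always came out as aWidth - 1. */
        int oldExp = (aWidth - 1) - __builtin_clz((unsigned)a); /* 31 */
        int newExp = (aWidth - 1) - __builtin_clz(newAbs);      /* 0 */
        printf("abs: old=0x%x new=0x%x, exponent: old=%d new=%d\n",
               oldAbs, newAbs, oldExp, newExp);
        return 0;
    }

Compiled with GCC or Clang (for __builtin_clz), this prints
abs: old=0x7fffffff new=0x1, exponent: old=31 new=0.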

lib/builtins/floatsitf.c

@@ -30,16 +30,14 @@ COMPILER_RT_ABI fp_t __floatsitf(int a) {
     unsigned aAbs = (unsigned)a;
     if (a < 0) {
         sign = signBit;
-        aAbs += 0x80000000;
+        aAbs = ~(unsigned)a + 1U;
     }
     // Exponent of (fp_t)a is the width of abs(a).
-    const int exponent = (aWidth - 1) - __builtin_clz(a);
+    const int exponent = (aWidth - 1) - __builtin_clz(aAbs);
     rep_t result;
-    // Shift a into the significand field and clear the implicit bit. Extra
-    // cast to unsigned int is necessary to get the correct behavior for
-    // the input INT_MIN.
+    // Shift a into the significand field and clear the implicit bit.
     const int shift = significandBits - exponent;
     result = (rep_t)aAbs << shift ^ implicitBit;
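
Concretely, for a = -1 the old code computed aAbs = 0xffffffff + 0x80000000
= 0x7fffffff instead of 1, and __builtin_clz(a) returned 0, giving exponent
31 instead of 0. For a = INT_MIN the addition wrapped aAbs around to 0,
corrupting the significand. The unsigned negation is well defined for
INT_MIN too: ~0x80000000 + 1 = 0x80000000, as required.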

test/builtins/Unit/floatsitf_test.c

@@ -40,6 +40,8 @@ char assumption_1[sizeof(long double) * CHAR_BIT == 128] = {0};
 int main()
 {
 #if __LDBL_MANT_DIG__ == 113
+    if (test__floatsitf(0x80000000, UINT64_C(0xc01e000000000000), UINT64_C(0x0)))
+        return 1;
     if (test__floatsitf(0x7fffffff, UINT64_C(0x401dfffffffc0000), UINT64_C(0x0)))
         return 1;
     if (test__floatsitf(0, UINT64_C(0x0), UINT64_C(0x0)))
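
The expected bits in the new INT_MIN case can be derived by hand: in IEEE
binary128, -2^31 has sign 1, biased exponent 16383 + 31 = 16414 = 0x401e,
and an all-zero significand (2^31 is a power of two), so the upper 64 bits
are 0xc01e000000000000 and the lower 64 bits are 0. A small cross-check,
assuming a little-endian host whose long double is binary128 (the same
condition the #if guard tests):

    #include <inttypes.h>
    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
    #if __LDBL_MANT_DIG__ == 113
        long double x = (long double)INT_MIN;   /* -2^31, exactly representable */
        uint64_t w[2];
        memcpy(w, &x, sizeof w);
        /* Little-endian: w[1] holds the sign, exponent and top of the
           significand; expect hi=0xc01e000000000000 lo=0x0. */
        printf("hi=0x%016" PRIx64 " lo=0x%016" PRIx64 "\n", w[1], w[0]);
    #endif
        return 0;
    }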