Diffstat (limited to 'libgcc/config/libbid/bid64_to_uint32.c')
-rw-r--r--  libgcc/config/libbid/bid64_to_uint32.c  1068
1 file changed, 534 insertions, 534 deletions
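
This diff is a mechanical rename of libbid's internal symbols: the entry points drop their leading "__" (__bid64_to_uint32_rnint becomes bid64_to_uint32_rnint, and likewise for the xrnint, floor, xfloor, ceil, xceil and int variants) and the shared lookup tables drop their "__bid_" prefix (ten2k64, ten2mk64, ten2mk128trunc, nr_digits, midpoint64, maskhigh128, shiftright128, onehalf128). The remaining paired -/+ lines only re-align trailing comments. Every variant begins with the same BID64 unpack sequence; the sketch below (not part of the patch, with the mask constants written out from the shifts and field widths visible in the code, so treat the exact values as illustrative) shows that step in isolation:

#include <stdint.h>

/* Illustrative sketch only: decode a BID64 operand into sign, biased
   exponent and coefficient, mirroring the unpack done with MASK_SIGN,
   MASK_STEERING_BITS, MASK_BINARY_EXPONENT1/2 and MASK_BINARY_SIG1/2
   in the routines below.  */
static void
bid64_unpack (uint64_t x, uint64_t *sign, int *biased_exp, uint64_t *coeff)
{
  *sign = x & 0x8000000000000000ull;                  /* sign bit               */
  if (((x >> 61) & 0x3) == 0x3) {                     /* steering bits are 11   */
    *biased_exp = (int) ((x >> 51) & 0x3ff);          /* 10-bit biased exponent */
    *coeff = (x & ((1ull << 51) - 1)) | (1ull << 53); /* implicit '100' prefix  */
    if (*coeff > 9999999999999999ull) {               /* non-canonical          */
      *biased_exp = 0;
      *coeff = 0;
    }
  } else {
    *biased_exp = (int) ((x >> 53) & 0x3ff);          /* 10-bit biased exponent */
    *coeff = x & ((1ull << 53) - 1);                  /* 53-bit coefficient     */
  }
}

The unbiased exponent used throughout is then biased_exp - 398, matching "exp = x_exp - 398" in each routine.
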
diff --git a/libgcc/config/libbid/bid64_to_uint32.c b/libgcc/config/libbid/bid64_to_uint32.c
index 041cca68f6a..cf11e65748c 100644
--- a/libgcc/config/libbid/bid64_to_uint32.c
+++ b/libgcc/config/libbid/bid64_to_uint32.c
@@ -34,27 +34,27 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
#if DECIMAL_CALL_BY_REFERENCE
void
-__bid64_to_uint32_rnint (unsigned int *pres, UINT64 * px
+bid64_to_uint32_rnint (unsigned int *pres, UINT64 * px
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
UINT64 x = *px;
#else
unsigned int
-__bid64_to_uint32_rnint (UINT64 x
+bid64_to_uint32_rnint (UINT64 x
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
#endif
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
- int exp; // unbiased exponent
+ int exp; // unbiased exponent
// Note: C1 represents x_significand (UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT64 C1;
- UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
+ UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
UINT128 fstar;
UINT128 P128;
@@ -67,17 +67,17 @@ __bid64_to_uint32_rnint (UINT64 x
BID_RETURN (res);
}
// unpack x
- x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
+ x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
// if steering bits are 11 (condition will be 0), then exponent is G[0:w+1] =>
if ((x & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
- x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
C1 = (x & MASK_BINARY_SIG2) | MASK_BINARY_OR2;
- if (C1 > 9999999999999999ull) { // non-canonical
+ if (C1 > 9999999999999999ull) { // non-canonical
x_exp = 0;
C1 = 0;
}
} else {
- x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
C1 = x & MASK_BINARY_SIG1;
}
@@ -91,56 +91,56 @@ __bid64_to_uint32_rnint (UINT64 x
// q = nr. of decimal digits in x (1 <= q <= 54)
// determine first the nr. of bits in x
- if (C1 >= 0x0020000000000000ull) { // x >= 2^53
+ if (C1 >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
- if (C1 >= 0x0000000100000000ull) { // x >= 2^32
- tmp1.d = (double) (C1 >> 32); // exact conversion
+ if (C1 >= 0x0000000100000000ull) { // x >= 2^32
+ tmp1.d = (double) (C1 >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
- } else { // x < 2^32
- tmp1.d = (double) C1; // exact conversion
+ } else { // x < 2^32
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- } else { // if x < 2^53
- tmp1.d = (double) C1; // exact conversion
+ } else { // if x < 2^53
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- q = __bid_nr_digits[x_nr_bits - 1].digits;
+ q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
- q = __bid_nr_digits[x_nr_bits - 1].digits1;
- if (C1 >= __bid_nr_digits[x_nr_bits - 1].threshold_lo)
+ q = nr_digits[x_nr_bits - 1].digits1;
+ if (C1 >= nr_digits[x_nr_bits - 1].threshold_lo)
q++;
}
- exp = x_exp - 398; // unbiased exponent
+ exp = x_exp - 398; // unbiased exponent
- if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
+ if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
+ } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
- if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1/2
+ if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1/2
// => set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else { // if n > 0 and q + exp = 10
+ } else { // if n > 0 and q + exp = 10
// if n >= 2^32 - 1/2 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32-1/2
// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x9fffffffb, 1<=q<=16
// <=> C * 10^(11-q) >= 0x9fffffffb, 1<=q<=16
if (q <= 11) {
// Note: C * 10^(11-q) has 10 or 11 digits; 0x9fffffffb has 11 digits
- tmp64 = C1 * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
+ tmp64 = C1 * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x9fffffffbull) {
// set invalid flag
@@ -151,12 +151,12 @@ __bid64_to_uint32_rnint (UINT64 x
}
// else cases that can be rounded to a 32-bit unsigned int fall through
// to '1 <= q + exp <= 10'
- } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
+ } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
// C * 10^(11-q) >= 0x9fffffffb <=>
// C >= 0x9fffffffb * 10^(q-11) where 1 <= q - 11 <= 5
// (scale 2^32-1/2 up)
// Note: 0x9fffffffb*10^(q-11) has q-1 or q digits, where q <= 16
- tmp64 = 0x9fffffffbull * __bid_ten2k64[q - 11];
+ tmp64 = 0x9fffffffbull * ten2k64[q - 11];
if (C1 >= tmp64) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
@@ -171,11 +171,11 @@ __bid64_to_uint32_rnint (UINT64 x
}
// n is not too large to be converted to int32 if -1/2 <= n < 2^32 - 1/2
// Note: some of the cases tested for above fall through to this point
- if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
+ if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
// return 0
res = 0x00000000;
BID_RETURN (res);
- } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
+ } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
// if 0.c(0)c(1)...c(q-1) <= 0.5 <=> c(0)c(1)...c(q-1) <= 5 * 10^(q-1)
// res = 0
// else if x > 0
@@ -183,21 +183,21 @@ __bid64_to_uint32_rnint (UINT64 x
// else // if x < 0
// invalid exc
ind = q - 1;
- if (C1 <= __bid_midpoint64[ind]) {
- res = 0x00000000; // return 0
- } else if (x_sign) { // n < 0
+ if (C1 <= midpoint64[ind]) {
+ res = 0x00000000; // return 0
+ } else if (x_sign) { // n < 0
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else { // n > 0
- res = 0x00000001; // return +1
+ } else { // n > 0
+ res = 0x00000001; // return +1
}
- } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
+ } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
// -2^32-1/2 <= x <= -1 or 1 <= x < 2^32-1/2 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
- if (x_sign) { // x <= -1
+ if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
@@ -206,25 +206,25 @@ __bid64_to_uint32_rnint (UINT64 x
}
// 1 <= x < 2^32-1/2 so x can be rounded
// to nearest to a 32-bit unsigned integer
- if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
- ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
+ if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
+ ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 64 bits
- C1 = C1 + __bid_midpoint64[ind - 1];
+ C1 = C1 + midpoint64[ind - 1];
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
- // __bid_shiftright128[] and __bid_maskhigh128[]
+ // shiftright128[] and maskhigh128[]
// 1 <= x <= 15
- // kx = 10^(-x) = __bid_ten2mk64[ind - 1]
+ // kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
- __mul_64x64_to_128MACH (P128, C1, __bid_ten2mk64[ind - 1]);
+ __mul_64x64_to_128MACH (P128, C1, ten2mk64[ind - 1]);
Cstar = P128.w[1];
- fstar.w[1] = P128.w[1] & __bid_maskhigh128[ind - 1];
+ fstar.w[1] = P128.w[1] & maskhigh128[ind - 1];
fstar.w[0] = P128.w[0];
- // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind].w[0], e.g.
- // if x=1, T*=__bid_ten2mk128trunc[0].w[0]=0x1999999999999999
+ // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind].w[0], e.g.
+ // if x=1, T*=ten2mk128trunc[0].w[0]=0x1999999999999999
// if (0 < f* < 10^(-x)) then the result is a midpoint
// if floor(C*) is even then C* = floor(C*) - logical right
// shift; C* has p decimal digits, correct by Prop. 1)
@@ -235,31 +235,31 @@ __bid64_to_uint32_rnint (UINT64 x
// correct by Property 1)
// n = C* * 10^(e+x)
- // shift right C* by Ex-64 = __bid_shiftright128[ind]
- shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 39
+ // shift right C* by Ex-64 = shiftright128[ind]
+ shift = shiftright128[ind - 1]; // 0 <= shift <= 39
Cstar = Cstar >> shift;
// if the result was a midpoint it was rounded away from zero, so
// it will need a correction
// check for midpoints
if ((fstar.w[1] == 0) && fstar.w[0] &&
- (fstar.w[0] <= __bid_ten2mk128trunc[ind - 1].w[1])) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ (fstar.w[0] <= ten2mk128trunc[ind - 1].w[1])) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
// the result is a midpoint; round to nearest
- if (Cstar & 0x01) { // Cstar is odd; MP in [EVEN, ODD]
+ if (Cstar & 0x01) { // Cstar is odd; MP in [EVEN, ODD]
// if floor(C*) is odd C = floor(C*) - 1; the result >= 1
- Cstar--; // Cstar is now even
- } // else MP in [ODD, EVEN]
+ Cstar--; // Cstar is now even
+ } // else MP in [ODD, EVEN]
}
- res = Cstar; // the result is positive
+ res = Cstar; // the result is positive
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
- res = C1; // the result is positive
- } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
+ res = C1; // the result is positive
+ } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
- res = C1 * __bid_ten2k64[exp]; // the result is positive
+ res = C1 * ten2k64[exp]; // the result is positive
}
}
BID_RETURN (res);
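
For the exp < 0 branch above, the patch keeps the table-driven rounding: C1 is first bumped by midpoint64[ind - 1] (1/2 * 10^ind), multiplied by the 64-bit approximation ten2mk64[ind - 1] of 10^(-ind), and the high word of the 128-bit product is shifted right by shiftright128[ind - 1] to give floor(C*), with fstar checked against ten2mk128trunc to detect exact midpoints and round them to even. The sketch below reproduces the intended result with a plain division instead of the reciprocal tables, so it illustrates the rounding rule only, not the library's division-free method:

#include <stdint.h>

/* Sketch only: round C1 * 10^(-ind) (1 <= ind <= 15, C1 < 10^16) to the
   nearest integer with ties to even, i.e. the value the Cstar/fstar
   logic above computes without a 64-bit division.  */
static uint32_t
round_coeff_nearest_even (uint64_t C1, int ind)
{
  uint64_t pow10 = 1;
  for (int i = 0; i < ind; i++)
    pow10 *= 10;                /* 10^ind fits in 64 bits for ind <= 15 */
  uint64_t q = C1 / pow10;      /* floor (C1 * 10^(-ind))               */
  uint64_t r = C1 % pow10;      /* discarded decimal digits             */
  if (r > pow10 / 2 || (r == pow10 / 2 && (q & 1)))
    q++;                        /* above the midpoint, or a midpoint with
                                   an odd floor: round up (ties to even) */
  return (uint32_t) q;
}
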
@@ -271,27 +271,27 @@ __bid64_to_uint32_rnint (UINT64 x
#if DECIMAL_CALL_BY_REFERENCE
void
-__bid64_to_uint32_xrnint (unsigned int *pres, UINT64 * px
+bid64_to_uint32_xrnint (unsigned int *pres, UINT64 * px
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
UINT64 x = *px;
#else
unsigned int
-__bid64_to_uint32_xrnint (UINT64 x
+bid64_to_uint32_xrnint (UINT64 x
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
#endif
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
- int exp; // unbiased exponent
+ int exp; // unbiased exponent
// Note: C1 represents x_significand (UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT64 C1;
- UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
+ UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
UINT128 fstar;
UINT128 P128;
@@ -304,17 +304,17 @@ __bid64_to_uint32_xrnint (UINT64 x
BID_RETURN (res);
}
// unpack x
- x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
+ x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
// if steering bits are 11 (condition will be 0), then exponent is G[0:w+1] =>
if ((x & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
- x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
C1 = (x & MASK_BINARY_SIG2) | MASK_BINARY_OR2;
- if (C1 > 9999999999999999ull) { // non-canonical
+ if (C1 > 9999999999999999ull) { // non-canonical
x_exp = 0;
C1 = 0;
}
} else {
- x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
C1 = x & MASK_BINARY_SIG1;
}
@@ -328,56 +328,56 @@ __bid64_to_uint32_xrnint (UINT64 x
// q = nr. of decimal digits in x (1 <= q <= 54)
// determine first the nr. of bits in x
- if (C1 >= 0x0020000000000000ull) { // x >= 2^53
+ if (C1 >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
- if (C1 >= 0x0000000100000000ull) { // x >= 2^32
- tmp1.d = (double) (C1 >> 32); // exact conversion
+ if (C1 >= 0x0000000100000000ull) { // x >= 2^32
+ tmp1.d = (double) (C1 >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
- } else { // x < 2^32
- tmp1.d = (double) C1; // exact conversion
+ } else { // x < 2^32
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- } else { // if x < 2^53
- tmp1.d = (double) C1; // exact conversion
+ } else { // if x < 2^53
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- q = __bid_nr_digits[x_nr_bits - 1].digits;
+ q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
- q = __bid_nr_digits[x_nr_bits - 1].digits1;
- if (C1 >= __bid_nr_digits[x_nr_bits - 1].threshold_lo)
+ q = nr_digits[x_nr_bits - 1].digits1;
+ if (C1 >= nr_digits[x_nr_bits - 1].threshold_lo)
q++;
}
- exp = x_exp - 398; // unbiased exponent
+ exp = x_exp - 398; // unbiased exponent
- if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
+ if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
+ } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
- if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1/2
+ if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1/2
// => set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else { // if n > 0 and q + exp = 10
+ } else { // if n > 0 and q + exp = 10
// if n >= 2^32 - 1/2 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32-1/2
// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x9fffffffb, 1<=q<=16
// <=> C * 10^(11-q) >= 0x9fffffffb, 1<=q<=16
if (q <= 11) {
// Note: C * 10^(11-q) has 10 or 11 digits; 0x9fffffffb has 11 digits
- tmp64 = C1 * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
+ tmp64 = C1 * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x9fffffffbull) {
// set invalid flag
@@ -388,12 +388,12 @@ __bid64_to_uint32_xrnint (UINT64 x
}
// else cases that can be rounded to a 32-bit unsigned int fall through
// to '1 <= q + exp <= 10'
- } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
+ } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
// C * 10^(11-q) >= 0x9fffffffb <=>
// C >= 0x9fffffffb * 10^(q-11) where 1 <= q - 11 <= 5
// (scale 2^32-1/2 up)
// Note: 0x9fffffffb*10^(q-11) has q-1 or q digits, where q <= 16
- tmp64 = 0x9fffffffbull * __bid_ten2k64[q - 11];
+ tmp64 = 0x9fffffffbull * ten2k64[q - 11];
if (C1 >= tmp64) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
@@ -408,13 +408,13 @@ __bid64_to_uint32_xrnint (UINT64 x
}
// n is not too large to be converted to int32 if -1/2 <= n < 2^32 - 1/2
// Note: some of the cases tested for above fall through to this point
- if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
+ if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// return 0
res = 0x00000000;
BID_RETURN (res);
- } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
+ } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
// if 0.c(0)c(1)...c(q-1) <= 0.5 <=> c(0)c(1)...c(q-1) <= 5 * 10^(q-1)
// res = 0
// else if x > 0
@@ -422,23 +422,23 @@ __bid64_to_uint32_xrnint (UINT64 x
// else // if x < 0
// invalid exc
ind = q - 1;
- if (C1 <= __bid_midpoint64[ind]) {
- res = 0x00000000; // return 0
- } else if (x_sign) { // n < 0
+ if (C1 <= midpoint64[ind]) {
+ res = 0x00000000; // return 0
+ } else if (x_sign) { // n < 0
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else { // n > 0
- res = 0x00000001; // return +1
+ } else { // n > 0
+ res = 0x00000001; // return +1
}
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
+ } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
// -2^32-1/2 <= x <= -1 or 1 <= x < 2^32-1/2 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
- if (x_sign) { // x <= -1
+ if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
@@ -447,25 +447,25 @@ __bid64_to_uint32_xrnint (UINT64 x
}
// 1 <= x < 2^32-1/2 so x can be rounded
// to nearest to a 32-bit unsigned integer
- if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
- ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
+ if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
+ ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 64 bits
- C1 = C1 + __bid_midpoint64[ind - 1];
+ C1 = C1 + midpoint64[ind - 1];
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
- // __bid_shiftright128[] and __bid_maskhigh128[]
+ // shiftright128[] and maskhigh128[]
// 1 <= x <= 15
- // kx = 10^(-x) = __bid_ten2mk64[ind - 1]
+ // kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
- __mul_64x64_to_128MACH (P128, C1, __bid_ten2mk64[ind - 1]);
+ __mul_64x64_to_128MACH (P128, C1, ten2mk64[ind - 1]);
Cstar = P128.w[1];
- fstar.w[1] = P128.w[1] & __bid_maskhigh128[ind - 1];
+ fstar.w[1] = P128.w[1] & maskhigh128[ind - 1];
fstar.w[0] = P128.w[0];
- // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind].w[0], e.g.
- // if x=1, T*=__bid_ten2mk128trunc[0].w[0]=0x1999999999999999
+ // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind].w[0], e.g.
+ // if x=1, T*=ten2mk128trunc[0].w[0]=0x1999999999999999
// if (0 < f* < 10^(-x)) then the result is a midpoint
// if floor(C*) is even then C* = floor(C*) - logical right
// shift; C* has p decimal digits, correct by Prop. 1)
@@ -476,41 +476,41 @@ __bid64_to_uint32_xrnint (UINT64 x
// correct by Property 1)
// n = C* * 10^(e+x)
- // shift right C* by Ex-64 = __bid_shiftright128[ind]
- shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 39
+ // shift right C* by Ex-64 = shiftright128[ind]
+ shift = shiftright128[ind - 1]; // 0 <= shift <= 39
Cstar = Cstar >> shift;
// determine inexactness of the rounding of C*
// if (0 < f* - 1/2 < 10^(-x)) then
// the result is exact
// else // if (f* - 1/2 > T*) then
// the result is inexact
- if (ind - 1 <= 2) { // fstar.w[1] is 0
+ if (ind - 1 <= 2) { // fstar.w[1] is 0
if (fstar.w[0] > 0x8000000000000000ull) {
// f* > 1/2 and the result may be exact
- tmp64 = fstar.w[0] - 0x8000000000000000ull; // f* - 1/2
- if ((tmp64 > __bid_ten2mk128trunc[ind - 1].w[1])) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ tmp64 = fstar.w[0] - 0x8000000000000000ull; // f* - 1/2
+ if ((tmp64 > ten2mk128trunc[ind - 1].w[1])) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } // else the result is exact
- } else { // the result is inexact; f2* <= 1/2
+ } // else the result is exact
+ } else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
}
- } else { // if 3 <= ind - 1 <= 14
- if (fstar.w[1] > __bid_one_half128[ind - 1] ||
- (fstar.w[1] == __bid_one_half128[ind - 1] && fstar.w[0])) {
+ } else { // if 3 <= ind - 1 <= 14
+ if (fstar.w[1] > onehalf128[ind - 1] ||
+ (fstar.w[1] == onehalf128[ind - 1] && fstar.w[0])) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
- tmp64 = fstar.w[1] - __bid_one_half128[ind - 1];
- if (tmp64 || fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[1]) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ tmp64 = fstar.w[1] - onehalf128[ind - 1];
+ if (tmp64 || fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } // else the result is exact
- } else { // the result is inexact; f2* <= 1/2
+ } // else the result is exact
+ } else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
}
@@ -520,23 +520,23 @@ __bid64_to_uint32_xrnint (UINT64 x
// it will need a correction
// check for midpoints
if ((fstar.w[1] == 0) && fstar.w[0] &&
- (fstar.w[0] <= __bid_ten2mk128trunc[ind - 1].w[1])) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ (fstar.w[0] <= ten2mk128trunc[ind - 1].w[1])) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
// the result is a midpoint; round to nearest
- if (Cstar & 0x01) { // Cstar is odd; MP in [EVEN, ODD]
+ if (Cstar & 0x01) { // Cstar is odd; MP in [EVEN, ODD]
// if floor(C*) is odd C = floor(C*) - 1; the result >= 1
- Cstar--; // Cstar is now even
- } // else MP in [ODD, EVEN]
+ Cstar--; // Cstar is now even
+ } // else MP in [ODD, EVEN]
}
- res = Cstar; // the result is positive
+ res = Cstar; // the result is positive
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
- res = C1; // the result is positive
- } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
+ res = C1; // the result is positive
+ } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
- res = C1 * __bid_ten2k64[exp]; // the result is positive
+ res = C1 * ten2k64[exp]; // the result is positive
}
}
BID_RETURN (res);
@@ -548,27 +548,27 @@ __bid64_to_uint32_xrnint (UINT64 x
#if DECIMAL_CALL_BY_REFERENCE
void
-__bid64_to_uint32_floor (unsigned int *pres, UINT64 * px
+bid64_to_uint32_floor (unsigned int *pres, UINT64 * px
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
UINT64 x = *px;
#else
unsigned int
-__bid64_to_uint32_floor (UINT64 x
+bid64_to_uint32_floor (UINT64 x
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
#endif
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
- int exp; // unbiased exponent
+ int exp; // unbiased exponent
// Note: C1 represents x_significand (UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT64 C1;
- UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
+ UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
UINT128 P128;
// check for NaN or Infinity
@@ -580,17 +580,17 @@ __bid64_to_uint32_floor (UINT64 x
BID_RETURN (res);
}
// unpack x
- x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
+ x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
// if steering bits are 11 (condition will be 0), then exponent is G[0:w+1] =>
if ((x & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
- x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
C1 = (x & MASK_BINARY_SIG2) | MASK_BINARY_OR2;
- if (C1 > 9999999999999999ull) { // non-canonical
+ if (C1 > 9999999999999999ull) { // non-canonical
x_exp = 0;
C1 = 0;
}
} else {
- x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
C1 = x & MASK_BINARY_SIG1;
}
@@ -602,7 +602,7 @@ __bid64_to_uint32_floor (UINT64 x
}
// x is not special and is not zero
- if (x_sign) { // if n < 0 the conversion is invalid
+ if (x_sign) { // if n < 0 the conversion is invalid
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
@@ -611,37 +611,37 @@ __bid64_to_uint32_floor (UINT64 x
}
// q = nr. of decimal digits in x (1 <= q <= 54)
// determine first the nr. of bits in x
- if (C1 >= 0x0020000000000000ull) { // x >= 2^53
+ if (C1 >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
- if (C1 >= 0x0000000100000000ull) { // x >= 2^32
- tmp1.d = (double) (C1 >> 32); // exact conversion
+ if (C1 >= 0x0000000100000000ull) { // x >= 2^32
+ tmp1.d = (double) (C1 >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
- } else { // x < 2^32
- tmp1.d = (double) C1; // exact conversion
+ } else { // x < 2^32
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- } else { // if x < 2^53
- tmp1.d = (double) C1; // exact conversion
+ } else { // if x < 2^53
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- q = __bid_nr_digits[x_nr_bits - 1].digits;
+ q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
- q = __bid_nr_digits[x_nr_bits - 1].digits1;
- if (C1 >= __bid_nr_digits[x_nr_bits - 1].threshold_lo)
+ q = nr_digits[x_nr_bits - 1].digits1;
+ if (C1 >= nr_digits[x_nr_bits - 1].threshold_lo)
q++;
}
- exp = x_exp - 398; // unbiased exponent
+ exp = x_exp - 398; // unbiased exponent
- if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
+ if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
+ } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
@@ -654,7 +654,7 @@ __bid64_to_uint32_floor (UINT64 x
// <=> C * 10^(11-q) >= 0xa00000000, 1<=q<=16
if (q <= 11) {
// Note: C * 10^(11-q) has 10 or 11 digits; 0xa00000000 has 11 digits
- tmp64 = C1 * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
+ tmp64 = C1 * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0xa00000000ull) {
// set invalid flag
@@ -665,12 +665,12 @@ __bid64_to_uint32_floor (UINT64 x
}
// else cases that can be rounded to a 32-bit unsigned int fall through
// to '1 <= q + exp <= 10'
- } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
+ } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
// C * 10^(11-q) >= 0xa00000000 <=>
// C >= 0xa00000000 * 10^(q-11) where 1 <= q - 11 <= 5
// (scale 2^32-1/2 up)
// Note: 0xa00000000*10^(q-11) has q-1 or q digits, where q <= 16
- tmp64 = 0xa00000000ull * __bid_ten2k64[q - 11];
+ tmp64 = 0xa00000000ull * ten2k64[q - 11];
if (C1 >= tmp64) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
@@ -684,45 +684,45 @@ __bid64_to_uint32_floor (UINT64 x
}
// n is not too large to be converted to int32 if -1 < n < 2^32
// Note: some of the cases tested for above fall through to this point
- if ((q + exp) <= 0) { // n = +0.[0...0]c(0)c(1)...c(q-1)
+ if ((q + exp) <= 0) { // n = +0.[0...0]c(0)c(1)...c(q-1)
// return 0
res = 0x00000000;
BID_RETURN (res);
- } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
+ } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
// 1 <= x < 2^32 so x can be rounded
// to nearest to a 32-bit unsigned integer
- if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
- ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
+ if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
+ ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 fits in 64 bits
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
- // __bid_shiftright128[] and __bid_maskhigh128[]
+ // shiftright128[] and maskhigh128[]
// 1 <= x <= 15
- // kx = 10^(-x) = __bid_ten2mk64[ind - 1]
+ // kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
- __mul_64x64_to_128MACH (P128, C1, __bid_ten2mk64[ind - 1]);
+ __mul_64x64_to_128MACH (P128, C1, ten2mk64[ind - 1]);
Cstar = P128.w[1];
- // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind].w[0], e.g.
- // if x=1, T*=__bid_ten2mk128trunc[0].w[0]=0x1999999999999999
+ // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind].w[0], e.g.
+ // if x=1, T*=ten2mk128trunc[0].w[0]=0x1999999999999999
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
- // shift right C* by Ex-64 = __bid_shiftright128[ind]
- shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 39
+ // shift right C* by Ex-64 = shiftright128[ind]
+ shift = shiftright128[ind - 1]; // 0 <= shift <= 39
Cstar = Cstar >> shift;
- res = Cstar; // the result is positive
+ res = Cstar; // the result is positive
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
- res = C1; // the result is positive
- } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
+ res = C1; // the result is positive
+ } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
- res = C1 * __bid_ten2k64[exp]; // the result is positive
+ res = C1 * ten2k64[exp]; // the result is positive
}
}
BID_RETURN (res);
@@ -734,27 +734,27 @@ __bid64_to_uint32_floor (UINT64 x
#if DECIMAL_CALL_BY_REFERENCE
void
-__bid64_to_uint32_xfloor (unsigned int *pres, UINT64 * px
+bid64_to_uint32_xfloor (unsigned int *pres, UINT64 * px
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
UINT64 x = *px;
#else
unsigned int
-__bid64_to_uint32_xfloor (UINT64 x
+bid64_to_uint32_xfloor (UINT64 x
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
#endif
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
- int exp; // unbiased exponent
+ int exp; // unbiased exponent
// Note: C1 represents x_significand (UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT64 C1;
- UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
+ UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
UINT128 fstar;
UINT128 P128;
@@ -767,17 +767,17 @@ __bid64_to_uint32_xfloor (UINT64 x
BID_RETURN (res);
}
// unpack x
- x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
+ x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
// if steering bits are 11 (condition will be 0), then exponent is G[0:w+1] =>
if ((x & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
- x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
C1 = (x & MASK_BINARY_SIG2) | MASK_BINARY_OR2;
- if (C1 > 9999999999999999ull) { // non-canonical
+ if (C1 > 9999999999999999ull) { // non-canonical
x_exp = 0;
C1 = 0;
}
} else {
- x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
C1 = x & MASK_BINARY_SIG1;
}
@@ -789,7 +789,7 @@ __bid64_to_uint32_xfloor (UINT64 x
}
// x is not special and is not zero
- if (x_sign) { // if n < 0 the conversion is invalid
+ if (x_sign) { // if n < 0 the conversion is invalid
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
@@ -798,37 +798,37 @@ __bid64_to_uint32_xfloor (UINT64 x
}
// q = nr. of decimal digits in x (1 <= q <= 54)
// determine first the nr. of bits in x
- if (C1 >= 0x0020000000000000ull) { // x >= 2^53
+ if (C1 >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
- if (C1 >= 0x0000000100000000ull) { // x >= 2^32
- tmp1.d = (double) (C1 >> 32); // exact conversion
+ if (C1 >= 0x0000000100000000ull) { // x >= 2^32
+ tmp1.d = (double) (C1 >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
- } else { // x < 2^32
- tmp1.d = (double) C1; // exact conversion
+ } else { // x < 2^32
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- } else { // if x < 2^53
- tmp1.d = (double) C1; // exact conversion
+ } else { // if x < 2^53
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- q = __bid_nr_digits[x_nr_bits - 1].digits;
+ q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
- q = __bid_nr_digits[x_nr_bits - 1].digits1;
- if (C1 >= __bid_nr_digits[x_nr_bits - 1].threshold_lo)
+ q = nr_digits[x_nr_bits - 1].digits1;
+ if (C1 >= nr_digits[x_nr_bits - 1].threshold_lo)
q++;
}
- exp = x_exp - 398; // unbiased exponent
+ exp = x_exp - 398; // unbiased exponent
- if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
+ if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
+ } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
@@ -841,7 +841,7 @@ __bid64_to_uint32_xfloor (UINT64 x
// <=> C * 10^(11-q) >= 0xa00000000, 1<=q<=16
if (q <= 11) {
// Note: C * 10^(11-q) has 10 or 11 digits; 0xa00000000 has 11 digits
- tmp64 = C1 * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
+ tmp64 = C1 * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0xa00000000ull) {
// set invalid flag
@@ -852,12 +852,12 @@ __bid64_to_uint32_xfloor (UINT64 x
}
// else cases that can be rounded to a 32-bit unsigned int fall through
// to '1 <= q + exp <= 10'
- } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
+ } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
// C * 10^(11-q) >= 0xa00000000 <=>
// C >= 0xa00000000 * 10^(q-11) where 1 <= q - 11 <= 5
// (scale 2^32-1/2 up)
// Note: 0xa00000000*10^(q-11) has q-1 or q digits, where q <= 16
- tmp64 = 0xa00000000ull * __bid_ten2k64[q - 11];
+ tmp64 = 0xa00000000ull * ten2k64[q - 11];
if (C1 >= tmp64) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
@@ -871,39 +871,39 @@ __bid64_to_uint32_xfloor (UINT64 x
}
// n is not too large to be converted to int32 if -1 < n < 2^32
// Note: some of the cases tested for above fall through to this point
- if ((q + exp) <= 0) { // n = +/-0.[0...0]c(0)c(1)...c(q-1)
+ if ((q + exp) <= 0) { // n = +/-0.[0...0]c(0)c(1)...c(q-1)
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// return 0
res = 0x00000000;
BID_RETURN (res);
- } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
+ } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
// 1 <= x < 2^32 so x can be rounded
// to nearest to a 32-bit unsigned integer
- if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
- ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
+ if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
+ ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 fits in 64 bits
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
- // __bid_shiftright128[] and __bid_maskhigh128[]
+ // shiftright128[] and maskhigh128[]
// 1 <= x <= 15
- // kx = 10^(-x) = __bid_ten2mk64[ind - 1]
+ // kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
- __mul_64x64_to_128MACH (P128, C1, __bid_ten2mk64[ind - 1]);
+ __mul_64x64_to_128MACH (P128, C1, ten2mk64[ind - 1]);
Cstar = P128.w[1];
- fstar.w[1] = P128.w[1] & __bid_maskhigh128[ind - 1];
+ fstar.w[1] = P128.w[1] & maskhigh128[ind - 1];
fstar.w[0] = P128.w[0];
- // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind].w[0], e.g.
- // if x=1, T*=__bid_ten2mk128trunc[0].w[0]=0x1999999999999999
+ // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind].w[0], e.g.
+ // if x=1, T*=ten2mk128trunc[0].w[0]=0x1999999999999999
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
- // shift right C* by Ex-64 = __bid_shiftright128[ind]
- shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 39
+ // shift right C* by Ex-64 = shiftright128[ind]
+ shift = shiftright128[ind - 1]; // 0 <= shift <= 39
Cstar = Cstar >> shift;
// determine inexactness of the rounding of C*
// if (0 < f* < 10^(-x)) then
@@ -911,29 +911,29 @@ __bid64_to_uint32_xfloor (UINT64 x
// else // if (f* > T*) then
// the result is inexact
if (ind - 1 <= 2) {
- if (fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[1]) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ if (fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } // else the result is exact
- } else { // if 3 <= ind - 1 <= 14
- if (fstar.w[1] || fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[1]) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ } // else the result is exact
+ } else { // if 3 <= ind - 1 <= 14
+ if (fstar.w[1] || fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } // else the result is exact
+ } // else the result is exact
}
- res = Cstar; // the result is positive
+ res = Cstar; // the result is positive
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
- res = C1; // the result is positive
- } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
+ res = C1; // the result is positive
+ } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
- res = C1 * __bid_ten2k64[exp]; // the result is positive
+ res = C1 * ten2k64[exp]; // the result is positive
}
}
BID_RETURN (res);
@@ -945,27 +945,27 @@ __bid64_to_uint32_xfloor (UINT64 x
#if DECIMAL_CALL_BY_REFERENCE
void
-__bid64_to_uint32_ceil (unsigned int *pres, UINT64 * px
+bid64_to_uint32_ceil (unsigned int *pres, UINT64 * px
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
UINT64 x = *px;
#else
unsigned int
-__bid64_to_uint32_ceil (UINT64 x
+bid64_to_uint32_ceil (UINT64 x
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
#endif
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
- int exp; // unbiased exponent
+ int exp; // unbiased exponent
// Note: C1 represents x_significand (UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT64 C1;
- UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
+ UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
UINT128 fstar;
UINT128 P128;
@@ -978,17 +978,17 @@ __bid64_to_uint32_ceil (UINT64 x
BID_RETURN (res);
}
// unpack x
- x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
+ x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
// if steering bits are 11 (condition will be 0), then exponent is G[0:w+1] =>
if ((x & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
- x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
C1 = (x & MASK_BINARY_SIG2) | MASK_BINARY_OR2;
- if (C1 > 9999999999999999ull) { // non-canonical
+ if (C1 > 9999999999999999ull) { // non-canonical
x_exp = 0;
C1 = 0;
}
} else {
- x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
C1 = x & MASK_BINARY_SIG1;
}
@@ -1002,56 +1002,56 @@ __bid64_to_uint32_ceil (UINT64 x
// q = nr. of decimal digits in x (1 <= q <= 54)
// determine first the nr. of bits in x
- if (C1 >= 0x0020000000000000ull) { // x >= 2^53
+ if (C1 >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
- if (C1 >= 0x0000000100000000ull) { // x >= 2^32
- tmp1.d = (double) (C1 >> 32); // exact conversion
+ if (C1 >= 0x0000000100000000ull) { // x >= 2^32
+ tmp1.d = (double) (C1 >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
- } else { // x < 2^32
- tmp1.d = (double) C1; // exact conversion
+ } else { // x < 2^32
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- } else { // if x < 2^53
- tmp1.d = (double) C1; // exact conversion
+ } else { // if x < 2^53
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- q = __bid_nr_digits[x_nr_bits - 1].digits;
+ q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
- q = __bid_nr_digits[x_nr_bits - 1].digits1;
- if (C1 >= __bid_nr_digits[x_nr_bits - 1].threshold_lo)
+ q = nr_digits[x_nr_bits - 1].digits1;
+ if (C1 >= nr_digits[x_nr_bits - 1].threshold_lo)
q++;
}
- exp = x_exp - 398; // unbiased exponent
+ exp = x_exp - 398; // unbiased exponent
- if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
+ if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
+ } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
- if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1
+ if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1
// => set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else { // if n > 0 and q + exp = 10
+ } else { // if n > 0 and q + exp = 10
// if n > 2^32 - 1 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^32 - 1
// <=> 0.c(0)c(1)...c(q-1) * 10^11 > 0x9fffffff6, 1<=q<=16
// <=> C * 10^(11-q) > 0x9fffffff6, 1<=q<=16
if (q <= 11) {
// Note: C * 10^(11-q) has 10 or 11 digits; 0x9fffffff6 has 11 digits
- tmp64 = C1 * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
+ tmp64 = C1 * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 > 0x9fffffff6ull) {
// set invalid flag
@@ -1062,12 +1062,12 @@ __bid64_to_uint32_ceil (UINT64 x
}
// else cases that can be rounded to a 32-bit unsigned int fall through
// to '1 <= q + exp <= 10'
- } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
+ } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
// C * 10^(11-q) > 0x9fffffff6 <=>
// C > 0x9fffffff6 * 10^(q-11) where 1 <= q - 11 <= 5
// (scale 2^32-1 up)
// Note: 0x9fffffff6*10^(q-11) has q-1 or q digits, where q <= 16
- tmp64 = 0x9fffffff6ull * __bid_ten2k64[q - 11];
+ tmp64 = 0x9fffffff6ull * ten2k64[q - 11];
if (C1 > tmp64) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
@@ -1082,17 +1082,17 @@ __bid64_to_uint32_ceil (UINT64 x
}
// n is not too large to be converted to int32 if -1 < n < 2^32
// Note: some of the cases tested for above fall through to this point
- if ((q + exp) <= 0) { // n = +/-0.[0...0]c(0)c(1)...c(q-1)
+ if ((q + exp) <= 0) { // n = +/-0.[0...0]c(0)c(1)...c(q-1)
// return 0 or 1
if (x_sign)
res = 0x00000000;
else
res = 0x00000001;
BID_RETURN (res);
- } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
+ } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
// x <= -1 or 1 <= x <= 2^32 - 1 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
- if (x_sign) { // x <= -1
+ if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
@@ -1101,58 +1101,58 @@ __bid64_to_uint32_ceil (UINT64 x
}
// 1 <= x <= 2^32 - 1 so x can be rounded
// to nearest to a 32-bit unsigned integer
- if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
- ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
+ if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
+ ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 fits in 64 bits
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
- // __bid_shiftright128[] and __bid_maskhigh128[]
+ // shiftright128[] and maskhigh128[]
// 1 <= x <= 15
- // kx = 10^(-x) = __bid_ten2mk64[ind - 1]
+ // kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
- __mul_64x64_to_128MACH (P128, C1, __bid_ten2mk64[ind - 1]);
+ __mul_64x64_to_128MACH (P128, C1, ten2mk64[ind - 1]);
Cstar = P128.w[1];
- fstar.w[1] = P128.w[1] & __bid_maskhigh128[ind - 1];
+ fstar.w[1] = P128.w[1] & maskhigh128[ind - 1];
fstar.w[0] = P128.w[0];
- // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind].w[0], e.g.
- // if x=1, T*=__bid_ten2mk128trunc[0].w[0]=0x1999999999999999
+ // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind].w[0], e.g.
+ // if x=1, T*=ten2mk128trunc[0].w[0]=0x1999999999999999
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
- // shift right C* by Ex-64 = __bid_shiftright128[ind]
- shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 39
+ // shift right C* by Ex-64 = shiftright128[ind]
+ shift = shiftright128[ind - 1]; // 0 <= shift <= 39
Cstar = Cstar >> shift;
// determine inexactness of the rounding of C*
// if (0 < f* < 10^(-x)) then
// the result is exact
// else // if (f* > T*) then
// the result is inexact
- if (ind - 1 <= 2) { // fstar.w[1] is 0
- if (fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[1]) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ if (ind - 1 <= 2) { // fstar.w[1] is 0
+ if (fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
Cstar++;
- } // else the result is exact
- } else { // if 3 <= ind - 1 <= 14
- if (fstar.w[1] || fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[1]) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ } // else the result is exact
+ } else { // if 3 <= ind - 1 <= 14
+ if (fstar.w[1] || fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
Cstar++;
- } // else the result is exact
+ } // else the result is exact
}
- res = Cstar; // the result is positive
+ res = Cstar; // the result is positive
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
- res = C1; // the result is positive
- } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
+ res = C1; // the result is positive
+ } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
- res = C1 * __bid_ten2k64[exp]; // the result is positive
+ res = C1 * ten2k64[exp]; // the result is positive
}
}
BID_RETURN (res);
@@ -1164,27 +1164,27 @@ __bid64_to_uint32_ceil (UINT64 x
#if DECIMAL_CALL_BY_REFERENCE
void
-__bid64_to_uint32_xceil (unsigned int *pres, UINT64 * px
+bid64_to_uint32_xceil (unsigned int *pres, UINT64 * px
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
UINT64 x = *px;
#else
unsigned int
-__bid64_to_uint32_xceil (UINT64 x
+bid64_to_uint32_xceil (UINT64 x
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
#endif
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
- int exp; // unbiased exponent
+ int exp; // unbiased exponent
// Note: C1 represents x_significand (UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT64 C1;
- UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
+ UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
UINT128 fstar;
UINT128 P128;
@@ -1197,17 +1197,17 @@ __bid64_to_uint32_xceil (UINT64 x
BID_RETURN (res);
}
// unpack x
- x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
+ x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
// if steering bits are 11 (condition will be 0), then exponent is G[0:w+1] =>
if ((x & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
- x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
C1 = (x & MASK_BINARY_SIG2) | MASK_BINARY_OR2;
- if (C1 > 9999999999999999ull) { // non-canonical
+ if (C1 > 9999999999999999ull) { // non-canonical
x_exp = 0;
C1 = 0;
}
} else {
- x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
C1 = x & MASK_BINARY_SIG1;
}
@@ -1221,56 +1221,56 @@ __bid64_to_uint32_xceil (UINT64 x
// q = nr. of decimal digits in x (1 <= q <= 54)
// determine first the nr. of bits in x
- if (C1 >= 0x0020000000000000ull) { // x >= 2^53
+ if (C1 >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
- if (C1 >= 0x0000000100000000ull) { // x >= 2^32
- tmp1.d = (double) (C1 >> 32); // exact conversion
+ if (C1 >= 0x0000000100000000ull) { // x >= 2^32
+ tmp1.d = (double) (C1 >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
- } else { // x < 2^32
- tmp1.d = (double) C1; // exact conversion
+ } else { // x < 2^32
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- } else { // if x < 2^53
- tmp1.d = (double) C1; // exact conversion
+ } else { // if x < 2^53
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- q = __bid_nr_digits[x_nr_bits - 1].digits;
+ q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
- q = __bid_nr_digits[x_nr_bits - 1].digits1;
- if (C1 >= __bid_nr_digits[x_nr_bits - 1].threshold_lo)
+ q = nr_digits[x_nr_bits - 1].digits1;
+ if (C1 >= nr_digits[x_nr_bits - 1].threshold_lo)
q++;
}
- exp = x_exp - 398; // unbiased exponent
+ exp = x_exp - 398; // unbiased exponent
- if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
+ if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
+ } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
- if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1
+ if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1
// => set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else { // if n > 0 and q + exp = 10
+ } else { // if n > 0 and q + exp = 10
// if n > 2^32 - 1 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) > 2^32 - 1
// <=> 0.c(0)c(1)...c(q-1) * 10^11 > 0x9fffffff6, 1<=q<=16
// <=> C * 10^(11-q) > 0x9fffffff6, 1<=q<=16
if (q <= 11) {
// Note: C * 10^(11-q) has 10 or 11 digits; 0x9fffffff6 has 11 digits
- tmp64 = C1 * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
+ tmp64 = C1 * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 > 0x9fffffff6ull) {
// set invalid flag
@@ -1281,12 +1281,12 @@ __bid64_to_uint32_xceil (UINT64 x
}
// else cases that can be rounded to a 32-bit unsigned int fall through
// to '1 <= q + exp <= 10'
- } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
+ } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
// C * 10^(11-q) > 0x9fffffff6 <=>
// C > 0x9fffffff6 * 10^(q-11) where 1 <= q - 11 <= 5
// (scale 2^32-1 up)
// Note: 0x9fffffff6*10^(q-11) has q-1 or q digits, where q <= 16
- tmp64 = 0x9fffffff6ull * __bid_ten2k64[q - 11];
+ tmp64 = 0x9fffffff6ull * ten2k64[q - 11];
if (C1 > tmp64) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
@@ -1301,7 +1301,7 @@ __bid64_to_uint32_xceil (UINT64 x
}
// n is not too large to be converted to int32 if -1 < n < 2^32
// Note: some of the cases tested for above fall through to this point
- if ((q + exp) <= 0) { // n = +/-0.[0...0]c(0)c(1)...c(q-1)
+ if ((q + exp) <= 0) { // n = +/-0.[0...0]c(0)c(1)...c(q-1)
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// return 0 or 1
@@ -1310,10 +1310,10 @@ __bid64_to_uint32_xceil (UINT64 x
else
res = 0x00000001;
BID_RETURN (res);
- } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
+ } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
// x <= -1 or 1 <= x < 2^32 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
- if (x_sign) { // x <= -1
+ if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
@@ -1322,62 +1322,62 @@ __bid64_to_uint32_xceil (UINT64 x
}
// 1 <= x < 2^32 so x can be rounded
// to nearest to a 32-bit unsigned integer
- if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
- ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
+ if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
+ ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 fits in 64 bits
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
- // __bid_shiftright128[] and __bid_maskhigh128[]
+ // shiftright128[] and maskhigh128[]
// 1 <= x <= 15
- // kx = 10^(-x) = __bid_ten2mk64[ind - 1]
+ // kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
- __mul_64x64_to_128MACH (P128, C1, __bid_ten2mk64[ind - 1]);
+ __mul_64x64_to_128MACH (P128, C1, ten2mk64[ind - 1]);
Cstar = P128.w[1];
- fstar.w[1] = P128.w[1] & __bid_maskhigh128[ind - 1];
+ fstar.w[1] = P128.w[1] & maskhigh128[ind - 1];
fstar.w[0] = P128.w[0];
- // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind].w[0], e.g.
- // if x=1, T*=__bid_ten2mk128trunc[0].w[0]=0x1999999999999999
+ // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind].w[0], e.g.
+ // if x=1, T*=ten2mk128trunc[0].w[0]=0x1999999999999999
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
- // shift right C* by Ex-64 = __bid_shiftright128[ind]
- shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 39
+ // shift right C* by Ex-64 = shiftright128[ind]
+ shift = shiftright128[ind - 1]; // 0 <= shift <= 39
Cstar = Cstar >> shift;
// determine inexactness of the rounding of C*
// if (0 < f* < 10^(-x)) then
// the result is exact
// else // if (f* > T*) then
// the result is inexact
- if (ind - 1 <= 2) { // fstar.w[1] is 0
- if (fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[1]) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ if (ind - 1 <= 2) { // fstar.w[1] is 0
+ if (fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
Cstar++;
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } // else the result is exact
- } else { // if 3 <= ind - 1 <= 14
- if (fstar.w[1] || fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[1]) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ } // else the result is exact
+ } else { // if 3 <= ind - 1 <= 14
+ if (fstar.w[1] || fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
Cstar++;
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } // else the result is exact
+ } // else the result is exact
}
- res = Cstar; // the result is positive
+ res = Cstar; // the result is positive
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
- res = C1; // the result is positive
- } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
+ res = C1; // the result is positive
+ } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
- res = C1 * __bid_ten2k64[exp]; // the result is positive
+ res = C1 * ten2k64[exp]; // the result is positive
}
}
BID_RETURN (res);
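
The exp < 0 path above computes C* = floor(C1 * 10^(-ind)) with a 64x64-to-128 multiply against a pre-rounded reciprocal, then increments C* and raises the inexact flag whenever the discarded fraction f* is nonzero, which is the ceiling behaviour. A minimal standalone sketch of that ceiling-with-inexact step, using plain 64-bit division instead of the table-driven reciprocal multiply (the helper and variable names here are illustrative, not part of libbid):

#include <stdint.h>
#include <stdio.h>

/* Round C1 * 10^(-ind) up to an integer (positive operands only) and
   report whether nonzero digits were discarded; this is the effect of
   the ten2mk64 / shiftright128 path above, without the tables. */
static uint64_t ceil_pow10_div (uint64_t C1, int ind, int *inexact)
{
  static const uint64_t pow10[] = {
    1ull, 10ull, 100ull, 1000ull, 10000ull, 100000ull, 1000000ull,
    10000000ull, 100000000ull, 1000000000ull, 10000000000ull,
    100000000000ull, 1000000000000ull, 10000000000000ull,
    100000000000000ull, 1000000000000000ull
  };
  uint64_t d = pow10[ind];          /* 10^ind, 1 <= ind <= 15 */
  uint64_t cstar = C1 / d;          /* floor(C*) */
  uint64_t fstar = C1 % d;          /* discarded fraction, scaled by 10^ind */

  *inexact = (fstar != 0);
  if (fstar != 0)                   /* ceiling: any remainder rounds up */
    cstar++;
  return cstar;
}

int main (void)
{
  int inexact;
  /* 1234567 * 10^-3 = 1234.567 -> ceiling 1235, inexact */
  uint64_t r = ceil_pow10_div (1234567ull, 3, &inexact);
  printf ("%llu inexact=%d\n", (unsigned long long) r, inexact);
  return 0;
}
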
@@ -1389,27 +1389,27 @@ __bid64_to_uint32_xceil (UINT64 x
#if DECIMAL_CALL_BY_REFERENCE
void
-__bid64_to_uint32_int (unsigned int *pres, UINT64 * px
+bid64_to_uint32_int (unsigned int *pres, UINT64 * px
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
{
UINT64 x = *px;
#else
unsigned int
-__bid64_to_uint32_int (UINT64 x
+bid64_to_uint32_int (UINT64 x
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
{
#endif
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
- int exp; // unbiased exponent
+ int exp; // unbiased exponent
// Note: C1 represents x_significand (UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT64 C1;
- UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
+ UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
UINT128 P128;
// check for NaN or Infinity
@@ -1421,17 +1421,17 @@ __bid64_to_uint32_int (UINT64 x
BID_RETURN (res);
}
// unpack x
- x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
+ x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
// if steering bits are 11 (condition will be 0), then exponent is G[0:w+1] =>
if ((x & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
- x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
C1 = (x & MASK_BINARY_SIG2) | MASK_BINARY_OR2;
- if (C1 > 9999999999999999ull) { // non-canonical
+ if (C1 > 9999999999999999ull) { // non-canonical
x_exp = 0;
C1 = 0;
}
} else {
- x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
C1 = x & MASK_BINARY_SIG1;
}
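
The unpack step just above splits a BID64 operand into sign, biased exponent, and coefficient, selecting between the two encodings by the steering bits and zeroing non-canonical coefficients. A standalone sketch of the same decode, with the mask values written out numerically to match the shifts used above (the struct and function names are only for illustration):

#include <stdint.h>

struct bid64_fields {
  uint64_t sign;   /* 0 for +, 0x8000000000000000 for - */
  int exp;         /* unbiased decimal exponent */
  uint64_t coeff;  /* decimal coefficient C1, 0 <= C1 <= 9999999999999999 */
};

/* Decode a BID64 value the same way the unpack code above does. */
static struct bid64_fields bid64_unpack (uint64_t x)
{
  struct bid64_fields f;
  f.sign = x & 0x8000000000000000ull;                 /* MASK_SIGN */
  if ((x & 0x6000000000000000ull) == 0x6000000000000000ull) {
    /* steering bits are 11: 10-bit exponent in bits 60..51, 51-bit
       coefficient field with an implied leading 100b */
    f.exp = (int) ((x & 0x1ff8000000000000ull) >> 51) - 398;
    f.coeff = (x & 0x0007ffffffffffffull) | 0x0020000000000000ull;
    if (f.coeff > 9999999999999999ull) {              /* non-canonical */
      f.exp = -398;                                   /* x_exp = 0 above */
      f.coeff = 0;
    }
  } else {
    /* 10-bit exponent in bits 62..53, 53-bit coefficient */
    f.exp = (int) ((x & 0x7fe0000000000000ull) >> 53) - 398;
    f.coeff = x & 0x001fffffffffffffull;
  }
  return f;
}
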
@@ -1445,56 +1445,56 @@ __bid64_to_uint32_int (UINT64 x
// q = nr. of decimal digits in x (1 <= q <= 54)
// determine first the nr. of bits in x
- if (C1 >= 0x0020000000000000ull) { // x >= 2^53
+ if (C1 >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
- if (C1 >= 0x0000000100000000ull) { // x >= 2^32
- tmp1.d = (double) (C1 >> 32); // exact conversion
+ if (C1 >= 0x0000000100000000ull) { // x >= 2^32
+ tmp1.d = (double) (C1 >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
- } else { // x < 2^32
- tmp1.d = (double) C1; // exact conversion
+ } else { // x < 2^32
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- } else { // if x < 2^53
- tmp1.d = (double) C1; // exact conversion
+ } else { // if x < 2^53
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- q = __bid_nr_digits[x_nr_bits - 1].digits;
+ q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
- q = __bid_nr_digits[x_nr_bits - 1].digits1;
- if (C1 >= __bid_nr_digits[x_nr_bits - 1].threshold_lo)
+ q = nr_digits[x_nr_bits - 1].digits1;
+ if (C1 >= nr_digits[x_nr_bits - 1].threshold_lo)
q++;
}
- exp = x_exp - 398; // unbiased exponent
+ exp = x_exp - 398; // unbiased exponent
- if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
+ if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
+ } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
- if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1
+ if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1
// => set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else { // if n > 0 and q + exp = 10
+ } else { // if n > 0 and q + exp = 10
// if n >= 2^32 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32
// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0xa00000000, 1<=q<=16
// <=> C * 10^(11-q) >= 0xa00000000, 1<=q<=16
if (q <= 11) {
// Note: C * 10^(11-q) has 10 or 11 digits; 0xa00000000 has 11 digits
- tmp64 = C1 * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
+ tmp64 = C1 * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0xa00000000ull) {
// set invalid flag
@@ -1505,12 +1505,12 @@ __bid64_to_uint32_int (UINT64 x
}
// else cases that can be rounded to a 32-bit unsigned int fall through
// to '1 <= q + exp <= 10'
- } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
+ } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
// C * 10^(11-q) >= 0xa00000000 <=>
// C >= 0xa00000000 * 10^(q-11) where 1 <= q - 11 <= 5
// (scale 2^32-1/2 up)
// Note: 0xa00000000*10^(q-11) has q-1 or q digits, where q <= 16
- tmp64 = 0xa00000000ull * __bid_ten2k64[q - 11];
+ tmp64 = 0xa00000000ull * ten2k64[q - 11];
if (C1 >= tmp64) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
@@ -1525,14 +1525,14 @@ __bid64_to_uint32_int (UINT64 x
}
// n is not too large to be converted to int32 if -1 < n < 2^32
// Note: some of the cases tested for above fall through to this point
- if ((q + exp) <= 0) { // n = +/-0.[0...0]c(0)c(1)...c(q-1)
+ if ((q + exp) <= 0) { // n = +/-0.[0...0]c(0)c(1)...c(q-1)
// return 0
res = 0x00000000;
BID_RETURN (res);
- } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
+ } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
// x <= -1 or 1 <= x < 2^32 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
- if (x_sign) { // x <= -1
+ if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
@@ -1541,38 +1541,38 @@ __bid64_to_uint32_int (UINT64 x
}
// 1 <= x < 2^32 so x can be rounded
// to nearest to a 32-bit unsigned integer
- if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
- ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
+ if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
+ ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 fits in 64 bits
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
- // __bid_shiftright128[] and __bid_maskhigh128[]
+ // shiftright128[] and maskhigh128[]
// 1 <= x <= 15
- // kx = 10^(-x) = __bid_ten2mk64[ind - 1]
+ // kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
- __mul_64x64_to_128MACH (P128, C1, __bid_ten2mk64[ind - 1]);
+ __mul_64x64_to_128MACH (P128, C1, ten2mk64[ind - 1]);
Cstar = P128.w[1];
- // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind].w[0], e.g.
- // if x=1, T*=__bid_ten2mk128trunc[0].w[0]=0x1999999999999999
+ // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind].w[0], e.g.
+ // if x=1, T*=ten2mk128trunc[0].w[0]=0x1999999999999999
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
- // shift right C* by Ex-64 = __bid_shiftright128[ind]
- shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 39
+ // shift right C* by Ex-64 = shiftright128[ind]
+ shift = shiftright128[ind - 1]; // 0 <= shift <= 39
Cstar = Cstar >> shift;
- res = Cstar; // the result is positive
+ res = Cstar; // the result is positive
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
- res = C1; // the result is positive
- } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
+ res = C1; // the result is positive
+ } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
- res = C1 * __bid_ten2k64[exp]; // the result is positive
+ res = C1 * ten2k64[exp]; // the result is positive
}
}
BID_RETURN (res);
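
bid64_to_uint32_int truncates toward zero: once the range checks above establish 1 <= q + exp <= 10 and a non-negative sign, the result is C1 / 10^(-exp) for exp < 0, C1 for exp == 0, and C1 * 10^exp for exp > 0, with no flag raised other than invalid. A compact reference version of that case split, using plain arithmetic in place of the ten2k64/ten2mk64 tables (the helper name is illustrative):

#include <stdint.h>

/* Truncate a positive unpacked decimal (coeff, exp) to uint32_t,
   assuming the caller has already verified 1 <= q + exp <= 10 and
   coeff * 10^exp < 2^32, as the checks above do. */
static uint32_t bid_trunc_to_u32 (uint64_t coeff, int exp)
{
  if (exp < 0) {
    uint64_t d = 1;
    for (int i = 0; i < -exp; i++)   /* d = 10^(-exp), -exp <= 15 */
      d *= 10;
    return (uint32_t) (coeff / d);   /* round toward zero */
  } else {
    uint64_t r = coeff;
    for (int i = 0; i < exp; i++)    /* exact scaling, exp <= 9 */
      r *= 10;
    return (uint32_t) r;
  }
}
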
@@ -1584,27 +1584,27 @@ __bid64_to_uint32_int (UINT64 x
#if DECIMAL_CALL_BY_REFERENCE
void
-__bid64_to_uint32_xint (unsigned int *pres, UINT64 * px
+bid64_to_uint32_xint (unsigned int *pres, UINT64 * px
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
UINT64 x = *px;
#else
unsigned int
-__bid64_to_uint32_xint (UINT64 x
+bid64_to_uint32_xint (UINT64 x
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
#endif
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
- int exp; // unbiased exponent
+ int exp; // unbiased exponent
// Note: C1 represents x_significand (UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT64 C1;
- UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
+ UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
UINT128 fstar;
UINT128 P128;
@@ -1617,17 +1617,17 @@ __bid64_to_uint32_xint (UINT64 x
BID_RETURN (res);
}
// unpack x
- x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
+ x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
// if steering bits are 11 (condition will be 0), then exponent is G[0:w+1] =>
if ((x & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
- x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
C1 = (x & MASK_BINARY_SIG2) | MASK_BINARY_OR2;
- if (C1 > 9999999999999999ull) { // non-canonical
+ if (C1 > 9999999999999999ull) { // non-canonical
x_exp = 0;
C1 = 0;
}
} else {
- x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
C1 = x & MASK_BINARY_SIG1;
}
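
Right after the unpack above, each conversion in this file derives q, the number of decimal digits in C1, from the bit length of C1 via the nr_digits[] table, as shown in the hunks that follow. An equivalent table-free way to obtain the same q, as a standalone sketch (the helper name is illustrative):

#include <stdint.h>

/* Number of decimal digits of a BID64 coefficient (1 <= result <= 16).
   Equivalent to the nr_digits[] lookup used below, without the table. */
static int decimal_digits (uint64_t C1)
{
  int q = 1;
  while (C1 >= 10) {
    C1 /= 10;
    q++;
  }
  return q;
}
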
@@ -1641,56 +1641,56 @@ __bid64_to_uint32_xint (UINT64 x
// q = nr. of decimal digits in x (1 <= q <= 54)
// determine first the nr. of bits in x
- if (C1 >= 0x0020000000000000ull) { // x >= 2^53
+ if (C1 >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
- if (C1 >= 0x0000000100000000ull) { // x >= 2^32
- tmp1.d = (double) (C1 >> 32); // exact conversion
+ if (C1 >= 0x0000000100000000ull) { // x >= 2^32
+ tmp1.d = (double) (C1 >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
- } else { // x < 2^32
- tmp1.d = (double) C1; // exact conversion
+ } else { // x < 2^32
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- } else { // if x < 2^53
- tmp1.d = (double) C1; // exact conversion
+ } else { // if x < 2^53
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- q = __bid_nr_digits[x_nr_bits - 1].digits;
+ q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
- q = __bid_nr_digits[x_nr_bits - 1].digits1;
- if (C1 >= __bid_nr_digits[x_nr_bits - 1].threshold_lo)
+ q = nr_digits[x_nr_bits - 1].digits1;
+ if (C1 >= nr_digits[x_nr_bits - 1].threshold_lo)
q++;
}
- exp = x_exp - 398; // unbiased exponent
+ exp = x_exp - 398; // unbiased exponent
- if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
+ if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
+ } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
- if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1
+ if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1
// => set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else { // if n > 0 and q + exp = 10
+ } else { // if n > 0 and q + exp = 10
// if n >= 2^32 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32
// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0xa00000000, 1<=q<=16
// <=> C * 10^(11-q) >= 0xa00000000, 1<=q<=16
if (q <= 11) {
// Note: C * 10^(11-q) has 10 or 11 digits; 0xa00000000 has 11 digits
- tmp64 = C1 * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
+ tmp64 = C1 * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0xa00000000ull) {
// set invalid flag
@@ -1701,12 +1701,12 @@ __bid64_to_uint32_xint (UINT64 x
}
// else cases that can be rounded to a 32-bit unsigned int fall through
// to '1 <= q + exp <= 10'
- } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
+ } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
// C * 10^(11-q) >= 0xa00000000 <=>
// C >= 0xa00000000 * 10^(q-11) where 1 <= q - 11 <= 5
// (scale 2^32-1/2 up)
// Note: 0xa00000000*10^(q-11) has q-1 or q digits, where q <= 16
- tmp64 = 0xa00000000ull * __bid_ten2k64[q - 11];
+ tmp64 = 0xa00000000ull * ten2k64[q - 11];
if (C1 >= tmp64) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
@@ -1721,16 +1721,16 @@ __bid64_to_uint32_xint (UINT64 x
}
// n is not too large to be converted to int32 if -1 < n < 2^32
// Note: some of the cases tested for above fall through to this point
- if ((q + exp) <= 0) { // n = +/-0.[0...0]c(0)c(1)...c(q-1)
+ if ((q + exp) <= 0) { // n = +/-0.[0...0]c(0)c(1)...c(q-1)
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// return 0
res = 0x00000000;
BID_RETURN (res);
- } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
+ } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
// x <= -1 or 1 <= x < 2^32 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
- if (x_sign) { // x <= -1
+ if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
@@ -1739,60 +1739,60 @@ __bid64_to_uint32_xint (UINT64 x
}
// 1 <= x < 2^32 so x can be rounded
// to nearest to a 32-bit unsigned integer
- if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
- ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
+ if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
+ ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 fits in 64 bits
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
- // __bid_shiftright128[] and __bid_maskhigh128[]
+ // shiftright128[] and maskhigh128[]
// 1 <= x <= 15
- // kx = 10^(-x) = __bid_ten2mk64[ind - 1]
+ // kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
- __mul_64x64_to_128MACH (P128, C1, __bid_ten2mk64[ind - 1]);
+ __mul_64x64_to_128MACH (P128, C1, ten2mk64[ind - 1]);
Cstar = P128.w[1];
- fstar.w[1] = P128.w[1] & __bid_maskhigh128[ind - 1];
+ fstar.w[1] = P128.w[1] & maskhigh128[ind - 1];
fstar.w[0] = P128.w[0];
- // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind].w[0], e.g.
- // if x=1, T*=__bid_ten2mk128trunc[0].w[0]=0x1999999999999999
+ // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind].w[0], e.g.
+ // if x=1, T*=ten2mk128trunc[0].w[0]=0x1999999999999999
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
- // shift right C* by Ex-64 = __bid_shiftright128[ind]
- shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 39
+ // shift right C* by Ex-64 = shiftright128[ind]
+ shift = shiftright128[ind - 1]; // 0 <= shift <= 39
Cstar = Cstar >> shift;
// determine inexactness of the rounding of C*
// if (0 < f* < 10^(-x)) then
// the result is exact
// else // if (f* > T*) then
// the result is inexact
- if (ind - 1 <= 2) { // fstar.w[1] is 0
- if (fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[1]) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ if (ind - 1 <= 2) { // fstar.w[1] is 0
+ if (fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } // else the result is exact
- } else { // if 3 <= ind - 1 <= 14
- if (fstar.w[1] || fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[1]) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ } // else the result is exact
+ } else { // if 3 <= ind - 1 <= 14
+ if (fstar.w[1] || fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } // else the result is exact
+ } // else the result is exact
}
- res = Cstar; // the result is positive
+ res = Cstar; // the result is positive
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
- res = C1; // the result is positive
- } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
+ res = C1; // the result is positive
+ } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
- res = C1 * __bid_ten2k64[exp]; // the result is positive
+ res = C1 * ten2k64[exp]; // the result is positive
}
}
BID_RETURN (res);
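
In the q + exp == 10 boundary case, each rounding mode compares the value against its own 11-digit scaled cutoff before falling through: 0xa00000000 (2^32 scaled by 10) in the truncating variants above, 0x9fffffff6 (2^32 - 1, scaled) in the ceiling variant earlier, and 0x9fffffffb (2^32 - 1/2, scaled) in the round-to-nearest variants that follow. A short sketch of that scaled comparison, again with plain arithmetic in place of ten2k64 (the helper name and 'limit' parameter are illustrative):

#include <stdint.h>
#include <stdbool.h>

/* Decide whether c(0)c(1)...c(9).c(10)... >= limit/10 when q + exp == 10,
   i.e. whether C1 * 10^(11-q) >= limit, as the boundary checks above do.
   'limit' is the 11-digit scaled cutoff for the rounding mode in use. */
static bool bid_u32_overflows (uint64_t C1, int q, uint64_t limit)
{
  static const uint64_t pow10[] = {
    1ull, 10ull, 100ull, 1000ull, 10000ull, 100000ull, 1000000ull,
    10000000ull, 100000000ull, 1000000000ull, 10000000000ull
  };
  if (q <= 11)                       /* scale C1 up to 11 digits */
    return C1 * pow10[11 - q] >= limit;
  /* 12 <= q <= 16: scale the cutoff up instead, so nothing overflows */
  return C1 >= limit * pow10[q - 11];
}
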
@@ -1804,27 +1804,27 @@ __bid64_to_uint32_xint (UINT64 x
#if DECIMAL_CALL_BY_REFERENCE
void
-__bid64_to_uint32_rninta (unsigned int *pres, UINT64 * px
+bid64_to_uint32_rninta (unsigned int *pres, UINT64 * px
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
UINT64 x = *px;
#else
unsigned int
-__bid64_to_uint32_rninta (UINT64 x
+bid64_to_uint32_rninta (UINT64 x
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
#endif
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
- int exp; // unbiased exponent
+ int exp; // unbiased exponent
// Note: C1 represents x_significand (UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT64 C1;
- UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
+ UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
UINT128 P128;
// check for NaN or Infinity
@@ -1836,17 +1836,17 @@ __bid64_to_uint32_rninta (UINT64 x
BID_RETURN (res);
}
// unpack x
- x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
+ x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
// if steering bits are 11 (condition will be 0), then exponent is G[0:w+1] =>
if ((x & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
- x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
C1 = (x & MASK_BINARY_SIG2) | MASK_BINARY_OR2;
- if (C1 > 9999999999999999ull) { // non-canonical
+ if (C1 > 9999999999999999ull) { // non-canonical
x_exp = 0;
C1 = 0;
}
} else {
- x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
C1 = x & MASK_BINARY_SIG1;
}
@@ -1860,56 +1860,56 @@ __bid64_to_uint32_rninta (UINT64 x
// q = nr. of decimal digits in x (1 <= q <= 54)
// determine first the nr. of bits in x
- if (C1 >= 0x0020000000000000ull) { // x >= 2^53
+ if (C1 >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
- if (C1 >= 0x0000000100000000ull) { // x >= 2^32
- tmp1.d = (double) (C1 >> 32); // exact conversion
+ if (C1 >= 0x0000000100000000ull) { // x >= 2^32
+ tmp1.d = (double) (C1 >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
- } else { // x < 2^32
- tmp1.d = (double) C1; // exact conversion
+ } else { // x < 2^32
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- } else { // if x < 2^53
- tmp1.d = (double) C1; // exact conversion
+ } else { // if x < 2^53
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- q = __bid_nr_digits[x_nr_bits - 1].digits;
+ q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
- q = __bid_nr_digits[x_nr_bits - 1].digits1;
- if (C1 >= __bid_nr_digits[x_nr_bits - 1].threshold_lo)
+ q = nr_digits[x_nr_bits - 1].digits1;
+ if (C1 >= nr_digits[x_nr_bits - 1].threshold_lo)
q++;
}
- exp = x_exp - 398; // unbiased exponent
+ exp = x_exp - 398; // unbiased exponent
- if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
+ if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
+ } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
- if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1/2
+ if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1/2
// => set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else { // if n > 0 and q + exp = 10
+ } else { // if n > 0 and q + exp = 10
// if n >= 2^32 - 1/2 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32-1/2
// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x9fffffffb, 1<=q<=16
// <=> C * 10^(11-q) >= 0x9fffffffb, 1<=q<=16
if (q <= 11) {
// Note: C * 10^(11-q) has 10 or 11 digits; 0x9fffffffb has 11 digits
- tmp64 = C1 * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
+ tmp64 = C1 * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x9fffffffbull) {
// set invalid flag
@@ -1920,12 +1920,12 @@ __bid64_to_uint32_rninta (UINT64 x
}
// else cases that can be rounded to a 32-bit unsigned int fall through
// to '1 <= q + exp <= 10'
- } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
+ } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
// C * 10^(11-q) >= 0x9fffffffb <=>
// C >= 0x9fffffffb * 10^(q-11) where 1 <= q - 11 <= 5
// (scale 2^32-1/2 up)
// Note: 0x9fffffffb*10^(q-11) has q-1 or q digits, where q <= 16
- tmp64 = 0x9fffffffbull * __bid_ten2k64[q - 11];
+ tmp64 = 0x9fffffffbull * ten2k64[q - 11];
if (C1 >= tmp64) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
@@ -1940,11 +1940,11 @@ __bid64_to_uint32_rninta (UINT64 x
}
// n is not too large to be converted to int32 if -1/2 < n < 2^32 - 1/2
// Note: some of the cases tested for above fall through to this point
- if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
+ if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
// return 0
res = 0x00000000;
BID_RETURN (res);
- } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
+ } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
// if 0.c(0)c(1)...c(q-1) < 0.5 <=> c(0)c(1)...c(q-1) < 5 * 10^(q-1)
// res = 0
// else if x > 0
@@ -1952,21 +1952,21 @@ __bid64_to_uint32_rninta (UINT64 x
// else // if x < 0
// invalid exc
ind = q - 1;
- if (C1 < __bid_midpoint64[ind]) {
- res = 0x00000000; // return 0
- } else if (x_sign) { // n < 0
+ if (C1 < midpoint64[ind]) {
+ res = 0x00000000; // return 0
+ } else if (x_sign) { // n < 0
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else { // n > 0
- res = 0x00000001; // return +1
+ } else { // n > 0
+ res = 0x00000001; // return +1
}
- } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
+ } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
// -2^32-1/2 <= x <= -1 or 1 <= x < 2^32-1/2 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
- if (x_sign) { // x <= -1
+ if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
@@ -1975,40 +1975,40 @@ __bid64_to_uint32_rninta (UINT64 x
}
// 1 <= x < 2^32-1/2 so x can be rounded
// to nearest to a 32-bit unsigned integer
- if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
- ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
+ if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
+ ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 64 bits
- C1 = C1 + __bid_midpoint64[ind - 1];
+ C1 = C1 + midpoint64[ind - 1];
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
- // __bid_shiftright128[] and __bid_maskhigh128[]
+ // shiftright128[] and maskhigh128[]
// 1 <= x <= 15
- // kx = 10^(-x) = __bid_ten2mk64[ind - 1]
+ // kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
- __mul_64x64_to_128MACH (P128, C1, __bid_ten2mk64[ind - 1]);
+ __mul_64x64_to_128MACH (P128, C1, ten2mk64[ind - 1]);
Cstar = P128.w[1];
- // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind].w[0], e.g.
- // if x=1, T*=__bid_ten2mk128trunc[0].w[0]=0x1999999999999999
+ // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind].w[0], e.g.
+ // if x=1, T*=ten2mk128trunc[0].w[0]=0x1999999999999999
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
- // shift right C* by Ex-64 = __bid_shiftright128[ind]
- shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 39
+ // shift right C* by Ex-64 = shiftright128[ind]
+ shift = shiftright128[ind - 1]; // 0 <= shift <= 39
Cstar = Cstar >> shift;
// if the result was a midpoint it was rounded away from zero
- res = Cstar; // the result is positive
+ res = Cstar; // the result is positive
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
- res = C1; // the result is positive
- } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
+ res = C1; // the result is positive
+ } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
- res = C1 * __bid_ten2k64[exp]; // the result is positive
+ res = C1 * ten2k64[exp]; // the result is positive
}
}
BID_RETURN (res);
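
Rounding to nearest with ties away from zero is implemented above by adding the midpoint 1/2 * 10^ind to C1 before the reciprocal multiply, so that a plain floor afterwards lands on the correctly rounded value. The same effect with ordinary 64-bit arithmetic, as a sketch (the function name is illustrative and the midpoint64[] table is replaced by d / 2):

#include <stdint.h>

/* Round C1 * 10^(-ind) to nearest, ties away from zero, 1 <= ind <= 15.
   Mirrors "C1 = C1 + midpoint64[ind - 1]" followed by the floor above. */
static uint64_t round_nearest_away (uint64_t C1, int ind)
{
  uint64_t d = 1;
  for (int i = 0; i < ind; i++)
    d *= 10;                         /* d = 10^ind */
  uint64_t half = d / 2;             /* midpoint: 5 * 10^(ind-1) */
  return (C1 + half) / d;            /* floor((C1 + 1/2*10^ind) / 10^ind) */
}
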
@@ -2020,27 +2020,27 @@ __bid64_to_uint32_rninta (UINT64 x
#if DECIMAL_CALL_BY_REFERENCE
void
-__bid64_to_uint32_xrninta (unsigned int *pres, UINT64 * px
+bid64_to_uint32_xrninta (unsigned int *pres, UINT64 * px
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
UINT64 x = *px;
#else
unsigned int
-__bid64_to_uint32_xrninta (UINT64 x
+bid64_to_uint32_xrninta (UINT64 x
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM
_EXC_INFO_PARAM) {
#endif
unsigned int res;
UINT64 x_sign;
UINT64 x_exp;
- int exp; // unbiased exponent
+ int exp; // unbiased exponent
// Note: C1 represents x_significand (UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT64 C1;
- UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
+ UINT64 Cstar; // C* represents up to 16 decimal digits ~ 54 bits
UINT128 fstar;
UINT128 P128;
@@ -2053,17 +2053,17 @@ __bid64_to_uint32_xrninta (UINT64 x
BID_RETURN (res);
}
// unpack x
- x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
+ x_sign = x & MASK_SIGN; // 0 for positive, MASK_SIGN for negative
// if steering bits are 11 (condition will be 0), then exponent is G[0:w+1] =>
if ((x & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
- x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT2) >> 51; // biased
C1 = (x & MASK_BINARY_SIG2) | MASK_BINARY_OR2;
- if (C1 > 9999999999999999ull) { // non-canonical
+ if (C1 > 9999999999999999ull) { // non-canonical
x_exp = 0;
C1 = 0;
}
} else {
- x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
+ x_exp = (x & MASK_BINARY_EXPONENT1) >> 53; // biased
C1 = x & MASK_BINARY_SIG1;
}
@@ -2077,56 +2077,56 @@ __bid64_to_uint32_xrninta (UINT64 x
// q = nr. of decimal digits in x (1 <= q <= 54)
// determine first the nr. of bits in x
- if (C1 >= 0x0020000000000000ull) { // x >= 2^53
+ if (C1 >= 0x0020000000000000ull) { // x >= 2^53
// split the 64-bit value in two 32-bit halves to avoid rounding errors
- if (C1 >= 0x0000000100000000ull) { // x >= 2^32
- tmp1.d = (double) (C1 >> 32); // exact conversion
+ if (C1 >= 0x0000000100000000ull) { // x >= 2^32
+ tmp1.d = (double) (C1 >> 32); // exact conversion
x_nr_bits =
33 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
- } else { // x < 2^32
- tmp1.d = (double) C1; // exact conversion
+ } else { // x < 2^32
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- } else { // if x < 2^53
- tmp1.d = (double) C1; // exact conversion
+ } else { // if x < 2^53
+ tmp1.d = (double) C1; // exact conversion
x_nr_bits =
1 + ((((unsigned int) (tmp1.ui64 >> 52)) & 0x7ff) - 0x3ff);
}
- q = __bid_nr_digits[x_nr_bits - 1].digits;
+ q = nr_digits[x_nr_bits - 1].digits;
if (q == 0) {
- q = __bid_nr_digits[x_nr_bits - 1].digits1;
- if (C1 >= __bid_nr_digits[x_nr_bits - 1].threshold_lo)
+ q = nr_digits[x_nr_bits - 1].digits1;
+ if (C1 >= nr_digits[x_nr_bits - 1].threshold_lo)
q++;
}
- exp = x_exp - 398; // unbiased exponent
+ exp = x_exp - 398; // unbiased exponent
- if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
+ if ((q + exp) > 10) { // x >= 10^10 ~= 2^33.2... (cannot fit in 32 bits)
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
+ } else if ((q + exp) == 10) { // x = c(0)c(1)...c(9).c(10)...c(q-1)
// in this case 2^29.89... ~= 10^9 <= x < 10^10 ~= 2^33.2...
// so x rounded to an integer may or may not fit in an unsigned 32-bit int
// the cases that do not fit are identified here; the ones that fit
// fall through and will be handled with other cases further,
// under '1 <= q + exp <= 10'
- if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1/2
+ if (x_sign) { // if n < 0 and q + exp = 10 then x is much less than -1/2
// => set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else { // if n > 0 and q + exp = 10
+ } else { // if n > 0 and q + exp = 10
// if n >= 2^32 - 1/2 then n is too large
// too large if c(0)c(1)...c(9).c(10)...c(q-1) >= 2^32-1/2
// <=> 0.c(0)c(1)...c(q-1) * 10^11 >= 0x9fffffffb, 1<=q<=16
// <=> C * 10^(11-q) >= 0x9fffffffb, 1<=q<=16
if (q <= 11) {
// Note: C * 10^(11-q) has 10 or 11 digits; 0x9fffffffb has 11 digits
- tmp64 = C1 * __bid_ten2k64[11 - q]; // C scaled up to 11-digit int
+ tmp64 = C1 * ten2k64[11 - q]; // C scaled up to 11-digit int
// c(0)c(1)...c(9)c(10) or c(0)c(1)...c(q-1)0...0 (11 digits)
if (tmp64 >= 0x9fffffffbull) {
// set invalid flag
@@ -2137,12 +2137,12 @@ __bid64_to_uint32_xrninta (UINT64 x
}
// else cases that can be rounded to a 32-bit unsigned int fall through
// to '1 <= q + exp <= 10'
- } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
+ } else { // if (q > 11), i.e. 12 <= q <= 16 and so -15 <= exp <= -2
// C * 10^(11-q) >= 0x9fffffffb <=>
// C >= 0x9fffffffb * 10^(q-11) where 1 <= q - 11 <= 5
// (scale 2^32-1/2 up)
// Note: 0x9fffffffb*10^(q-11) has q-1 or q digits, where q <= 16
- tmp64 = 0x9fffffffbull * __bid_ten2k64[q - 11];
+ tmp64 = 0x9fffffffbull * ten2k64[q - 11];
if (C1 >= tmp64) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
@@ -2157,13 +2157,13 @@ __bid64_to_uint32_xrninta (UINT64 x
}
// n is not too large to be converted to int32 if -1/2 < n < 2^32 - 1/2
// Note: some of the cases tested for above fall through to this point
- if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
+ if ((q + exp) < 0) { // n = +/-0.0...c(0)c(1)...c(q-1)
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// return 0
res = 0x00000000;
BID_RETURN (res);
- } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
+ } else if ((q + exp) == 0) { // n = +/-0.c(0)c(1)...c(q-1)
// if 0.c(0)c(1)...c(q-1) < 0.5 <=> c(0)c(1)...c(q-1) < 5 * 10^(q-1)
// res = 0
// else if x > 0
@@ -2171,23 +2171,23 @@ __bid64_to_uint32_xrninta (UINT64 x
// else // if x < 0
// invalid exc
ind = q - 1;
- if (C1 < __bid_midpoint64[ind]) {
- res = 0x00000000; // return 0
- } else if (x_sign) { // n < 0
+ if (C1 < midpoint64[ind]) {
+ res = 0x00000000; // return 0
+ } else if (x_sign) { // n < 0
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
- } else { // n > 0
- res = 0x00000001; // return +1
+ } else { // n > 0
+ res = 0x00000001; // return +1
}
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
+ } else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
// -2^32-1/2 <= x <= -1 or 1 <= x < 2^32-1/2 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
- if (x_sign) { // x <= -1
+ if (x_sign) { // x <= -1
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return Integer Indefinite
@@ -2196,31 +2196,31 @@ __bid64_to_uint32_xrninta (UINT64 x
}
// 1 <= x < 2^32-1/2 so x can be rounded
// to nearest to a 32-bit unsigned integer
- if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
- ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
+ if (exp < 0) { // 2 <= q <= 16, -15 <= exp <= -1, 1 <= q + exp <= 10
+ ind = -exp; // 1 <= ind <= 15; ind is a synonym for 'x'
// chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^ind where the result C1 fits in 64 bits
- C1 = C1 + __bid_midpoint64[ind - 1];
+ C1 = C1 + midpoint64[ind - 1];
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
- // __bid_shiftright128[] and __bid_maskhigh128[]
+ // shiftright128[] and maskhigh128[]
// 1 <= x <= 15
- // kx = 10^(-x) = __bid_ten2mk64[ind - 1]
+ // kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
- __mul_64x64_to_128MACH (P128, C1, __bid_ten2mk64[ind - 1]);
+ __mul_64x64_to_128MACH (P128, C1, ten2mk64[ind - 1]);
Cstar = P128.w[1];
- fstar.w[1] = P128.w[1] & __bid_maskhigh128[ind - 1];
+ fstar.w[1] = P128.w[1] & maskhigh128[ind - 1];
fstar.w[0] = P128.w[0];
- // the top Ex bits of 10^(-x) are T* = __bid_ten2mk128trunc[ind].w[0], e.g.
- // if x=1, T*=__bid_ten2mk128trunc[0].w[0]=0x1999999999999999
+ // the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind].w[0], e.g.
+ // if x=1, T*=ten2mk128trunc[0].w[0]=0x1999999999999999
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// n = C* * 10^(e+x)
- // shift right C* by Ex-64 = __bid_shiftright128[ind]
- shift = __bid_shiftright128[ind - 1]; // 0 <= shift <= 39
+ // shift right C* by Ex-64 = shiftright128[ind]
+ shift = shiftright128[ind - 1]; // 0 <= shift <= 39
Cstar = Cstar >> shift;
// determine inexactness of the rounding of C*
@@ -2228,47 +2228,47 @@ __bid64_to_uint32_xrninta (UINT64 x
// the result is exact
// else // if (f* - 1/2 > T*) then
// the result is inexact
- if (ind - 1 <= 2) { // fstar.w[1] is 0
+ if (ind - 1 <= 2) { // fstar.w[1] is 0
if (fstar.w[0] > 0x8000000000000000ull) {
// f* > 1/2 and the result may be exact
- tmp64 = fstar.w[0] - 0x8000000000000000ull; // f* - 1/2
- if ((tmp64 > __bid_ten2mk128trunc[ind - 1].w[1])) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ tmp64 = fstar.w[0] - 0x8000000000000000ull; // f* - 1/2
+ if ((tmp64 > ten2mk128trunc[ind - 1].w[1])) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } // else the result is exact
- } else { // the result is inexact; f2* <= 1/2
+ } // else the result is exact
+ } else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
}
- } else { // if 3 <= ind - 1 <= 14
- if (fstar.w[1] > __bid_one_half128[ind - 1] ||
- (fstar.w[1] == __bid_one_half128[ind - 1] && fstar.w[0])) {
+ } else { // if 3 <= ind - 1 <= 14
+ if (fstar.w[1] > onehalf128[ind - 1] ||
+ (fstar.w[1] == onehalf128[ind - 1] && fstar.w[0])) {
// f2* > 1/2 and the result may be exact
// Calculate f2* - 1/2
- tmp64 = fstar.w[1] - __bid_one_half128[ind - 1];
- if (tmp64 || fstar.w[0] > __bid_ten2mk128trunc[ind - 1].w[1]) {
- // __bid_ten2mk128trunc[ind -1].w[1] is identical to
- // __bid_ten2mk128[ind -1].w[1]
+ tmp64 = fstar.w[1] - onehalf128[ind - 1];
+ if (tmp64 || fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
+ // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } // else the result is exact
- } else { // the result is inexact; f2* <= 1/2
+ } // else the result is exact
+ } else { // the result is inexact; f2* <= 1/2
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
}
}
// if the result was a midpoint it was rounded away from zero
- res = Cstar; // the result is positive
+ res = Cstar; // the result is positive
} else if (exp == 0) {
// 1 <= q <= 10
// res = +C (exact)
- res = C1; // the result is positive
- } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
+ res = C1; // the result is positive
+ } else { // if (exp > 0) => 1 <= exp <= 9, 1 <= q < 9, 2 <= q + exp <= 10
// res = +C * 10^exp (exact)
- res = C1 * __bid_ten2k64[exp]; // the result is positive
+ res = C1 * ten2k64[exp]; // the result is positive
}
}
BID_RETURN (res);
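
The xrninta variant additionally distinguishes exact results from inexact ones after the midpoint shift: the discarded fraction f* is compared against 1/2 (via onehalf128) and against the truncated reciprocal to decide whether any nonzero digits were dropped. With plain division the same classification reduces to inspecting the remainder of the unshifted coefficient; a minimal sketch (the helper name is illustrative):

#include <stdint.h>
#include <stdbool.h>

/* Round C1 * 10^(-ind) to nearest-away and report inexactness.
   In the table-driven code above the decision is made by comparing f*
   against 1/2 and ten2mk128trunc; with plain division it is simply
   "was the discarded remainder nonzero". */
static uint64_t round_nearest_away_x (uint64_t C1, int ind, bool *inexact)
{
  uint64_t d = 1;
  for (int i = 0; i < ind; i++)
    d *= 10;                          /* d = 10^ind, 1 <= ind <= 15 */
  *inexact = (C1 % d) != 0;           /* exact only if all dropped digits are 0 */
  return (C1 + d / 2) / d;            /* ties away from zero */
}
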