diff --git a/include/fast_float/ascii_number.h b/include/fast_float/ascii_number.h
index 730496a..05149bb 100644
--- a/include/fast_float/ascii_number.h
+++ b/include/fast_float/ascii_number.h
@@ -439,7 +439,7 @@ parse_number_string(UC const *p, UC const *pend,
   } else {
     // Now let's parse the explicit exponent.
     while ((p != pend) && is_integer(*p)) {
-      if (exp_number < 0x10000000) {
+      if (exp_number < 0x10000) {
         // check for exponent overflow if we have too many digits.
         UC const digit = UC(*p - UC('0'));
         exp_number = 10 * exp_number + static_cast<am_pow_t>(digit);
diff --git a/include/fast_float/decimal_to_binary.h b/include/fast_float/decimal_to_binary.h
index 97a8d44..57e80f7 100644
--- a/include/fast_float/decimal_to_binary.h
+++ b/include/fast_float/decimal_to_binary.h
@@ -139,9 +139,9 @@ compute_float(int64_t q, uint64_t w) noexcept {
   // branchless approach: value128 product = compute_product(q, w); but in
   // practice, we can win big with the compute_product_approximation if its
   // additional branch is easily predicted. Which is best is data specific.
-  am_pow_t const upperbit = am_pow_t(product.high >> 63);
-  am_pow_t const shift =
-      am_pow_t(upperbit + 64 - binary::mantissa_explicit_bits() - 3);
+  limb_t const upperbit = limb_t(product.high >> 63);
+  limb_t const shift =
+      limb_t(upperbit + 64 - binary::mantissa_explicit_bits() - 3);
   answer.mantissa = product.high >> shift;
diff --git a/include/fast_float/float_common.h b/include/fast_float/float_common.h
index a0bfbcf..2801437 100644
--- a/include/fast_float/float_common.h
+++ b/include/fast_float/float_common.h
@@ -446,7 +446,7 @@ typedef int_fast8_t am_bits_t;
 
 // Power bias is signed for handling a denormal float
 // or an invalid mantissa.
-typedef int_fast32_t am_pow_t;
+typedef int_fast16_t am_pow_t;
 
 // Bias so we can get the real exponent with an invalid adjusted_mantissa.
 constexpr static am_pow_t invalid_am_bias = -0x8000;