diff --git a/include/fast_float/decimal_to_binary.h b/include/fast_float/decimal_to_binary.h
index aa7d7cc..2c30cc5 100644
--- a/include/fast_float/decimal_to_binary.h
+++ b/include/fast_float/decimal_to_binary.h
@@ -141,13 +141,16 @@ compute_float(am_pow_t q, am_mant_t w) noexcept {
   // branchless approach: value128 product = compute_product(q, w); but in
   // practice, we can win big with the compute_product_approximation if its
   // additional branch is easily predicted. Which is best is data specific.
-  auto const upperbit = product.high >> 63;
-  auto const shift = upperbit + 64 - binary::mantissa_explicit_bits() - 3;
+  am_pow_t const upperbit = product.high >> 63;
+  am_pow_t const shift = upperbit + 64 - binary::mantissa_explicit_bits() - 3;
   // Shift right the mantissa to the correct position
   answer.mantissa = product.high >> shift;
   answer.power2 = detail::power(q) + upperbit - lz - binary::minimum_exponent();
+
+  // Now, we need to round the mantissa correctly.
+
   if (answer.power2 <= 0) { // we have a subnormal or very small value.
     // Here have that answer.power2 <= 0 so -answer.power2 >= 0
     if (-answer.power2 + 1 >=
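
Not part of the patch: a minimal standalone sketch of the upperbit/shift arithmetic touched by the hunk above, assuming binary64 (i.e. binary::mantissa_explicit_bits() == 52); the two sample values are made up and stand in for product.high. Because the normalized product has its leading bit at position 63 or 62, the computed shift places the leading bit of the truncated mantissa at bit 53 in both cases, so a fixed number of low bits remains available for the rounding step that follows.

#include <cstdint>
#include <cstdio>

int main() {
  int const mantissa_explicit_bits = 52; // binary64 assumption
  // Two made-up products: one with bit 63 set, one with only bit 62 set;
  // these are the two shapes the normalized product can take.
  uint64_t const samples[] = {0xB877AA3236A4B449ULL,  // upperbit == 1
                              0x5C3BD5191B525A24ULL}; // upperbit == 0
  for (uint64_t product_high : samples) {
    uint64_t const upperbit = product_high >> 63;
    uint64_t const shift = upperbit + 64 - mantissa_explicit_bits - 3;
    uint64_t const mantissa = product_high >> shift;
    // Find the index of the leading bit of the shifted mantissa.
    int msb = -1;
    for (uint64_t m = mantissa; m != 0; m >>= 1) { ++msb; }
    // Prints "leading bit index=53" for both samples: the shift absorbs the
    // one-bit difference in where the product's leading bit sits.
    std::printf("upperbit=%llu shift=%llu leading bit index=%d\n",
                (unsigned long long)upperbit, (unsigned long long)shift, msb);
  }
  return 0;
}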