From 35bca9cb347b45ae570ebe13c4d7a9ffde5280d1 Mon Sep 17 00:00:00 2001
From: IRainman
Date: Mon, 29 Dec 2025 19:03:15 +0300
Subject: [PATCH] type usage fixes.

---
 include/fast_float/decimal_to_binary.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/include/fast_float/decimal_to_binary.h b/include/fast_float/decimal_to_binary.h
index aa7d7cc..2c30cc5 100644
--- a/include/fast_float/decimal_to_binary.h
+++ b/include/fast_float/decimal_to_binary.h
@@ -141,13 +141,16 @@ compute_float(am_pow_t q, am_mant_t w) noexcept {
   // branchless approach: value128 product = compute_product(q, w); but in
   // practice, we can win big with the compute_product_approximation if its
   // additional branch is easily predicted. Which is best is data specific.
-  auto const upperbit = product.high >> 63;
-  auto const shift = upperbit + 64 - binary::mantissa_explicit_bits() - 3;
+  am_pow_t const upperbit = product.high >> 63;
+  am_pow_t const shift = upperbit + 64 - binary::mantissa_explicit_bits() - 3;
   // Shift right the mantissa to the correct position
   answer.mantissa = product.high >> shift;
   answer.power2 = detail::power(q) + upperbit - lz - binary::minimum_exponent();
+
+  // Now, we need to round the mantissa correctly.
+
   if (answer.power2 <= 0) { // we have a subnormal or very small value.
     // Here have that answer.power2 <= 0 so -answer.power2 >= 0
     if (-answer.power2 + 1 >=
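
For reference, below is a minimal standalone sketch of the normalization step this change touches, showing the same upperbit/shift computation with a narrow signed type instead of the unsigned 64-bit type that auto would deduce from product.high. It assumes am_pow_t is a small signed integer type (modelled here as int32_t) and substitutes double's 52 explicit mantissa bits for binary::mantissa_explicit_bits(); both are assumptions, since neither definition appears in the hunk.

// Minimal sketch of the normalization step, under the assumptions above.
#include <cstdint>
#include <cstdio>

using am_pow_t = int32_t; // assumption: stand-in for the fork's am_pow_t

int main() {
  // Stand-in for product.high: the high 64 bits of the 128-bit product.
  uint64_t const product_high = 0x9FD027AB3C2D4E5FULL; // arbitrary example

  am_pow_t const mantissa_explicit_bits = 52; // double

  // upperbit is either 0 or 1, and shift stays in a narrow range, so both
  // fit in a small signed type; keeping them in am_pow_t avoids mixed
  // signed/unsigned arithmetic when upperbit later feeds into the
  // power2 computation.
  am_pow_t const upperbit = am_pow_t(product_high >> 63);
  am_pow_t const shift = upperbit + 64 - mantissa_explicit_bits - 3;

  uint64_t const mantissa = product_high >> shift;
  std::printf("upperbit=%d shift=%d mantissa=0x%llx\n", int(upperbit),
              int(shift), (unsigned long long)mantissa);
  return 0;
}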