aboutsummaryrefslogtreecommitdiff
path: root/lib/std/fmt/parse_hex_float.zig
diff options
context:
space:
mode:
authorviri <hi@viri.moe>2022-04-01 15:21:07 -0600
committerIsaac Freund <mail@isaacfreund.com>2022-04-06 15:50:36 +0200
commit7b7f45dc2a54aa9dfd6263b2654a5ccc8c5d2c82 (patch)
treef9b890968f04e421d848c69b4c7113831770fd48 /lib/std/fmt/parse_hex_float.zig
parentcb019b80ac8ae8ffd7f7dd619c4da29a83668cde (diff)
downloadzig-7b7f45dc2a54aa9dfd6263b2654a5ccc8c5d2c82.tar.gz
zig-7b7f45dc2a54aa9dfd6263b2654a5ccc8c5d2c82.zip
std.{fmt, math}: derive float constants from std
This also addresses a nit from #10133 where IntT might be a confusing name because it might imply signed integer (iX, not uX). We settled on TBits for math/float.zig so I've applied that change here too. When I originally wrote ldexp() I copied the name from parse_hex_float.
Diffstat (limited to 'lib/std/fmt/parse_hex_float.zig')
-rw-r--r--lib/std/fmt/parse_hex_float.zig17
1 file changed, 8 insertions, 9 deletions
diff --git a/lib/std/fmt/parse_hex_float.zig b/lib/std/fmt/parse_hex_float.zig
index 83c798ab96..3885e7e1a8 100644
--- a/lib/std/fmt/parse_hex_float.zig
+++ b/lib/std/fmt/parse_hex_float.zig
@@ -12,17 +12,16 @@ const assert = std.debug.assert;
pub fn parseHexFloat(comptime T: type, s: []const u8) !T {
assert(@typeInfo(T) == .Float);
- const IntT = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
+ const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const mantissa_bits = math.floatMantissaBits(T);
const exponent_bits = math.floatExponentBits(T);
+ const exponent_min = math.floatExponentMin(T);
+ const exponent_max = math.floatExponentMax(T);
+ const exponent_bias = exponent_max;
const sign_shift = mantissa_bits + exponent_bits;
- const exponent_bias = (1 << (exponent_bits - 1)) - 1;
- const exponent_min = 1 - exponent_bias;
- const exponent_max = exponent_bias;
-
if (s.len == 0)
return error.InvalidCharacter;
@@ -233,10 +232,10 @@ pub fn parseHexFloat(comptime T: type, s: []const u8) !T {
// Remove the implicit bit.
mantissa &= @as(u128, (1 << mantissa_bits) - 1);
- const raw: IntT =
- (if (negative) @as(IntT, 1) << sign_shift else 0) |
- @as(IntT, @bitCast(u16, exponent + exponent_bias)) << mantissa_bits |
- @truncate(IntT, mantissa);
+ const raw: TBits =
+ (if (negative) @as(TBits, 1) << sign_shift else 0) |
+ @as(TBits, @bitCast(u16, exponent + exponent_bias)) << mantissa_bits |
+ @truncate(TBits, mantissa);
return @bitCast(T, raw);
}