path: root/lib/std/math
author    viri <hi@viri.moe>  2022-04-01 15:21:07 -0600
committer Isaac Freund <mail@isaacfreund.com>  2022-04-06 15:50:36 +0200
commit    7b7f45dc2a54aa9dfd6263b2654a5ccc8c5d2c82 (patch)
tree      f9b890968f04e421d848c69b4c7113831770fd48 /lib/std/math
parent    cb019b80ac8ae8ffd7f7dd619c4da29a83668cde (diff)
download  zig-7b7f45dc2a54aa9dfd6263b2654a5ccc8c5d2c82.tar.gz
          zig-7b7f45dc2a54aa9dfd6263b2654a5ccc8c5d2c82.zip
std.{fmt, math}: derive float constants from std
This also addresses a nit from #10133: IntT could be a confusing name because it might imply a signed integer (iX rather than uX). We settled on TBits for math/float.zig, so I've applied that rename here too. (When I originally wrote ldexp() I copied the name from parse_hex_float.)
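As a quick illustration of how the derived constants line up (a standalone sketch, not part of this commit; the f64 values below are the standard IEEE-754 ones):

const std = @import("std");
const math = std.math;

test "derived float constants" {
    // The exponent bias equals the maximum normal exponent, which is
    // why the patch can simply write `exponent_bias = exponent_max`.
    comptime std.debug.assert(math.floatExponentMax(f64) == 1023);
    // The minimum normal exponent is 1 - bias.
    comptime std.debug.assert(math.floatExponentMin(f64) == -1022);
    // TBits is the unsigned integer with the float's bit width (u64 for f64).
    comptime std.debug.assert(std.meta.Int(.unsigned, @bitSizeOf(f64)) == u64);
}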
Diffstat (limited to 'lib/std/math')
-rw-r--r--  lib/std/math/ldexp.zig | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/lib/std/math/ldexp.zig b/lib/std/math/ldexp.zig
index 228bd7dd39..f8ab237fad 100644
--- a/lib/std/math/ldexp.zig
+++ b/lib/std/math/ldexp.zig
@@ -15,22 +15,22 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) {
     var shift = n;
 
     const T = @TypeOf(base);
-    const IntT = std.meta.Int(.unsigned, @bitSizeOf(T));
+    const TBits = std.meta.Int(.unsigned, @bitSizeOf(T));
     if (@typeInfo(T) != .Float) {
         @compileError("ldexp not implemented for " ++ @typeName(T));
     }
 
     const mantissa_bits = math.floatMantissaBits(T);
-    const exponent_bits = math.floatExponentBits(T);
-    const exponent_bias = (1 << (exponent_bits - 1)) - 1;
-    const exponent_min = 1 - exponent_bias;
-    const exponent_max = exponent_bias;
+    const exponent_min = math.floatExponentMin(T);
+    const exponent_max = math.floatExponentMax(T);
+
+    const exponent_bias = exponent_max;
 
     // fix double rounding errors in subnormal ranges
     // https://git.musl-libc.org/cgit/musl/commit/src/math/ldexp.c?id=8c44a060243f04283ca68dad199aab90336141db
     const scale_min_expo = exponent_min + mantissa_bits + 1;
-    const scale_min = @bitCast(T, @as(IntT, scale_min_expo + exponent_bias) << mantissa_bits);
-    const scale_max = @bitCast(T, @intCast(IntT, exponent_max + exponent_bias) << mantissa_bits);
+    const scale_min = @bitCast(T, @as(TBits, scale_min_expo + exponent_bias) << mantissa_bits);
+    const scale_max = @bitCast(T, @intCast(TBits, exponent_max + exponent_bias) << mantissa_bits);
 
     // scale `shift` within floating point limits, if possible
     // second pass is possible due to subnormal range
@@ -53,7 +53,7 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) {
         }
     }
 
-    return base * @bitCast(T, @intCast(IntT, shift + exponent_bias) << mantissa_bits);
+    return base * @bitCast(T, @intCast(TBits, shift + exponent_bias) << mantissa_bits);
 }
 
 test "math.ldexp" {
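For reference, a minimal usage sketch of the function this patch touches (assumed behavior per the ldexp contract, x * 2^n; not taken from this commit's test block, which is truncated above):

const std = @import("std");

test "ldexp usage" {
    // ldexp(x, n) returns x * 2^n by adjusting the exponent directly.
    try std.testing.expectEqual(@as(f64, 8.0), std.math.ldexp(@as(f64, 1.0), 3));
    try std.testing.expectEqual(@as(f64, 0.25), std.math.ldexp(@as(f64, 1.0), -2));
}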