diff options
Diffstat (limited to 'lib/compiler_rt/exp2.zig')
| -rw-r--r-- | lib/compiler_rt/exp2.zig | 38 |
1 file changed, 19 insertions, 19 deletions
diff --git a/lib/compiler_rt/exp2.zig b/lib/compiler_rt/exp2.zig index 731fd7013d..acfeff0e35 100644 --- a/lib/compiler_rt/exp2.zig +++ b/lib/compiler_rt/exp2.zig @@ -27,18 +27,18 @@ comptime { pub fn __exp2h(x: f16) callconv(.C) f16 { // TODO: more efficient implementation - return @floatCast(f16, exp2f(x)); + return @as(f16, @floatCast(exp2f(x))); } pub fn exp2f(x: f32) callconv(.C) f32 { - const tblsiz = @intCast(u32, exp2ft.len); - const redux: f32 = 0x1.8p23 / @floatFromInt(f32, tblsiz); + const tblsiz = @as(u32, @intCast(exp2ft.len)); + const redux: f32 = 0x1.8p23 / @as(f32, @floatFromInt(tblsiz)); const P1: f32 = 0x1.62e430p-1; const P2: f32 = 0x1.ebfbe0p-3; const P3: f32 = 0x1.c6b348p-5; const P4: f32 = 0x1.3b2c9cp-7; - var u = @bitCast(u32, x); + var u = @as(u32, @bitCast(x)); const ix = u & 0x7FFFFFFF; // |x| > 126 @@ -72,32 +72,32 @@ pub fn exp2f(x: f32) callconv(.C) f32 { // intended result but should confirm how GCC/Clang handle this to ensure. var uf = x + redux; - var i_0 = @bitCast(u32, uf); + var i_0 = @as(u32, @bitCast(uf)); i_0 +%= tblsiz / 2; const k = i_0 / tblsiz; - const uk = @bitCast(f64, @as(u64, 0x3FF + k) << 52); + const uk = @as(f64, @bitCast(@as(u64, 0x3FF + k) << 52)); i_0 &= tblsiz - 1; uf -= redux; const z: f64 = x - uf; - var r: f64 = exp2ft[@intCast(usize, i_0)]; + var r: f64 = exp2ft[@as(usize, @intCast(i_0))]; const t: f64 = r * z; r = r + t * (P1 + z * P2) + t * (z * z) * (P3 + z * P4); - return @floatCast(f32, r * uk); + return @as(f32, @floatCast(r * uk)); } pub fn exp2(x: f64) callconv(.C) f64 { - const tblsiz: u32 = @intCast(u32, exp2dt.len / 2); - const redux: f64 = 0x1.8p52 / @floatFromInt(f64, tblsiz); + const tblsiz: u32 = @as(u32, @intCast(exp2dt.len / 2)); + const redux: f64 = 0x1.8p52 / @as(f64, @floatFromInt(tblsiz)); const P1: f64 = 0x1.62e42fefa39efp-1; const P2: f64 = 0x1.ebfbdff82c575p-3; const P3: f64 = 0x1.c6b08d704a0a6p-5; const P4: f64 = 0x1.3b2ab88f70400p-7; const P5: f64 = 0x1.5d88003875c74p-10; - const ux = @bitCast(u64, x); - const ix = @intCast(u32, ux >> 32) & 0x7FFFFFFF; + const ux = @as(u64, @bitCast(x)); + const ix = @as(u32, @intCast(ux >> 32)) & 0x7FFFFFFF; // TODO: This should be handled beneath. if (math.isNan(x)) { @@ -119,7 +119,7 @@ pub fn exp2(x: f64) callconv(.C) f64 { if (ux >> 63 != 0) { // underflow if (x <= -1075 or x - 0x1.0p52 + 0x1.0p52 != x) { - math.doNotOptimizeAway(@floatCast(f32, -0x1.0p-149 / x)); + math.doNotOptimizeAway(@as(f32, @floatCast(-0x1.0p-149 / x))); } if (x <= -1075) { return 0; @@ -139,18 +139,18 @@ pub fn exp2(x: f64) callconv(.C) f64 { // reduce x var uf: f64 = x + redux; // NOTE: musl performs an implicit 64-bit to 32-bit u32 truncation here - var i_0: u32 = @truncate(u32, @bitCast(u64, uf)); + var i_0: u32 = @as(u32, @truncate(@as(u64, @bitCast(uf)))); i_0 +%= tblsiz / 2; const k: u32 = i_0 / tblsiz * tblsiz; - const ik: i32 = @divTrunc(@bitCast(i32, k), tblsiz); + const ik: i32 = @divTrunc(@as(i32, @bitCast(k)), tblsiz); i_0 %= tblsiz; uf -= redux; // r = exp2(y) = exp2t[i_0] * p(z - eps[i]) var z: f64 = x - uf; - const t: f64 = exp2dt[@intCast(usize, 2 * i_0)]; - z -= exp2dt[@intCast(usize, 2 * i_0 + 1)]; + const t: f64 = exp2dt[@as(usize, @intCast(2 * i_0))]; + z -= exp2dt[@as(usize, @intCast(2 * i_0 + 1))]; const r: f64 = t + t * z * (P1 + z * (P2 + z * (P3 + z * (P4 + z * P5)))); return math.scalbn(r, ik); @@ -158,12 +158,12 @@ pub fn exp2(x: f64) callconv(.C) f64 { pub fn __exp2x(x: f80) callconv(.C) f80 { // TODO: more efficient implementation - return @floatCast(f80, exp2q(x)); + return @as(f80, @floatCast(exp2q(x))); } pub fn exp2q(x: f128) callconv(.C) f128 { // TODO: more correct implementation - return exp2(@floatCast(f64, x)); + return exp2(@as(f64, @floatCast(x))); } pub fn exp2l(x: c_longdouble) callconv(.C) c_longdouble { |
