author    Andrew Kelley <andrew@ziglang.org>  2023-06-24 16:58:19 -0700
committer GitHub <noreply@github.com>  2023-06-24 16:58:19 -0700
commit    146b79af153bbd5dafda0ba12a040385c7fc58f8 (patch)
tree      67e3db8b444d65c667e314770fc983a7fc8ba293 /lib/std
parent    13853bef0df3c90633021850cc6d6abaeea03282 (diff)
parent    21ac0beb436f49fe49c6982a872f2dc48e4bea5e (diff)
download  zig-146b79af153bbd5dafda0ba12a040385c7fc58f8.tar.gz
          zig-146b79af153bbd5dafda0ba12a040385c7fc58f8.zip
Merge pull request #16163 from mlugg/feat/builtins-infer-dest-ty
Infer destination type of cast builtins using result type
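
This merge migrates the standard library to the new cast-builtin syntax: builtins such as @intCast, @truncate, @bitCast, @ptrCast, @alignCast, @enumFromInt, and @ptrFromInt no longer take the destination type as a first argument and instead infer it from the result location. A minimal sketch of the old spelling next to the two new spellings used throughout this commit (the helper function and test are illustrative, not taken from the diff):

    const std = @import("std");

    // Old (pre-change): the destination type was the first argument,
    //     const low = @truncate(u8, x);
    // New: the destination type comes from the result location, either an
    // annotated declaration or an explicit @as wrapper:
    //     const low: u8 = @truncate(x);
    //     const low = @as(u8, @truncate(x));

    fn low8(x: usize) u8 {
        // the result type u8 is inferred from the function's return type
        return @truncate(x);
    }

    test "cast builtins infer their destination type" {
        try std.testing.expectEqual(@as(u8, 0x34), low8(0x1234));
    }
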
Diffstat (limited to 'lib/std')
-rw-r--r--  lib/std/Build.zig  12
-rw-r--r--  lib/std/Build/Cache.zig  4
-rw-r--r--  lib/std/Build/Step.zig  4
-rw-r--r--  lib/std/Build/Step/CheckObject.zig  14
-rw-r--r--  lib/std/Build/Step/Compile.zig  6
-rw-r--r--  lib/std/Build/Step/Run.zig  4
-rw-r--r--  lib/std/Progress.zig  4
-rw-r--r--  lib/std/Thread.zig  42
-rw-r--r--  lib/std/Thread/Futex.zig  50
-rw-r--r--  lib/std/Thread/Mutex.zig  6
-rw-r--r--  lib/std/array_hash_map.zig  46
-rw-r--r--  lib/std/array_list.zig  12
-rw-r--r--  lib/std/atomic/Atomic.zig  20
-rw-r--r--  lib/std/atomic/queue.zig  2
-rw-r--r--  lib/std/atomic/stack.zig  2
-rw-r--r--  lib/std/base64.zig  10
-rw-r--r--  lib/std/bit_set.zig  42
-rw-r--r--  lib/std/bounded_array.zig  2
-rw-r--r--  lib/std/builtin.zig  2
-rw-r--r--  lib/std/c.zig  2
-rw-r--r--  lib/std/c/darwin.zig  68
-rw-r--r--  lib/std/c/dragonfly.zig  20
-rw-r--r--  lib/std/c/freebsd.zig  22
-rw-r--r--  lib/std/c/haiku.zig  10
-rw-r--r--  lib/std/c/linux.zig  2
-rw-r--r--  lib/std/c/netbsd.zig  16
-rw-r--r--  lib/std/c/openbsd.zig  14
-rw-r--r--  lib/std/c/solaris.zig  28
-rw-r--r--  lib/std/child_process.zig  26
-rw-r--r--  lib/std/coff.zig  32
-rw-r--r--  lib/std/compress/deflate/bits_utils.zig  2
-rw-r--r--  lib/std/compress/deflate/compressor.zig  38
-rw-r--r--  lib/std/compress/deflate/compressor_test.zig  2
-rw-r--r--  lib/std/compress/deflate/decompressor.zig  86
-rw-r--r--  lib/std/compress/deflate/deflate_fast.zig  92
-rw-r--r--  lib/std/compress/deflate/deflate_fast_test.zig  8
-rw-r--r--  lib/std/compress/deflate/dict_decoder.zig  20
-rw-r--r--  lib/std/compress/deflate/huffman_bit_writer.zig  110
-rw-r--r--  lib/std/compress/deflate/huffman_code.zig  20
-rw-r--r--  lib/std/compress/deflate/token.zig  10
-rw-r--r--  lib/std/compress/gzip.zig  2
-rw-r--r--  lib/std/compress/lzma/decode.zig  10
-rw-r--r--  lib/std/compress/lzma2/decode.zig  6
-rw-r--r--  lib/std/compress/xz.zig  2
-rw-r--r--  lib/std/compress/xz/block.zig  6
-rw-r--r--  lib/std/compress/zlib.zig  6
-rw-r--r--  lib/std/compress/zstandard/decode/block.zig  14
-rw-r--r--  lib/std/compress/zstandard/decode/fse.zig  14
-rw-r--r--  lib/std/compress/zstandard/decode/huffman.zig  10
-rw-r--r--  lib/std/compress/zstandard/decompress.zig  8
-rw-r--r--  lib/std/crypto/25519/curve25519.zig  2
-rw-r--r--  lib/std/crypto/25519/edwards25519.zig  24
-rw-r--r--  lib/std/crypto/25519/field.zig  22
-rw-r--r--  lib/std/crypto/25519/scalar.zig  74
-rw-r--r--  lib/std/crypto/Certificate.zig  22
-rw-r--r--  lib/std/crypto/Certificate/Bundle.zig  6
-rw-r--r--  lib/std/crypto/Certificate/Bundle/macos.zig  6
-rw-r--r--  lib/std/crypto/aegis.zig  2
-rw-r--r--  lib/std/crypto/aes/soft.zig  102
-rw-r--r--  lib/std/crypto/aes_ocb.zig  8
-rw-r--r--  lib/std/crypto/argon2.zig  22
-rw-r--r--  lib/std/crypto/ascon.zig  4
-rw-r--r--  lib/std/crypto/bcrypt.zig  8
-rw-r--r--  lib/std/crypto/benchmark.zig  52
-rw-r--r--  lib/std/crypto/blake2.zig  18
-rw-r--r--  lib/std/crypto/blake3.zig  14
-rw-r--r--  lib/std/crypto/chacha20.zig  8
-rw-r--r--  lib/std/crypto/ecdsa.zig  6
-rw-r--r--  lib/std/crypto/ff.zig  70
-rw-r--r--  lib/std/crypto/ghash_polyval.zig  62
-rw-r--r--  lib/std/crypto/isap.zig  2
-rw-r--r--  lib/std/crypto/keccak_p.zig  4
-rw-r--r--  lib/std/crypto/kyber_d00.zig  72
-rw-r--r--  lib/std/crypto/md5.zig  6
-rw-r--r--  lib/std/crypto/pbkdf2.zig  2
-rw-r--r--  lib/std/crypto/pcurves/common.zig  6
-rw-r--r--  lib/std/crypto/pcurves/p256.zig  20
-rw-r--r--  lib/std/crypto/pcurves/p256/p256_64.zig  72
-rw-r--r--  lib/std/crypto/pcurves/p256/p256_scalar_64.zig  72
-rw-r--r--  lib/std/crypto/pcurves/p384.zig  20
-rw-r--r--  lib/std/crypto/pcurves/p384/p384_64.zig  104
-rw-r--r--  lib/std/crypto/pcurves/p384/p384_scalar_64.zig  104
-rw-r--r--  lib/std/crypto/pcurves/secp256k1.zig  32
-rw-r--r--  lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig  72
-rw-r--r--  lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig  72
-rw-r--r--  lib/std/crypto/phc_encoding.zig  2
-rw-r--r--  lib/std/crypto/poly1305.zig  14
-rw-r--r--  lib/std/crypto/salsa20.zig  4
-rw-r--r--  lib/std/crypto/scrypt.zig  46
-rw-r--r--  lib/std/crypto/sha1.zig  6
-rw-r--r--  lib/std/crypto/sha2.zig  20
-rw-r--r--  lib/std/crypto/siphash.zig  12
-rw-r--r--  lib/std/crypto/tlcsprng.zig  6
-rw-r--r--  lib/std/crypto/tls.zig  20
-rw-r--r--  lib/std/crypto/tls/Client.zig  56
-rw-r--r--  lib/std/crypto/utils.zig  16
-rw-r--r--  lib/std/cstr.zig  4
-rw-r--r--  lib/std/debug.zig  116
-rw-r--r--  lib/std/dwarf.zig  12
-rw-r--r--  lib/std/dynamic_library.zig  42
-rw-r--r--  lib/std/elf.zig  30
-rw-r--r--  lib/std/enums.zig  30
-rw-r--r--  lib/std/event/lock.zig  6
-rw-r--r--  lib/std/event/loop.zig  12
-rw-r--r--  lib/std/event/rwlock.zig  8
-rw-r--r--  lib/std/fmt.zig  70
-rw-r--r--  lib/std/fmt/errol.zig  98
-rw-r--r--  lib/std/fmt/parse_float.zig  2
-rw-r--r--  lib/std/fmt/parse_float/common.zig  10
-rw-r--r--  lib/std/fmt/parse_float/convert_eisel_lemire.zig  16
-rw-r--r--  lib/std/fmt/parse_float/convert_fast.zig  10
-rw-r--r--  lib/std/fmt/parse_float/convert_hex.zig  2
-rw-r--r--  lib/std/fmt/parse_float/convert_slow.zig  12
-rw-r--r--  lib/std/fmt/parse_float/decimal.zig  20
-rw-r--r--  lib/std/fmt/parse_float/parse.zig  14
-rw-r--r--  lib/std/fs.zig  37
-rw-r--r--  lib/std/fs/file.zig  18
-rw-r--r--  lib/std/fs/get_app_data_dir.zig  2
-rw-r--r--  lib/std/fs/wasi.zig  4
-rw-r--r--  lib/std/fs/watch.zig  16
-rw-r--r--  lib/std/hash/adler.zig  2
-rw-r--r--  lib/std/hash/auto_hash.zig  4
-rw-r--r--  lib/std/hash/benchmark.zig  12
-rw-r--r--  lib/std/hash/cityhash.zig  26
-rw-r--r--  lib/std/hash/crc.zig  24
-rw-r--r--  lib/std/hash/murmur.zig  50
-rw-r--r--  lib/std/hash/wyhash.zig  6
-rw-r--r--  lib/std/hash/xxhash.zig  2
-rw-r--r--  lib/std/hash_map.zig  44
-rw-r--r--  lib/std/heap.zig  50
-rw-r--r--  lib/std/heap/PageAllocator.zig  13
-rw-r--r--  lib/std/heap/ThreadSafeAllocator.zig  6
-rw-r--r--  lib/std/heap/WasmAllocator.zig  20
-rw-r--r--  lib/std/heap/WasmPageAllocator.zig  12
-rw-r--r--  lib/std/heap/arena_allocator.zig  24
-rw-r--r--  lib/std/heap/general_purpose_allocator.zig  56
-rw-r--r--  lib/std/heap/log_to_writer_allocator.zig  6
-rw-r--r--  lib/std/heap/logging_allocator.zig  6
-rw-r--r--  lib/std/heap/memory_pool.zig  8
-rw-r--r--  lib/std/http/Client.zig  14
-rw-r--r--  lib/std/http/Server.zig  12
-rw-r--r--  lib/std/http/protocol.zig  48
-rw-r--r--  lib/std/io.zig  2
-rw-r--r--  lib/std/io/bit_reader.zig  22
-rw-r--r--  lib/std/io/bit_writer.zig  28
-rw-r--r--  lib/std/io/c_writer.zig  2
-rw-r--r--  lib/std/io/reader.zig  2
-rw-r--r--  lib/std/json/scanner.zig  8
-rw-r--r--  lib/std/json/static.zig  20
-rw-r--r--  lib/std/json/stringify.zig  4
-rw-r--r--  lib/std/json/write_stream.zig  6
-rw-r--r--  lib/std/leb128.zig  42
-rw-r--r--  lib/std/macho.zig  14
-rw-r--r--  lib/std/math.zig  86
-rw-r--r--  lib/std/math/acos.zig  16
-rw-r--r--  lib/std/math/acosh.zig  4
-rw-r--r--  lib/std/math/asin.zig  12
-rw-r--r--  lib/std/math/asinh.zig  8
-rw-r--r--  lib/std/math/atan.zig  10
-rw-r--r--  lib/std/math/atan2.zig  16
-rw-r--r--  lib/std/math/atanh.zig  10
-rw-r--r--  lib/std/math/big/int.zig  64
-rw-r--r--  lib/std/math/big/int_test.zig  66
-rw-r--r--  lib/std/math/big/rational.zig  22
-rw-r--r--  lib/std/math/cbrt.zig  22
-rw-r--r--  lib/std/math/complex/atan.zig  4
-rw-r--r--  lib/std/math/complex/cosh.zig  16
-rw-r--r--  lib/std/math/complex/exp.zig  16
-rw-r--r--  lib/std/math/complex/ldexp.zig  24
-rw-r--r--  lib/std/math/complex/sinh.zig  16
-rw-r--r--  lib/std/math/complex/sqrt.zig  8
-rw-r--r--  lib/std/math/complex/tanh.zig  12
-rw-r--r--  lib/std/math/copysign.zig  6
-rw-r--r--  lib/std/math/cosh.zig  10
-rw-r--r--  lib/std/math/expm1.zig  24
-rw-r--r--  lib/std/math/expo2.zig  4
-rw-r--r--  lib/std/math/float.zig  2
-rw-r--r--  lib/std/math/frexp.zig  18
-rw-r--r--  lib/std/math/hypot.zig  18
-rw-r--r--  lib/std/math/ilogb.zig  8
-rw-r--r--  lib/std/math/isfinite.zig  2
-rw-r--r--  lib/std/math/isinf.zig  2
-rw-r--r--  lib/std/math/isnormal.zig  6
-rw-r--r--  lib/std/math/ldexp.zig  30
-rw-r--r--  lib/std/math/log.zig  4
-rw-r--r--  lib/std/math/log10.zig  14
-rw-r--r--  lib/std/math/log1p.zig  24
-rw-r--r--  lib/std/math/modf.zig  28
-rw-r--r--  lib/std/math/pow.zig  4
-rw-r--r--  lib/std/math/signbit.zig  2
-rw-r--r--  lib/std/math/sinh.zig  10
-rw-r--r--  lib/std/math/sqrt.zig  2
-rw-r--r--  lib/std/math/tanh.zig  12
-rw-r--r--  lib/std/mem.zig  223
-rw-r--r--  lib/std/mem/Allocator.zig  18
-rw-r--r--  lib/std/meta.zig  18
-rw-r--r--  lib/std/meta/trailer_flags.zig  6
-rw-r--r--  lib/std/meta/trait.zig  2
-rw-r--r--  lib/std/multi_array_list.zig  33
-rw-r--r--  lib/std/net.zig  78
-rw-r--r--  lib/std/os.zig  250
-rw-r--r--  lib/std/os/linux.zig  516
-rw-r--r--  lib/std/os/linux/bpf.zig  30
-rw-r--r--  lib/std/os/linux/bpf/helpers.zig  282
-rw-r--r--  lib/std/os/linux/io_uring.zig  101
-rw-r--r--  lib/std/os/linux/ioctl.zig  2
-rw-r--r--  lib/std/os/linux/start_pie.zig  8
-rw-r--r--  lib/std/os/linux/test.zig  16
-rw-r--r--  lib/std/os/linux/tls.zig  6
-rw-r--r--  lib/std/os/linux/vdso.zig  30
-rw-r--r--  lib/std/os/plan9.zig  4
-rw-r--r--  lib/std/os/test.zig  4
-rw-r--r--  lib/std/os/uefi.zig  2
-rw-r--r--  lib/std/os/uefi/pool_allocator.zig  6
-rw-r--r--  lib/std/os/uefi/protocols/device_path_protocol.zig  26
-rw-r--r--  lib/std/os/uefi/protocols/file_protocol.zig  4
-rw-r--r--  lib/std/os/uefi/protocols/hii.zig  2
-rw-r--r--  lib/std/os/uefi/protocols/managed_network_protocol.zig  2
-rw-r--r--  lib/std/os/uefi/protocols/udp6_protocol.zig  4
-rw-r--r--  lib/std/os/uefi/tables/boot_services.zig  2
-rw-r--r--  lib/std/os/wasi.zig  6
-rw-r--r--  lib/std/os/windows.zig  166
-rw-r--r--  lib/std/os/windows/user32.zig  2
-rw-r--r--  lib/std/os/windows/ws2_32.zig  2
-rw-r--r--  lib/std/packed_int_array.zig  32
-rw-r--r--  lib/std/pdb.zig  30
-rw-r--r--  lib/std/process.zig  18
-rw-r--r--  lib/std/rand.zig  59
-rw-r--r--  lib/std/rand/Isaac64.zig  8
-rw-r--r--  lib/std/rand/Pcg.zig  10
-rw-r--r--  lib/std/rand/RomuTrio.zig  8
-rw-r--r--  lib/std/rand/Sfc64.zig  4
-rw-r--r--  lib/std/rand/Xoroshiro128.zig  6
-rw-r--r--  lib/std/rand/Xoshiro256.zig  10
-rw-r--r--  lib/std/rand/benchmark.zig  4
-rw-r--r--  lib/std/rand/test.zig  16
-rw-r--r--  lib/std/rand/ziggurat.zig  6
-rw-r--r--  lib/std/segmented_list.zig  16
-rw-r--r--  lib/std/simd.zig  24
-rw-r--r--  lib/std/sort/pdq.zig  4
-rw-r--r--  lib/std/start.zig  24
-rw-r--r--  lib/std/start_windows_tls.zig  2
-rw-r--r--  lib/std/tar.zig  14
-rw-r--r--  lib/std/target.zig  18
-rw-r--r--  lib/std/testing/failing_allocator.zig  6
-rw-r--r--  lib/std/time.zig  16
-rw-r--r--  lib/std/time/epoch.zig  12
-rw-r--r--  lib/std/tz.zig  4
-rw-r--r--  lib/std/unicode.zig  32
-rw-r--r--  lib/std/unicode/throughput_test.zig  4
-rw-r--r--  lib/std/valgrind.zig  2
-rw-r--r--  lib/std/valgrind/callgrind.zig  2
-rw-r--r--  lib/std/valgrind/memcheck.zig  22
-rw-r--r--  lib/std/zig.zig  2
-rw-r--r--  lib/std/zig/Ast.zig  10
-rw-r--r--  lib/std/zig/CrossTarget.zig  2
-rw-r--r--  lib/std/zig/ErrorBundle.zig  34
-rw-r--r--  lib/std/zig/Parse.zig  30
-rw-r--r--  lib/std/zig/Server.zig  28
-rw-r--r--  lib/std/zig/c_builtins.zig  20
-rw-r--r--  lib/std/zig/c_translation.zig  65
-rw-r--r--  lib/std/zig/number_literal.zig  6
-rw-r--r--  lib/std/zig/parser_test.zig  20
-rw-r--r--  lib/std/zig/perf_test.zig  6
-rw-r--r--  lib/std/zig/render.zig  62
-rw-r--r--  lib/std/zig/string_literal.zig  4
-rw-r--r--  lib/std/zig/system/NativeTargetInfo.zig  56
-rw-r--r--  lib/std/zig/system/arm.zig  14
-rw-r--r--  lib/std/zig/system/windows.zig  40
-rw-r--r--  lib/std/zig/tokenizer.zig  2
270 files changed, 3453 insertions, 3444 deletions
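
The diff below also rewrites pointer casts in the same spirit: where the old code spelled @ptrCast(*T, @alignCast(@alignOf(T), p)), the new code annotates the destination and lets both casts infer it, as in @ptrCast(@alignCast(p)). A minimal sketch of that pattern (the buffer and test are illustrative, not part of the commit):

    const std = @import("std");

    test "pointer casts infer their destination from the annotation" {
        var buf: [8]u8 align(@alignOf(u32)) = undefined;
        // old: const p = @ptrCast(*u32, @alignCast(@alignOf(u32), &buf));
        const p: *u32 = @ptrCast(@alignCast(&buf));
        p.* = 0xdeadbeef;
        try std.testing.expectEqual(@as(u32, 0xdeadbeef), p.*);
    }
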
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index c569e0074a..a411ddc500 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -1111,7 +1111,7 @@ pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) Cros
var populated_cpu_features = whitelist_cpu.model.features;
populated_cpu_features.populateDependencies(all_features);
for (all_features, 0..) |feature, i_usize| {
- const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
+ const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize));
const in_cpu_set = populated_cpu_features.isEnabled(i);
if (in_cpu_set) {
log.err("{s} ", .{feature.name});
@@ -1119,7 +1119,7 @@ pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) Cros
}
log.err(" Remove: ", .{});
for (all_features, 0..) |feature, i_usize| {
- const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
+ const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize));
const in_cpu_set = populated_cpu_features.isEnabled(i);
const in_actual_set = selected_cpu.features.isEnabled(i);
if (in_actual_set and !in_cpu_set) {
@@ -1442,13 +1442,13 @@ pub fn execAllowFail(
switch (term) {
.Exited => |code| {
if (code != 0) {
- out_code.* = @truncate(u8, code);
+ out_code.* = @as(u8, @truncate(code));
return error.ExitCodeFailure;
}
return stdout;
},
.Signal, .Stopped, .Unknown => |code| {
- out_code.* = @truncate(u8, code);
+ out_code.* = @as(u8, @truncate(code));
return error.ProcessTerminated;
},
}
@@ -1815,7 +1815,7 @@ pub fn serializeCpu(allocator: Allocator, cpu: std.Target.Cpu) ![]const u8 {
try mcpu_buffer.appendSlice(cpu.model.name);
for (all_features, 0..) |feature, i_usize| {
- const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
+ const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize));
const in_cpu_set = populated_cpu_features.isEnabled(i);
const in_actual_set = cpu.features.isEnabled(i);
if (in_cpu_set and !in_actual_set) {
@@ -1852,7 +1852,7 @@ pub fn hex64(x: u64) [16]u8 {
var result: [16]u8 = undefined;
var i: usize = 0;
while (i < 8) : (i += 1) {
- const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
+ const byte = @as(u8, @truncate(x >> @as(u6, @intCast(8 * i))));
result[i * 2 + 0] = hex_charset[byte >> 4];
result[i * 2 + 1] = hex_charset[byte & 15];
}
diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig
index 3b7f180ae8..b0db88692c 100644
--- a/lib/std/Build/Cache.zig
+++ b/lib/std/Build/Cache.zig
@@ -128,7 +128,7 @@ fn findPrefixResolved(cache: *const Cache, resolved_path: []u8) !PrefixedPath {
const sub_path = try gpa.dupe(u8, resolved_path[p.len + 1 ..]);
gpa.free(resolved_path);
return PrefixedPath{
- .prefix = @intCast(u8, i),
+ .prefix = @as(u8, @intCast(i)),
.sub_path = sub_path,
};
}
@@ -653,7 +653,7 @@ pub const Manifest = struct {
return error.FileTooBig;
}
- const contents = try self.cache.gpa.alloc(u8, @intCast(usize, ch_file.stat.size));
+ const contents = try self.cache.gpa.alloc(u8, @as(usize, @intCast(ch_file.stat.size)));
errdefer self.cache.gpa.free(contents);
// Hash while reading from disk, to keep the contents in the cpu cache while
diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index a0d7a6a296..f21ef8bc8f 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -355,7 +355,7 @@ pub fn evalZigProcess(
},
.error_bundle => {
const EbHdr = std.zig.Server.Message.ErrorBundle;
- const eb_hdr = @ptrCast(*align(1) const EbHdr, body);
+ const eb_hdr = @as(*align(1) const EbHdr, @ptrCast(body));
const extra_bytes =
body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len];
const string_bytes =
@@ -377,7 +377,7 @@ pub fn evalZigProcess(
},
.emit_bin_path => {
const EbpHdr = std.zig.Server.Message.EmitBinPath;
- const ebp_hdr = @ptrCast(*align(1) const EbpHdr, body);
+ const ebp_hdr = @as(*align(1) const EbpHdr, @ptrCast(body));
s.result_cached = ebp_hdr.flags.cache_hit;
result = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]);
},
diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig
index 1c2d86e4e3..171734c450 100644
--- a/lib/std/Build/Step/CheckObject.zig
+++ b/lib/std/Build/Step/CheckObject.zig
@@ -449,9 +449,9 @@ const MachODumper = struct {
},
.SYMTAB => if (opts.dump_symtab) {
const lc = cmd.cast(macho.symtab_command).?;
- symtab = @ptrCast(
+ symtab = @as(
[*]const macho.nlist_64,
- @alignCast(@alignOf(macho.nlist_64), &bytes[lc.symoff]),
+ @ptrCast(@alignCast(&bytes[lc.symoff])),
)[0..lc.nsyms];
strtab = bytes[lc.stroff..][0..lc.strsize];
},
@@ -474,7 +474,7 @@ const MachODumper = struct {
try writer.print("{s}\n", .{symtab_label});
for (symtab) |sym| {
if (sym.stab()) continue;
- const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + sym.n_strx), 0);
+ const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + sym.n_strx)), 0);
if (sym.sect()) {
const sect = sections.items[sym.n_sect - 1];
try writer.print("{x} ({s},{s})", .{
@@ -487,7 +487,7 @@ const MachODumper = struct {
}
try writer.print(" {s}\n", .{sym_name});
} else if (sym.undf()) {
- const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER);
+ const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER);
const import_name = blk: {
if (ordinal <= 0) {
if (ordinal == macho.BIND_SPECIAL_DYLIB_SELF)
@@ -498,7 +498,7 @@ const MachODumper = struct {
break :blk "flat lookup";
unreachable;
}
- const full_path = imports.items[@bitCast(u16, ordinal) - 1];
+ const full_path = imports.items[@as(u16, @bitCast(ordinal)) - 1];
const basename = fs.path.basename(full_path);
assert(basename.len > 0);
const ext = mem.lastIndexOfScalar(u8, basename, '.') orelse basename.len;
@@ -950,8 +950,8 @@ const WasmDumper = struct {
switch (opcode) {
.i32_const => try writer.print("i32.const {x}\n", .{try std.leb.readILEB128(i32, reader)}),
.i64_const => try writer.print("i64.const {x}\n", .{try std.leb.readILEB128(i64, reader)}),
- .f32_const => try writer.print("f32.const {x}\n", .{@bitCast(f32, try reader.readIntLittle(u32))}),
- .f64_const => try writer.print("f64.const {x}\n", .{@bitCast(f64, try reader.readIntLittle(u64))}),
+ .f32_const => try writer.print("f32.const {x}\n", .{@as(f32, @bitCast(try reader.readIntLittle(u32)))}),
+ .f64_const => try writer.print("f64.const {x}\n", .{@as(f64, @bitCast(try reader.readIntLittle(u64)))}),
.global_get => try writer.print("global.get {x}\n", .{try std.leb.readULEB128(u32, reader)}),
else => unreachable,
}
diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig
index 89576c15fa..58973d08d0 100644
--- a/lib/std/Build/Step/Compile.zig
+++ b/lib/std/Build/Step/Compile.zig
@@ -321,7 +321,7 @@ pub const BuildId = union(enum) {
pub fn initHexString(bytes: []const u8) BuildId {
var result: BuildId = .{ .hexstring = .{
.bytes = undefined,
- .len = @intCast(u8, bytes.len),
+ .len = @as(u8, @intCast(bytes.len)),
} };
@memcpy(result.hexstring.bytes[0..bytes.len], bytes);
return result;
@@ -342,7 +342,7 @@ pub const BuildId = union(enum) {
} else if (mem.startsWith(u8, text, "0x")) {
var result: BuildId = .{ .hexstring = undefined };
const slice = try std.fmt.hexToBytes(&result.hexstring.bytes, text[2..]);
- result.hexstring.len = @intCast(u8, slice.len);
+ result.hexstring.len = @as(u8, @intCast(slice.len));
return result;
}
return error.InvalidBuildIdStyle;
@@ -2059,7 +2059,7 @@ fn findVcpkgRoot(allocator: Allocator) !?[]const u8 {
const file = fs.cwd().openFile(path_file, .{}) catch return null;
defer file.close();
- const size = @intCast(usize, try file.getEndPos());
+ const size = @as(usize, @intCast(try file.getEndPos()));
const vcpkg_path = try allocator.alloc(u8, size);
const size_read = try file.read(vcpkg_path);
std.debug.assert(size == size_read);
diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig
index c574dbb5af..3d81873308 100644
--- a/lib/std/Build/Step/Run.zig
+++ b/lib/std/Build/Step/Run.zig
@@ -998,7 +998,7 @@ fn evalZigTest(
},
.test_metadata => {
const TmHdr = std.zig.Server.Message.TestMetadata;
- const tm_hdr = @ptrCast(*align(1) const TmHdr, body);
+ const tm_hdr = @as(*align(1) const TmHdr, @ptrCast(body));
test_count = tm_hdr.tests_len;
const names_bytes = body[@sizeOf(TmHdr)..][0 .. test_count * @sizeOf(u32)];
@@ -1034,7 +1034,7 @@ fn evalZigTest(
const md = metadata.?;
const TrHdr = std.zig.Server.Message.TestResults;
- const tr_hdr = @ptrCast(*align(1) const TrHdr, body);
+ const tr_hdr = @as(*align(1) const TrHdr, @ptrCast(body));
fail_count += @intFromBool(tr_hdr.flags.fail);
skip_count += @intFromBool(tr_hdr.flags.skip);
leak_count += @intFromBool(tr_hdr.flags.leak);
diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig
index e3c5fc20dd..e0bb28569d 100644
--- a/lib/std/Progress.zig
+++ b/lib/std/Progress.zig
@@ -232,14 +232,14 @@ fn clearWithHeldLock(p: *Progress, end_ptr: *usize) void {
}
var cursor_pos = windows.COORD{
- .X = info.dwCursorPosition.X - @intCast(windows.SHORT, p.columns_written),
+ .X = info.dwCursorPosition.X - @as(windows.SHORT, @intCast(p.columns_written)),
.Y = info.dwCursorPosition.Y,
};
if (cursor_pos.X < 0)
cursor_pos.X = 0;
- const fill_chars = @intCast(windows.DWORD, info.dwSize.X - cursor_pos.X);
+ const fill_chars = @as(windows.DWORD, @intCast(info.dwSize.X - cursor_pos.X));
var written: windows.DWORD = undefined;
if (windows.kernel32.FillConsoleOutputAttribute(
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index f16f8a9a79..a3b469ad6f 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -66,7 +66,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
if (self.getHandle() == std.c.pthread_self()) {
// Set the name of the calling thread (no thread id required).
const err = try os.prctl(.SET_NAME, .{@intFromPtr(name_with_terminator.ptr)});
- switch (@enumFromInt(os.E, err)) {
+ switch (@as(os.E, @enumFromInt(err))) {
.SUCCESS => return,
else => |e| return os.unexpectedErrno(e),
}
@@ -176,7 +176,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
if (self.getHandle() == std.c.pthread_self()) {
// Get the name of the calling thread (no thread id required).
const err = try os.prctl(.GET_NAME, .{@intFromPtr(buffer.ptr)});
- switch (@enumFromInt(os.E, err)) {
+ switch (@as(os.E, @enumFromInt(err))) {
.SUCCESS => return std.mem.sliceTo(buffer, 0),
else => |e| return os.unexpectedErrno(e),
}
@@ -211,7 +211,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
null,
)) {
.SUCCESS => {
- const string = @ptrCast(*const os.windows.UNICODE_STRING, &buf);
+ const string = @as(*const os.windows.UNICODE_STRING, @ptrCast(&buf));
const len = try std.unicode.utf16leToUtf8(buffer, string.Buffer[0 .. string.Length / 2]);
return if (len > 0) buffer[0..len] else null;
},
@@ -510,7 +510,7 @@ const WindowsThreadImpl = struct {
thread: ThreadCompletion,
fn entryFn(raw_ptr: windows.PVOID) callconv(.C) windows.DWORD {
- const self = @ptrCast(*@This(), @alignCast(@alignOf(@This()), raw_ptr));
+ const self: *@This() = @ptrCast(@alignCast(raw_ptr));
defer switch (self.thread.completion.swap(.completed, .SeqCst)) {
.running => {},
.completed => unreachable,
@@ -525,7 +525,7 @@ const WindowsThreadImpl = struct {
const alloc_ptr = windows.kernel32.HeapAlloc(heap_handle, 0, alloc_bytes) orelse return error.OutOfMemory;
errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0);
- const instance_bytes = @ptrCast([*]u8, alloc_ptr)[0..alloc_bytes];
+ const instance_bytes = @as([*]u8, @ptrCast(alloc_ptr))[0..alloc_bytes];
var fba = std.heap.FixedBufferAllocator.init(instance_bytes);
const instance = fba.allocator().create(Instance) catch unreachable;
instance.* = .{
@@ -547,7 +547,7 @@ const WindowsThreadImpl = struct {
null,
stack_size,
Instance.entryFn,
- @ptrCast(*anyopaque, instance),
+ @as(*anyopaque, @ptrCast(instance)),
0,
null,
) orelse {
@@ -596,19 +596,19 @@ const PosixThreadImpl = struct {
return thread_id;
},
.dragonfly => {
- return @bitCast(u32, c.lwp_gettid());
+ return @as(u32, @bitCast(c.lwp_gettid()));
},
.netbsd => {
- return @bitCast(u32, c._lwp_self());
+ return @as(u32, @bitCast(c._lwp_self()));
},
.freebsd => {
- return @bitCast(u32, c.pthread_getthreadid_np());
+ return @as(u32, @bitCast(c.pthread_getthreadid_np()));
},
.openbsd => {
- return @bitCast(u32, c.getthrid());
+ return @as(u32, @bitCast(c.getthrid()));
},
.haiku => {
- return @bitCast(u32, c.find_thread(null));
+ return @as(u32, @bitCast(c.find_thread(null)));
},
else => {
return @intFromPtr(c.pthread_self());
@@ -629,7 +629,7 @@ const PosixThreadImpl = struct {
error.NameTooLong, error.UnknownName => unreachable,
else => |e| return e,
};
- return @intCast(usize, count);
+ return @as(usize, @intCast(count));
},
.solaris => {
// The "proper" way to get the cpu count would be to query
@@ -637,7 +637,7 @@ const PosixThreadImpl = struct {
// cpu.
const rc = c.sysconf(os._SC.NPROCESSORS_ONLN);
return switch (os.errno(rc)) {
- .SUCCESS => @intCast(usize, rc),
+ .SUCCESS => @as(usize, @intCast(rc)),
else => |err| os.unexpectedErrno(err),
};
},
@@ -645,7 +645,7 @@ const PosixThreadImpl = struct {
var system_info: os.system.system_info = undefined;
const rc = os.system.get_system_info(&system_info); // always returns B_OK
return switch (os.errno(rc)) {
- .SUCCESS => @intCast(usize, system_info.cpu_count),
+ .SUCCESS => @as(usize, @intCast(system_info.cpu_count)),
else => |err| os.unexpectedErrno(err),
};
},
@@ -657,7 +657,7 @@ const PosixThreadImpl = struct {
error.NameTooLong, error.UnknownName => unreachable,
else => |e| return e,
};
- return @intCast(usize, count);
+ return @as(usize, @intCast(count));
},
}
}
@@ -675,7 +675,7 @@ const PosixThreadImpl = struct {
return callFn(f, @as(Args, undefined));
}
- const args_ptr = @ptrCast(*Args, @alignCast(@alignOf(Args), raw_arg));
+ const args_ptr: *Args = @ptrCast(@alignCast(raw_arg));
defer allocator.destroy(args_ptr);
return callFn(f, args_ptr.*);
}
@@ -699,7 +699,7 @@ const PosixThreadImpl = struct {
&handle,
&attr,
Instance.entryFn,
- if (@sizeOf(Args) > 1) @ptrCast(*anyopaque, args_ptr) else undefined,
+ if (@sizeOf(Args) > 1) @as(*anyopaque, @ptrCast(args_ptr)) else undefined,
)) {
.SUCCESS => return Impl{ .handle = handle },
.AGAIN => return error.SystemResources,
@@ -742,7 +742,7 @@ const LinuxThreadImpl = struct {
fn getCurrentId() Id {
return tls_thread_id orelse {
- const tid = @bitCast(u32, linux.gettid());
+ const tid = @as(u32, @bitCast(linux.gettid()));
tls_thread_id = tid;
return tid;
};
@@ -911,7 +911,7 @@ const LinuxThreadImpl = struct {
thread: ThreadCompletion,
fn entryFn(raw_arg: usize) callconv(.C) u8 {
- const self = @ptrFromInt(*@This(), raw_arg);
+ const self = @as(*@This(), @ptrFromInt(raw_arg));
defer switch (self.thread.completion.swap(.completed, .SeqCst)) {
.running => {},
.completed => unreachable,
@@ -969,7 +969,7 @@ const LinuxThreadImpl = struct {
// map everything but the guard page as read/write
os.mprotect(
- @alignCast(page_size, mapped[guard_offset..]),
+ @alignCast(mapped[guard_offset..]),
os.PROT.READ | os.PROT.WRITE,
) catch |err| switch (err) {
error.AccessDenied => unreachable,
@@ -994,7 +994,7 @@ const LinuxThreadImpl = struct {
};
}
- const instance = @ptrCast(*Instance, @alignCast(@alignOf(Instance), &mapped[instance_offset]));
+ const instance: *Instance = @ptrCast(@alignCast(&mapped[instance_offset]));
instance.* = .{
.fn_args = args,
.thread = .{ .mapped = mapped },
diff --git a/lib/std/Thread/Futex.zig b/lib/std/Thread/Futex.zig
index 61e39eba27..768442539b 100644
--- a/lib/std/Thread/Futex.zig
+++ b/lib/std/Thread/Futex.zig
@@ -128,14 +128,14 @@ const WindowsImpl = struct {
// NTDLL functions work with time in units of 100 nanoseconds.
// Positive values are absolute deadlines while negative values are relative durations.
if (timeout) |delay| {
- timeout_value = @intCast(os.windows.LARGE_INTEGER, delay / 100);
+ timeout_value = @as(os.windows.LARGE_INTEGER, @intCast(delay / 100));
timeout_value = -timeout_value;
timeout_ptr = &timeout_value;
}
const rc = os.windows.ntdll.RtlWaitOnAddress(
- @ptrCast(?*const anyopaque, ptr),
- @ptrCast(?*const anyopaque, &expect),
+ @as(?*const anyopaque, @ptrCast(ptr)),
+ @as(?*const anyopaque, @ptrCast(&expect)),
@sizeOf(@TypeOf(expect)),
timeout_ptr,
);
@@ -151,7 +151,7 @@ const WindowsImpl = struct {
}
fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
- const address = @ptrCast(?*const anyopaque, ptr);
+ const address = @as(?*const anyopaque, @ptrCast(ptr));
assert(max_waiters != 0);
switch (max_waiters) {
@@ -186,7 +186,7 @@ const DarwinImpl = struct {
// true so that we we know to ignore the ETIMEDOUT result.
var timeout_overflowed = false;
- const addr = @ptrCast(*const anyopaque, ptr);
+ const addr = @as(*const anyopaque, @ptrCast(ptr));
const flags = os.darwin.UL_COMPARE_AND_WAIT | os.darwin.ULF_NO_ERRNO;
const status = blk: {
if (supports_ulock_wait2) {
@@ -202,7 +202,7 @@ const DarwinImpl = struct {
};
if (status >= 0) return;
- switch (@enumFromInt(std.os.E, -status)) {
+ switch (@as(std.os.E, @enumFromInt(-status))) {
// Wait was interrupted by the OS or other spurious signalling.
.INTR => {},
// Address of the futex was paged out. This is unlikely, but possible in theory, and
@@ -225,11 +225,11 @@ const DarwinImpl = struct {
}
while (true) {
- const addr = @ptrCast(*const anyopaque, ptr);
+ const addr = @as(*const anyopaque, @ptrCast(ptr));
const status = os.darwin.__ulock_wake(flags, addr, 0);
if (status >= 0) return;
- switch (@enumFromInt(std.os.E, -status)) {
+ switch (@as(std.os.E, @enumFromInt(-status))) {
.INTR => continue, // spurious wake()
.FAULT => unreachable, // __ulock_wake doesn't generate EFAULT according to darwin pthread_cond_t
.NOENT => return, // nothing was woken up
@@ -245,14 +245,14 @@ const LinuxImpl = struct {
fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
var ts: os.timespec = undefined;
if (timeout) |timeout_ns| {
- ts.tv_sec = @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s);
- ts.tv_nsec = @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s);
+ ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
+ ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
}
const rc = os.linux.futex_wait(
- @ptrCast(*const i32, &ptr.value),
+ @as(*const i32, @ptrCast(&ptr.value)),
os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAIT,
- @bitCast(i32, expect),
+ @as(i32, @bitCast(expect)),
if (timeout != null) &ts else null,
);
@@ -272,7 +272,7 @@ const LinuxImpl = struct {
fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
const rc = os.linux.futex_wake(
- @ptrCast(*const i32, &ptr.value),
+ @as(*const i32, @ptrCast(&ptr.value)),
os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAKE,
std.math.cast(i32, max_waiters) orelse std.math.maxInt(i32),
);
@@ -299,8 +299,8 @@ const FreebsdImpl = struct {
tm._flags = 0; // use relative time not UMTX_ABSTIME
tm._clockid = os.CLOCK.MONOTONIC;
- tm._timeout.tv_sec = @intCast(@TypeOf(tm._timeout.tv_sec), timeout_ns / std.time.ns_per_s);
- tm._timeout.tv_nsec = @intCast(@TypeOf(tm._timeout.tv_nsec), timeout_ns % std.time.ns_per_s);
+ tm._timeout.tv_sec = @as(@TypeOf(tm._timeout.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
+ tm._timeout.tv_nsec = @as(@TypeOf(tm._timeout.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
}
const rc = os.freebsd._umtx_op(
@@ -347,14 +347,14 @@ const OpenbsdImpl = struct {
fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
var ts: os.timespec = undefined;
if (timeout) |timeout_ns| {
- ts.tv_sec = @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s);
- ts.tv_nsec = @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s);
+ ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
+ ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
}
const rc = os.openbsd.futex(
- @ptrCast(*const volatile u32, &ptr.value),
+ @as(*const volatile u32, @ptrCast(&ptr.value)),
os.openbsd.FUTEX_WAIT | os.openbsd.FUTEX_PRIVATE_FLAG,
- @bitCast(c_int, expect),
+ @as(c_int, @bitCast(expect)),
if (timeout != null) &ts else null,
null, // FUTEX_WAIT takes no requeue address
);
@@ -377,7 +377,7 @@ const OpenbsdImpl = struct {
fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
const rc = os.openbsd.futex(
- @ptrCast(*const volatile u32, &ptr.value),
+ @as(*const volatile u32, @ptrCast(&ptr.value)),
os.openbsd.FUTEX_WAKE | os.openbsd.FUTEX_PRIVATE_FLAG,
std.math.cast(c_int, max_waiters) orelse std.math.maxInt(c_int),
null, // FUTEX_WAKE takes no timeout ptr
@@ -411,8 +411,8 @@ const DragonflyImpl = struct {
}
}
- const value = @bitCast(c_int, expect);
- const addr = @ptrCast(*const volatile c_int, &ptr.value);
+ const value = @as(c_int, @bitCast(expect));
+ const addr = @as(*const volatile c_int, @ptrCast(&ptr.value));
const rc = os.dragonfly.umtx_sleep(addr, value, timeout_us);
switch (os.errno(rc)) {
@@ -441,7 +441,7 @@ const DragonflyImpl = struct {
// https://man.dragonflybsd.org/?command=umtx&section=2
// > umtx_wakeup() will generally return 0 unless the address is bad.
// We are fine with the address being bad (e.g. for Semaphore.post() where Semaphore.wait() frees the Semaphore)
- const addr = @ptrCast(*const volatile c_int, &ptr.value);
+ const addr = @as(*const volatile c_int, @ptrCast(&ptr.value));
_ = os.dragonfly.umtx_wakeup(addr, to_wake);
}
};
@@ -488,8 +488,8 @@ const PosixImpl = struct {
var ts: os.timespec = undefined;
if (timeout) |timeout_ns| {
os.clock_gettime(os.CLOCK.REALTIME, &ts) catch unreachable;
- ts.tv_sec +|= @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s);
- ts.tv_nsec += @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s);
+ ts.tv_sec +|= @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
+ ts.tv_nsec += @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
if (ts.tv_nsec >= std.time.ns_per_s) {
ts.tv_sec +|= 1;
diff --git a/lib/std/Thread/Mutex.zig b/lib/std/Thread/Mutex.zig
index 9114caaa12..0f618516b5 100644
--- a/lib/std/Thread/Mutex.zig
+++ b/lib/std/Thread/Mutex.zig
@@ -242,12 +242,12 @@ const NonAtomicCounter = struct {
value: [2]u64 = [_]u64{ 0, 0 },
fn get(self: NonAtomicCounter) u128 {
- return @bitCast(u128, self.value);
+ return @as(u128, @bitCast(self.value));
}
fn inc(self: *NonAtomicCounter) void {
- for (@bitCast([2]u64, self.get() + 1), 0..) |v, i| {
- @ptrCast(*volatile u64, &self.value[i]).* = v;
+ for (@as([2]u64, @bitCast(self.get() + 1)), 0..) |v, i| {
+ @as(*volatile u64, @ptrCast(&self.value[i])).* = v;
}
}
};
diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index d3ad94324e..df4c95cbca 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -49,7 +49,7 @@ pub fn eqlString(a: []const u8, b: []const u8) bool {
}
pub fn hashString(s: []const u8) u32 {
- return @truncate(u32, std.hash.Wyhash.hash(0, s));
+ return @as(u32, @truncate(std.hash.Wyhash.hash(0, s)));
}
/// Insertion order is preserved.
@@ -617,7 +617,7 @@ pub fn ArrayHashMapUnmanaged(
return .{
.keys = slice.items(.key).ptr,
.values = slice.items(.value).ptr,
- .len = @intCast(u32, slice.len),
+ .len = @as(u32, @intCast(slice.len)),
};
}
pub const Iterator = struct {
@@ -1409,7 +1409,7 @@ pub fn ArrayHashMapUnmanaged(
indexes: []Index(I),
) void {
const slot = self.getSlotByIndex(old_entry_index, ctx, header, I, indexes);
- indexes[slot].entry_index = @intCast(I, new_entry_index);
+ indexes[slot].entry_index = @as(I, @intCast(new_entry_index));
}
fn removeFromIndexByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader) void {
@@ -1508,7 +1508,7 @@ pub fn ArrayHashMapUnmanaged(
const new_index = self.entries.addOneAssumeCapacity();
indexes[slot] = .{
.distance_from_start_index = distance_from_start_index,
- .entry_index = @intCast(I, new_index),
+ .entry_index = @as(I, @intCast(new_index)),
};
// update the hash if applicable
@@ -1549,7 +1549,7 @@ pub fn ArrayHashMapUnmanaged(
const new_index = self.entries.addOneAssumeCapacity();
if (store_hash) hashes_array.ptr[new_index] = h;
indexes[slot] = .{
- .entry_index = @intCast(I, new_index),
+ .entry_index = @as(I, @intCast(new_index)),
.distance_from_start_index = distance_from_start_index,
};
distance_from_start_index = slot_data.distance_from_start_index;
@@ -1639,7 +1639,7 @@ pub fn ArrayHashMapUnmanaged(
const start_index = safeTruncate(usize, h);
const end_index = start_index +% indexes.len;
var index = start_index;
- var entry_index = @intCast(I, i);
+ var entry_index = @as(I, @intCast(i));
var distance_from_start_index: I = 0;
while (index != end_index) : ({
index +%= 1;
@@ -1776,7 +1776,7 @@ fn capacityIndexSize(bit_index: u8) usize {
fn safeTruncate(comptime T: type, val: anytype) T {
if (@bitSizeOf(T) >= @bitSizeOf(@TypeOf(val)))
return val;
- return @truncate(T, val);
+ return @as(T, @truncate(val));
}
/// A single entry in the lookup acceleration structure. These structs
@@ -1852,13 +1852,13 @@ const IndexHeader = struct {
fn constrainIndex(header: IndexHeader, i: usize) usize {
// This is an optimization for modulo of power of two integers;
// it requires `indexes_len` to always be a power of two.
- return @intCast(usize, i & header.mask());
+ return @as(usize, @intCast(i & header.mask()));
}
/// Returns the attached array of indexes. I must match the type
/// returned by capacityIndexType.
fn indexes(header: *IndexHeader, comptime I: type) []Index(I) {
- const start_ptr = @ptrCast([*]Index(I), @ptrCast([*]u8, header) + @sizeOf(IndexHeader));
+ const start_ptr: [*]Index(I) = @alignCast(@ptrCast(@as([*]u8, @ptrCast(header)) + @sizeOf(IndexHeader)));
return start_ptr[0..header.length()];
}
@@ -1871,15 +1871,15 @@ const IndexHeader = struct {
return index_capacities[self.bit_index];
}
fn length(self: IndexHeader) usize {
- return @as(usize, 1) << @intCast(math.Log2Int(usize), self.bit_index);
+ return @as(usize, 1) << @as(math.Log2Int(usize), @intCast(self.bit_index));
}
fn mask(self: IndexHeader) u32 {
- return @intCast(u32, self.length() - 1);
+ return @as(u32, @intCast(self.length() - 1));
}
fn findBitIndex(desired_capacity: usize) !u8 {
if (desired_capacity > max_capacity) return error.OutOfMemory;
- var new_bit_index = @intCast(u8, std.math.log2_int_ceil(usize, desired_capacity));
+ var new_bit_index = @as(u8, @intCast(std.math.log2_int_ceil(usize, desired_capacity)));
if (desired_capacity > index_capacities[new_bit_index]) new_bit_index += 1;
if (new_bit_index < min_bit_index) new_bit_index = min_bit_index;
assert(desired_capacity <= index_capacities[new_bit_index]);
@@ -1889,12 +1889,12 @@ const IndexHeader = struct {
/// Allocates an index header, and fills the entryIndexes array with empty.
/// The distance array contents are undefined.
fn alloc(allocator: Allocator, new_bit_index: u8) !*IndexHeader {
- const len = @as(usize, 1) << @intCast(math.Log2Int(usize), new_bit_index);
+ const len = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(new_bit_index));
const index_size = hash_map.capacityIndexSize(new_bit_index);
const nbytes = @sizeOf(IndexHeader) + index_size * len;
const bytes = try allocator.alignedAlloc(u8, @alignOf(IndexHeader), nbytes);
@memset(bytes[@sizeOf(IndexHeader)..], 0xff);
- const result = @ptrCast(*IndexHeader, bytes.ptr);
+ const result: *IndexHeader = @alignCast(@ptrCast(bytes.ptr));
result.* = .{
.bit_index = new_bit_index,
};
@@ -1904,7 +1904,7 @@ const IndexHeader = struct {
/// Releases the memory for a header and its associated arrays.
fn free(header: *IndexHeader, allocator: Allocator) void {
const index_size = hash_map.capacityIndexSize(header.bit_index);
- const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header);
+ const ptr: [*]align(@alignOf(IndexHeader)) u8 = @ptrCast(header);
const slice = ptr[0 .. @sizeOf(IndexHeader) + header.length() * index_size];
allocator.free(slice);
}
@@ -1912,7 +1912,7 @@ const IndexHeader = struct {
/// Puts an IndexHeader into the state that it would be in after being freshly allocated.
fn reset(header: *IndexHeader) void {
const index_size = hash_map.capacityIndexSize(header.bit_index);
- const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header);
+ const ptr: [*]align(@alignOf(IndexHeader)) u8 = @ptrCast(header);
const nbytes = @sizeOf(IndexHeader) + header.length() * index_size;
@memset(ptr[@sizeOf(IndexHeader)..nbytes], 0xff);
}
@@ -2020,25 +2020,25 @@ test "iterator hash map" {
var count: usize = 0;
while (it.next()) |entry| : (count += 1) {
- buffer[@intCast(usize, entry.key_ptr.*)] = entry.value_ptr.*;
+ buffer[@as(usize, @intCast(entry.key_ptr.*))] = entry.value_ptr.*;
}
try testing.expect(count == 3);
try testing.expect(it.next() == null);
for (buffer, 0..) |_, i| {
- try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
+ try testing.expect(buffer[@as(usize, @intCast(keys[i]))] == values[i]);
}
it.reset();
count = 0;
while (it.next()) |entry| {
- buffer[@intCast(usize, entry.key_ptr.*)] = entry.value_ptr.*;
+ buffer[@as(usize, @intCast(entry.key_ptr.*))] = entry.value_ptr.*;
count += 1;
if (count >= 2) break;
}
for (buffer[0..2], 0..) |_, i| {
- try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
+ try testing.expect(buffer[@as(usize, @intCast(keys[i]))] == values[i]);
}
it.reset();
@@ -2336,11 +2336,11 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)
fn hash(ctx: Context, key: K) u32 {
_ = ctx;
if (comptime trait.hasUniqueRepresentation(K)) {
- return @truncate(u32, Wyhash.hash(0, std.mem.asBytes(&key)));
+ return @as(u32, @truncate(Wyhash.hash(0, std.mem.asBytes(&key))));
} else {
var hasher = Wyhash.init(0);
autoHash(&hasher, key);
- return @truncate(u32, hasher.final());
+ return @as(u32, @truncate(hasher.final()));
}
}
}.hash;
@@ -2380,7 +2380,7 @@ pub fn getAutoHashStratFn(comptime K: type, comptime Context: type, comptime str
_ = ctx;
var hasher = Wyhash.init(0);
std.hash.autoHashStrat(&hasher, key, strategy);
- return @truncate(u32, hasher.final());
+ return @as(u32, @truncate(hasher.final()));
}
}.hash;
}
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index c2a2486dfa..8f3458481c 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -1123,19 +1123,19 @@ test "std.ArrayList/ArrayListUnmanaged.basic" {
{
var i: usize = 0;
while (i < 10) : (i += 1) {
- list.append(@intCast(i32, i + 1)) catch unreachable;
+ list.append(@as(i32, @intCast(i + 1))) catch unreachable;
}
}
{
var i: usize = 0;
while (i < 10) : (i += 1) {
- try testing.expect(list.items[i] == @intCast(i32, i + 1));
+ try testing.expect(list.items[i] == @as(i32, @intCast(i + 1)));
}
}
for (list.items, 0..) |v, i| {
- try testing.expect(v == @intCast(i32, i + 1));
+ try testing.expect(v == @as(i32, @intCast(i + 1)));
}
try testing.expect(list.pop() == 10);
@@ -1173,19 +1173,19 @@ test "std.ArrayList/ArrayListUnmanaged.basic" {
{
var i: usize = 0;
while (i < 10) : (i += 1) {
- list.append(a, @intCast(i32, i + 1)) catch unreachable;
+ list.append(a, @as(i32, @intCast(i + 1))) catch unreachable;
}
}
{
var i: usize = 0;
while (i < 10) : (i += 1) {
- try testing.expect(list.items[i] == @intCast(i32, i + 1));
+ try testing.expect(list.items[i] == @as(i32, @intCast(i + 1)));
}
}
for (list.items, 0..) |v, i| {
- try testing.expect(v == @intCast(i32, i + 1));
+ try testing.expect(v == @as(i32, @intCast(i + 1)));
}
try testing.expect(list.pop() == 10);
diff --git a/lib/std/atomic/Atomic.zig b/lib/std/atomic/Atomic.zig
index c3f17421f3..b9e1b18f77 100644
--- a/lib/std/atomic/Atomic.zig
+++ b/lib/std/atomic/Atomic.zig
@@ -46,7 +46,7 @@ pub fn Atomic(comptime T: type) type {
extern "c" fn __tsan_release(addr: *anyopaque) void;
};
- const addr = @ptrCast(*anyopaque, self);
+ const addr = @as(*anyopaque, @ptrCast(self));
return switch (ordering) {
.Unordered, .Monotonic => @compileError(@tagName(ordering) ++ " only applies to atomic loads and stores"),
.Acquire => tsan.__tsan_acquire(addr),
@@ -307,7 +307,7 @@ pub fn Atomic(comptime T: type) type {
// TODO: emit appropriate tsan fence if compiling with tsan
_ = ordering;
- return @intCast(u1, old_bit);
+ return @as(u1, @intCast(old_bit));
}
});
};
@@ -392,8 +392,8 @@ test "Atomic.swap" {
try testing.expectEqual(a.load(.SeqCst), true);
var b = Atomic(?*u8).init(null);
- try testing.expectEqual(b.swap(@ptrFromInt(?*u8, @alignOf(u8)), ordering), null);
- try testing.expectEqual(b.load(.SeqCst), @ptrFromInt(?*u8, @alignOf(u8)));
+ try testing.expectEqual(b.swap(@as(?*u8, @ptrFromInt(@alignOf(u8))), ordering), null);
+ try testing.expectEqual(b.load(.SeqCst), @as(?*u8, @ptrFromInt(@alignOf(u8))));
}
}
@@ -544,7 +544,7 @@ test "Atomic.bitSet" {
var x = Atomic(Int).init(0);
for (0..@bitSizeOf(Int)) |bit_index| {
- const bit = @intCast(std.math.Log2Int(Int), bit_index);
+ const bit = @as(std.math.Log2Int(Int), @intCast(bit_index));
const mask = @as(Int, 1) << bit;
// setting the bit should change the bit
@@ -558,7 +558,7 @@ test "Atomic.bitSet" {
// all the previous bits should have not changed (still be set)
for (0..bit_index) |prev_bit_index| {
- const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
+ const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index));
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask != 0);
}
@@ -573,7 +573,7 @@ test "Atomic.bitReset" {
var x = Atomic(Int).init(0);
for (0..@bitSizeOf(Int)) |bit_index| {
- const bit = @intCast(std.math.Log2Int(Int), bit_index);
+ const bit = @as(std.math.Log2Int(Int), @intCast(bit_index));
const mask = @as(Int, 1) << bit;
x.storeUnchecked(x.loadUnchecked() | mask);
@@ -588,7 +588,7 @@ test "Atomic.bitReset" {
// all the previous bits should have not changed (still be reset)
for (0..bit_index) |prev_bit_index| {
- const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
+ const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index));
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask == 0);
}
@@ -603,7 +603,7 @@ test "Atomic.bitToggle" {
var x = Atomic(Int).init(0);
for (0..@bitSizeOf(Int)) |bit_index| {
- const bit = @intCast(std.math.Log2Int(Int), bit_index);
+ const bit = @as(std.math.Log2Int(Int), @intCast(bit_index));
const mask = @as(Int, 1) << bit;
// toggling the bit should change the bit
@@ -617,7 +617,7 @@ test "Atomic.bitToggle" {
// all the previous bits should have not changed (still be toggled back)
for (0..bit_index) |prev_bit_index| {
- const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
+ const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index));
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask == 0);
}
diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig
index 70cb293cf4..78eb746347 100644
--- a/lib/std/atomic/queue.zig
+++ b/lib/std/atomic/queue.zig
@@ -248,7 +248,7 @@ fn startPuts(ctx: *Context) u8 {
const random = prng.random();
while (put_count != 0) : (put_count -= 1) {
std.time.sleep(1); // let the os scheduler be our fuzz
- const x = @bitCast(i32, random.int(u32));
+ const x = @as(i32, @bitCast(random.int(u32)));
const node = ctx.allocator.create(Queue(i32).Node) catch unreachable;
node.* = .{
.prev = undefined,
diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig
index 9ad7c76d81..1289217652 100644
--- a/lib/std/atomic/stack.zig
+++ b/lib/std/atomic/stack.zig
@@ -151,7 +151,7 @@ fn startPuts(ctx: *Context) u8 {
const random = prng.random();
while (put_count != 0) : (put_count -= 1) {
std.time.sleep(1); // let the os scheduler be our fuzz
- const x = @bitCast(i32, random.int(u32));
+ const x = @as(i32, @bitCast(random.int(u32)));
const node = ctx.allocator.create(Stack(i32).Node) catch unreachable;
node.* = Stack(i32).Node{
.next = undefined,
diff --git a/lib/std/base64.zig b/lib/std/base64.zig
index 869fa47e5e..16e6aa7e8e 100644
--- a/lib/std/base64.zig
+++ b/lib/std/base64.zig
@@ -108,12 +108,12 @@ pub const Base64Encoder = struct {
acc_len += 8;
while (acc_len >= 6) {
acc_len -= 6;
- dest[out_idx] = encoder.alphabet_chars[@truncate(u6, (acc >> acc_len))];
+ dest[out_idx] = encoder.alphabet_chars[@as(u6, @truncate((acc >> acc_len)))];
out_idx += 1;
}
}
if (acc_len > 0) {
- dest[out_idx] = encoder.alphabet_chars[@truncate(u6, (acc << 6 - acc_len))];
+ dest[out_idx] = encoder.alphabet_chars[@as(u6, @truncate((acc << 6 - acc_len)))];
out_idx += 1;
}
if (encoder.pad_char) |pad_char| {
@@ -144,7 +144,7 @@ pub const Base64Decoder = struct {
assert(!char_in_alphabet[c]);
assert(pad_char == null or c != pad_char.?);
- result.char_to_index[c] = @intCast(u8, i);
+ result.char_to_index[c] = @as(u8, @intCast(i));
char_in_alphabet[c] = true;
}
return result;
@@ -196,7 +196,7 @@ pub const Base64Decoder = struct {
acc_len += 6;
if (acc_len >= 8) {
acc_len -= 8;
- dest[dest_idx] = @truncate(u8, acc >> acc_len);
+ dest[dest_idx] = @as(u8, @truncate(acc >> acc_len));
dest_idx += 1;
}
}
@@ -271,7 +271,7 @@ pub const Base64DecoderWithIgnore = struct {
if (acc_len >= 8) {
if (dest_idx == dest.len) return error.NoSpaceLeft;
acc_len -= 8;
- dest[dest_idx] = @truncate(u8, acc >> acc_len);
+ dest[dest_idx] = @as(u8, @truncate(acc >> acc_len));
dest_idx += 1;
}
}
diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig
index 4b83e8e057..9e5c707b84 100644
--- a/lib/std/bit_set.zig
+++ b/lib/std/bit_set.zig
@@ -119,19 +119,19 @@ pub fn IntegerBitSet(comptime size: u16) type {
if (range.start == range.end) return;
if (MaskInt == u0) return;
- const start_bit = @intCast(ShiftInt, range.start);
+ const start_bit = @as(ShiftInt, @intCast(range.start));
var mask = std.math.boolMask(MaskInt, true) << start_bit;
if (range.end != bit_length) {
- const end_bit = @intCast(ShiftInt, range.end);
- mask &= std.math.boolMask(MaskInt, true) >> @truncate(ShiftInt, @as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit));
+ const end_bit = @as(ShiftInt, @intCast(range.end));
+ mask &= std.math.boolMask(MaskInt, true) >> @as(ShiftInt, @truncate(@as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit)));
}
self.mask &= ~mask;
mask = std.math.boolMask(MaskInt, value) << start_bit;
if (range.end != bit_length) {
- const end_bit = @intCast(ShiftInt, range.end);
- mask &= std.math.boolMask(MaskInt, value) >> @truncate(ShiftInt, @as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit));
+ const end_bit = @as(ShiftInt, @intCast(range.end));
+ mask &= std.math.boolMask(MaskInt, value) >> @as(ShiftInt, @truncate(@as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit)));
}
self.mask |= mask;
}
@@ -292,7 +292,7 @@ pub fn IntegerBitSet(comptime size: u16) type {
.reverse => {
const leading_zeroes = @clz(self.bits_remain);
const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes;
- self.bits_remain &= (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1;
+ self.bits_remain &= (@as(MaskInt, 1) << @as(ShiftInt, @intCast(top_bit))) - 1;
return top_bit;
},
}
@@ -302,11 +302,11 @@ pub fn IntegerBitSet(comptime size: u16) type {
fn maskBit(index: usize) MaskInt {
if (MaskInt == u0) return 0;
- return @as(MaskInt, 1) << @intCast(ShiftInt, index);
+ return @as(MaskInt, 1) << @as(ShiftInt, @intCast(index));
}
fn boolMaskBit(index: usize, value: bool) MaskInt {
if (MaskInt == u0) return 0;
- return @as(MaskInt, @intFromBool(value)) << @intCast(ShiftInt, index);
+ return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index));
}
};
}
@@ -442,10 +442,10 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
if (num_masks == 0) return;
const start_mask_index = maskIndex(range.start);
- const start_bit = @truncate(ShiftInt, range.start);
+ const start_bit = @as(ShiftInt, @truncate(range.start));
const end_mask_index = maskIndex(range.end);
- const end_bit = @truncate(ShiftInt, range.end);
+ const end_bit = @as(ShiftInt, @truncate(range.end));
if (start_mask_index == end_mask_index) {
var mask1 = std.math.boolMask(MaskInt, true) << start_bit;
@@ -634,13 +634,13 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
}
fn maskBit(index: usize) MaskInt {
- return @as(MaskInt, 1) << @truncate(ShiftInt, index);
+ return @as(MaskInt, 1) << @as(ShiftInt, @truncate(index));
}
fn maskIndex(index: usize) usize {
return index >> @bitSizeOf(ShiftInt);
}
fn boolMaskBit(index: usize, value: bool) MaskInt {
- return @as(MaskInt, @intFromBool(value)) << @intCast(ShiftInt, index);
+ return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index));
}
};
}
@@ -731,7 +731,7 @@ pub const DynamicBitSetUnmanaged = struct {
// set the padding bits in the old last item to 1
if (fill and old_masks > 0) {
const old_padding_bits = old_masks * @bitSizeOf(MaskInt) - old_len;
- const old_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, old_padding_bits);
+ const old_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(old_padding_bits));
self.masks[old_masks - 1] |= ~old_mask;
}
@@ -745,7 +745,7 @@ pub const DynamicBitSetUnmanaged = struct {
// Zero out the padding bits
if (new_len > 0) {
const padding_bits = new_masks * @bitSizeOf(MaskInt) - new_len;
- const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
+ const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits));
self.masks[new_masks - 1] &= last_item_mask;
}
@@ -816,10 +816,10 @@ pub const DynamicBitSetUnmanaged = struct {
if (range.start == range.end) return;
const start_mask_index = maskIndex(range.start);
- const start_bit = @truncate(ShiftInt, range.start);
+ const start_bit = @as(ShiftInt, @truncate(range.start));
const end_mask_index = maskIndex(range.end);
- const end_bit = @truncate(ShiftInt, range.end);
+ const end_bit = @as(ShiftInt, @truncate(range.end));
if (start_mask_index == end_mask_index) {
var mask1 = std.math.boolMask(MaskInt, true) << start_bit;
@@ -887,7 +887,7 @@ pub const DynamicBitSetUnmanaged = struct {
}
const padding_bits = num_masks * @bitSizeOf(MaskInt) - bit_length;
- const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
+ const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits));
self.masks[num_masks - 1] &= last_item_mask;
}
@@ -996,7 +996,7 @@ pub const DynamicBitSetUnmanaged = struct {
pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) {
const num_masks = numMasks(self.bit_length);
const padding_bits = num_masks * @bitSizeOf(MaskInt) - self.bit_length;
- const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
+ const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits));
return Iterator(options).init(self.masks[0..num_masks], last_item_mask);
}
@@ -1005,13 +1005,13 @@ pub const DynamicBitSetUnmanaged = struct {
}
fn maskBit(index: usize) MaskInt {
- return @as(MaskInt, 1) << @truncate(ShiftInt, index);
+ return @as(MaskInt, 1) << @as(ShiftInt, @truncate(index));
}
fn maskIndex(index: usize) usize {
return index >> @bitSizeOf(ShiftInt);
}
fn boolMaskBit(index: usize, value: bool) MaskInt {
- return @as(MaskInt, @intFromBool(value)) << @intCast(ShiftInt, index);
+ return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index));
}
fn numMasks(bit_length: usize) usize {
return (bit_length + (@bitSizeOf(MaskInt) - 1)) / @bitSizeOf(MaskInt);
@@ -1255,7 +1255,7 @@ fn BitSetIterator(comptime MaskInt: type, comptime options: IteratorOptions) typ
.reverse => {
const leading_zeroes = @clz(self.bits_remain);
const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes;
- const no_top_bit_mask = (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1;
+ const no_top_bit_mask = (@as(MaskInt, 1) << @as(ShiftInt, @intCast(top_bit))) - 1;
self.bits_remain &= no_top_bit_mask;
return top_bit + self.bit_offset;
},
diff --git a/lib/std/bounded_array.zig b/lib/std/bounded_array.zig
index 0e0b601af6..6986414a24 100644
--- a/lib/std/bounded_array.zig
+++ b/lib/std/bounded_array.zig
@@ -394,7 +394,7 @@ test "BoundedArrayAligned" {
try a.append(255);
try a.append(255);
- const b = @ptrCast(*const [2]u16, a.constSlice().ptr);
+ const b = @as(*const [2]u16, @ptrCast(a.constSlice().ptr));
try testing.expectEqual(@as(u16, 0), b[0]);
try testing.expectEqual(@as(u16, 65535), b[1]);
}
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 54781e4465..99761b146d 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -784,7 +784,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
exit_size.* = 256;
- return @ptrCast([*:0]u16, utf16.ptr);
+ return @as([*:0]u16, @ptrCast(utf16.ptr));
}
};
diff --git a/lib/std/c.zig b/lib/std/c.zig
index 3b4bfef826..149f3ab7e1 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -113,7 +113,7 @@ pub usingnamespace switch (builtin.os.tag) {
pub fn getErrno(rc: anytype) c.E {
if (rc == -1) {
- return @enumFromInt(c.E, c._errno().*);
+ return @as(c.E, @enumFromInt(c._errno().*));
} else {
return .SUCCESS;
}
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index 6dd517eada..0f60c2f841 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -1177,10 +1177,10 @@ pub const sigset_t = u32;
pub const empty_sigset: sigset_t = 0;
pub const SIG = struct {
- pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
- pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
- pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
- pub const HOLD = @ptrFromInt(?Sigaction.handler_fn, 5);
+ pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+ pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+ pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
+ pub const HOLD = @as(?Sigaction.handler_fn, @ptrFromInt(5));
/// block specified signal set
pub const _BLOCK = 1;
@@ -1411,7 +1411,7 @@ pub const MAP = struct {
pub const NOCACHE = 0x0400;
/// don't reserve needed swap area
pub const NORESERVE = 0x0040;
- pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+ pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
};
pub const MSF = struct {
@@ -1879,7 +1879,7 @@ pub const W = struct {
pub const UNTRACED = 0x00000002;
pub fn EXITSTATUS(x: u32) u8 {
- return @intCast(u8, x >> 8);
+ return @as(u8, @intCast(x >> 8));
}
pub fn TERMSIG(x: u32) u32 {
return status(x);
@@ -2463,7 +2463,7 @@ pub const KernE = enum(u32) {
pub const mach_msg_return_t = kern_return_t;
pub fn getMachMsgError(err: mach_msg_return_t) MachMsgE {
- return @enumFromInt(MachMsgE, @truncate(u32, @intCast(usize, err)));
+ return @as(MachMsgE, @enumFromInt(@as(u32, @truncate(@as(usize, @intCast(err))))));
}
/// All special error code bits defined below.
@@ -2665,10 +2665,10 @@ pub const RTLD = struct {
pub const NODELETE = 0x80;
pub const FIRST = 0x100;
- pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1)));
- pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2)));
- pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3)));
- pub const MAIN_ONLY = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -5)));
+ pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))));
+ pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2)))));
+ pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3)))));
+ pub const MAIN_ONLY = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -5)))));
};
pub const F = struct {
@@ -3238,14 +3238,14 @@ pub const PosixSpawn = struct {
pub fn get(self: Attr) Error!u16 {
var flags: c_short = undefined;
switch (errno(posix_spawnattr_getflags(&self.attr, &flags))) {
- .SUCCESS => return @bitCast(u16, flags),
+ .SUCCESS => return @as(u16, @bitCast(flags)),
.INVAL => unreachable,
else => |err| return unexpectedErrno(err),
}
}
pub fn set(self: *Attr, flags: u16) Error!void {
- switch (errno(posix_spawnattr_setflags(&self.attr, @bitCast(c_short, flags)))) {
+ switch (errno(posix_spawnattr_setflags(&self.attr, @as(c_short, @bitCast(flags))))) {
.SUCCESS => return,
.INVAL => unreachable,
else => |err| return unexpectedErrno(err),
@@ -3281,7 +3281,7 @@ pub const PosixSpawn = struct {
}
pub fn openZ(self: *Actions, fd: fd_t, path: [*:0]const u8, flags: u32, mode: mode_t) Error!void {
- switch (errno(posix_spawn_file_actions_addopen(&self.actions, fd, path, @bitCast(c_int, flags), mode))) {
+ switch (errno(posix_spawn_file_actions_addopen(&self.actions, fd, path, @as(c_int, @bitCast(flags)), mode))) {
.SUCCESS => return,
.BADF => return error.InvalidFileDescriptor,
.NOMEM => return error.SystemResources,
@@ -3402,11 +3402,11 @@ pub const PosixSpawn = struct {
pub fn waitpid(pid: pid_t, flags: u32) Error!std.os.WaitPidResult {
var status: c_int = undefined;
while (true) {
- const rc = waitpid(pid, &status, @intCast(c_int, flags));
+ const rc = waitpid(pid, &status, @as(c_int, @intCast(flags)));
switch (errno(rc)) {
.SUCCESS => return std.os.WaitPidResult{
- .pid = @intCast(pid_t, rc),
- .status = @bitCast(u32, status),
+ .pid = @as(pid_t, @intCast(rc)),
+ .status = @as(u32, @bitCast(status)),
},
.INTR => continue,
.CHILD => return error.ChildExecFailed,
@@ -3418,7 +3418,7 @@ pub const PosixSpawn = struct {
};
pub fn getKernError(err: kern_return_t) KernE {
- return @enumFromInt(KernE, @truncate(u32, @intCast(usize, err)));
+ return @as(KernE, @enumFromInt(@as(u32, @truncate(@as(usize, @intCast(err))))));
}
pub fn unexpectedKernError(err: KernE) std.os.UnexpectedError {
@@ -3585,9 +3585,9 @@ pub const MachTask = extern struct {
.top => VM_REGION_TOP_INFO,
},
switch (tag) {
- .basic => @ptrCast(vm_region_info_t, &info.info.basic),
- .extended => @ptrCast(vm_region_info_t, &info.info.extended),
- .top => @ptrCast(vm_region_info_t, &info.info.top),
+ .basic => @as(vm_region_info_t, @ptrCast(&info.info.basic)),
+ .extended => @as(vm_region_info_t, @ptrCast(&info.info.extended)),
+ .top => @as(vm_region_info_t, @ptrCast(&info.info.top)),
},
&count,
&objname,
@@ -3640,8 +3640,8 @@ pub const MachTask = extern struct {
&base_len,
&nesting,
switch (tag) {
- .short => @ptrCast(vm_region_recurse_info_t, &info.info.short),
- .full => @ptrCast(vm_region_recurse_info_t, &info.info.full),
+ .short => @as(vm_region_recurse_info_t, @ptrCast(&info.info.short)),
+ .full => @as(vm_region_recurse_info_t, @ptrCast(&info.info.full)),
},
&count,
))) {
@@ -3701,7 +3701,7 @@ pub const MachTask = extern struct {
task.port,
curr_addr,
@intFromPtr(out_buf.ptr),
- @intCast(mach_msg_type_number_t, curr_size),
+ @as(mach_msg_type_number_t, @intCast(curr_size)),
))) {
.SUCCESS => {},
.FAILURE => return error.PermissionDenied,
@@ -3752,7 +3752,7 @@ pub const MachTask = extern struct {
else => |err| return unexpectedKernError(err),
}
- @memcpy(out_buf[0..curr_bytes_read], @ptrFromInt([*]const u8, vm_memory));
+ @memcpy(out_buf[0..curr_bytes_read], @as([*]const u8, @ptrFromInt(vm_memory)));
_ = vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read);
out_buf = out_buf[curr_bytes_read..];
@@ -3782,10 +3782,10 @@ pub const MachTask = extern struct {
switch (getKernError(task_info(
task.port,
TASK_VM_INFO,
- @ptrCast(task_info_t, &vm_info),
+ @as(task_info_t, @ptrCast(&vm_info)),
&info_count,
))) {
- .SUCCESS => return @intCast(usize, vm_info.page_size),
+ .SUCCESS => return @as(usize, @intCast(vm_info.page_size)),
else => {},
}
}
@@ -3802,7 +3802,7 @@ pub const MachTask = extern struct {
switch (getKernError(task_info(
task.port,
MACH_TASK_BASIC_INFO,
- @ptrCast(task_info_t, &info),
+ @as(task_info_t, @ptrCast(&info)),
&count,
))) {
.SUCCESS => return info,
@@ -3832,7 +3832,7 @@ pub const MachTask = extern struct {
_ = vm_deallocate(
self_task.port,
@intFromPtr(list.buf.ptr),
- @intCast(vm_size_t, list.buf.len * @sizeOf(mach_port_t)),
+ @as(vm_size_t, @intCast(list.buf.len * @sizeOf(mach_port_t))),
);
}
};
@@ -3841,7 +3841,7 @@ pub const MachTask = extern struct {
var thread_list: mach_port_array_t = undefined;
var thread_count: mach_msg_type_number_t = undefined;
switch (getKernError(task_threads(task.port, &thread_list, &thread_count))) {
- .SUCCESS => return ThreadList{ .buf = @ptrCast([*]MachThread, thread_list)[0..thread_count] },
+ .SUCCESS => return ThreadList{ .buf = @as([*]MachThread, @ptrCast(thread_list))[0..thread_count] },
else => |err| return unexpectedKernError(err),
}
}
@@ -3860,7 +3860,7 @@ pub const MachThread = extern struct {
switch (getKernError(thread_info(
thread.port,
THREAD_BASIC_INFO,
- @ptrCast(thread_info_t, &info),
+ @as(thread_info_t, @ptrCast(&info)),
&count,
))) {
.SUCCESS => return info,
@@ -3874,7 +3874,7 @@ pub const MachThread = extern struct {
switch (getKernError(thread_info(
thread.port,
THREAD_IDENTIFIER_INFO,
- @ptrCast(thread_info_t, &info),
+ @as(thread_info_t, @ptrCast(&info)),
&count,
))) {
.SUCCESS => return info,
@@ -3962,7 +3962,7 @@ pub const thread_affinity_policy_t = [*]thread_affinity_policy;
pub const THREAD_AFFINITY = struct {
pub const POLICY = 0;
- pub const POLICY_COUNT = @intCast(mach_msg_type_number_t, @sizeOf(thread_affinity_policy_data_t) / @sizeOf(integer_t));
+ pub const POLICY_COUNT = @as(mach_msg_type_number_t, @intCast(@sizeOf(thread_affinity_policy_data_t) / @sizeOf(integer_t)));
};
/// cpu affinity api
@@ -4041,7 +4041,7 @@ pub const host_preferred_user_arch_data_t = host_preferred_user_arch;
pub const host_preferred_user_arch_t = *host_preferred_user_arch;
fn HostCount(comptime HT: type) mach_msg_type_number_t {
- return @intCast(mach_msg_type_number_t, @sizeOf(HT) / @sizeOf(integer_t));
+ return @as(mach_msg_type_number_t, @intCast(@sizeOf(HT) / @sizeOf(integer_t)));
}
pub const HOST = struct {
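Most of the darwin.zig changes (and the other libc files below) are constants built from integers; a sketch of the sentinel-pointer shape with a made-up name standing in for MAP.FAILED:

const std = @import("std");

test "sentinel pointer constant after the rewrite" {
    var addr: usize = std.math.maxInt(usize);
    _ = &addr; // keep the address a runtime value for the sketch
    // Old form: @ptrFromInt(*anyopaque, addr)
    const demo_failed = @as(*anyopaque, @ptrFromInt(addr));
    try std.testing.expect(@intFromPtr(demo_failed) == addr);
}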
diff --git a/lib/std/c/dragonfly.zig b/lib/std/c/dragonfly.zig
index 912bb99056..6782aa098a 100644
--- a/lib/std/c/dragonfly.zig
+++ b/lib/std/c/dragonfly.zig
@@ -172,7 +172,7 @@ pub const PROT = struct {
pub const MAP = struct {
pub const FILE = 0;
- pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+ pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
pub const ANONYMOUS = ANON;
pub const COPY = PRIVATE;
pub const SHARED = 1;
@@ -208,7 +208,7 @@ pub const W = struct {
pub const TRAPPED = 0x0020;
pub fn EXITSTATUS(s: u32) u8 {
- return @intCast(u8, (s & 0xff00) >> 8);
+ return @as(u8, @intCast((s & 0xff00) >> 8));
}
pub fn TERMSIG(s: u32) u32 {
return s & 0x7f;
@@ -220,7 +220,7 @@ pub const W = struct {
return TERMSIG(s) == 0;
}
pub fn IFSTOPPED(s: u32) bool {
- return @truncate(u16, (((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00;
+ return @as(u16, @truncate((((s & 0xffff) *% 0x10001) >> 8))) > 0x7f00;
}
pub fn IFSIGNALED(s: u32) bool {
return (s & 0xffff) -% 1 < 0xff;
@@ -620,9 +620,9 @@ pub const S = struct {
pub const BADSIG = SIG.ERR;
pub const SIG = struct {
- pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
- pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
- pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
+ pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+ pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
+ pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
pub const BLOCK = 1;
pub const UNBLOCK = 2;
@@ -871,10 +871,10 @@ pub const RTLD = struct {
pub const NODELETE = 0x01000;
pub const NOLOAD = 0x02000;
- pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1)));
- pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2)));
- pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3)));
- pub const ALL = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -4)));
+ pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))));
+ pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2)))));
+ pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3)))));
+ pub const ALL = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -4)))));
};
pub const dl_phdr_info = extern struct {
diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig
index 7a265ac2b3..deec41493d 100644
--- a/lib/std/c/freebsd.zig
+++ b/lib/std/c/freebsd.zig
@@ -20,11 +20,11 @@ fn __BIT_COUNT(bits: []const c_long) c_long {
fn __BIT_MASK(s: usize) c_long {
var x = s % CPU_SETSIZE;
- return @bitCast(c_long, @intCast(c_ulong, 1) << @intCast(u6, x));
+ return @as(c_long, @bitCast(@as(c_ulong, @intCast(1)) << @as(u6, @intCast(x))));
}
pub fn CPU_COUNT(set: cpuset_t) c_int {
- return @intCast(c_int, __BIT_COUNT(set.__bits[0..]));
+ return @as(c_int, @intCast(__BIT_COUNT(set.__bits[0..])));
}
pub fn CPU_ZERO(set: *cpuset_t) void {
@@ -529,7 +529,7 @@ pub const cap_rights_t = extern struct {
pub const CAP = struct {
pub fn RIGHT(idx: u6, bit: u64) u64 {
- return (@intCast(u64, 1) << (57 + idx)) | bit;
+ return (@as(u64, @intCast(1)) << (57 + idx)) | bit;
}
pub const READ = CAP.RIGHT(0, 0x0000000000000001);
pub const WRITE = CAP.RIGHT(0, 0x0000000000000002);
@@ -961,7 +961,7 @@ pub const CLOCK = struct {
};
pub const MAP = struct {
- pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+ pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
pub const SHARED = 0x0001;
pub const PRIVATE = 0x0002;
pub const FIXED = 0x0010;
@@ -1013,7 +1013,7 @@ pub const W = struct {
pub const TRAPPED = 32;
pub fn EXITSTATUS(s: u32) u8 {
- return @intCast(u8, (s & 0xff00) >> 8);
+ return @as(u8, @intCast((s & 0xff00) >> 8));
}
pub fn TERMSIG(s: u32) u32 {
return s & 0x7f;
@@ -1025,7 +1025,7 @@ pub const W = struct {
return TERMSIG(s) == 0;
}
pub fn IFSTOPPED(s: u32) bool {
- return @truncate(u16, (((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00;
+ return @as(u16, @truncate((((s & 0xffff) *% 0x10001) >> 8))) > 0x7f00;
}
pub fn IFSIGNALED(s: u32) bool {
return (s & 0xffff) -% 1 < 0xff;
@@ -1086,9 +1086,9 @@ pub const SIG = struct {
pub const UNBLOCK = 2;
pub const SETMASK = 3;
- pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
- pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
- pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
+ pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+ pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
+ pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
pub const WORDS = 4;
pub const MAXSIG = 128;
@@ -2626,7 +2626,7 @@ pub const domainset_t = extern struct {
};
pub fn DOMAINSET_COUNT(set: domainset_t) c_int {
- return @intCast(c_int, __BIT_COUNT(set.__bits[0..]));
+ return @as(c_int, @intCast(__BIT_COUNT(set.__bits[0..])));
}
pub const domainset = extern struct {
@@ -2650,7 +2650,7 @@ const ioctl_cmd = enum(u32) {
};
fn ioImpl(cmd: ioctl_cmd, op: u8, nr: u8, comptime IT: type) u32 {
- return @bitCast(u32, @intFromEnum(cmd) | @intCast(u32, @truncate(u8, @sizeOf(IT))) << 16 | @intCast(u32, op) << 8 | nr);
+ return @as(u32, @bitCast(@intFromEnum(cmd) | @as(u32, @intCast(@as(u8, @truncate(@sizeOf(IT))))) << 16 | @as(u32, @intCast(op)) << 8 | nr));
}
pub fn IO(op: u8, nr: u8) u32 {
diff --git a/lib/std/c/haiku.zig b/lib/std/c/haiku.zig
index 2f9917a0f3..c47ceeb003 100644
--- a/lib/std/c/haiku.zig
+++ b/lib/std/c/haiku.zig
@@ -414,7 +414,7 @@ pub const CLOCK = struct {
pub const MAP = struct {
/// mmap() error return code
- pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+ pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
/// changes are seen by others
pub const SHARED = 0x01;
/// changes are only seen by caller
@@ -443,7 +443,7 @@ pub const W = struct {
pub const NOWAIT = 0x20;
pub fn EXITSTATUS(s: u32) u8 {
- return @intCast(u8, s & 0xff);
+ return @as(u8, @intCast(s & 0xff));
}
pub fn TERMSIG(s: u32) u32 {
@@ -481,9 +481,9 @@ pub const SA = struct {
};
pub const SIG = struct {
- pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
- pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
- pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
+ pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+ pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+ pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
pub const HUP = 1;
pub const INT = 2;
diff --git a/lib/std/c/linux.zig b/lib/std/c/linux.zig
index d3a3bfdeba..ddc488e115 100644
--- a/lib/std/c/linux.zig
+++ b/lib/std/c/linux.zig
@@ -32,7 +32,7 @@ pub const MADV = linux.MADV;
pub const MAP = struct {
pub usingnamespace linux.MAP;
/// Only used by libc to communicate failure.
- pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+ pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
};
pub const MSF = linux.MSF;
pub const MMAP2_UNIT = linux.MMAP2_UNIT;
diff --git a/lib/std/c/netbsd.zig b/lib/std/c/netbsd.zig
index 2c7c236ed0..1fc0784287 100644
--- a/lib/std/c/netbsd.zig
+++ b/lib/std/c/netbsd.zig
@@ -172,9 +172,9 @@ pub const RTLD = struct {
pub const NODELETE = 0x01000;
pub const NOLOAD = 0x02000;
- pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1)));
- pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2)));
- pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3)));
+ pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))));
+ pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2)))));
+ pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3)))));
};
pub const dl_phdr_info = extern struct {
@@ -597,7 +597,7 @@ pub const CLOCK = struct {
};
pub const MAP = struct {
- pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+ pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
pub const SHARED = 0x0001;
pub const PRIVATE = 0x0002;
pub const REMAPDUP = 0x0004;
@@ -653,7 +653,7 @@ pub const W = struct {
pub const TRAPPED = 0x00000040;
pub fn EXITSTATUS(s: u32) u8 {
- return @intCast(u8, (s >> 8) & 0xff);
+ return @as(u8, @intCast((s >> 8) & 0xff));
}
pub fn TERMSIG(s: u32) u32 {
return s & 0x7f;
@@ -1106,9 +1106,9 @@ pub const winsize = extern struct {
const NSIG = 32;
pub const SIG = struct {
- pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
- pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
- pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
+ pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+ pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
+ pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
pub const WORDS = 4;
pub const MAXSIG = 128;
diff --git a/lib/std/c/openbsd.zig b/lib/std/c/openbsd.zig
index 47c1aec862..06085903e4 100644
--- a/lib/std/c/openbsd.zig
+++ b/lib/std/c/openbsd.zig
@@ -449,7 +449,7 @@ pub const CLOCK = struct {
};
pub const MAP = struct {
- pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+ pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
pub const SHARED = 0x0001;
pub const PRIVATE = 0x0002;
pub const FIXED = 0x0010;
@@ -488,7 +488,7 @@ pub const W = struct {
pub const CONTINUED = 8;
pub fn EXITSTATUS(s: u32) u8 {
- return @intCast(u8, (s >> 8) & 0xff);
+ return @as(u8, @intCast((s >> 8) & 0xff));
}
pub fn TERMSIG(s: u32) u32 {
return (s & 0x7f);
@@ -1000,11 +1000,11 @@ pub const winsize = extern struct {
const NSIG = 33;
pub const SIG = struct {
- pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
- pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
- pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
- pub const CATCH = @ptrFromInt(?Sigaction.handler_fn, 2);
- pub const HOLD = @ptrFromInt(?Sigaction.handler_fn, 3);
+ pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+ pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
+ pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+ pub const CATCH = @as(?Sigaction.handler_fn, @ptrFromInt(2));
+ pub const HOLD = @as(?Sigaction.handler_fn, @ptrFromInt(3));
pub const HUP = 1;
pub const INT = 2;
diff --git a/lib/std/c/solaris.zig b/lib/std/c/solaris.zig
index 511bf9ccc5..cbca1805bb 100644
--- a/lib/std/c/solaris.zig
+++ b/lib/std/c/solaris.zig
@@ -111,10 +111,10 @@ pub const RTLD = struct {
pub const FIRST = 0x02000;
pub const CONFGEN = 0x10000;
- pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1)));
- pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2)));
- pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3)));
- pub const PROBE = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -4)));
+ pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))));
+ pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2)))));
+ pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3)))));
+ pub const PROBE = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -4)))));
};
pub const Flock = extern struct {
@@ -524,7 +524,7 @@ pub const CLOCK = struct {
};
pub const MAP = struct {
- pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+ pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
pub const SHARED = 0x0001;
pub const PRIVATE = 0x0002;
pub const TYPE = 0x000f;
@@ -583,7 +583,7 @@ pub const W = struct {
pub const NOWAIT = 0o200;
pub fn EXITSTATUS(s: u32) u8 {
- return @intCast(u8, (s >> 8) & 0xff);
+ return @as(u8, @intCast((s >> 8) & 0xff));
}
pub fn TERMSIG(s: u32) u32 {
return s & 0x7f;
@@ -886,10 +886,10 @@ pub const winsize = extern struct {
const NSIG = 75;
pub const SIG = struct {
- pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
- pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
- pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
- pub const HOLD = @ptrFromInt(?Sigaction.handler_fn, 2);
+ pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+ pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+ pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
+ pub const HOLD = @as(?Sigaction.handler_fn, @ptrFromInt(2));
pub const WORDS = 4;
pub const MAXSIG = 75;
@@ -1441,7 +1441,7 @@ pub const AT = struct {
/// Magic value that specify the use of the current working directory
/// to determine the target of relative file paths in the openat() and
/// similar syscalls.
- pub const FDCWD = @bitCast(fd_t, @as(u32, 0xffd19553));
+ pub const FDCWD = @as(fd_t, @bitCast(@as(u32, 0xffd19553)));
/// Do not follow symbolic links
pub const SYMLINK_NOFOLLOW = 0x1000;
@@ -1907,9 +1907,9 @@ const IoCtlCommand = enum(u32) {
};
fn ioImpl(cmd: IoCtlCommand, io_type: u8, nr: u8, comptime IOT: type) i32 {
- const size = @intCast(u32, @truncate(u8, @sizeOf(IOT))) << 16;
- const t = @intCast(u32, io_type) << 8;
- return @bitCast(i32, @intFromEnum(cmd) | size | t | nr);
+ const size = @as(u32, @intCast(@as(u8, @truncate(@sizeOf(IOT))))) << 16;
+ const t = @as(u32, @intCast(io_type)) << 8;
+ return @as(i32, @bitCast(@intFromEnum(cmd) | size | t | nr));
}
pub fn IO(io_type: u8, nr: u8) i32 {
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 636ef7f4d7..9f4d75084f 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -93,7 +93,7 @@ pub const ChildProcess = struct {
switch (builtin.os.tag) {
.linux => {
if (rus.rusage) |ru| {
- return @intCast(usize, ru.maxrss) * 1024;
+ return @as(usize, @intCast(ru.maxrss)) * 1024;
} else {
return null;
}
@@ -108,7 +108,7 @@ pub const ChildProcess = struct {
.macos, .ios => {
if (rus.rusage) |ru| {
// Darwin oddly reports in bytes instead of kilobytes.
- return @intCast(usize, ru.maxrss);
+ return @as(usize, @intCast(ru.maxrss));
} else {
return null;
}
@@ -376,7 +376,7 @@ pub const ChildProcess = struct {
if (windows.kernel32.GetExitCodeProcess(self.id, &exit_code) == 0) {
break :x Term{ .Unknown = 0 };
} else {
- break :x Term{ .Exited = @truncate(u8, exit_code) };
+ break :x Term{ .Exited = @as(u8, @truncate(exit_code)) };
}
});
@@ -449,7 +449,7 @@ pub const ChildProcess = struct {
// has a value greater than 0
if ((fd[0].revents & std.os.POLL.IN) != 0) {
const err_int = try readIntFd(err_pipe[0]);
- return @errSetCast(SpawnError, @errorFromInt(err_int));
+ return @as(SpawnError, @errSetCast(@errorFromInt(err_int)));
}
} else {
// Write maxInt(ErrInt) to the write end of the err_pipe. This is after
@@ -462,7 +462,7 @@ pub const ChildProcess = struct {
// Here we potentially return the fork child's error from the parent
// pid.
if (err_int != maxInt(ErrInt)) {
- return @errSetCast(SpawnError, @errorFromInt(err_int));
+ return @as(SpawnError, @errSetCast(@errorFromInt(err_int)));
}
}
}
@@ -542,7 +542,7 @@ pub const ChildProcess = struct {
} else if (builtin.output_mode == .Exe) {
// Then we have Zig start code and this works.
// TODO type-safety for null-termination of `os.environ`.
- break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr);
+ break :m @as([*:null]const ?[*:0]const u8, @ptrCast(os.environ.ptr));
} else {
// TODO come up with a solution for this.
@compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process");
@@ -605,7 +605,7 @@ pub const ChildProcess = struct {
}
// we are the parent
- const pid = @intCast(i32, pid_result);
+ const pid = @as(i32, @intCast(pid_result));
if (self.stdin_behavior == StdIo.Pipe) {
self.stdin = File{ .handle = stdin_pipe[1] };
} else {
@@ -1015,11 +1015,11 @@ fn windowsCreateProcessPathExt(
else => return windows.unexpectedStatus(rc),
}
- const dir_info = @ptrCast(*windows.FILE_DIRECTORY_INFORMATION, &file_information_buf);
+ const dir_info = @as(*windows.FILE_DIRECTORY_INFORMATION, @ptrCast(&file_information_buf));
if (dir_info.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) {
break :found_name null;
}
- break :found_name @ptrCast([*]u16, &dir_info.FileName)[0 .. dir_info.FileNameLength / 2];
+ break :found_name @as([*]u16, @ptrCast(&dir_info.FileName))[0 .. dir_info.FileNameLength / 2];
};
const unappended_err = unappended: {
@@ -1104,7 +1104,7 @@ fn windowsCreateProcessPathExt(
else => return windows.unexpectedStatus(rc),
}
- const dir_info = @ptrCast(*windows.FILE_DIRECTORY_INFORMATION, &file_information_buf);
+ const dir_info = @as(*windows.FILE_DIRECTORY_INFORMATION, @ptrCast(&file_information_buf));
// Skip directories
if (dir_info.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) continue;
@@ -1164,7 +1164,7 @@ fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u1
null,
windows.TRUE,
windows.CREATE_UNICODE_ENVIRONMENT,
- @ptrCast(?*anyopaque, envp_ptr),
+ @as(?*anyopaque, @ptrCast(envp_ptr)),
cwd_ptr,
lpStartupInfo,
lpProcessInformation,
@@ -1376,7 +1376,7 @@ fn writeIntFd(fd: i32, value: ErrInt) !void {
.capable_io_mode = .blocking,
.intended_io_mode = .blocking,
};
- file.writer().writeIntNative(u64, @intCast(u64, value)) catch return error.SystemResources;
+ file.writer().writeIntNative(u64, @as(u64, @intCast(value))) catch return error.SystemResources;
}
fn readIntFd(fd: i32) !ErrInt {
@@ -1385,7 +1385,7 @@ fn readIntFd(fd: i32) !ErrInt {
.capable_io_mode = .blocking,
.intended_io_mode = .blocking,
};
- return @intCast(ErrInt, file.reader().readIntNative(u64) catch return error.SystemResources);
+ return @as(ErrInt, @intCast(file.reader().readIntNative(u64) catch return error.SystemResources));
}
/// Caller must free result.
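The err_pipe hunks above round-trip a spawn error through its integer encoding; a sketch of that @errSetCast pattern with an invented error set in place of SpawnError:

const std = @import("std");

test "error round trip after the rewrite" {
    const DemoError = error{ OutOfMemory, AccessDenied };
    const code = @intFromError(error.OutOfMemory);
    // Old form: @errSetCast(DemoError, @errorFromInt(code))
    const err = @as(DemoError, @errSetCast(@errorFromInt(code)));
    try std.testing.expect(err == error.OutOfMemory);
}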
diff --git a/lib/std/coff.zig b/lib/std/coff.zig
index d28e54b94c..a08c2c514d 100644
--- a/lib/std/coff.zig
+++ b/lib/std/coff.zig
@@ -457,12 +457,12 @@ pub const ImportLookupEntry32 = struct {
pub fn getImportByName(raw: u32) ?ByName {
if (mask & raw != 0) return null;
- return @bitCast(ByName, raw);
+ return @as(ByName, @bitCast(raw));
}
pub fn getImportByOrdinal(raw: u32) ?ByOrdinal {
if (mask & raw == 0) return null;
- return @bitCast(ByOrdinal, raw);
+ return @as(ByOrdinal, @bitCast(raw));
}
};
@@ -483,12 +483,12 @@ pub const ImportLookupEntry64 = struct {
pub fn getImportByName(raw: u64) ?ByName {
if (mask & raw != 0) return null;
- return @bitCast(ByName, raw);
+ return @as(ByName, @bitCast(raw));
}
pub fn getImportByOrdinal(raw: u64) ?ByOrdinal {
if (mask & raw == 0) return null;
- return @bitCast(ByOrdinal, raw);
+ return @as(ByOrdinal, @bitCast(raw));
}
};
@@ -1146,25 +1146,25 @@ pub const Coff = struct {
}
pub fn getCoffHeader(self: Coff) CoffHeader {
- return @ptrCast(*align(1) const CoffHeader, self.data[self.coff_header_offset..][0..@sizeOf(CoffHeader)]).*;
+ return @as(*align(1) const CoffHeader, @ptrCast(self.data[self.coff_header_offset..][0..@sizeOf(CoffHeader)])).*;
}
pub fn getOptionalHeader(self: Coff) OptionalHeader {
assert(self.is_image);
const offset = self.coff_header_offset + @sizeOf(CoffHeader);
- return @ptrCast(*align(1) const OptionalHeader, self.data[offset..][0..@sizeOf(OptionalHeader)]).*;
+ return @as(*align(1) const OptionalHeader, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeader)])).*;
}
pub fn getOptionalHeader32(self: Coff) OptionalHeaderPE32 {
assert(self.is_image);
const offset = self.coff_header_offset + @sizeOf(CoffHeader);
- return @ptrCast(*align(1) const OptionalHeaderPE32, self.data[offset..][0..@sizeOf(OptionalHeaderPE32)]).*;
+ return @as(*align(1) const OptionalHeaderPE32, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeaderPE32)])).*;
}
pub fn getOptionalHeader64(self: Coff) OptionalHeaderPE64 {
assert(self.is_image);
const offset = self.coff_header_offset + @sizeOf(CoffHeader);
- return @ptrCast(*align(1) const OptionalHeaderPE64, self.data[offset..][0..@sizeOf(OptionalHeaderPE64)]).*;
+ return @as(*align(1) const OptionalHeaderPE64, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeaderPE64)])).*;
}
pub fn getImageBase(self: Coff) u64 {
@@ -1193,7 +1193,7 @@ pub const Coff = struct {
else => unreachable, // We assume we have validated the header already
};
const offset = self.coff_header_offset + @sizeOf(CoffHeader) + size;
- return @ptrCast([*]align(1) const ImageDataDirectory, self.data[offset..])[0..self.getNumberOfDataDirectories()];
+ return @as([*]align(1) const ImageDataDirectory, @ptrCast(self.data[offset..]))[0..self.getNumberOfDataDirectories()];
}
pub fn getSymtab(self: *const Coff) ?Symtab {
@@ -1217,7 +1217,7 @@ pub const Coff = struct {
pub fn getSectionHeaders(self: *const Coff) []align(1) const SectionHeader {
const coff_header = self.getCoffHeader();
const offset = self.coff_header_offset + @sizeOf(CoffHeader) + coff_header.size_of_optional_header;
- return @ptrCast([*]align(1) const SectionHeader, self.data.ptr + offset)[0..coff_header.number_of_sections];
+ return @as([*]align(1) const SectionHeader, @ptrCast(self.data.ptr + offset))[0..coff_header.number_of_sections];
}
pub fn getSectionHeadersAlloc(self: *const Coff, allocator: mem.Allocator) ![]SectionHeader {
@@ -1303,9 +1303,9 @@ pub const Symtab = struct {
return .{
.name = raw[0..8].*,
.value = mem.readIntLittle(u32, raw[8..12]),
- .section_number = @enumFromInt(SectionNumber, mem.readIntLittle(u16, raw[12..14])),
- .type = @bitCast(SymType, mem.readIntLittle(u16, raw[14..16])),
- .storage_class = @enumFromInt(StorageClass, raw[16]),
+ .section_number = @as(SectionNumber, @enumFromInt(mem.readIntLittle(u16, raw[12..14]))),
+ .type = @as(SymType, @bitCast(mem.readIntLittle(u16, raw[14..16]))),
+ .storage_class = @as(StorageClass, @enumFromInt(raw[16])),
.number_of_aux_symbols = raw[17],
};
}
@@ -1333,7 +1333,7 @@ pub const Symtab = struct {
fn asWeakExtDef(raw: []const u8) WeakExternalDefinition {
return .{
.tag_index = mem.readIntLittle(u32, raw[0..4]),
- .flag = @enumFromInt(WeakExternalFlag, mem.readIntLittle(u32, raw[4..8])),
+ .flag = @as(WeakExternalFlag, @enumFromInt(mem.readIntLittle(u32, raw[4..8]))),
.unused = raw[8..18].*,
};
}
@@ -1351,7 +1351,7 @@ pub const Symtab = struct {
.number_of_linenumbers = mem.readIntLittle(u16, raw[6..8]),
.checksum = mem.readIntLittle(u32, raw[8..12]),
.number = mem.readIntLittle(u16, raw[12..14]),
- .selection = @enumFromInt(ComdatSelection, raw[14]),
+ .selection = @as(ComdatSelection, @enumFromInt(raw[14])),
.unused = raw[15..18].*,
};
}
@@ -1384,6 +1384,6 @@ pub const Strtab = struct {
pub fn get(self: Strtab, off: u32) []const u8 {
assert(off < self.buffer.len);
- return mem.sliceTo(@ptrCast([*:0]const u8, self.buffer.ptr + off), 0);
+ return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.buffer.ptr + off)), 0);
}
};
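The Symtab hunks decode raw little-endian integers into enums and packed structs; a sketch with invented types standing in for SectionNumber, SymType, and StorageClass:

const std = @import("std");

test "decoding raw fields after the rewrite" {
    const DemoStorageClass = enum(u8) { external = 2, static = 3 };
    const DemoFlags = packed struct(u16) { relocs_stripped: bool, executable: bool, _pad: u14 };
    const raw_class: u8 = 2;
    const raw_flags: u16 = 0x0003;
    // Old forms: @enumFromInt(DemoStorageClass, raw_class) and @bitCast(DemoFlags, raw_flags)
    const class = @as(DemoStorageClass, @enumFromInt(raw_class));
    const flags = @as(DemoFlags, @bitCast(raw_flags));
    try std.testing.expect(class == .external);
    try std.testing.expect(flags.relocs_stripped and flags.executable);
}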
diff --git a/lib/std/compress/deflate/bits_utils.zig b/lib/std/compress/deflate/bits_utils.zig
index 85bae95bc8..4b440dc44e 100644
--- a/lib/std/compress/deflate/bits_utils.zig
+++ b/lib/std/compress/deflate/bits_utils.zig
@@ -3,7 +3,7 @@ const math = @import("std").math;
// Reverse bit-by-bit a N-bit code.
pub fn bitReverse(comptime T: type, value: T, N: usize) T {
const r = @bitReverse(value);
- return r >> @intCast(math.Log2Int(T), @typeInfo(T).Int.bits - N);
+ return r >> @as(math.Log2Int(T), @intCast(@typeInfo(T).Int.bits - N));
}
test "bitReverse" {
diff --git a/lib/std/compress/deflate/compressor.zig b/lib/std/compress/deflate/compressor.zig
index e2cbafe520..72de63f162 100644
--- a/lib/std/compress/deflate/compressor.zig
+++ b/lib/std/compress/deflate/compressor.zig
@@ -160,7 +160,7 @@ fn matchLen(a: []u8, b: []u8, max: u32) u32 {
var bounded_b = b[0..max];
for (bounded_a, 0..) |av, i| {
if (bounded_b[i] != av) {
- return @intCast(u32, i);
+ return @as(u32, @intCast(i));
}
}
return max;
@@ -313,14 +313,14 @@ pub fn Compressor(comptime WriterType: anytype) type {
// the entire table onto the stack (https://golang.org/issue/18625).
for (self.hash_prev, 0..) |v, i| {
if (v > delta) {
- self.hash_prev[i] = @intCast(u32, v - delta);
+ self.hash_prev[i] = @as(u32, @intCast(v - delta));
} else {
self.hash_prev[i] = 0;
}
}
for (self.hash_head, 0..) |v, i| {
if (v > delta) {
- self.hash_head[i] = @intCast(u32, v - delta);
+ self.hash_head[i] = @as(u32, @intCast(v - delta));
} else {
self.hash_head[i] = 0;
}
@@ -329,7 +329,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
}
const n = std.compress.deflate.copy(self.window[self.window_end..], b);
self.window_end += n;
- return @intCast(u32, n);
+ return @as(u32, @intCast(n));
}
fn writeBlock(self: *Self, tokens: []token.Token, index: usize) !void {
@@ -398,13 +398,13 @@ pub fn Compressor(comptime WriterType: anytype) type {
// Our chain should point to the previous value.
self.hash_prev[di & window_mask] = hh.*;
// Set the head of the hash chain to us.
- hh.* = @intCast(u32, di + self.hash_offset);
+ hh.* = @as(u32, @intCast(di + self.hash_offset));
}
self.hash = new_h;
}
// Update window information.
self.window_end = n;
- self.index = @intCast(u32, n);
+ self.index = @as(u32, @intCast(n));
}
const Match = struct {
@@ -471,11 +471,11 @@ pub fn Compressor(comptime WriterType: anytype) type {
break;
}
- if (@intCast(u32, self.hash_prev[i & window_mask]) < self.hash_offset) {
+ if (@as(u32, @intCast(self.hash_prev[i & window_mask])) < self.hash_offset) {
break;
}
- i = @intCast(u32, self.hash_prev[i & window_mask]) - self.hash_offset;
+ i = @as(u32, @intCast(self.hash_prev[i & window_mask])) - self.hash_offset;
if (i < min_index) {
break;
}
@@ -576,7 +576,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
// Flush current output block if any.
if (self.byte_available) {
// There is still one pending token that needs to be flushed
- self.tokens[self.tokens_count] = token.literalToken(@intCast(u32, self.window[self.index - 1]));
+ self.tokens[self.tokens_count] = token.literalToken(@as(u32, @intCast(self.window[self.index - 1])));
self.tokens_count += 1;
self.byte_available = false;
}
@@ -591,9 +591,9 @@ pub fn Compressor(comptime WriterType: anytype) type {
// Update the hash
self.hash = hash4(self.window[self.index .. self.index + min_match_length]);
var hh = &self.hash_head[self.hash & hash_mask];
- self.chain_head = @intCast(u32, hh.*);
- self.hash_prev[self.index & window_mask] = @intCast(u32, self.chain_head);
- hh.* = @intCast(u32, self.index + self.hash_offset);
+ self.chain_head = @as(u32, @intCast(hh.*));
+ self.hash_prev[self.index & window_mask] = @as(u32, @intCast(self.chain_head));
+ hh.* = @as(u32, @intCast(self.index + self.hash_offset));
}
var prev_length = self.length;
var prev_offset = self.offset;
@@ -614,7 +614,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
self.index,
self.chain_head -| self.hash_offset,
min_match_length - 1,
- @intCast(u32, lookahead),
+ @as(u32, @intCast(lookahead)),
);
if (fmatch.ok) {
self.length = fmatch.length;
@@ -631,12 +631,12 @@ pub fn Compressor(comptime WriterType: anytype) type {
// There was a match at the previous step, and the current match is
// not better. Output the previous match.
if (self.compression_level.fast_skip_hashshing != skip_never) {
- self.tokens[self.tokens_count] = token.matchToken(@intCast(u32, self.length - base_match_length), @intCast(u32, self.offset - base_match_offset));
+ self.tokens[self.tokens_count] = token.matchToken(@as(u32, @intCast(self.length - base_match_length)), @as(u32, @intCast(self.offset - base_match_offset)));
self.tokens_count += 1;
} else {
self.tokens[self.tokens_count] = token.matchToken(
- @intCast(u32, prev_length - base_match_length),
- @intCast(u32, prev_offset -| base_match_offset),
+ @as(u32, @intCast(prev_length - base_match_length)),
+ @as(u32, @intCast(prev_offset -| base_match_offset)),
);
self.tokens_count += 1;
}
@@ -661,7 +661,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
var hh = &self.hash_head[self.hash & hash_mask];
self.hash_prev[index & window_mask] = hh.*;
// Set the head of the hash chain to us.
- hh.* = @intCast(u32, index + self.hash_offset);
+ hh.* = @as(u32, @intCast(index + self.hash_offset));
}
}
self.index = index;
@@ -689,7 +689,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
if (self.compression_level.fast_skip_hashshing != skip_never) {
i = self.index;
}
- self.tokens[self.tokens_count] = token.literalToken(@intCast(u32, self.window[i]));
+ self.tokens[self.tokens_count] = token.literalToken(@as(u32, @intCast(self.window[i])));
self.tokens_count += 1;
if (self.tokens_count == max_flate_block_tokens) {
try self.writeBlock(self.tokens[0..self.tokens_count], i + 1);
@@ -707,7 +707,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
fn fillStore(self: *Self, b: []const u8) u32 {
const n = std.compress.deflate.copy(self.window[self.window_end..], b);
self.window_end += n;
- return @intCast(u32, n);
+ return @as(u32, @intCast(n));
}
fn store(self: *Self) !void {
diff --git a/lib/std/compress/deflate/compressor_test.zig b/lib/std/compress/deflate/compressor_test.zig
index 858da8d8b5..5012bb3c07 100644
--- a/lib/std/compress/deflate/compressor_test.zig
+++ b/lib/std/compress/deflate/compressor_test.zig
@@ -172,7 +172,7 @@ test "deflate/inflate" {
defer testing.allocator.free(large_data_chunk);
// fill with random data
for (large_data_chunk, 0..) |_, i| {
- large_data_chunk[i] = @truncate(u8, i) *% @truncate(u8, i);
+ large_data_chunk[i] = @as(u8, @truncate(i)) *% @as(u8, @truncate(i));
}
try testToFromWithLimit(large_data_chunk, limits);
}
diff --git a/lib/std/compress/deflate/decompressor.zig b/lib/std/compress/deflate/decompressor.zig
index 40bde67326..3f6ee151ba 100644
--- a/lib/std/compress/deflate/decompressor.zig
+++ b/lib/std/compress/deflate/decompressor.zig
@@ -130,30 +130,30 @@ const HuffmanDecoder = struct {
// Exception: To be compatible with zlib, we also need to
// accept degenerate single-code codings. See also
// TestDegenerateHuffmanCoding.
- if (code != @as(u32, 1) << @intCast(u5, max) and !(code == 1 and max == 1)) {
+ if (code != @as(u32, 1) << @as(u5, @intCast(max)) and !(code == 1 and max == 1)) {
return false;
}
self.min = min;
if (max > huffman_chunk_bits) {
- var num_links = @as(u32, 1) << @intCast(u5, max - huffman_chunk_bits);
- self.link_mask = @intCast(u32, num_links - 1);
+ var num_links = @as(u32, 1) << @as(u5, @intCast(max - huffman_chunk_bits));
+ self.link_mask = @as(u32, @intCast(num_links - 1));
// create link tables
var link = next_code[huffman_chunk_bits + 1] >> 1;
self.links = try self.allocator.alloc([]u16, huffman_num_chunks - link);
self.sub_chunks = ArrayList(u32).init(self.allocator);
self.initialized = true;
- var j = @intCast(u32, link);
+ var j = @as(u32, @intCast(link));
while (j < huffman_num_chunks) : (j += 1) {
- var reverse = @intCast(u32, bu.bitReverse(u16, @intCast(u16, j), 16));
- reverse >>= @intCast(u32, 16 - huffman_chunk_bits);
- var off = j - @intCast(u32, link);
+ var reverse = @as(u32, @intCast(bu.bitReverse(u16, @as(u16, @intCast(j)), 16)));
+ reverse >>= @as(u32, @intCast(16 - huffman_chunk_bits));
+ var off = j - @as(u32, @intCast(link));
if (sanity) {
// check we are not overwriting an existing chunk
assert(self.chunks[reverse] == 0);
}
- self.chunks[reverse] = @intCast(u16, off << huffman_value_shift | (huffman_chunk_bits + 1));
+ self.chunks[reverse] = @as(u16, @intCast(off << huffman_value_shift | (huffman_chunk_bits + 1)));
self.links[off] = try self.allocator.alloc(u16, num_links);
if (sanity) {
// initialize to a known invalid chunk code (0) to see if we overwrite
@@ -170,12 +170,12 @@ const HuffmanDecoder = struct {
}
var ncode = next_code[n];
next_code[n] += 1;
- var chunk = @intCast(u16, (li << huffman_value_shift) | n);
- var reverse = @intCast(u16, bu.bitReverse(u16, @intCast(u16, ncode), 16));
- reverse >>= @intCast(u4, 16 - n);
+ var chunk = @as(u16, @intCast((li << huffman_value_shift) | n));
+ var reverse = @as(u16, @intCast(bu.bitReverse(u16, @as(u16, @intCast(ncode)), 16)));
+ reverse >>= @as(u4, @intCast(16 - n));
if (n <= huffman_chunk_bits) {
var off = reverse;
- while (off < self.chunks.len) : (off += @as(u16, 1) << @intCast(u4, n)) {
+ while (off < self.chunks.len) : (off += @as(u16, 1) << @as(u4, @intCast(n))) {
// We should never need to overwrite
// an existing chunk. Also, 0 is
// never a valid chunk, because the
@@ -198,12 +198,12 @@ const HuffmanDecoder = struct {
var link_tab = self.links[value];
reverse >>= huffman_chunk_bits;
var off = reverse;
- while (off < link_tab.len) : (off += @as(u16, 1) << @intCast(u4, n - huffman_chunk_bits)) {
+ while (off < link_tab.len) : (off += @as(u16, 1) << @as(u4, @intCast(n - huffman_chunk_bits))) {
if (sanity) {
// check we are not overwriting an existing chunk
assert(link_tab[off] == 0);
}
- link_tab[off] = @intCast(u16, chunk);
+ link_tab[off] = @as(u16, @intCast(chunk));
}
}
}
@@ -494,21 +494,21 @@ pub fn Decompressor(comptime ReaderType: type) type {
while (self.nb < 5 + 5 + 4) {
try self.moreBits();
}
- var nlit = @intCast(u32, self.b & 0x1F) + 257;
+ var nlit = @as(u32, @intCast(self.b & 0x1F)) + 257;
if (nlit > max_num_lit) {
corrupt_input_error_offset = self.roffset;
self.err = InflateError.CorruptInput;
return InflateError.CorruptInput;
}
self.b >>= 5;
- var ndist = @intCast(u32, self.b & 0x1F) + 1;
+ var ndist = @as(u32, @intCast(self.b & 0x1F)) + 1;
if (ndist > max_num_dist) {
corrupt_input_error_offset = self.roffset;
self.err = InflateError.CorruptInput;
return InflateError.CorruptInput;
}
self.b >>= 5;
- var nclen = @intCast(u32, self.b & 0xF) + 4;
+ var nclen = @as(u32, @intCast(self.b & 0xF)) + 4;
// num_codes is 19, so nclen is always valid.
self.b >>= 4;
self.nb -= 5 + 5 + 4;
@@ -519,7 +519,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
while (self.nb < 3) {
try self.moreBits();
}
- self.codebits[code_order[i]] = @intCast(u32, self.b & 0x7);
+ self.codebits[code_order[i]] = @as(u32, @intCast(self.b & 0x7));
self.b >>= 3;
self.nb -= 3;
}
@@ -575,8 +575,8 @@ pub fn Decompressor(comptime ReaderType: type) type {
while (self.nb < nb) {
try self.moreBits();
}
- rep += @intCast(u32, self.b & (@as(u32, 1) << @intCast(u5, nb)) - 1);
- self.b >>= @intCast(u5, nb);
+ rep += @as(u32, @intCast(self.b & (@as(u32, 1) << @as(u5, @intCast(nb))) - 1));
+ self.b >>= @as(u5, @intCast(nb));
self.nb -= nb;
if (i + rep > n) {
corrupt_input_error_offset = self.roffset;
@@ -623,7 +623,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
var length: u32 = 0;
switch (v) {
0...255 => {
- self.dict.writeByte(@intCast(u8, v));
+ self.dict.writeByte(@as(u8, @intCast(v)));
if (self.dict.availWrite() == 0) {
self.to_read = self.dict.readFlush();
self.step = huffmanBlock;
@@ -676,8 +676,8 @@ pub fn Decompressor(comptime ReaderType: type) type {
while (self.nb < n) {
try self.moreBits();
}
- length += @intCast(u32, self.b) & ((@as(u32, 1) << @intCast(u5, n)) - 1);
- self.b >>= @intCast(u5, n);
+ length += @as(u32, @intCast(self.b)) & ((@as(u32, 1) << @as(u5, @intCast(n))) - 1);
+ self.b >>= @as(u5, @intCast(n));
self.nb -= n;
}
@@ -686,9 +686,9 @@ pub fn Decompressor(comptime ReaderType: type) type {
while (self.nb < 5) {
try self.moreBits();
}
- dist = @intCast(
+ dist = @as(
u32,
- bu.bitReverse(u8, @intCast(u8, (self.b & 0x1F) << 3), 8),
+ @intCast(bu.bitReverse(u8, @as(u8, @intCast((self.b & 0x1F) << 3)), 8)),
);
self.b >>= 5;
self.nb -= 5;
@@ -699,16 +699,16 @@ pub fn Decompressor(comptime ReaderType: type) type {
switch (dist) {
0...3 => dist += 1,
4...max_num_dist - 1 => { // 4...29
- var nb = @intCast(u32, dist - 2) >> 1;
+ var nb = @as(u32, @intCast(dist - 2)) >> 1;
// have 1 bit in bottom of dist, need nb more.
- var extra = (dist & 1) << @intCast(u5, nb);
+ var extra = (dist & 1) << @as(u5, @intCast(nb));
while (self.nb < nb) {
try self.moreBits();
}
- extra |= @intCast(u32, self.b & (@as(u32, 1) << @intCast(u5, nb)) - 1);
- self.b >>= @intCast(u5, nb);
+ extra |= @as(u32, @intCast(self.b & (@as(u32, 1) << @as(u5, @intCast(nb))) - 1));
+ self.b >>= @as(u5, @intCast(nb));
self.nb -= nb;
- dist = (@as(u32, 1) << @intCast(u5, nb + 1)) + 1 + extra;
+ dist = (@as(u32, 1) << @as(u5, @intCast(nb + 1))) + 1 + extra;
},
else => {
corrupt_input_error_offset = self.roffset;
@@ -762,10 +762,10 @@ pub fn Decompressor(comptime ReaderType: type) type {
self.err = InflateError.UnexpectedEndOfStream;
return InflateError.UnexpectedEndOfStream;
};
- self.roffset += @intCast(u64, nr);
- var n = @intCast(u32, self.buf[0]) | @intCast(u32, self.buf[1]) << 8;
- var nn = @intCast(u32, self.buf[2]) | @intCast(u32, self.buf[3]) << 8;
- if (@intCast(u16, nn) != @truncate(u16, ~n)) {
+ self.roffset += @as(u64, @intCast(nr));
+ var n = @as(u32, @intCast(self.buf[0])) | @as(u32, @intCast(self.buf[1])) << 8;
+ var nn = @as(u32, @intCast(self.buf[2])) | @as(u32, @intCast(self.buf[3])) << 8;
+ if (@as(u16, @intCast(nn)) != @as(u16, @truncate(~n))) {
corrupt_input_error_offset = self.roffset;
self.err = InflateError.CorruptInput;
return InflateError.CorruptInput;
@@ -793,9 +793,9 @@ pub fn Decompressor(comptime ReaderType: type) type {
if (cnt < buf.len) {
self.err = InflateError.UnexpectedEndOfStream;
}
- self.roffset += @intCast(u64, cnt);
- self.copy_len -= @intCast(u32, cnt);
- self.dict.writeMark(@intCast(u32, cnt));
+ self.roffset += @as(u64, @intCast(cnt));
+ self.copy_len -= @as(u32, @intCast(cnt));
+ self.dict.writeMark(@as(u32, @intCast(cnt)));
if (self.err != null) {
return InflateError.UnexpectedEndOfStream;
}
@@ -826,7 +826,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
return InflateError.BadReaderState;
};
self.roffset += 1;
- self.b |= @as(u32, c) << @intCast(u5, self.nb);
+ self.b |= @as(u32, c) << @as(u5, @intCast(self.nb));
self.nb += 8;
return;
}
@@ -854,14 +854,14 @@ pub fn Decompressor(comptime ReaderType: type) type {
return InflateError.BadReaderState;
};
self.roffset += 1;
- b |= @intCast(u32, c) << @intCast(u5, nb & 31);
+ b |= @as(u32, @intCast(c)) << @as(u5, @intCast(nb & 31));
nb += 8;
}
var chunk = h.chunks[b & (huffman_num_chunks - 1)];
- n = @intCast(u32, chunk & huffman_count_mask);
+ n = @as(u32, @intCast(chunk & huffman_count_mask));
if (n > huffman_chunk_bits) {
chunk = h.links[chunk >> huffman_value_shift][(b >> huffman_chunk_bits) & h.link_mask];
- n = @intCast(u32, chunk & huffman_count_mask);
+ n = @as(u32, @intCast(chunk & huffman_count_mask));
}
if (n <= nb) {
if (n == 0) {
@@ -871,9 +871,9 @@ pub fn Decompressor(comptime ReaderType: type) type {
self.err = InflateError.CorruptInput;
return InflateError.CorruptInput;
}
- self.b = b >> @intCast(u5, n & 31);
+ self.b = b >> @as(u5, @intCast(n & 31));
self.nb = nb - n;
- return @intCast(u32, chunk >> huffman_value_shift);
+ return @as(u32, @intCast(chunk >> huffman_value_shift));
}
}
}
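Throughout the decompressor the shift counts and byte extractions now read inside-out; when a result location already exists, the @as wrapper can be dropped entirely. A small sketch of both spellings, with invented values:

const std = @import("std");

test "truncate with and without an explicit destination" {
    const word: u32 = 0x12345678;
    const low: u8 = @truncate(word); // destination inferred from the result location
    try std.testing.expectEqual(@as(u8, 0x78), low);
    try std.testing.expectEqual(low, @as(u8, @truncate(word))); // spelled-out form
}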
diff --git a/lib/std/compress/deflate/deflate_fast.zig b/lib/std/compress/deflate/deflate_fast.zig
index c86d181cb5..a11548fa1f 100644
--- a/lib/std/compress/deflate/deflate_fast.zig
+++ b/lib/std/compress/deflate/deflate_fast.zig
@@ -30,23 +30,23 @@ const table_size = 1 << table_bits; // Size of the table.
const buffer_reset = math.maxInt(i32) - max_store_block_size * 2;
fn load32(b: []u8, i: i32) u32 {
- var s = b[@intCast(usize, i) .. @intCast(usize, i) + 4];
- return @intCast(u32, s[0]) |
- @intCast(u32, s[1]) << 8 |
- @intCast(u32, s[2]) << 16 |
- @intCast(u32, s[3]) << 24;
+ var s = b[@as(usize, @intCast(i)) .. @as(usize, @intCast(i)) + 4];
+ return @as(u32, @intCast(s[0])) |
+ @as(u32, @intCast(s[1])) << 8 |
+ @as(u32, @intCast(s[2])) << 16 |
+ @as(u32, @intCast(s[3])) << 24;
}
fn load64(b: []u8, i: i32) u64 {
- var s = b[@intCast(usize, i)..@intCast(usize, i + 8)];
- return @intCast(u64, s[0]) |
- @intCast(u64, s[1]) << 8 |
- @intCast(u64, s[2]) << 16 |
- @intCast(u64, s[3]) << 24 |
- @intCast(u64, s[4]) << 32 |
- @intCast(u64, s[5]) << 40 |
- @intCast(u64, s[6]) << 48 |
- @intCast(u64, s[7]) << 56;
+ var s = b[@as(usize, @intCast(i))..@as(usize, @intCast(i + 8))];
+ return @as(u64, @intCast(s[0])) |
+ @as(u64, @intCast(s[1])) << 8 |
+ @as(u64, @intCast(s[2])) << 16 |
+ @as(u64, @intCast(s[3])) << 24 |
+ @as(u64, @intCast(s[4])) << 32 |
+ @as(u64, @intCast(s[5])) << 40 |
+ @as(u64, @intCast(s[6])) << 48 |
+ @as(u64, @intCast(s[7])) << 56;
}
fn hash(u: u32) u32 {
@@ -117,7 +117,7 @@ pub const DeflateFast = struct {
// s_limit is when to stop looking for offset/length copies. The input_margin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
- var s_limit = @intCast(i32, src.len - input_margin);
+ var s_limit = @as(i32, @intCast(src.len - input_margin));
// next_emit is where in src the next emitLiteral should start from.
var next_emit: i32 = 0;
@@ -170,7 +170,7 @@ pub const DeflateFast = struct {
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[next_emit..s] are unmatched. Emit
// them as literal bytes.
- emitLiteral(dst, tokens_count, src[@intCast(usize, next_emit)..@intCast(usize, s)]);
+ emitLiteral(dst, tokens_count, src[@as(usize, @intCast(next_emit))..@as(usize, @intCast(s))]);
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
@@ -192,8 +192,8 @@ pub const DeflateFast = struct {
// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
dst[tokens_count.*] = token.matchToken(
- @intCast(u32, l + 4 - base_match_length),
- @intCast(u32, s - t - base_match_offset),
+ @as(u32, @intCast(l + 4 - base_match_length)),
+ @as(u32, @intCast(s - t - base_match_offset)),
);
tokens_count.* += 1;
s += l;
@@ -209,22 +209,22 @@ pub const DeflateFast = struct {
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
var x = load64(src, s - 1);
- var prev_hash = hash(@truncate(u32, x));
+ var prev_hash = hash(@as(u32, @truncate(x)));
self.table[prev_hash & table_mask] = TableEntry{
.offset = self.cur + s - 1,
- .val = @truncate(u32, x),
+ .val = @as(u32, @truncate(x)),
};
x >>= 8;
- var curr_hash = hash(@truncate(u32, x));
+ var curr_hash = hash(@as(u32, @truncate(x)));
candidate = self.table[curr_hash & table_mask];
self.table[curr_hash & table_mask] = TableEntry{
.offset = self.cur + s,
- .val = @truncate(u32, x),
+ .val = @as(u32, @truncate(x)),
};
var offset = s - (candidate.offset - self.cur);
- if (offset > max_match_offset or @truncate(u32, x) != candidate.val) {
- cv = @truncate(u32, x >> 8);
+ if (offset > max_match_offset or @as(u32, @truncate(x)) != candidate.val) {
+ cv = @as(u32, @truncate(x >> 8));
next_hash = hash(cv);
s += 1;
break;
@@ -232,18 +232,18 @@ pub const DeflateFast = struct {
}
}
- if (@intCast(u32, next_emit) < src.len) {
- emitLiteral(dst, tokens_count, src[@intCast(usize, next_emit)..]);
+ if (@as(u32, @intCast(next_emit)) < src.len) {
+ emitLiteral(dst, tokens_count, src[@as(usize, @intCast(next_emit))..]);
}
- self.cur += @intCast(i32, src.len);
- self.prev_len = @intCast(u32, src.len);
+ self.cur += @as(i32, @intCast(src.len));
+ self.prev_len = @as(u32, @intCast(src.len));
@memcpy(self.prev[0..self.prev_len], src);
return;
}
fn emitLiteral(dst: []token.Token, tokens_count: *u16, lit: []u8) void {
for (lit) |v| {
- dst[tokens_count.*] = token.literalToken(@intCast(u32, v));
+ dst[tokens_count.*] = token.literalToken(@as(u32, @intCast(v)));
tokens_count.* += 1;
}
return;
@@ -253,60 +253,60 @@ pub const DeflateFast = struct {
// t can be negative to indicate the match is starting in self.prev.
// We assume that src[s-4 .. s] and src[t-4 .. t] already match.
fn matchLen(self: *Self, s: i32, t: i32, src: []u8) i32 {
- var s1 = @intCast(u32, s) + max_match_length - 4;
+ var s1 = @as(u32, @intCast(s)) + max_match_length - 4;
if (s1 > src.len) {
- s1 = @intCast(u32, src.len);
+ s1 = @as(u32, @intCast(src.len));
}
// If we are inside the current block
if (t >= 0) {
- var b = src[@intCast(usize, t)..];
- var a = src[@intCast(usize, s)..@intCast(usize, s1)];
+ var b = src[@as(usize, @intCast(t))..];
+ var a = src[@as(usize, @intCast(s))..@as(usize, @intCast(s1))];
b = b[0..a.len];
// Extend the match to be as long as possible.
for (a, 0..) |_, i| {
if (a[i] != b[i]) {
- return @intCast(i32, i);
+ return @as(i32, @intCast(i));
}
}
- return @intCast(i32, a.len);
+ return @as(i32, @intCast(a.len));
}
// We found a match in the previous block.
- var tp = @intCast(i32, self.prev_len) + t;
+ var tp = @as(i32, @intCast(self.prev_len)) + t;
if (tp < 0) {
return 0;
}
// Extend the match to be as long as possible.
- var a = src[@intCast(usize, s)..@intCast(usize, s1)];
- var b = self.prev[@intCast(usize, tp)..@intCast(usize, self.prev_len)];
+ var a = src[@as(usize, @intCast(s))..@as(usize, @intCast(s1))];
+ var b = self.prev[@as(usize, @intCast(tp))..@as(usize, @intCast(self.prev_len))];
if (b.len > a.len) {
b = b[0..a.len];
}
a = a[0..b.len];
for (b, 0..) |_, i| {
if (a[i] != b[i]) {
- return @intCast(i32, i);
+ return @as(i32, @intCast(i));
}
}
// If we reached our limit, we matched everything we are
// allowed to in the previous block and we return.
- var n = @intCast(i32, b.len);
- if (@intCast(u32, s + n) == s1) {
+ var n = @as(i32, @intCast(b.len));
+ if (@as(u32, @intCast(s + n)) == s1) {
return n;
}
// Continue looking for more matches in the current block.
- a = src[@intCast(usize, s + n)..@intCast(usize, s1)];
+ a = src[@as(usize, @intCast(s + n))..@as(usize, @intCast(s1))];
b = src[0..a.len];
for (a, 0..) |_, i| {
if (a[i] != b[i]) {
- return @intCast(i32, i) + n;
+ return @as(i32, @intCast(i)) + n;
}
}
- return @intCast(i32, a.len) + n;
+ return @as(i32, @intCast(a.len)) + n;
}
// Reset resets the encoding history.
@@ -574,7 +574,7 @@ test "best speed match 2/2" {
var e = DeflateFast{
.prev = previous,
- .prev_len = @intCast(u32, previous.len),
+ .prev_len = @as(u32, @intCast(previous.len)),
.table = undefined,
.allocator = undefined,
.cur = 0,
@@ -617,7 +617,7 @@ test "best speed shift offsets" {
try expect(want_first_tokens > want_second_tokens);
// Forward the current indicator to before wraparound.
- enc.cur = buffer_reset - @intCast(i32, test_data.len);
+ enc.cur = buffer_reset - @as(i32, @intCast(test_data.len));
// Part 1 before wrap, should match clean state.
tokens_count = 0;
diff --git a/lib/std/compress/deflate/deflate_fast_test.zig b/lib/std/compress/deflate/deflate_fast_test.zig
index 1c771d925a..08f6079aa5 100644
--- a/lib/std/compress/deflate/deflate_fast_test.zig
+++ b/lib/std/compress/deflate/deflate_fast_test.zig
@@ -19,7 +19,7 @@ test "best speed" {
defer testing.allocator.free(abcabc);
for (abcabc, 0..) |_, i| {
- abcabc[i] = @intCast(u8, i % 128);
+ abcabc[i] = @as(u8, @intCast(i % 128));
}
var tc_01 = [_]u32{ 65536, 0 };
@@ -119,16 +119,16 @@ test "best speed max match offset" {
// zeros1 is between 0 and 30 zeros.
// The difference between the two abc's will be offset, which
// is max_match_offset plus or minus a small adjustment.
- var src_len: usize = @intCast(usize, offset + @as(i32, abc.len) + @intCast(i32, extra));
+ var src_len: usize = @as(usize, @intCast(offset + @as(i32, abc.len) + @as(i32, @intCast(extra))));
var src = try testing.allocator.alloc(u8, src_len);
defer testing.allocator.free(src);
@memcpy(src[0..abc.len], abc);
if (!do_match_before) {
- const src_offset: usize = @intCast(usize, offset - @as(i32, xyz.len));
+ const src_offset: usize = @as(usize, @intCast(offset - @as(i32, xyz.len)));
@memcpy(src[src_offset..][0..xyz.len], xyz);
}
- const src_offset: usize = @intCast(usize, offset);
+ const src_offset: usize = @as(usize, @intCast(offset));
@memcpy(src[src_offset..][0..abc.len], abc);
var compressed = ArrayList(u8).init(testing.allocator);
diff --git a/lib/std/compress/deflate/dict_decoder.zig b/lib/std/compress/deflate/dict_decoder.zig
index d9f240e7b4..75fdd359dd 100644
--- a/lib/std/compress/deflate/dict_decoder.zig
+++ b/lib/std/compress/deflate/dict_decoder.zig
@@ -49,7 +49,7 @@ pub const DictDecoder = struct {
if (dict != null) {
const src = dict.?[dict.?.len -| self.hist.len..];
@memcpy(self.hist[0..src.len], src);
- self.wr_pos = @intCast(u32, dict.?.len);
+ self.wr_pos = @as(u32, @intCast(dict.?.len));
}
if (self.wr_pos == self.hist.len) {
@@ -66,7 +66,7 @@ pub const DictDecoder = struct {
// Reports the total amount of historical data in the dictionary.
pub fn histSize(self: *Self) u32 {
if (self.full) {
- return @intCast(u32, self.hist.len);
+ return @as(u32, @intCast(self.hist.len));
}
return self.wr_pos;
}
@@ -78,7 +78,7 @@ pub const DictDecoder = struct {
// Reports the available amount of output buffer space.
pub fn availWrite(self: *Self) u32 {
- return @intCast(u32, self.hist.len - self.wr_pos);
+ return @as(u32, @intCast(self.hist.len - self.wr_pos));
}
// Returns a slice of the available buffer to write data to.
@@ -110,10 +110,10 @@ pub const DictDecoder = struct {
fn copy(dst: []u8, src: []const u8) u32 {
if (src.len > dst.len) {
mem.copyForwards(u8, dst, src[0..dst.len]);
- return @intCast(u32, dst.len);
+ return @as(u32, @intCast(dst.len));
}
mem.copyForwards(u8, dst[0..src.len], src);
- return @intCast(u32, src.len);
+ return @as(u32, @intCast(src.len));
}
// Copies a string at a given (dist, length) to the output.
@@ -125,10 +125,10 @@ pub const DictDecoder = struct {
assert(0 < dist and dist <= self.histSize());
var dst_base = self.wr_pos;
var dst_pos = dst_base;
- var src_pos: i32 = @intCast(i32, dst_pos) - @intCast(i32, dist);
+ var src_pos: i32 = @as(i32, @intCast(dst_pos)) - @as(i32, @intCast(dist));
var end_pos = dst_pos + length;
if (end_pos > self.hist.len) {
- end_pos = @intCast(u32, self.hist.len);
+ end_pos = @as(u32, @intCast(self.hist.len));
}
// Copy non-overlapping section after destination position.
@@ -139,8 +139,8 @@ pub const DictDecoder = struct {
// Thus, a backwards copy is performed here; that is, the exact bytes in
// the source prior to the copy are placed in the destination.
if (src_pos < 0) {
- src_pos += @intCast(i32, self.hist.len);
- dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@intCast(usize, src_pos)..]);
+ src_pos += @as(i32, @intCast(self.hist.len));
+ dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@as(usize, @intCast(src_pos))..]);
src_pos = 0;
}
@@ -160,7 +160,7 @@ pub const DictDecoder = struct {
// dst_pos = end_pos;
//
while (dst_pos < end_pos) {
- dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@intCast(usize, src_pos)..dst_pos]);
+ dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@as(usize, @intCast(src_pos))..dst_pos]);
}
self.wr_pos = dst_pos;
diff --git a/lib/std/compress/deflate/huffman_bit_writer.zig b/lib/std/compress/deflate/huffman_bit_writer.zig
index a852287b53..5204435106 100644
--- a/lib/std/compress/deflate/huffman_bit_writer.zig
+++ b/lib/std/compress/deflate/huffman_bit_writer.zig
@@ -107,7 +107,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
}
var n = self.nbytes;
while (self.nbits != 0) {
- self.bytes[n] = @truncate(u8, self.bits);
+ self.bytes[n] = @as(u8, @truncate(self.bits));
self.bits >>= 8;
if (self.nbits > 8) { // Avoid underflow
self.nbits -= 8;
@@ -132,7 +132,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
if (self.err) {
return;
}
- self.bits |= @intCast(u64, b) << @intCast(u6, self.nbits);
+ self.bits |= @as(u64, @intCast(b)) << @as(u6, @intCast(self.nbits));
self.nbits += nb;
if (self.nbits >= 48) {
var bits = self.bits;
@@ -140,12 +140,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
self.nbits -= 48;
var n = self.nbytes;
var bytes = self.bytes[n..][0..6];
- bytes[0] = @truncate(u8, bits);
- bytes[1] = @truncate(u8, bits >> 8);
- bytes[2] = @truncate(u8, bits >> 16);
- bytes[3] = @truncate(u8, bits >> 24);
- bytes[4] = @truncate(u8, bits >> 32);
- bytes[5] = @truncate(u8, bits >> 40);
+ bytes[0] = @as(u8, @truncate(bits));
+ bytes[1] = @as(u8, @truncate(bits >> 8));
+ bytes[2] = @as(u8, @truncate(bits >> 16));
+ bytes[3] = @as(u8, @truncate(bits >> 24));
+ bytes[4] = @as(u8, @truncate(bits >> 32));
+ bytes[5] = @as(u8, @truncate(bits >> 40));
n += 6;
if (n >= buffer_flush_size) {
try self.write(self.bytes[0..n]);
@@ -165,7 +165,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
return;
}
while (self.nbits != 0) {
- self.bytes[n] = @truncate(u8, self.bits);
+ self.bytes[n] = @as(u8, @truncate(self.bits));
self.bits >>= 8;
self.nbits -= 8;
n += 1;
@@ -209,12 +209,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
// Copy the concatenated code sizes to codegen. Put a marker at the end.
var cgnl = codegen[0..num_literals];
for (cgnl, 0..) |_, i| {
- cgnl[i] = @intCast(u8, lit_enc.codes[i].len);
+ cgnl[i] = @as(u8, @intCast(lit_enc.codes[i].len));
}
cgnl = codegen[num_literals .. num_literals + num_offsets];
for (cgnl, 0..) |_, i| {
- cgnl[i] = @intCast(u8, off_enc.codes[i].len);
+ cgnl[i] = @as(u8, @intCast(off_enc.codes[i].len));
}
codegen[num_literals + num_offsets] = bad_code;
@@ -243,7 +243,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
}
codegen[out_index] = 16;
out_index += 1;
- codegen[out_index] = @intCast(u8, n - 3);
+ codegen[out_index] = @as(u8, @intCast(n - 3));
out_index += 1;
self.codegen_freq[16] += 1;
count -= n;
@@ -256,7 +256,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
}
codegen[out_index] = 18;
out_index += 1;
- codegen[out_index] = @intCast(u8, n - 11);
+ codegen[out_index] = @as(u8, @intCast(n - 11));
out_index += 1;
self.codegen_freq[18] += 1;
count -= n;
@@ -265,7 +265,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
// 3 <= count <= 10
codegen[out_index] = 17;
out_index += 1;
- codegen[out_index] = @intCast(u8, count - 3);
+ codegen[out_index] = @as(u8, @intCast(count - 3));
out_index += 1;
self.codegen_freq[17] += 1;
count = 0;
@@ -307,8 +307,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
extra_bits;
return DynamicSize{
- .size = @intCast(u32, size),
- .num_codegens = @intCast(u32, num_codegens),
+ .size = @as(u32, @intCast(size)),
+ .num_codegens = @as(u32, @intCast(num_codegens)),
};
}
@@ -328,7 +328,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
return .{ .size = 0, .storable = false };
}
if (in.?.len <= deflate_const.max_store_block_size) {
- return .{ .size = @intCast(u32, (in.?.len + 5) * 8), .storable = true };
+ return .{ .size = @as(u32, @intCast((in.?.len + 5) * 8)), .storable = true };
}
return .{ .size = 0, .storable = false };
}
@@ -337,20 +337,20 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
if (self.err) {
return;
}
- self.bits |= @intCast(u64, c.code) << @intCast(u6, self.nbits);
- self.nbits += @intCast(u32, c.len);
+ self.bits |= @as(u64, @intCast(c.code)) << @as(u6, @intCast(self.nbits));
+ self.nbits += @as(u32, @intCast(c.len));
if (self.nbits >= 48) {
var bits = self.bits;
self.bits >>= 48;
self.nbits -= 48;
var n = self.nbytes;
var bytes = self.bytes[n..][0..6];
- bytes[0] = @truncate(u8, bits);
- bytes[1] = @truncate(u8, bits >> 8);
- bytes[2] = @truncate(u8, bits >> 16);
- bytes[3] = @truncate(u8, bits >> 24);
- bytes[4] = @truncate(u8, bits >> 32);
- bytes[5] = @truncate(u8, bits >> 40);
+ bytes[0] = @as(u8, @truncate(bits));
+ bytes[1] = @as(u8, @truncate(bits >> 8));
+ bytes[2] = @as(u8, @truncate(bits >> 16));
+ bytes[3] = @as(u8, @truncate(bits >> 24));
+ bytes[4] = @as(u8, @truncate(bits >> 32));
+ bytes[5] = @as(u8, @truncate(bits >> 40));
n += 6;
if (n >= buffer_flush_size) {
try self.write(self.bytes[0..n]);
@@ -381,36 +381,36 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
first_bits = 5;
}
try self.writeBits(first_bits, 3);
- try self.writeBits(@intCast(u32, num_literals - 257), 5);
- try self.writeBits(@intCast(u32, num_offsets - 1), 5);
- try self.writeBits(@intCast(u32, num_codegens - 4), 4);
+ try self.writeBits(@as(u32, @intCast(num_literals - 257)), 5);
+ try self.writeBits(@as(u32, @intCast(num_offsets - 1)), 5);
+ try self.writeBits(@as(u32, @intCast(num_codegens - 4)), 4);
var i: u32 = 0;
while (i < num_codegens) : (i += 1) {
- var value = @intCast(u32, self.codegen_encoding.codes[codegen_order[i]].len);
- try self.writeBits(@intCast(u32, value), 3);
+ var value = @as(u32, @intCast(self.codegen_encoding.codes[codegen_order[i]].len));
+ try self.writeBits(@as(u32, @intCast(value)), 3);
}
i = 0;
while (true) {
- var code_word: u32 = @intCast(u32, self.codegen[i]);
+ var code_word: u32 = @as(u32, @intCast(self.codegen[i]));
i += 1;
if (code_word == bad_code) {
break;
}
- try self.writeCode(self.codegen_encoding.codes[@intCast(u32, code_word)]);
+ try self.writeCode(self.codegen_encoding.codes[@as(u32, @intCast(code_word))]);
switch (code_word) {
16 => {
- try self.writeBits(@intCast(u32, self.codegen[i]), 2);
+ try self.writeBits(@as(u32, @intCast(self.codegen[i])), 2);
i += 1;
},
17 => {
- try self.writeBits(@intCast(u32, self.codegen[i]), 3);
+ try self.writeBits(@as(u32, @intCast(self.codegen[i])), 3);
i += 1;
},
18 => {
- try self.writeBits(@intCast(u32, self.codegen[i]), 7);
+ try self.writeBits(@as(u32, @intCast(self.codegen[i])), 7);
i += 1;
},
else => {},
@@ -428,8 +428,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
}
try self.writeBits(flag, 3);
try self.flush();
- try self.writeBits(@intCast(u32, length), 16);
- try self.writeBits(@intCast(u32, ~@intCast(u16, length)), 16);
+ try self.writeBits(@as(u32, @intCast(length)), 16);
+ try self.writeBits(@as(u32, @intCast(~@as(u16, @intCast(length)))), 16);
}
fn writeFixedHeader(self: *Self, is_eof: bool) Error!void {
@@ -476,14 +476,14 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
var length_code: u32 = length_codes_start + 8;
while (length_code < num_literals) : (length_code += 1) {
// First eight length codes have extra size = 0.
- extra_bits += @intCast(u32, self.literal_freq[length_code]) *
- @intCast(u32, length_extra_bits[length_code - length_codes_start]);
+ extra_bits += @as(u32, @intCast(self.literal_freq[length_code])) *
+ @as(u32, @intCast(length_extra_bits[length_code - length_codes_start]));
}
var offset_code: u32 = 4;
while (offset_code < num_offsets) : (offset_code += 1) {
// First four offset codes have extra size = 0.
- extra_bits += @intCast(u32, self.offset_freq[offset_code]) *
- @intCast(u32, offset_extra_bits[offset_code]);
+ extra_bits += @as(u32, @intCast(self.offset_freq[offset_code])) *
+ @as(u32, @intCast(offset_extra_bits[offset_code]));
}
}
@@ -621,12 +621,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
self.literal_freq[token.literal(deflate_const.end_block_marker)] += 1;
// get the number of literals
- num_literals = @intCast(u32, self.literal_freq.len);
+ num_literals = @as(u32, @intCast(self.literal_freq.len));
while (self.literal_freq[num_literals - 1] == 0) {
num_literals -= 1;
}
// get the number of offsets
- num_offsets = @intCast(u32, self.offset_freq.len);
+ num_offsets = @as(u32, @intCast(self.offset_freq.len));
while (num_offsets > 0 and self.offset_freq[num_offsets - 1] == 0) {
num_offsets -= 1;
}
@@ -664,18 +664,18 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
var length = token.length(t);
var length_code = token.lengthCode(length);
try self.writeCode(le_codes[length_code + length_codes_start]);
- var extra_length_bits = @intCast(u32, length_extra_bits[length_code]);
+ var extra_length_bits = @as(u32, @intCast(length_extra_bits[length_code]));
if (extra_length_bits > 0) {
- var extra_length = @intCast(u32, length - length_base[length_code]);
+ var extra_length = @as(u32, @intCast(length - length_base[length_code]));
try self.writeBits(extra_length, extra_length_bits);
}
// Write the offset
var offset = token.offset(t);
var offset_code = token.offsetCode(offset);
try self.writeCode(oe_codes[offset_code]);
- var extra_offset_bits = @intCast(u32, offset_extra_bits[offset_code]);
+ var extra_offset_bits = @as(u32, @intCast(offset_extra_bits[offset_code]));
if (extra_offset_bits > 0) {
- var extra_offset = @intCast(u32, offset - offset_base[offset_code]);
+ var extra_offset = @as(u32, @intCast(offset - offset_base[offset_code]));
try self.writeBits(extra_offset, extra_offset_bits);
}
}
@@ -742,8 +742,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
for (input) |t| {
// Bitwriting inlined, ~30% speedup
var c = encoding[t];
- self.bits |= @intCast(u64, c.code) << @intCast(u6, self.nbits);
- self.nbits += @intCast(u32, c.len);
+ self.bits |= @as(u64, @intCast(c.code)) << @as(u6, @intCast(self.nbits));
+ self.nbits += @as(u32, @intCast(c.len));
if (self.nbits < 48) {
continue;
}
@@ -752,12 +752,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
self.bits >>= 48;
self.nbits -= 48;
var bytes = self.bytes[n..][0..6];
- bytes[0] = @truncate(u8, bits);
- bytes[1] = @truncate(u8, bits >> 8);
- bytes[2] = @truncate(u8, bits >> 16);
- bytes[3] = @truncate(u8, bits >> 24);
- bytes[4] = @truncate(u8, bits >> 32);
- bytes[5] = @truncate(u8, bits >> 40);
+ bytes[0] = @as(u8, @truncate(bits));
+ bytes[1] = @as(u8, @truncate(bits >> 8));
+ bytes[2] = @as(u8, @truncate(bits >> 16));
+ bytes[3] = @as(u8, @truncate(bits >> 24));
+ bytes[4] = @as(u8, @truncate(bits >> 32));
+ bytes[5] = @as(u8, @truncate(bits >> 40));
n += 6;
if (n < buffer_flush_size) {
continue;
diff --git a/lib/std/compress/deflate/huffman_code.zig b/lib/std/compress/deflate/huffman_code.zig
index 689ac1441a..4fea45f863 100644
--- a/lib/std/compress/deflate/huffman_code.zig
+++ b/lib/std/compress/deflate/huffman_code.zig
@@ -73,7 +73,7 @@ pub const HuffmanEncoder = struct {
// Set list to be the set of all non-zero literals and their frequencies
for (freq, 0..) |f, i| {
if (f != 0) {
- list[count] = LiteralNode{ .literal = @intCast(u16, i), .freq = f };
+ list[count] = LiteralNode{ .literal = @as(u16, @intCast(i)), .freq = f };
count += 1;
} else {
list[count] = LiteralNode{ .literal = 0x00, .freq = 0 };
@@ -88,7 +88,7 @@ pub const HuffmanEncoder = struct {
// two or fewer literals, everything has bit length 1.
for (list, 0..) |node, i| {
// "list" is in order of increasing literal value.
- self.codes[node.literal].set(@intCast(u16, i), 1);
+ self.codes[node.literal].set(@as(u16, @intCast(i)), 1);
}
return;
}
@@ -105,7 +105,7 @@ pub const HuffmanEncoder = struct {
var total: u32 = 0;
for (freq, 0..) |f, i| {
if (f != 0) {
- total += @intCast(u32, f) * @intCast(u32, self.codes[i].len);
+ total += @as(u32, @intCast(f)) * @as(u32, @intCast(self.codes[i].len));
}
}
return total;
@@ -167,7 +167,7 @@ pub const HuffmanEncoder = struct {
}
// We need a total of 2*n - 2 items at top level and have already generated 2.
- levels[max_bits].needed = 2 * @intCast(u32, n) - 4;
+ levels[max_bits].needed = 2 * @as(u32, @intCast(n)) - 4;
{
var level = max_bits;
@@ -267,19 +267,19 @@ pub const HuffmanEncoder = struct {
// are encoded using "bits" bits, and get the values
// code, code + 1, .... The code values are
// assigned in literal order (not frequency order).
- var chunk = list[list.len - @intCast(u32, bits) ..];
+ var chunk = list[list.len - @as(u32, @intCast(bits)) ..];
self.lns = chunk;
mem.sort(LiteralNode, self.lns, {}, byLiteral);
for (chunk) |node| {
self.codes[node.literal] = HuffCode{
- .code = bu.bitReverse(u16, code, @intCast(u5, n)),
- .len = @intCast(u16, n),
+ .code = bu.bitReverse(u16, code, @as(u5, @intCast(n))),
+ .len = @as(u16, @intCast(n)),
};
code += 1;
}
- list = list[0 .. list.len - @intCast(u32, bits)];
+ list = list[0 .. list.len - @as(u32, @intCast(bits))];
}
}
};
@@ -332,7 +332,7 @@ pub fn generateFixedLiteralEncoding(allocator: Allocator) !HuffmanEncoder {
size = 8;
},
}
- codes[ch] = HuffCode{ .code = bu.bitReverse(u16, bits, @intCast(u5, size)), .len = size };
+ codes[ch] = HuffCode{ .code = bu.bitReverse(u16, bits, @as(u5, @intCast(size))), .len = size };
}
return h;
}
@@ -341,7 +341,7 @@ pub fn generateFixedOffsetEncoding(allocator: Allocator) !HuffmanEncoder {
var h = try newHuffmanEncoder(allocator, 30);
var codes = h.codes;
for (codes, 0..) |_, ch| {
- codes[ch] = HuffCode{ .code = bu.bitReverse(u16, @intCast(u16, ch), 5), .len = 5 };
+ codes[ch] = HuffCode{ .code = bu.bitReverse(u16, @as(u16, @intCast(ch)), 5), .len = 5 };
}
return h;
}
diff --git a/lib/std/compress/deflate/token.zig b/lib/std/compress/deflate/token.zig
index d0e9a23647..744fcdeb12 100644
--- a/lib/std/compress/deflate/token.zig
+++ b/lib/std/compress/deflate/token.zig
@@ -70,16 +70,16 @@ pub fn matchToken(xlength: u32, xoffset: u32) Token {
// Returns the literal of a literal token
pub fn literal(t: Token) u32 {
- return @intCast(u32, t - literal_type);
+ return @as(u32, @intCast(t - literal_type));
}
// Returns the extra offset of a match token
pub fn offset(t: Token) u32 {
- return @intCast(u32, t) & offset_mask;
+ return @as(u32, @intCast(t)) & offset_mask;
}
pub fn length(t: Token) u32 {
- return @intCast(u32, (t - match_type) >> length_shift);
+ return @as(u32, @intCast((t - match_type) >> length_shift));
}
pub fn lengthCode(len: u32) u32 {
@@ -88,10 +88,10 @@ pub fn lengthCode(len: u32) u32 {
// Returns the offset code corresponding to a specific offset
pub fn offsetCode(off: u32) u32 {
- if (off < @intCast(u32, offset_codes.len)) {
+ if (off < @as(u32, @intCast(offset_codes.len))) {
return offset_codes[off];
}
- if (off >> 7 < @intCast(u32, offset_codes.len)) {
+ if (off >> 7 < @as(u32, @intCast(offset_codes.len))) {
return offset_codes[off >> 7] + 14;
}
return offset_codes[off >> 14] + 28;
diff --git a/lib/std/compress/gzip.zig b/lib/std/compress/gzip.zig
index 7e9fea6814..f6fb038ae3 100644
--- a/lib/std/compress/gzip.zig
+++ b/lib/std/compress/gzip.zig
@@ -89,7 +89,7 @@ pub fn Decompress(comptime ReaderType: type) type {
if (FLG & FHCRC != 0) {
const hash = try source.readIntLittle(u16);
- if (hash != @truncate(u16, hasher.hasher.final()))
+ if (hash != @as(u16, @truncate(hasher.hasher.final())))
return error.WrongChecksum;
}
diff --git a/lib/std/compress/lzma/decode.zig b/lib/std/compress/lzma/decode.zig
index a6adb941a4..0dae9281e8 100644
--- a/lib/std/compress/lzma/decode.zig
+++ b/lib/std/compress/lzma/decode.zig
@@ -52,11 +52,11 @@ pub const Params = struct {
return error.CorruptInput;
}
- const lc = @intCast(u4, props % 9);
+ const lc = @as(u4, @intCast(props % 9));
props /= 9;
- const lp = @intCast(u3, props % 5);
+ const lp = @as(u3, @intCast(props % 5));
props /= 5;
- const pb = @intCast(u3, props);
+ const pb = @as(u3, @intCast(props));
const dict_size_provided = try reader.readIntLittle(u32);
const dict_size = @max(0x1000, dict_size_provided);
@@ -342,7 +342,7 @@ pub const DecoderState = struct {
result = (result << 1) ^ @intFromBool(try decoder.decodeBit(reader, &probs[result], update));
}
- return @truncate(u8, result - 0x100);
+ return @as(u8, @truncate(result - 0x100));
}
fn decodeDistance(
@@ -358,7 +358,7 @@ pub const DecoderState = struct {
if (pos_slot < 4)
return pos_slot;
- const num_direct_bits = @intCast(u5, (pos_slot >> 1) - 1);
+ const num_direct_bits = @as(u5, @intCast((pos_slot >> 1) - 1));
var result = (2 ^ (pos_slot & 1)) << num_direct_bits;
if (pos_slot < 14) {
diff --git a/lib/std/compress/lzma2/decode.zig b/lib/std/compress/lzma2/decode.zig
index 7297a1a51b..a23007d42a 100644
--- a/lib/std/compress/lzma2/decode.zig
+++ b/lib/std/compress/lzma2/decode.zig
@@ -119,11 +119,11 @@ pub const Decoder = struct {
return error.CorruptInput;
}
- const lc = @intCast(u4, props % 9);
+ const lc = @as(u4, @intCast(props % 9));
props /= 9;
- const lp = @intCast(u3, props % 5);
+ const lp = @as(u3, @intCast(props % 5));
props /= 5;
- const pb = @intCast(u3, props);
+ const pb = @as(u3, @intCast(props));
if (lc + lp > 4) {
return error.CorruptInput;
diff --git a/lib/std/compress/xz.zig b/lib/std/compress/xz.zig
index 5debc81835..3ceec90a7a 100644
--- a/lib/std/compress/xz.zig
+++ b/lib/std/compress/xz.zig
@@ -18,7 +18,7 @@ fn readStreamFlags(reader: anytype, check: *Check) !void {
if (reserved1 != 0)
return error.CorruptInput;
- check.* = @enumFromInt(Check, try bit_reader.readBitsNoEof(u4, 4));
+ check.* = @as(Check, @enumFromInt(try bit_reader.readBitsNoEof(u4, 4)));
const reserved2 = try bit_reader.readBitsNoEof(u4, 4);
if (reserved2 != 0)
diff --git a/lib/std/compress/xz/block.zig b/lib/std/compress/xz/block.zig
index 2a034011c2..6f4fad1c7f 100644
--- a/lib/std/compress/xz/block.zig
+++ b/lib/std/compress/xz/block.zig
@@ -108,7 +108,7 @@ pub fn Decoder(comptime ReaderType: type) type {
has_unpacked_size: bool,
};
- const flags = @bitCast(Flags, try header_reader.readByte());
+ const flags = @as(Flags, @bitCast(try header_reader.readByte()));
const filter_count = @as(u3, flags.last_filter_index) + 1;
if (filter_count > 1)
return error.Unsupported;
@@ -124,9 +124,9 @@ pub fn Decoder(comptime ReaderType: type) type {
_,
};
- const filter_id = @enumFromInt(
+ const filter_id = @as(
FilterId,
- try std.leb.readULEB128(u64, header_reader),
+ @enumFromInt(try std.leb.readULEB128(u64, header_reader)),
);
if (@intFromEnum(filter_id) >= 0x4000_0000_0000_0000)
diff --git a/lib/std/compress/zlib.zig b/lib/std/compress/zlib.zig
index 98cabb4732..5580192537 100644
--- a/lib/std/compress/zlib.zig
+++ b/lib/std/compress/zlib.zig
@@ -41,7 +41,7 @@ pub fn DecompressStream(comptime ReaderType: type) type {
// verify the header checksum
if (header_u16 % 31 != 0)
return error.BadHeader;
- const header = @bitCast(ZLibHeader, header_u16);
+ const header = @as(ZLibHeader, @bitCast(header_u16));
// The CM field must be 8 to indicate the use of DEFLATE
if (header.compression_method != ZLibHeader.DEFLATE)
@@ -130,9 +130,9 @@ pub fn CompressStream(comptime WriterType: type) type {
.preset_dict = 0,
.checksum = 0,
};
- header.checksum = @truncate(u5, 31 - @bitCast(u16, header) % 31);
+ header.checksum = @as(u5, @truncate(31 - @as(u16, @bitCast(header)) % 31));
- try dest.writeIntBig(u16, @bitCast(u16, header));
+ try dest.writeIntBig(u16, @as(u16, @bitCast(header)));
const compression_level: deflate.Compression = switch (options.level) {
.no_compression => .no_compression,
diff --git a/lib/std/compress/zstandard/decode/block.zig b/lib/std/compress/zstandard/decode/block.zig
index 40f5903a24..bbf8492f04 100644
--- a/lib/std/compress/zstandard/decode/block.zig
+++ b/lib/std/compress/zstandard/decode/block.zig
@@ -894,7 +894,7 @@ pub fn decodeBlockReader(
/// Decode the header of a block.
pub fn decodeBlockHeader(src: *const [3]u8) frame.Zstandard.Block.Header {
const last_block = src[0] & 1 == 1;
- const block_type = @enumFromInt(frame.Zstandard.Block.Type, (src[0] & 0b110) >> 1);
+ const block_type = @as(frame.Zstandard.Block.Type, @enumFromInt((src[0] & 0b110) >> 1));
const block_size = ((src[0] & 0b11111000) >> 3) + (@as(u21, src[1]) << 5) + (@as(u21, src[2]) << 13);
return .{
.last_block = last_block,
@@ -1008,7 +1008,7 @@ pub fn decodeLiteralsSection(
try huffman.decodeHuffmanTree(counting_reader.reader(), buffer)
else
null;
- const huffman_tree_size = @intCast(usize, counting_reader.bytes_read);
+ const huffman_tree_size = @as(usize, @intCast(counting_reader.bytes_read));
const total_streams_size = std.math.sub(usize, header.compressed_size.?, huffman_tree_size) catch
return error.MalformedLiteralsSection;
@@ -1058,8 +1058,8 @@ fn decodeStreams(size_format: u2, stream_data: []const u8) !LiteralsSection.Stre
/// - `error.EndOfStream` if there are not enough bytes in `source`
pub fn decodeLiteralsHeader(source: anytype) !LiteralsSection.Header {
const byte0 = try source.readByte();
- const block_type = @enumFromInt(LiteralsSection.BlockType, byte0 & 0b11);
- const size_format = @intCast(u2, (byte0 & 0b1100) >> 2);
+ const block_type = @as(LiteralsSection.BlockType, @enumFromInt(byte0 & 0b11));
+ const size_format = @as(u2, @intCast((byte0 & 0b1100) >> 2));
var regenerated_size: u20 = undefined;
var compressed_size: ?u18 = null;
switch (block_type) {
@@ -1132,9 +1132,9 @@ pub fn decodeSequencesHeader(
const compression_modes = try source.readByte();
- const matches_mode = @enumFromInt(SequencesSection.Header.Mode, (compression_modes & 0b00001100) >> 2);
- const offsets_mode = @enumFromInt(SequencesSection.Header.Mode, (compression_modes & 0b00110000) >> 4);
- const literal_mode = @enumFromInt(SequencesSection.Header.Mode, (compression_modes & 0b11000000) >> 6);
+ const matches_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b00001100) >> 2));
+ const offsets_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b00110000) >> 4));
+ const literal_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b11000000) >> 6));
if (compression_modes & 0b11 != 0) return error.ReservedBitSet;
return SequencesSection.Header{
diff --git a/lib/std/compress/zstandard/decode/fse.zig b/lib/std/compress/zstandard/decode/fse.zig
index 232af39ccf..6e987f9c6f 100644
--- a/lib/std/compress/zstandard/decode/fse.zig
+++ b/lib/std/compress/zstandard/decode/fse.zig
@@ -69,7 +69,7 @@ pub fn decodeFseTable(
}
fn buildFseTable(values: []const u16, entries: []Table.Fse) !void {
- const total_probability = @intCast(u16, entries.len);
+ const total_probability = @as(u16, @intCast(entries.len));
const accuracy_log = std.math.log2_int(u16, total_probability);
assert(total_probability <= 1 << 9);
@@ -77,7 +77,7 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void {
for (values, 0..) |value, i| {
if (value == 0) {
entries[entries.len - 1 - less_than_one_count] = Table.Fse{
- .symbol = @intCast(u8, i),
+ .symbol = @as(u8, @intCast(i)),
.baseline = 0,
.bits = accuracy_log,
};
@@ -99,7 +99,7 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void {
const share_size_log = std.math.log2_int(u16, share_size);
for (0..probability) |i| {
- temp_states[i] = @intCast(u16, position);
+ temp_states[i] = @as(u16, @intCast(position));
position += (entries.len >> 1) + (entries.len >> 3) + 3;
position &= entries.len - 1;
while (position >= entries.len - less_than_one_count) {
@@ -110,13 +110,13 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void {
std.mem.sort(u16, temp_states[0..probability], {}, std.sort.asc(u16));
for (0..probability) |i| {
entries[temp_states[i]] = if (i < double_state_count) Table.Fse{
- .symbol = @intCast(u8, symbol),
+ .symbol = @as(u8, @intCast(symbol)),
.bits = share_size_log + 1,
- .baseline = single_state_count * share_size + @intCast(u16, i) * 2 * share_size,
+ .baseline = single_state_count * share_size + @as(u16, @intCast(i)) * 2 * share_size,
} else Table.Fse{
- .symbol = @intCast(u8, symbol),
+ .symbol = @as(u8, @intCast(symbol)),
.bits = share_size_log,
- .baseline = (@intCast(u16, i) - double_state_count) * share_size,
+ .baseline = (@as(u16, @intCast(i)) - double_state_count) * share_size,
};
}
}
diff --git a/lib/std/compress/zstandard/decode/huffman.zig b/lib/std/compress/zstandard/decode/huffman.zig
index f5e977d0da..13fb1ac5f2 100644
--- a/lib/std/compress/zstandard/decode/huffman.zig
+++ b/lib/std/compress/zstandard/decode/huffman.zig
@@ -109,8 +109,8 @@ fn decodeDirectHuffmanTree(source: anytype, encoded_symbol_count: usize, weights
const weights_byte_count = (encoded_symbol_count + 1) / 2;
for (0..weights_byte_count) |i| {
const byte = try source.readByte();
- weights[2 * i] = @intCast(u4, byte >> 4);
- weights[2 * i + 1] = @intCast(u4, byte & 0xF);
+ weights[2 * i] = @as(u4, @intCast(byte >> 4));
+ weights[2 * i + 1] = @as(u4, @intCast(byte & 0xF));
}
return encoded_symbol_count + 1;
}
@@ -118,7 +118,7 @@ fn decodeDirectHuffmanTree(source: anytype, encoded_symbol_count: usize, weights
fn assignSymbols(weight_sorted_prefixed_symbols: []LiteralsSection.HuffmanTree.PrefixedSymbol, weights: [256]u4) usize {
for (0..weight_sorted_prefixed_symbols.len) |i| {
weight_sorted_prefixed_symbols[i] = .{
- .symbol = @intCast(u8, i),
+ .symbol = @as(u8, @intCast(i)),
.weight = undefined,
.prefix = undefined,
};
@@ -167,7 +167,7 @@ fn buildHuffmanTree(weights: *[256]u4, symbol_count: usize) error{MalformedHuffm
weight_power_sum_big += (@as(u16, 1) << value) >> 1;
}
if (weight_power_sum_big >= 1 << 11) return error.MalformedHuffmanTree;
- const weight_power_sum = @intCast(u16, weight_power_sum_big);
+ const weight_power_sum = @as(u16, @intCast(weight_power_sum_big));
// advance to next power of two (even if weight_power_sum is a power of 2)
// TODO: is it valid to have weight_power_sum == 0?
@@ -179,7 +179,7 @@ fn buildHuffmanTree(weights: *[256]u4, symbol_count: usize) error{MalformedHuffm
const prefixed_symbol_count = assignSymbols(weight_sorted_prefixed_symbols[0..symbol_count], weights.*);
const tree = LiteralsSection.HuffmanTree{
.max_bit_count = max_number_of_bits,
- .symbol_count_minus_one = @intCast(u8, prefixed_symbol_count - 1),
+ .symbol_count_minus_one = @as(u8, @intCast(prefixed_symbol_count - 1)),
.nodes = weight_sorted_prefixed_symbols,
};
return tree;
diff --git a/lib/std/compress/zstandard/decompress.zig b/lib/std/compress/zstandard/decompress.zig
index a2ba59e688..bc977d1fba 100644
--- a/lib/std/compress/zstandard/decompress.zig
+++ b/lib/std/compress/zstandard/decompress.zig
@@ -260,7 +260,7 @@ pub fn decodeFrameArrayList(
/// Returns the frame checksum corresponding to the data fed into `hasher`
pub fn computeChecksum(hasher: *std.hash.XxHash64) u32 {
const hash = hasher.final();
- return @intCast(u32, hash & 0xFFFFFFFF);
+ return @as(u32, @intCast(hash & 0xFFFFFFFF));
}
const FrameError = error{
@@ -398,7 +398,7 @@ pub const FrameContext = struct {
const window_size = if (window_size_raw > window_size_max)
return error.WindowTooLarge
else
- @intCast(usize, window_size_raw);
+ @as(usize, @intCast(window_size_raw));
const should_compute_checksum =
frame_header.descriptor.content_checksum_flag and verify_checksum;
@@ -585,7 +585,7 @@ pub fn frameWindowSize(header: ZstandardHeader) ?u64 {
const exponent = (descriptor & 0b11111000) >> 3;
const mantissa = descriptor & 0b00000111;
const window_log = 10 + exponent;
- const window_base = @as(u64, 1) << @intCast(u6, window_log);
+ const window_base = @as(u64, 1) << @as(u6, @intCast(window_log));
const window_add = (window_base / 8) * mantissa;
return window_base + window_add;
} else return header.content_size;
@@ -599,7 +599,7 @@ pub fn frameWindowSize(header: ZstandardHeader) ?u64 {
pub fn decodeZstandardHeader(
source: anytype,
) (@TypeOf(source).Error || error{ EndOfStream, ReservedBitSet })!ZstandardHeader {
- const descriptor = @bitCast(ZstandardHeader.Descriptor, try source.readByte());
+ const descriptor = @as(ZstandardHeader.Descriptor, @bitCast(try source.readByte()));
if (descriptor.reserved) return error.ReservedBitSet;
diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig
index f5938dd218..7c3343ba8c 100644
--- a/lib/std/crypto/25519/curve25519.zig
+++ b/lib/std/crypto/25519/curve25519.zig
@@ -54,7 +54,7 @@ pub const Curve25519 = struct {
var swap: u8 = 0;
var pos: usize = bits - 1;
while (true) : (pos -= 1) {
- const bit = (s[pos >> 3] >> @truncate(u3, pos)) & 1;
+ const bit = (s[pos >> 3] >> @as(u3, @truncate(pos))) & 1;
swap ^= bit;
Fe.cSwap2(&x2, &x3, &z2, &z3, swap);
swap = bit;
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index 50f34c45f3..bf0c62f9de 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -162,8 +162,8 @@ pub const Edwards25519 = struct {
const reduced = if ((s[s.len - 1] & 0x80) == 0) s else scalar.reduce(s);
var e: [2 * 32]i8 = undefined;
for (reduced, 0..) |x, i| {
- e[i * 2 + 0] = @as(i8, @truncate(u4, x));
- e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
+ e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x)));
+ e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4)));
}
// Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7
var carry: i8 = 0;
@@ -190,9 +190,9 @@ pub const Edwards25519 = struct {
while (true) : (pos -= 1) {
const slot = e[pos];
if (slot > 0) {
- q = q.add(pc[@intCast(usize, slot)]);
+ q = q.add(pc[@as(usize, @intCast(slot))]);
} else if (slot < 0) {
- q = q.sub(pc[@intCast(usize, -slot)]);
+ q = q.sub(pc[@as(usize, @intCast(-slot))]);
}
if (pos == 0) break;
q = q.dbl().dbl().dbl().dbl();
@@ -206,7 +206,7 @@ pub const Edwards25519 = struct {
var q = Edwards25519.identityElement;
var pos: usize = 252;
while (true) : (pos -= 4) {
- const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos)));
+ const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos)))));
if (vartime) {
if (slot != 0) {
q = q.add(pc[slot]);
@@ -283,15 +283,15 @@ pub const Edwards25519 = struct {
while (true) : (pos -= 1) {
const slot1 = e1[pos];
if (slot1 > 0) {
- q = q.add(pc1[@intCast(usize, slot1)]);
+ q = q.add(pc1[@as(usize, @intCast(slot1))]);
} else if (slot1 < 0) {
- q = q.sub(pc1[@intCast(usize, -slot1)]);
+ q = q.sub(pc1[@as(usize, @intCast(-slot1))]);
}
const slot2 = e2[pos];
if (slot2 > 0) {
- q = q.add(pc2[@intCast(usize, slot2)]);
+ q = q.add(pc2[@as(usize, @intCast(slot2))]);
} else if (slot2 < 0) {
- q = q.sub(pc2[@intCast(usize, -slot2)]);
+ q = q.sub(pc2[@as(usize, @intCast(-slot2))]);
}
if (pos == 0) break;
q = q.dbl().dbl().dbl().dbl();
@@ -326,9 +326,9 @@ pub const Edwards25519 = struct {
for (es, 0..) |e, i| {
const slot = e[pos];
if (slot > 0) {
- q = q.add(pcs[i][@intCast(usize, slot)]);
+ q = q.add(pcs[i][@as(usize, @intCast(slot))]);
} else if (slot < 0) {
- q = q.sub(pcs[i][@intCast(usize, -slot)]);
+ q = q.sub(pcs[i][@as(usize, @intCast(-slot))]);
}
}
if (pos == 0) break;
@@ -427,7 +427,7 @@ pub const Edwards25519 = struct {
}
const empty_block = [_]u8{0} ** H.block_length;
var t = [3]u8{ 0, n * h_l, 0 };
- var xctx_len_u8 = [1]u8{@intCast(u8, xctx.len)};
+ var xctx_len_u8 = [1]u8{@as(u8, @intCast(xctx.len))};
var st = H.init(.{});
st.update(empty_block[0..]);
st.update(s);
diff --git a/lib/std/crypto/25519/field.zig b/lib/std/crypto/25519/field.zig
index eec83f3d2e..627df9d4cb 100644
--- a/lib/std/crypto/25519/field.zig
+++ b/lib/std/crypto/25519/field.zig
@@ -254,11 +254,11 @@ pub const Fe = struct {
var rs: [5]u64 = undefined;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
- rs[i] = @truncate(u64, r[i]) & MASK51;
- r[i + 1] += @intCast(u64, r[i] >> 51);
+ rs[i] = @as(u64, @truncate(r[i])) & MASK51;
+ r[i + 1] += @as(u64, @intCast(r[i] >> 51));
}
- rs[4] = @truncate(u64, r[4]) & MASK51;
- var carry = @intCast(u64, r[4] >> 51);
+ rs[4] = @as(u64, @truncate(r[4])) & MASK51;
+ var carry = @as(u64, @intCast(r[4] >> 51));
rs[0] += 19 * carry;
carry = rs[0] >> 51;
rs[0] &= MASK51;
@@ -278,8 +278,8 @@ pub const Fe = struct {
var r: [5]u128 = undefined;
comptime var i = 0;
inline while (i < 5) : (i += 1) {
- ax[i] = @intCast(u128, a.limbs[i]);
- bx[i] = @intCast(u128, b.limbs[i]);
+ ax[i] = @as(u128, @intCast(a.limbs[i]));
+ bx[i] = @as(u128, @intCast(b.limbs[i]));
}
i = 1;
inline while (i < 5) : (i += 1) {
@@ -299,7 +299,7 @@ pub const Fe = struct {
var r: [5]u128 = undefined;
comptime var i = 0;
inline while (i < 5) : (i += 1) {
- ax[i] = @intCast(u128, a.limbs[i]);
+ ax[i] = @as(u128, @intCast(a.limbs[i]));
}
const a0_2 = 2 * ax[0];
const a1_2 = 2 * ax[1];
@@ -334,15 +334,15 @@ pub const Fe = struct {
/// Multiply a field element by a small (32-bit) integer
pub inline fn mul32(a: Fe, comptime n: u32) Fe {
- const sn = @intCast(u128, n);
+ const sn = @as(u128, @intCast(n));
var fe: Fe = undefined;
var x: u128 = 0;
comptime var i = 0;
inline while (i < 5) : (i += 1) {
x = a.limbs[i] * sn + (x >> 51);
- fe.limbs[i] = @truncate(u64, x) & MASK51;
+ fe.limbs[i] = @as(u64, @truncate(x)) & MASK51;
}
- fe.limbs[0] += @intCast(u64, x >> 51) * 19;
+ fe.limbs[0] += @as(u64, @intCast(x >> 51)) * 19;
return fe;
}
@@ -402,7 +402,7 @@ pub const Fe = struct {
const t2 = t.sqn(30).mul(t);
const t3 = t2.sqn(60).mul(t2);
const t4 = t3.sqn(120).mul(t3).sqn(10).mul(u).sqn(3).mul(_11).sq();
- return @bitCast(bool, @truncate(u1, ~(t4.toBytes()[1] & 1)));
+ return @as(bool, @bitCast(@as(u1, @truncate(~(t4.toBytes()[1] & 1)))));
}
fn uncheckedSqrt(x2: Fe) Fe {
diff --git a/lib/std/crypto/25519/scalar.zig b/lib/std/crypto/25519/scalar.zig
index fd6d42aebe..1699c68e12 100644
--- a/lib/std/crypto/25519/scalar.zig
+++ b/lib/std/crypto/25519/scalar.zig
@@ -27,8 +27,8 @@ pub fn rejectNonCanonical(s: CompressedScalar) NonCanonicalError!void {
while (true) : (i -= 1) {
const xs = @as(u16, s[i]);
const xfield_order_s = @as(u16, field_order_s[i]);
- c |= @intCast(u8, ((xs -% xfield_order_s) >> 8) & n);
- n &= @intCast(u8, ((xs ^ xfield_order_s) -% 1) >> 8);
+ c |= @as(u8, @intCast(((xs -% xfield_order_s) >> 8) & n));
+ n &= @as(u8, @intCast(((xs ^ xfield_order_s) -% 1) >> 8));
if (i == 0) break;
}
if (c == 0) {
@@ -89,7 +89,7 @@ pub fn neg(s: CompressedScalar) CompressedScalar {
var i: usize = 0;
while (i < 64) : (i += 1) {
carry = @as(u32, fs[i]) -% sx[i] -% @as(u32, carry);
- sx[i] = @truncate(u8, carry);
+ sx[i] = @as(u8, @truncate(carry));
carry = (carry >> 8) & 1;
}
return reduce64(sx);
@@ -129,7 +129,7 @@ pub const Scalar = struct {
while (i < 4) : (i += 1) {
mem.writeIntLittle(u64, bytes[i * 7 ..][0..8], expanded.limbs[i]);
}
- mem.writeIntLittle(u32, bytes[i * 7 ..][0..4], @intCast(u32, expanded.limbs[i]));
+ mem.writeIntLittle(u32, bytes[i * 7 ..][0..4], @as(u32, @intCast(expanded.limbs[i])));
return bytes;
}
@@ -234,42 +234,42 @@ pub const Scalar = struct {
const z80 = xy440;
const carry0 = z00 >> 56;
- const t10 = @truncate(u64, z00) & 0xffffffffffffff;
+ const t10 = @as(u64, @truncate(z00)) & 0xffffffffffffff;
const c00 = carry0;
const t00 = t10;
const carry1 = (z10 + c00) >> 56;
- const t11 = @truncate(u64, (z10 + c00)) & 0xffffffffffffff;
+ const t11 = @as(u64, @truncate((z10 + c00))) & 0xffffffffffffff;
const c10 = carry1;
const t12 = t11;
const carry2 = (z20 + c10) >> 56;
- const t13 = @truncate(u64, (z20 + c10)) & 0xffffffffffffff;
+ const t13 = @as(u64, @truncate((z20 + c10))) & 0xffffffffffffff;
const c20 = carry2;
const t20 = t13;
const carry3 = (z30 + c20) >> 56;
- const t14 = @truncate(u64, (z30 + c20)) & 0xffffffffffffff;
+ const t14 = @as(u64, @truncate((z30 + c20))) & 0xffffffffffffff;
const c30 = carry3;
const t30 = t14;
const carry4 = (z40 + c30) >> 56;
- const t15 = @truncate(u64, (z40 + c30)) & 0xffffffffffffff;
+ const t15 = @as(u64, @truncate((z40 + c30))) & 0xffffffffffffff;
const c40 = carry4;
const t40 = t15;
const carry5 = (z50 + c40) >> 56;
- const t16 = @truncate(u64, (z50 + c40)) & 0xffffffffffffff;
+ const t16 = @as(u64, @truncate((z50 + c40))) & 0xffffffffffffff;
const c50 = carry5;
const t50 = t16;
const carry6 = (z60 + c50) >> 56;
- const t17 = @truncate(u64, (z60 + c50)) & 0xffffffffffffff;
+ const t17 = @as(u64, @truncate((z60 + c50))) & 0xffffffffffffff;
const c60 = carry6;
const t60 = t17;
const carry7 = (z70 + c60) >> 56;
- const t18 = @truncate(u64, (z70 + c60)) & 0xffffffffffffff;
+ const t18 = @as(u64, @truncate((z70 + c60))) & 0xffffffffffffff;
const c70 = carry7;
const t70 = t18;
const carry8 = (z80 + c70) >> 56;
- const t19 = @truncate(u64, (z80 + c70)) & 0xffffffffffffff;
+ const t19 = @as(u64, @truncate((z80 + c70))) & 0xffffffffffffff;
const c80 = carry8;
const t80 = t19;
- const t90 = (@truncate(u64, c80));
+ const t90 = (@as(u64, @truncate(c80)));
const r0 = t00;
const r1 = t12;
const r2 = t20;
@@ -356,26 +356,26 @@ pub const Scalar = struct {
const carry12 = (z32 + c21) >> 56;
const c31 = carry12;
const carry13 = (z42 + c31) >> 56;
- const t24 = @truncate(u64, z42 + c31) & 0xffffffffffffff;
+ const t24 = @as(u64, @truncate(z42 + c31)) & 0xffffffffffffff;
const c41 = carry13;
const t41 = t24;
const carry14 = (z5 + c41) >> 56;
- const t25 = @truncate(u64, z5 + c41) & 0xffffffffffffff;
+ const t25 = @as(u64, @truncate(z5 + c41)) & 0xffffffffffffff;
const c5 = carry14;
const t5 = t25;
const carry15 = (z6 + c5) >> 56;
- const t26 = @truncate(u64, z6 + c5) & 0xffffffffffffff;
+ const t26 = @as(u64, @truncate(z6 + c5)) & 0xffffffffffffff;
const c6 = carry15;
const t6 = t26;
const carry16 = (z7 + c6) >> 56;
- const t27 = @truncate(u64, z7 + c6) & 0xffffffffffffff;
+ const t27 = @as(u64, @truncate(z7 + c6)) & 0xffffffffffffff;
const c7 = carry16;
const t7 = t27;
const carry17 = (z8 + c7) >> 56;
- const t28 = @truncate(u64, z8 + c7) & 0xffffffffffffff;
+ const t28 = @as(u64, @truncate(z8 + c7)) & 0xffffffffffffff;
const c8 = carry17;
const t8 = t28;
- const t9 = @truncate(u64, c8);
+ const t9 = @as(u64, @truncate(c8));
const qmu4_ = t41;
const qmu5_ = t5;
@@ -425,22 +425,22 @@ pub const Scalar = struct {
const xy31 = @as(u128, qdiv3) * @as(u128, m1);
const xy40 = @as(u128, qdiv4) * @as(u128, m0);
const carry18 = xy00 >> 56;
- const t29 = @truncate(u64, xy00) & 0xffffffffffffff;
+ const t29 = @as(u64, @truncate(xy00)) & 0xffffffffffffff;
const c0 = carry18;
const t01 = t29;
const carry19 = (xy01 + xy10 + c0) >> 56;
- const t31 = @truncate(u64, xy01 + xy10 + c0) & 0xffffffffffffff;
+ const t31 = @as(u64, @truncate(xy01 + xy10 + c0)) & 0xffffffffffffff;
const c12 = carry19;
const t110 = t31;
const carry20 = (xy02 + xy11 + xy20 + c12) >> 56;
- const t32 = @truncate(u64, xy02 + xy11 + xy20 + c12) & 0xffffffffffffff;
+ const t32 = @as(u64, @truncate(xy02 + xy11 + xy20 + c12)) & 0xffffffffffffff;
const c22 = carry20;
const t210 = t32;
const carry = (xy03 + xy12 + xy21 + xy30 + c22) >> 56;
- const t33 = @truncate(u64, xy03 + xy12 + xy21 + xy30 + c22) & 0xffffffffffffff;
+ const t33 = @as(u64, @truncate(xy03 + xy12 + xy21 + xy30 + c22)) & 0xffffffffffffff;
const c32 = carry;
const t34 = t33;
- const t42 = @truncate(u64, xy04 + xy13 + xy22 + xy31 + xy40 + c32) & 0xffffffffff;
+ const t42 = @as(u64, @truncate(xy04 + xy13 + xy22 + xy31 + xy40 + c32)) & 0xffffffffff;
const qmul0 = t01;
const qmul1 = t110;
@@ -498,7 +498,7 @@ pub const Scalar = struct {
const t = ((b << 56) + s4) -% (y41 + b3);
const b4 = b;
const t4 = t;
- const mask = (b4 -% @intCast(u64, ((1))));
+ const mask = (b4 -% @as(u64, @intCast(((1)))));
const z04 = s0 ^ (mask & (s0 ^ t0));
const z14 = s1 ^ (mask & (s1 ^ t1));
const z24 = s2 ^ (mask & (s2 ^ t2));
@@ -691,26 +691,26 @@ const ScalarDouble = struct {
const carry3 = (z31 + c20) >> 56;
const c30 = carry3;
const carry4 = (z41 + c30) >> 56;
- const t103 = @as(u64, @truncate(u64, z41 + c30)) & 0xffffffffffffff;
+ const t103 = @as(u64, @as(u64, @truncate(z41 + c30))) & 0xffffffffffffff;
const c40 = carry4;
const t410 = t103;
const carry5 = (z5 + c40) >> 56;
- const t104 = @as(u64, @truncate(u64, z5 + c40)) & 0xffffffffffffff;
+ const t104 = @as(u64, @as(u64, @truncate(z5 + c40))) & 0xffffffffffffff;
const c5 = carry5;
const t51 = t104;
const carry6 = (z6 + c5) >> 56;
- const t105 = @as(u64, @truncate(u64, z6 + c5)) & 0xffffffffffffff;
+ const t105 = @as(u64, @as(u64, @truncate(z6 + c5))) & 0xffffffffffffff;
const c6 = carry6;
const t61 = t105;
const carry7 = (z7 + c6) >> 56;
- const t106 = @as(u64, @truncate(u64, z7 + c6)) & 0xffffffffffffff;
+ const t106 = @as(u64, @as(u64, @truncate(z7 + c6))) & 0xffffffffffffff;
const c7 = carry7;
const t71 = t106;
const carry8 = (z8 + c7) >> 56;
- const t107 = @as(u64, @truncate(u64, z8 + c7)) & 0xffffffffffffff;
+ const t107 = @as(u64, @as(u64, @truncate(z8 + c7))) & 0xffffffffffffff;
const c8 = carry8;
const t81 = t107;
- const t91 = @as(u64, @truncate(u64, c8));
+ const t91 = @as(u64, @as(u64, @truncate(c8)));
const qmu4_ = t410;
const qmu5_ = t51;
@@ -760,22 +760,22 @@ const ScalarDouble = struct {
const xy31 = @as(u128, qdiv3) * @as(u128, m1);
const xy40 = @as(u128, qdiv4) * @as(u128, m0);
const carry9 = xy00 >> 56;
- const t108 = @truncate(u64, xy00) & 0xffffffffffffff;
+ const t108 = @as(u64, @truncate(xy00)) & 0xffffffffffffff;
const c0 = carry9;
const t010 = t108;
const carry10 = (xy01 + xy10 + c0) >> 56;
- const t109 = @truncate(u64, xy01 + xy10 + c0) & 0xffffffffffffff;
+ const t109 = @as(u64, @truncate(xy01 + xy10 + c0)) & 0xffffffffffffff;
const c11 = carry10;
const t110 = t109;
const carry11 = (xy02 + xy11 + xy20 + c11) >> 56;
- const t1010 = @truncate(u64, xy02 + xy11 + xy20 + c11) & 0xffffffffffffff;
+ const t1010 = @as(u64, @truncate(xy02 + xy11 + xy20 + c11)) & 0xffffffffffffff;
const c21 = carry11;
const t210 = t1010;
const carry = (xy03 + xy12 + xy21 + xy30 + c21) >> 56;
- const t1011 = @truncate(u64, xy03 + xy12 + xy21 + xy30 + c21) & 0xffffffffffffff;
+ const t1011 = @as(u64, @truncate(xy03 + xy12 + xy21 + xy30 + c21)) & 0xffffffffffffff;
const c31 = carry;
const t310 = t1011;
- const t411 = @truncate(u64, xy04 + xy13 + xy22 + xy31 + xy40 + c31) & 0xffffffffff;
+ const t411 = @as(u64, @truncate(xy04 + xy13 + xy22 + xy31 + xy40 + c31)) & 0xffffffffff;
const qmul0 = t010;
const qmul1 = t110;
diff --git a/lib/std/crypto/Certificate.zig b/lib/std/crypto/Certificate.zig
index 51eb97ab32..a4f0ff604b 100644
--- a/lib/std/crypto/Certificate.zig
+++ b/lib/std/crypto/Certificate.zig
@@ -312,7 +312,7 @@ pub const Parsed = struct {
while (name_i < general_names.slice.end) {
const general_name = try der.Element.parse(subject_alt_name, name_i);
name_i = general_name.slice.end;
- switch (@enumFromInt(GeneralNameTag, @intFromEnum(general_name.identifier.tag))) {
+ switch (@as(GeneralNameTag, @enumFromInt(@intFromEnum(general_name.identifier.tag)))) {
.dNSName => {
const dns_name = subject_alt_name[general_name.slice.start..general_name.slice.end];
if (checkHostName(host_name, dns_name)) return;
@@ -379,7 +379,7 @@ pub fn parse(cert: Certificate) ParseError!Parsed {
const tbs_certificate = try der.Element.parse(cert_bytes, certificate.slice.start);
const version_elem = try der.Element.parse(cert_bytes, tbs_certificate.slice.start);
const version = try parseVersion(cert_bytes, version_elem);
- const serial_number = if (@bitCast(u8, version_elem.identifier) == 0xa0)
+ const serial_number = if (@as(u8, @bitCast(version_elem.identifier)) == 0xa0)
try der.Element.parse(cert_bytes, version_elem.slice.end)
else
version_elem;
@@ -597,8 +597,8 @@ const Date = struct {
var month: u4 = 1;
while (month < date.month) : (month += 1) {
const days: u64 = std.time.epoch.getDaysInMonth(
- @enumFromInt(std.time.epoch.YearLeapKind, @intFromBool(is_leap)),
- @enumFromInt(std.time.epoch.Month, month),
+ @as(std.time.epoch.YearLeapKind, @enumFromInt(@intFromBool(is_leap))),
+ @as(std.time.epoch.Month, @enumFromInt(month)),
);
sec += days * std.time.epoch.secs_per_day;
}
@@ -685,7 +685,7 @@ fn parseEnum(comptime E: type, bytes: []const u8, element: der.Element) ParseEnu
pub const ParseVersionError = error{ UnsupportedCertificateVersion, CertificateFieldHasInvalidLength };
pub fn parseVersion(bytes: []const u8, version_elem: der.Element) ParseVersionError!Version {
- if (@bitCast(u8, version_elem.identifier) != 0xa0)
+ if (@as(u8, @bitCast(version_elem.identifier)) != 0xa0)
return .v1;
if (version_elem.slice.end - version_elem.slice.start != 3)
@@ -864,7 +864,7 @@ pub const der = struct {
pub fn parse(bytes: []const u8, index: u32) ParseElementError!Element {
var i = index;
- const identifier = @bitCast(Identifier, bytes[i]);
+ const identifier = @as(Identifier, @bitCast(bytes[i]));
i += 1;
const size_byte = bytes[i];
i += 1;
@@ -878,7 +878,7 @@ pub const der = struct {
};
}
- const len_size = @truncate(u7, size_byte);
+ const len_size = @as(u7, @truncate(size_byte));
if (len_size > @sizeOf(u32)) {
return error.CertificateFieldHasInvalidLength;
}
@@ -1042,10 +1042,10 @@ pub const rsa = struct {
var hashed: [Hash.digest_length]u8 = undefined;
while (idx < len) {
- c[0] = @intCast(u8, (counter >> 24) & 0xFF);
- c[1] = @intCast(u8, (counter >> 16) & 0xFF);
- c[2] = @intCast(u8, (counter >> 8) & 0xFF);
- c[3] = @intCast(u8, counter & 0xFF);
+ c[0] = @as(u8, @intCast((counter >> 24) & 0xFF));
+ c[1] = @as(u8, @intCast((counter >> 16) & 0xFF));
+ c[2] = @as(u8, @intCast((counter >> 8) & 0xFF));
+ c[3] = @as(u8, @intCast(counter & 0xFF));
std.mem.copyForwards(u8, hash[seed.len..], &c);
Hash.hash(&hash, &hashed, .{});
diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig
index 434de6e0a8..2a5555e301 100644
--- a/lib/std/crypto/Certificate/Bundle.zig
+++ b/lib/std/crypto/Certificate/Bundle.zig
@@ -131,7 +131,7 @@ pub fn rescanWindows(cb: *Bundle, gpa: Allocator) RescanWindowsError!void {
var ctx = w.crypt32.CertEnumCertificatesInStore(store, null);
while (ctx) |context| : (ctx = w.crypt32.CertEnumCertificatesInStore(store, ctx)) {
- const decoded_start = @intCast(u32, cb.bytes.items.len);
+ const decoded_start = @as(u32, @intCast(cb.bytes.items.len));
const encoded_cert = context.pbCertEncoded[0..context.cbCertEncoded];
try cb.bytes.appendSlice(gpa, encoded_cert);
try cb.parseCert(gpa, decoded_start, now_sec);
@@ -213,7 +213,7 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFrom
const needed_capacity = std.math.cast(u32, decoded_size_upper_bound + size) orelse
return error.CertificateAuthorityBundleTooBig;
try cb.bytes.ensureUnusedCapacity(gpa, needed_capacity);
- const end_reserved = @intCast(u32, cb.bytes.items.len + decoded_size_upper_bound);
+ const end_reserved = @as(u32, @intCast(cb.bytes.items.len + decoded_size_upper_bound));
const buffer = cb.bytes.allocatedSlice()[end_reserved..];
const end_index = try file.readAll(buffer);
const encoded_bytes = buffer[0..end_index];
@@ -230,7 +230,7 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFrom
return error.MissingEndCertificateMarker;
start_index = cert_end + end_marker.len;
const encoded_cert = mem.trim(u8, encoded_bytes[cert_start..cert_end], " \t\r\n");
- const decoded_start = @intCast(u32, cb.bytes.items.len);
+ const decoded_start = @as(u32, @intCast(cb.bytes.items.len));
const dest_buf = cb.bytes.allocatedSlice()[decoded_start..];
cb.bytes.items.len += try base64.decode(dest_buf, encoded_cert);
try cb.parseCert(gpa, decoded_start, now_sec);
diff --git a/lib/std/crypto/Certificate/Bundle/macos.zig b/lib/std/crypto/Certificate/Bundle/macos.zig
index bd7100eb46..028275a06b 100644
--- a/lib/std/crypto/Certificate/Bundle/macos.zig
+++ b/lib/std/crypto/Certificate/Bundle/macos.zig
@@ -21,7 +21,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
const reader = stream.reader();
const db_header = try reader.readStructBig(ApplDbHeader);
- assert(mem.eql(u8, "kych", &@bitCast([4]u8, db_header.signature)));
+ assert(mem.eql(u8, "kych", &@as([4]u8, @bitCast(db_header.signature))));
try stream.seekTo(db_header.schema_offset);
@@ -42,7 +42,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
const table_header = try reader.readStructBig(TableHeader);
- if (@enumFromInt(std.os.darwin.cssm.DB_RECORDTYPE, table_header.table_id) != .X509_CERTIFICATE) {
+ if (@as(std.os.darwin.cssm.DB_RECORDTYPE, @enumFromInt(table_header.table_id)) != .X509_CERTIFICATE) {
continue;
}
@@ -61,7 +61,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
try cb.bytes.ensureUnusedCapacity(gpa, cert_header.cert_size);
- const cert_start = @intCast(u32, cb.bytes.items.len);
+ const cert_start = @as(u32, @intCast(cb.bytes.items.len));
const dest_buf = cb.bytes.allocatedSlice()[cert_start..];
cb.bytes.items.len += try reader.readAtLeast(dest_buf, cert_header.cert_size);
diff --git a/lib/std/crypto/aegis.zig b/lib/std/crypto/aegis.zig
index 9709a3a958..75633f7c69 100644
--- a/lib/std/crypto/aegis.zig
+++ b/lib/std/crypto/aegis.zig
@@ -625,7 +625,7 @@ test "Aegis MAC" {
const key = [_]u8{0x00} ** Aegis128LMac.key_length;
var msg: [64]u8 = undefined;
for (&msg, 0..) |*m, i| {
- m.* = @truncate(u8, i);
+ m.* = @as(u8, @truncate(i));
}
const st_init = Aegis128LMac.init(&key);
var st = st_init;
diff --git a/lib/std/crypto/aes/soft.zig b/lib/std/crypto/aes/soft.zig
index 4c2a8ff80d..0b15555ad0 100644
--- a/lib/std/crypto/aes/soft.zig
+++ b/lib/std/crypto/aes/soft.zig
@@ -51,13 +51,13 @@ pub const Block = struct {
const s3 = block.repr[3];
var x: [4]u32 = undefined;
- x = table_lookup(&table_encrypt, @truncate(u8, s0), @truncate(u8, s1 >> 8), @truncate(u8, s2 >> 16), @truncate(u8, s3 >> 24));
+ x = table_lookup(&table_encrypt, @as(u8, @truncate(s0)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s3 >> 24)));
var t0 = x[0] ^ x[1] ^ x[2] ^ x[3];
- x = table_lookup(&table_encrypt, @truncate(u8, s1), @truncate(u8, s2 >> 8), @truncate(u8, s3 >> 16), @truncate(u8, s0 >> 24));
+ x = table_lookup(&table_encrypt, @as(u8, @truncate(s1)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s0 >> 24)));
var t1 = x[0] ^ x[1] ^ x[2] ^ x[3];
- x = table_lookup(&table_encrypt, @truncate(u8, s2), @truncate(u8, s3 >> 8), @truncate(u8, s0 >> 16), @truncate(u8, s1 >> 24));
+ x = table_lookup(&table_encrypt, @as(u8, @truncate(s2)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s1 >> 24)));
var t2 = x[0] ^ x[1] ^ x[2] ^ x[3];
- x = table_lookup(&table_encrypt, @truncate(u8, s3), @truncate(u8, s0 >> 8), @truncate(u8, s1 >> 16), @truncate(u8, s2 >> 24));
+ x = table_lookup(&table_encrypt, @as(u8, @truncate(s3)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s2 >> 24)));
var t3 = x[0] ^ x[1] ^ x[2] ^ x[3];
t0 ^= round_key.repr[0];
@@ -77,31 +77,31 @@ pub const Block = struct {
var x: [4]u32 = undefined;
x = .{
- table_encrypt[0][@truncate(u8, s0)],
- table_encrypt[1][@truncate(u8, s1 >> 8)],
- table_encrypt[2][@truncate(u8, s2 >> 16)],
- table_encrypt[3][@truncate(u8, s3 >> 24)],
+ table_encrypt[0][@as(u8, @truncate(s0))],
+ table_encrypt[1][@as(u8, @truncate(s1 >> 8))],
+ table_encrypt[2][@as(u8, @truncate(s2 >> 16))],
+ table_encrypt[3][@as(u8, @truncate(s3 >> 24))],
};
var t0 = x[0] ^ x[1] ^ x[2] ^ x[3];
x = .{
- table_encrypt[0][@truncate(u8, s1)],
- table_encrypt[1][@truncate(u8, s2 >> 8)],
- table_encrypt[2][@truncate(u8, s3 >> 16)],
- table_encrypt[3][@truncate(u8, s0 >> 24)],
+ table_encrypt[0][@as(u8, @truncate(s1))],
+ table_encrypt[1][@as(u8, @truncate(s2 >> 8))],
+ table_encrypt[2][@as(u8, @truncate(s3 >> 16))],
+ table_encrypt[3][@as(u8, @truncate(s0 >> 24))],
};
var t1 = x[0] ^ x[1] ^ x[2] ^ x[3];
x = .{
- table_encrypt[0][@truncate(u8, s2)],
- table_encrypt[1][@truncate(u8, s3 >> 8)],
- table_encrypt[2][@truncate(u8, s0 >> 16)],
- table_encrypt[3][@truncate(u8, s1 >> 24)],
+ table_encrypt[0][@as(u8, @truncate(s2))],
+ table_encrypt[1][@as(u8, @truncate(s3 >> 8))],
+ table_encrypt[2][@as(u8, @truncate(s0 >> 16))],
+ table_encrypt[3][@as(u8, @truncate(s1 >> 24))],
};
var t2 = x[0] ^ x[1] ^ x[2] ^ x[3];
x = .{
- table_encrypt[0][@truncate(u8, s3)],
- table_encrypt[1][@truncate(u8, s0 >> 8)],
- table_encrypt[2][@truncate(u8, s1 >> 16)],
- table_encrypt[3][@truncate(u8, s2 >> 24)],
+ table_encrypt[0][@as(u8, @truncate(s3))],
+ table_encrypt[1][@as(u8, @truncate(s0 >> 8))],
+ table_encrypt[2][@as(u8, @truncate(s1 >> 16))],
+ table_encrypt[3][@as(u8, @truncate(s2 >> 24))],
};
var t3 = x[0] ^ x[1] ^ x[2] ^ x[3];
@@ -122,13 +122,13 @@ pub const Block = struct {
// Last round uses s-box directly and XORs to produce output.
var x: [4]u8 = undefined;
- x = sbox_lookup(&sbox_encrypt, @truncate(u8, s3 >> 24), @truncate(u8, s2 >> 16), @truncate(u8, s1 >> 8), @truncate(u8, s0));
+ x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s3 >> 24)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s0)));
var t0 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
- x = sbox_lookup(&sbox_encrypt, @truncate(u8, s0 >> 24), @truncate(u8, s3 >> 16), @truncate(u8, s2 >> 8), @truncate(u8, s1));
+ x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s0 >> 24)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s1)));
var t1 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
- x = sbox_lookup(&sbox_encrypt, @truncate(u8, s1 >> 24), @truncate(u8, s0 >> 16), @truncate(u8, s3 >> 8), @truncate(u8, s2));
+ x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s1 >> 24)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s2)));
var t2 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
- x = sbox_lookup(&sbox_encrypt, @truncate(u8, s2 >> 24), @truncate(u8, s1 >> 16), @truncate(u8, s0 >> 8), @truncate(u8, s3));
+ x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s2 >> 24)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s3)));
var t3 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
t0 ^= round_key.repr[0];
@@ -147,13 +147,13 @@ pub const Block = struct {
const s3 = block.repr[3];
var x: [4]u32 = undefined;
- x = table_lookup(&table_decrypt, @truncate(u8, s0), @truncate(u8, s3 >> 8), @truncate(u8, s2 >> 16), @truncate(u8, s1 >> 24));
+ x = table_lookup(&table_decrypt, @as(u8, @truncate(s0)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s1 >> 24)));
var t0 = x[0] ^ x[1] ^ x[2] ^ x[3];
- x = table_lookup(&table_decrypt, @truncate(u8, s1), @truncate(u8, s0 >> 8), @truncate(u8, s3 >> 16), @truncate(u8, s2 >> 24));
+ x = table_lookup(&table_decrypt, @as(u8, @truncate(s1)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s2 >> 24)));
var t1 = x[0] ^ x[1] ^ x[2] ^ x[3];
- x = table_lookup(&table_decrypt, @truncate(u8, s2), @truncate(u8, s1 >> 8), @truncate(u8, s0 >> 16), @truncate(u8, s3 >> 24));
+ x = table_lookup(&table_decrypt, @as(u8, @truncate(s2)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s3 >> 24)));
var t2 = x[0] ^ x[1] ^ x[2] ^ x[3];
- x = table_lookup(&table_decrypt, @truncate(u8, s3), @truncate(u8, s2 >> 8), @truncate(u8, s1 >> 16), @truncate(u8, s0 >> 24));
+ x = table_lookup(&table_decrypt, @as(u8, @truncate(s3)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s0 >> 24)));
var t3 = x[0] ^ x[1] ^ x[2] ^ x[3];
t0 ^= round_key.repr[0];
@@ -173,31 +173,31 @@ pub const Block = struct {
var x: [4]u32 = undefined;
x = .{
- table_decrypt[0][@truncate(u8, s0)],
- table_decrypt[1][@truncate(u8, s3 >> 8)],
- table_decrypt[2][@truncate(u8, s2 >> 16)],
- table_decrypt[3][@truncate(u8, s1 >> 24)],
+ table_decrypt[0][@as(u8, @truncate(s0))],
+ table_decrypt[1][@as(u8, @truncate(s3 >> 8))],
+ table_decrypt[2][@as(u8, @truncate(s2 >> 16))],
+ table_decrypt[3][@as(u8, @truncate(s1 >> 24))],
};
var t0 = x[0] ^ x[1] ^ x[2] ^ x[3];
x = .{
- table_decrypt[0][@truncate(u8, s1)],
- table_decrypt[1][@truncate(u8, s0 >> 8)],
- table_decrypt[2][@truncate(u8, s3 >> 16)],
- table_decrypt[3][@truncate(u8, s2 >> 24)],
+ table_decrypt[0][@as(u8, @truncate(s1))],
+ table_decrypt[1][@as(u8, @truncate(s0 >> 8))],
+ table_decrypt[2][@as(u8, @truncate(s3 >> 16))],
+ table_decrypt[3][@as(u8, @truncate(s2 >> 24))],
};
var t1 = x[0] ^ x[1] ^ x[2] ^ x[3];
x = .{
- table_decrypt[0][@truncate(u8, s2)],
- table_decrypt[1][@truncate(u8, s1 >> 8)],
- table_decrypt[2][@truncate(u8, s0 >> 16)],
- table_decrypt[3][@truncate(u8, s3 >> 24)],
+ table_decrypt[0][@as(u8, @truncate(s2))],
+ table_decrypt[1][@as(u8, @truncate(s1 >> 8))],
+ table_decrypt[2][@as(u8, @truncate(s0 >> 16))],
+ table_decrypt[3][@as(u8, @truncate(s3 >> 24))],
};
var t2 = x[0] ^ x[1] ^ x[2] ^ x[3];
x = .{
- table_decrypt[0][@truncate(u8, s3)],
- table_decrypt[1][@truncate(u8, s2 >> 8)],
- table_decrypt[2][@truncate(u8, s1 >> 16)],
- table_decrypt[3][@truncate(u8, s0 >> 24)],
+ table_decrypt[0][@as(u8, @truncate(s3))],
+ table_decrypt[1][@as(u8, @truncate(s2 >> 8))],
+ table_decrypt[2][@as(u8, @truncate(s1 >> 16))],
+ table_decrypt[3][@as(u8, @truncate(s0 >> 24))],
};
var t3 = x[0] ^ x[1] ^ x[2] ^ x[3];
@@ -218,13 +218,13 @@ pub const Block = struct {
// Last round uses s-box directly and XORs to produce output.
var x: [4]u8 = undefined;
- x = sbox_lookup(&sbox_decrypt, @truncate(u8, s1 >> 24), @truncate(u8, s2 >> 16), @truncate(u8, s3 >> 8), @truncate(u8, s0));
+ x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s1 >> 24)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s0)));
var t0 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
- x = sbox_lookup(&sbox_decrypt, @truncate(u8, s2 >> 24), @truncate(u8, s3 >> 16), @truncate(u8, s0 >> 8), @truncate(u8, s1));
+ x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s2 >> 24)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s1)));
var t1 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
- x = sbox_lookup(&sbox_decrypt, @truncate(u8, s3 >> 24), @truncate(u8, s0 >> 16), @truncate(u8, s1 >> 8), @truncate(u8, s2));
+ x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s3 >> 24)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s2)));
var t2 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
- x = sbox_lookup(&sbox_decrypt, @truncate(u8, s0 >> 24), @truncate(u8, s1 >> 16), @truncate(u8, s2 >> 8), @truncate(u8, s3));
+ x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s0 >> 24)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s3)));
var t3 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
t0 ^= round_key.repr[0];
@@ -348,7 +348,7 @@ fn KeySchedule(comptime Aes: type) type {
const subw = struct {
// Apply sbox_encrypt to each byte in w.
fn func(w: u32) u32 {
- const x = sbox_lookup(&sbox_key_schedule, @truncate(u8, w), @truncate(u8, w >> 8), @truncate(u8, w >> 16), @truncate(u8, w >> 24));
+ const x = sbox_lookup(&sbox_key_schedule, @as(u8, @truncate(w)), @as(u8, @truncate(w >> 8)), @as(u8, @truncate(w >> 16)), @as(u8, @truncate(w >> 24)));
return @as(u32, x[3]) << 24 | @as(u32, x[2]) << 16 | @as(u32, x[1]) << 8 | @as(u32, x[0]);
}
}.func;
@@ -386,7 +386,7 @@ fn KeySchedule(comptime Aes: type) type {
inline while (j < 4) : (j += 1) {
var rk = round_keys[(ei + j) / 4].repr[(ei + j) % 4];
if (i > 0 and i + 4 < total_words) {
- const x = sbox_lookup(&sbox_key_schedule, @truncate(u8, rk >> 24), @truncate(u8, rk >> 16), @truncate(u8, rk >> 8), @truncate(u8, rk));
+ const x = sbox_lookup(&sbox_key_schedule, @as(u8, @truncate(rk >> 24)), @as(u8, @truncate(rk >> 16)), @as(u8, @truncate(rk >> 8)), @as(u8, @truncate(rk)));
const y = table_lookup(&table_decrypt, x[3], x[2], x[1], x[0]);
rk = y[0] ^ y[1] ^ y[2] ^ y[3];
}
@@ -664,7 +664,7 @@ fn mul(a: u8, b: u8) u8 {
}
}
- return @truncate(u8, s);
+ return @as(u8, @truncate(s));
}
const cache_line_bytes = 64;
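Illustrative aside on the pattern above, not taken from the patch: the mechanical migration wraps every cast in `@as(T, ...)` so the destination type stays explicit, but the point of the inferred-destination builtins is that an annotated result location can supply that type on its own. A minimal sketch, assuming Zig 0.11 semantics and hypothetical values:

const std = @import("std");

test "@truncate takes its destination type from the result location" {
    const s0: u32 = 0xAABBCCDD;
    // The annotation on `lo` supplies @truncate's destination type.
    const lo: u8 = @truncate(s0 >> 8);
    // Equivalent explicit form, as used throughout the migrated sources.
    const lo_explicit = @as(u8, @truncate(s0 >> 8));
    try std.testing.expectEqual(lo_explicit, lo);
}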
diff --git a/lib/std/crypto/aes_ocb.zig b/lib/std/crypto/aes_ocb.zig
index 6d5ce3779a..a05e8a7248 100644
--- a/lib/std/crypto/aes_ocb.zig
+++ b/lib/std/crypto/aes_ocb.zig
@@ -86,18 +86,18 @@ fn AesOcb(comptime Aes: anytype) type {
fn getOffset(aes_enc_ctx: EncryptCtx, npub: [nonce_length]u8) Block {
var nx = [_]u8{0} ** 16;
- nx[0] = @intCast(u8, @truncate(u7, tag_length * 8) << 1);
+ nx[0] = @as(u8, @intCast(@as(u7, @truncate(tag_length * 8)) << 1));
nx[16 - nonce_length - 1] = 1;
nx[nx.len - nonce_length ..].* = npub;
- const bottom = @truncate(u6, nx[15]);
+ const bottom = @as(u6, @truncate(nx[15]));
nx[15] &= 0xc0;
var ktop_: Block = undefined;
aes_enc_ctx.encrypt(&ktop_, &nx);
const ktop = mem.readIntBig(u128, &ktop_);
- var stretch = (@as(u192, ktop) << 64) | @as(u192, @truncate(u64, ktop >> 64) ^ @truncate(u64, ktop >> 56));
+ var stretch = (@as(u192, ktop) << 64) | @as(u192, @as(u64, @truncate(ktop >> 64)) ^ @as(u64, @truncate(ktop >> 56)));
var offset: Block = undefined;
- mem.writeIntBig(u128, &offset, @truncate(u128, stretch >> (64 - @as(u7, bottom))));
+ mem.writeIntBig(u128, &offset, @as(u128, @truncate(stretch >> (64 - @as(u7, bottom)))));
return offset;
}
diff --git a/lib/std/crypto/argon2.zig b/lib/std/crypto/argon2.zig
index 40df3290c0..898bc24e6f 100644
--- a/lib/std/crypto/argon2.zig
+++ b/lib/std/crypto/argon2.zig
@@ -95,7 +95,7 @@ pub const Params = struct {
pub fn fromLimits(ops_limit: u32, mem_limit: usize) Self {
const m = mem_limit / 1024;
std.debug.assert(m <= max_int);
- return .{ .t = ops_limit, .m = @intCast(u32, m), .p = 1 };
+ return .{ .t = ops_limit, .m = @as(u32, @intCast(m)), .p = 1 };
}
};
@@ -111,26 +111,26 @@ fn initHash(
var tmp: [4]u8 = undefined;
var b2 = Blake2b512.init(.{});
mem.writeIntLittle(u32, parameters[0..4], params.p);
- mem.writeIntLittle(u32, parameters[4..8], @intCast(u32, dk_len));
+ mem.writeIntLittle(u32, parameters[4..8], @as(u32, @intCast(dk_len)));
mem.writeIntLittle(u32, parameters[8..12], params.m);
mem.writeIntLittle(u32, parameters[12..16], params.t);
mem.writeIntLittle(u32, parameters[16..20], version);
mem.writeIntLittle(u32, parameters[20..24], @intFromEnum(mode));
b2.update(&parameters);
- mem.writeIntLittle(u32, &tmp, @intCast(u32, password.len));
+ mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(password.len)));
b2.update(&tmp);
b2.update(password);
- mem.writeIntLittle(u32, &tmp, @intCast(u32, salt.len));
+ mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(salt.len)));
b2.update(&tmp);
b2.update(salt);
const secret = params.secret orelse "";
std.debug.assert(secret.len <= max_int);
- mem.writeIntLittle(u32, &tmp, @intCast(u32, secret.len));
+ mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(secret.len)));
b2.update(&tmp);
b2.update(secret);
const ad = params.ad orelse "";
std.debug.assert(ad.len <= max_int);
- mem.writeIntLittle(u32, &tmp, @intCast(u32, ad.len));
+ mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(ad.len)));
b2.update(&tmp);
b2.update(ad);
b2.final(h0[0..Blake2b512.digest_length]);
@@ -140,7 +140,7 @@ fn initHash(
fn blake2bLong(out: []u8, in: []const u8) void {
const H = Blake2b512;
var outlen_bytes: [4]u8 = undefined;
- mem.writeIntLittle(u32, &outlen_bytes, @intCast(u32, out.len));
+ mem.writeIntLittle(u32, &outlen_bytes, @as(u32, @intCast(out.len)));
var out_buf: [H.digest_length]u8 = undefined;
@@ -391,7 +391,7 @@ fn Rp(a: usize, b: usize, c: usize, d: usize) QuarterRound {
}
fn fBlaMka(x: u64, y: u64) u64 {
- const xy = @as(u64, @truncate(u32, x)) * @as(u64, @truncate(u32, y));
+ const xy = @as(u64, @as(u32, @truncate(x))) * @as(u64, @as(u32, @truncate(y)));
return x +% y +% 2 *% xy;
}
@@ -448,7 +448,7 @@ fn indexAlpha(
lane: u24,
index: u32,
) u32 {
- var ref_lane = @intCast(u32, rand >> 32) % threads;
+ var ref_lane = @as(u32, @intCast(rand >> 32)) % threads;
if (n == 0 and slice == 0) {
ref_lane = lane;
}
@@ -467,10 +467,10 @@ fn indexAlpha(
if (index == 0 or lane == ref_lane) {
m -= 1;
}
- var p = @as(u64, @truncate(u32, rand));
+ var p = @as(u64, @as(u32, @truncate(rand)));
p = (p * p) >> 32;
p = (p * m) >> 32;
- return ref_lane * lanes + @intCast(u32, ((s + m - (p + 1)) % lanes));
+ return ref_lane * lanes + @as(u32, @intCast(((s + m - (p + 1)) % lanes)));
}
/// Derives a key from the password, salt, and argon2 parameters.
diff --git a/lib/std/crypto/ascon.zig b/lib/std/crypto/ascon.zig
index ae4bb57d29..8aa0b109f2 100644
--- a/lib/std/crypto/ascon.zig
+++ b/lib/std/crypto/ascon.zig
@@ -95,8 +95,8 @@ pub fn State(comptime endian: builtin.Endian) type {
/// XOR a byte into the state at a given offset.
pub fn addByte(self: *Self, byte: u8, offset: usize) void {
const z = switch (endian) {
- .Big => 64 - 8 - 8 * @truncate(u6, offset % 8),
- .Little => 8 * @truncate(u6, offset % 8),
+ .Big => 64 - 8 - 8 * @as(u6, @truncate(offset % 8)),
+ .Little => 8 * @as(u6, @truncate(offset % 8)),
};
self.st[offset / 8] ^= @as(u64, byte) << z;
}
diff --git a/lib/std/crypto/bcrypt.zig b/lib/std/crypto/bcrypt.zig
index 7bd140d584..87d2eef79a 100644
--- a/lib/std/crypto/bcrypt.zig
+++ b/lib/std/crypto/bcrypt.zig
@@ -376,10 +376,10 @@ pub const State = struct {
const Halves = struct { l: u32, r: u32 };
fn halfRound(state: *const State, i: u32, j: u32, n: usize) u32 {
- var r = state.sboxes[0][@truncate(u8, j >> 24)];
- r +%= state.sboxes[1][@truncate(u8, j >> 16)];
- r ^= state.sboxes[2][@truncate(u8, j >> 8)];
- r +%= state.sboxes[3][@truncate(u8, j)];
+ var r = state.sboxes[0][@as(u8, @truncate(j >> 24))];
+ r +%= state.sboxes[1][@as(u8, @truncate(j >> 16))];
+ r ^= state.sboxes[2][@as(u8, @truncate(j >> 8))];
+ r +%= state.sboxes[3][@as(u8, @truncate(j))];
return i ^ r ^ state.subkeys[n];
}
diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig
index f47c334ee9..17f11382ca 100644
--- a/lib/std/crypto/benchmark.zig
+++ b/lib/std/crypto/benchmark.zig
@@ -54,8 +54,8 @@ pub fn benchmarkHash(comptime Hash: anytype, comptime bytes: comptime_int) !u64
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, bytes / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(bytes / elapsed_s));
return throughput;
}
@@ -95,8 +95,8 @@ pub fn benchmarkMac(comptime Mac: anytype, comptime bytes: comptime_int) !u64 {
}
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, bytes / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(bytes / elapsed_s));
return throughput;
}
@@ -125,8 +125,8 @@ pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_c
}
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, exchange_count / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(exchange_count / elapsed_s));
return throughput;
}
@@ -148,8 +148,8 @@ pub fn benchmarkSignature(comptime Signature: anytype, comptime signatures_count
}
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, signatures_count / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(signatures_count / elapsed_s));
return throughput;
}
@@ -172,8 +172,8 @@ pub fn benchmarkSignatureVerification(comptime Signature: anytype, comptime sign
}
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, signatures_count / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(signatures_count / elapsed_s));
return throughput;
}
@@ -201,8 +201,8 @@ pub fn benchmarkBatchSignatureVerification(comptime Signature: anytype, comptime
}
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = batch.len * @intFromFloat(u64, signatures_count / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = batch.len * @as(u64, @intFromFloat(signatures_count / elapsed_s));
return throughput;
}
@@ -227,8 +227,8 @@ pub fn benchmarkKem(comptime Kem: anytype, comptime kems_count: comptime_int) !u
}
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, kems_count / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(kems_count / elapsed_s));
return throughput;
}
@@ -249,8 +249,8 @@ pub fn benchmarkKemDecaps(comptime Kem: anytype, comptime kems_count: comptime_i
}
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, kems_count / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(kems_count / elapsed_s));
return throughput;
}
@@ -267,8 +267,8 @@ pub fn benchmarkKemKeyGen(comptime Kem: anytype, comptime kems_count: comptime_i
}
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, kems_count / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(kems_count / elapsed_s));
return throughput;
}
@@ -309,8 +309,8 @@ pub fn benchmarkAead(comptime Aead: anytype, comptime bytes: comptime_int) !u64
mem.doNotOptimizeAway(&in);
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, 2 * bytes / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(2 * bytes / elapsed_s));
return throughput;
}
@@ -338,8 +338,8 @@ pub fn benchmarkAes(comptime Aes: anytype, comptime count: comptime_int) !u64 {
mem.doNotOptimizeAway(&in);
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, count / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(count / elapsed_s));
return throughput;
}
@@ -367,8 +367,8 @@ pub fn benchmarkAes8(comptime Aes: anytype, comptime count: comptime_int) !u64 {
mem.doNotOptimizeAway(&in);
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, 8 * count / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(8 * count / elapsed_s));
return throughput;
}
@@ -406,7 +406,7 @@ fn benchmarkPwhash(
const password = "testpass" ** 2;
const opts = .{
.allocator = allocator,
- .params = @ptrCast(*const ty.Params, @alignCast(std.meta.alignment(ty.Params), params)).*,
+ .params = @as(*const ty.Params, @ptrCast(@alignCast(params))).*,
.encoding = .phc,
};
var buf: [256]u8 = undefined;
@@ -422,7 +422,7 @@ fn benchmarkPwhash(
}
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
const throughput = elapsed_s / count;
return throughput;
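The benchmark changes apply the same idea to float/int conversions: `@floatFromInt` and `@intFromFloat` no longer take a type argument, so the type comes either from an `@as` wrapper, as above, or from an annotated destination. A short sketch with made-up numbers, assuming Zig 0.11 semantics:

const std = @import("std");

test "float and int conversions infer their destination type" {
    const start: u64 = 0;
    const end: u64 = 1_500_000_000; // pretend 1.5 s elapsed, in nanoseconds
    const bytes = 64 * 1024 * 1024; // comptime_int, coerces in the float division

    const elapsed_ns: f64 = @floatFromInt(end - start); // f64 inferred from the annotation
    const elapsed_s = elapsed_ns / std.time.ns_per_s;
    const throughput: u64 = @intFromFloat(bytes / elapsed_s); // u64 inferred from the annotation

    try std.testing.expect(throughput > 0);
}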
diff --git a/lib/std/crypto/blake2.zig b/lib/std/crypto/blake2.zig
index 316ea5e6b7..ba07226d08 100644
--- a/lib/std/crypto/blake2.zig
+++ b/lib/std/crypto/blake2.zig
@@ -80,7 +80,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
const key_len = if (options.key) |key| key.len else 0;
// default parameters
- d.h[0] ^= 0x01010000 ^ @truncate(u32, key_len << 8) ^ @intCast(u32, options.expected_out_bits >> 3);
+ d.h[0] ^= 0x01010000 ^ @as(u32, @truncate(key_len << 8)) ^ @as(u32, @intCast(options.expected_out_bits >> 3));
d.t = 0;
d.buf_len = 0;
@@ -127,7 +127,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
// Copy any remainder for next pass.
const b_slice = b[off..];
@memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
- d.buf_len += @intCast(u8, b_slice.len);
+ d.buf_len += @as(u8, @intCast(b_slice.len));
}
pub fn final(d: *Self, out: *[digest_length]u8) void {
@@ -135,7 +135,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
d.t += d.buf_len;
d.round(d.buf[0..], true);
for (&d.h) |*x| x.* = mem.nativeToLittle(u32, x.*);
- out.* = @ptrCast(*[digest_length]u8, &d.h).*;
+ out.* = @as(*[digest_length]u8, @ptrCast(&d.h)).*;
}
fn round(d: *Self, b: *const [64]u8, last: bool) void {
@@ -152,8 +152,8 @@ pub fn Blake2s(comptime out_bits: usize) type {
v[k + 8] = iv[k];
}
- v[12] ^= @truncate(u32, d.t);
- v[13] ^= @intCast(u32, d.t >> 32);
+ v[12] ^= @as(u32, @truncate(d.t));
+ v[13] ^= @as(u32, @intCast(d.t >> 32));
if (last) v[14] = ~v[14];
const rounds = comptime [_]RoundParam{
@@ -563,7 +563,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
// Copy any remainder for next pass.
const b_slice = b[off..];
@memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
- d.buf_len += @intCast(u8, b_slice.len);
+ d.buf_len += @as(u8, @intCast(b_slice.len));
}
pub fn final(d: *Self, out: *[digest_length]u8) void {
@@ -571,7 +571,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
d.t += d.buf_len;
d.round(d.buf[0..], true);
for (&d.h) |*x| x.* = mem.nativeToLittle(u64, x.*);
- out.* = @ptrCast(*[digest_length]u8, &d.h).*;
+ out.* = @as(*[digest_length]u8, @ptrCast(&d.h)).*;
}
fn round(d: *Self, b: *const [128]u8, last: bool) void {
@@ -588,8 +588,8 @@ pub fn Blake2b(comptime out_bits: usize) type {
v[k + 8] = iv[k];
}
- v[12] ^= @truncate(u64, d.t);
- v[13] ^= @intCast(u64, d.t >> 64);
+ v[12] ^= @as(u64, @truncate(d.t));
+ v[13] ^= @as(u64, @intCast(d.t >> 64));
if (last) v[14] = ~v[14];
const rounds = comptime [_]RoundParam{
diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig
index 7ad1511e79..fc1859b99d 100644
--- a/lib/std/crypto/blake3.zig
+++ b/lib/std/crypto/blake3.zig
@@ -89,7 +89,7 @@ const CompressVectorized = struct {
counter: u64,
flags: u8,
) [16]u32 {
- const md = Lane{ @truncate(u32, counter), @truncate(u32, counter >> 32), block_len, @as(u32, flags) };
+ const md = Lane{ @as(u32, @truncate(counter)), @as(u32, @truncate(counter >> 32)), block_len, @as(u32, flags) };
var rows = Rows{ chaining_value[0..4].*, chaining_value[4..8].*, IV[0..4].*, md };
var m = Rows{ block_words[0..4].*, block_words[4..8].*, block_words[8..12].*, block_words[12..16].* };
@@ -134,7 +134,7 @@ const CompressVectorized = struct {
rows[2] ^= @Vector(4, u32){ chaining_value[0], chaining_value[1], chaining_value[2], chaining_value[3] };
rows[3] ^= @Vector(4, u32){ chaining_value[4], chaining_value[5], chaining_value[6], chaining_value[7] };
- return @bitCast([16]u32, rows);
+ return @as([16]u32, @bitCast(rows));
}
};
@@ -184,8 +184,8 @@ const CompressGeneric = struct {
IV[1],
IV[2],
IV[3],
- @truncate(u32, counter),
- @truncate(u32, counter >> 32),
+ @as(u32, @truncate(counter)),
+ @as(u32, @truncate(counter >> 32)),
block_len,
flags,
};
@@ -206,7 +206,7 @@ else
CompressGeneric.compress;
fn first8Words(words: [16]u32) [8]u32 {
- return @ptrCast(*const [8]u32, &words).*;
+ return @as(*const [8]u32, @ptrCast(&words)).*;
}
fn wordsFromLittleEndianBytes(comptime count: usize, bytes: [count * 4]u8) [count]u32 {
@@ -285,7 +285,7 @@ const ChunkState = struct {
const want = BLOCK_LEN - self.block_len;
const take = @min(want, input.len);
@memcpy(self.block[self.block_len..][0..take], input[0..take]);
- self.block_len += @truncate(u8, take);
+ self.block_len += @as(u8, @truncate(take));
return input[take..];
}
@@ -658,7 +658,7 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void {
// Setup input pattern
var input_pattern: [251]u8 = undefined;
- for (&input_pattern, 0..) |*e, i| e.* = @truncate(u8, i);
+ for (&input_pattern, 0..) |*e, i| e.* = @as(u8, @truncate(i));
// Write repeating input pattern to hasher
var input_counter = input_len;
diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig
index 776387cbd9..6688fb97fa 100644
--- a/lib/std/crypto/chacha20.zig
+++ b/lib/std/crypto/chacha20.zig
@@ -587,8 +587,8 @@ fn ChaChaWith64BitNonce(comptime rounds_nb: usize) type {
const k = keyToWords(key);
var c: [4]u32 = undefined;
- c[0] = @truncate(u32, counter);
- c[1] = @truncate(u32, counter >> 32);
+ c[0] = @as(u32, @truncate(counter));
+ c[1] = @as(u32, @truncate(counter >> 32));
c[2] = mem.readIntLittle(u32, nonce[0..4]);
c[3] = mem.readIntLittle(u32, nonce[4..8]);
ChaChaImpl(rounds_nb).chacha20Xor(out, in, k, c, true);
@@ -600,8 +600,8 @@ fn ChaChaWith64BitNonce(comptime rounds_nb: usize) type {
const k = keyToWords(key);
var c: [4]u32 = undefined;
- c[0] = @truncate(u32, counter);
- c[1] = @truncate(u32, counter >> 32);
+ c[0] = @as(u32, @truncate(counter));
+ c[1] = @as(u32, @truncate(counter >> 32));
c[2] = mem.readIntLittle(u32, nonce[0..4]);
c[3] = mem.readIntLittle(u32, nonce[4..8]);
ChaChaImpl(rounds_nb).chacha20Stream(out, k, c, true);
diff --git a/lib/std/crypto/ecdsa.zig b/lib/std/crypto/ecdsa.zig
index e552af2e26..1a5335b07e 100644
--- a/lib/std/crypto/ecdsa.zig
+++ b/lib/std/crypto/ecdsa.zig
@@ -122,9 +122,9 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
pub fn toDer(self: Signature, buf: *[der_encoded_max_length]u8) []u8 {
var fb = io.fixedBufferStream(buf);
const w = fb.writer();
- const r_len = @intCast(u8, self.r.len + (self.r[0] >> 7));
- const s_len = @intCast(u8, self.s.len + (self.s[0] >> 7));
- const seq_len = @intCast(u8, 2 + r_len + 2 + s_len);
+ const r_len = @as(u8, @intCast(self.r.len + (self.r[0] >> 7)));
+ const s_len = @as(u8, @intCast(self.s.len + (self.s[0] >> 7)));
+ const seq_len = @as(u8, @intCast(2 + r_len + 2 + s_len));
w.writeAll(&[_]u8{ 0x30, seq_len }) catch unreachable;
w.writeAll(&[_]u8{ 0x02, r_len }) catch unreachable;
if (self.r[0] >> 7 != 0) {
diff --git a/lib/std/crypto/ff.zig b/lib/std/crypto/ff.zig
index 7b298c71c2..0a99058b21 100644
--- a/lib/std/crypto/ff.zig
+++ b/lib/std/crypto/ff.zig
@@ -100,7 +100,7 @@ pub fn Uint(comptime max_bits: comptime_int) type {
var x = x_;
var out = Self.zero;
for (0..out.limbs.capacity()) |i| {
- const t = if (@bitSizeOf(T) > t_bits) @truncate(TLimb, x) else x;
+ const t = if (@bitSizeOf(T) > t_bits) @as(TLimb, @truncate(x)) else x;
out.limbs.set(i, t);
x = math.shr(T, x, t_bits);
}
@@ -143,9 +143,9 @@ pub fn Uint(comptime max_bits: comptime_int) type {
var remaining_bits = t_bits;
var limb = self.limbs.get(i);
while (remaining_bits >= 8) {
- bytes[out_i] |= math.shl(u8, @truncate(u8, limb), shift);
+ bytes[out_i] |= math.shl(u8, @as(u8, @truncate(limb)), shift);
const consumed = 8 - shift;
- limb >>= @truncate(u4, consumed);
+ limb >>= @as(u4, @truncate(consumed));
remaining_bits -= consumed;
shift = 0;
switch (endian) {
@@ -169,7 +169,7 @@ pub fn Uint(comptime max_bits: comptime_int) type {
},
}
}
- bytes[out_i] |= @truncate(u8, limb);
+ bytes[out_i] |= @as(u8, @truncate(limb));
shift = remaining_bits;
}
}
@@ -190,7 +190,7 @@ pub fn Uint(comptime max_bits: comptime_int) type {
shift += 8;
if (shift >= t_bits) {
shift -= t_bits;
- out.limbs.set(out_i, @truncate(TLimb, out.limbs.get(out_i)));
+ out.limbs.set(out_i, @as(TLimb, @truncate(out.limbs.get(out_i))));
const overflow = math.shr(Limb, bi, 8 - shift);
out_i += 1;
if (out_i >= out.limbs.len) {
@@ -242,7 +242,7 @@ pub fn Uint(comptime max_bits: comptime_int) type {
/// Returns `true` if the integer is odd.
pub fn isOdd(x: Self) bool {
- return @bitCast(bool, @truncate(u1, x.limbs.get(0)));
+ return @as(bool, @bitCast(@as(u1, @truncate(x.limbs.get(0)))));
}
/// Adds `y` to `x`, and returns `true` if the operation overflowed.
@@ -273,8 +273,8 @@ pub fn Uint(comptime max_bits: comptime_int) type {
var carry: u1 = 0;
for (0..x.limbs_count()) |i| {
const res = x_limbs[i] + y_limbs[i] + carry;
- x_limbs[i] = ct.select(on, @truncate(TLimb, res), x_limbs[i]);
- carry = @truncate(u1, res >> t_bits);
+ x_limbs[i] = ct.select(on, @as(TLimb, @truncate(res)), x_limbs[i]);
+ carry = @as(u1, @truncate(res >> t_bits));
}
return carry;
}
@@ -288,8 +288,8 @@ pub fn Uint(comptime max_bits: comptime_int) type {
var borrow: u1 = 0;
for (0..x.limbs_count()) |i| {
const res = x_limbs[i] -% y_limbs[i] -% borrow;
- x_limbs[i] = ct.select(on, @truncate(TLimb, res), x_limbs[i]);
- borrow = @truncate(u1, res >> t_bits);
+ x_limbs[i] = ct.select(on, @as(TLimb, @truncate(res)), x_limbs[i]);
+ borrow = @as(u1, @truncate(res >> t_bits));
}
return borrow;
}
@@ -432,7 +432,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
inline for (0..comptime math.log2_int(usize, t_bits)) |_| {
y = y *% (2 -% lo *% y);
}
- const m0inv = (@as(Limb, 1) << t_bits) - (@truncate(TLimb, y));
+ const m0inv = (@as(Limb, 1) << t_bits) - (@as(TLimb, @truncate(y)));
const zero = Fe{ .v = FeUint.zero };
@@ -508,18 +508,18 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
var need_sub = false;
var i: usize = t_bits - 1;
while (true) : (i -= 1) {
- var carry = @truncate(u1, math.shr(Limb, y, i));
+ var carry = @as(u1, @truncate(math.shr(Limb, y, i)));
var borrow: u1 = 0;
for (0..self.limbs_count()) |j| {
const l = ct.select(need_sub, d_limbs[j], x_limbs[j]);
var res = (l << 1) + carry;
- x_limbs[j] = @truncate(TLimb, res);
- carry = @truncate(u1, res >> t_bits);
+ x_limbs[j] = @as(TLimb, @truncate(res));
+ carry = @as(u1, @truncate(res >> t_bits));
res = x_limbs[j] -% m_limbs[j] -% borrow;
- d_limbs[j] = @truncate(TLimb, res);
+ d_limbs[j] = @as(TLimb, @truncate(res));
- borrow = @truncate(u1, res >> t_bits);
+ borrow = @as(u1, @truncate(res >> t_bits));
}
need_sub = ct.eql(carry, borrow);
if (i == 0) break;
@@ -531,7 +531,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
pub fn add(self: Self, x: Fe, y: Fe) Fe {
var out = x;
const overflow = out.v.addWithOverflow(y.v);
- const underflow = @bitCast(u1, ct.limbsCmpLt(out.v, self.v));
+ const underflow = @as(u1, @bitCast(ct.limbsCmpLt(out.v, self.v)));
const need_sub = ct.eql(overflow, underflow);
_ = out.v.conditionalSubWithOverflow(need_sub, self.v);
return out;
@@ -540,7 +540,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
/// Subtracts two field elements (mod m).
pub fn sub(self: Self, x: Fe, y: Fe) Fe {
var out = x;
- const underflow = @bitCast(bool, out.v.subWithOverflow(y.v));
+ const underflow = @as(bool, @bitCast(out.v.subWithOverflow(y.v)));
_ = out.v.conditionalAddWithOverflow(underflow, self.v);
return out;
}
@@ -601,7 +601,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
var wide = ct.mulWide(a_limbs[i], b_limbs[0]);
var z_lo = @addWithOverflow(d_limbs[0], wide.lo);
- const f = @truncate(TLimb, z_lo[0] *% self.m0inv);
+ const f = @as(TLimb, @truncate(z_lo[0] *% self.m0inv));
var z_hi = wide.hi +% z_lo[1];
wide = ct.mulWide(f, m_limbs[0]);
z_lo = @addWithOverflow(z_lo[0], wide.lo);
@@ -620,13 +620,13 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
z_lo = @addWithOverflow(z_lo[0], carry);
z_hi +%= z_lo[1];
if (j > 0) {
- d_limbs[j - 1] = @truncate(TLimb, z_lo[0]);
+ d_limbs[j - 1] = @as(TLimb, @truncate(z_lo[0]));
}
carry = (z_hi << 1) | (z_lo[0] >> t_bits);
}
const z = overflow + carry;
- d_limbs[self.limbs_count() - 1] = @truncate(TLimb, z);
- overflow = @truncate(u1, z >> t_bits);
+ d_limbs[self.limbs_count() - 1] = @as(TLimb, @truncate(z));
+ overflow = @as(u1, @truncate(z >> t_bits));
}
return overflow;
}
@@ -735,7 +735,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
t0 = pc[k - 1];
} else {
for (pc, 0..) |t, i| {
- t0.v.cmov(ct.eql(k, @truncate(u8, i + 1)), t.v);
+ t0.v.cmov(ct.eql(k, @as(u8, @truncate(i + 1))), t.v);
}
}
const t1 = self.montgomeryMul(out, t0);
@@ -771,7 +771,7 @@ const ct_protected = struct {
fn eql(x: anytype, y: @TypeOf(x)) bool {
const c1 = @subWithOverflow(x, y)[1];
const c2 = @subWithOverflow(y, x)[1];
- return @bitCast(bool, 1 - (c1 | c2));
+ return @as(bool, @bitCast(1 - (c1 | c2)));
}
// Compares two big integers in constant time, returning true if x < y.
@@ -782,28 +782,28 @@ const ct_protected = struct {
var c: u1 = 0;
for (0..x.limbs_count()) |i| {
- c = @truncate(u1, (x_limbs[i] -% y_limbs[i] -% c) >> t_bits);
+ c = @as(u1, @truncate((x_limbs[i] -% y_limbs[i] -% c) >> t_bits));
}
- return @bitCast(bool, c);
+ return @as(bool, @bitCast(c));
}
// Compares two big integers in constant time, returning true if x >= y.
fn limbsCmpGeq(x: anytype, y: @TypeOf(x)) bool {
- return @bitCast(bool, 1 - @intFromBool(ct.limbsCmpLt(x, y)));
+ return @as(bool, @bitCast(1 - @intFromBool(ct.limbsCmpLt(x, y))));
}
// Multiplies two limbs and returns the result as a wide limb.
fn mulWide(x: Limb, y: Limb) WideLimb {
const half_bits = @typeInfo(Limb).Int.bits / 2;
const Half = meta.Int(.unsigned, half_bits);
- const x0 = @truncate(Half, x);
- const x1 = @truncate(Half, x >> half_bits);
- const y0 = @truncate(Half, y);
- const y1 = @truncate(Half, y >> half_bits);
+ const x0 = @as(Half, @truncate(x));
+ const x1 = @as(Half, @truncate(x >> half_bits));
+ const y0 = @as(Half, @truncate(y));
+ const y1 = @as(Half, @truncate(y >> half_bits));
const w0 = math.mulWide(Half, x0, y0);
const t = math.mulWide(Half, x1, y0) + (w0 >> half_bits);
- var w1: Limb = @truncate(Half, t);
- const w2 = @truncate(Half, t >> half_bits);
+ var w1: Limb = @as(Half, @truncate(t));
+ const w2 = @as(Half, @truncate(t >> half_bits));
w1 += math.mulWide(Half, x0, y1);
const hi = math.mulWide(Half, x1, y1) + w2 + (w1 >> half_bits);
const lo = x *% y;
@@ -847,8 +847,8 @@ const ct_unprotected = struct {
fn mulWide(x: Limb, y: Limb) WideLimb {
const wide = math.mulWide(Limb, x, y);
return .{
- .hi = @truncate(Limb, wide >> @typeInfo(Limb).Int.bits),
- .lo = @truncate(Limb, wide),
+ .hi = @as(Limb, @truncate(wide >> @typeInfo(Limb).Int.bits)),
+ .lo = @as(Limb, @truncate(wide)),
};
}
};
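The ff.zig changes also cover chained casts such as the `isOdd` check, where a `@truncate` feeds a `@bitCast`; with annotated destinations the nested `@as` wrappers become unnecessary. A hedged sketch with an illustrative limb value, not taken from the module:

const std = @import("std");

test "isOdd-style bit extraction with inferred cast types" {
    const limb: u64 = 0b1011;
    const bit: u1 = @truncate(limb); // keep only the lowest bit
    const odd: bool = @bitCast(bit); // reinterpret the single bit as a bool
    try std.testing.expect(odd);
}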
diff --git a/lib/std/crypto/ghash_polyval.zig b/lib/std/crypto/ghash_polyval.zig
index 2fbff25f72..11379cc8e3 100644
--- a/lib/std/crypto/ghash_polyval.zig
+++ b/lib/std/crypto/ghash_polyval.zig
@@ -96,28 +96,28 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
const product = asm (
\\ vpclmulqdq $0x11, %[x], %[y], %[out]
: [out] "=x" (-> @Vector(2, u64)),
- : [x] "x" (@bitCast(@Vector(2, u64), x)),
- [y] "x" (@bitCast(@Vector(2, u64), y)),
+ : [x] "x" (@as(@Vector(2, u64), @bitCast(x))),
+ [y] "x" (@as(@Vector(2, u64), @bitCast(y))),
);
- return @bitCast(u128, product);
+ return @as(u128, @bitCast(product));
},
.lo => {
const product = asm (
\\ vpclmulqdq $0x00, %[x], %[y], %[out]
: [out] "=x" (-> @Vector(2, u64)),
- : [x] "x" (@bitCast(@Vector(2, u64), x)),
- [y] "x" (@bitCast(@Vector(2, u64), y)),
+ : [x] "x" (@as(@Vector(2, u64), @bitCast(x))),
+ [y] "x" (@as(@Vector(2, u64), @bitCast(y))),
);
- return @bitCast(u128, product);
+ return @as(u128, @bitCast(product));
},
.hi_lo => {
const product = asm (
\\ vpclmulqdq $0x10, %[x], %[y], %[out]
: [out] "=x" (-> @Vector(2, u64)),
- : [x] "x" (@bitCast(@Vector(2, u64), x)),
- [y] "x" (@bitCast(@Vector(2, u64), y)),
+ : [x] "x" (@as(@Vector(2, u64), @bitCast(x))),
+ [y] "x" (@as(@Vector(2, u64), @bitCast(y))),
);
- return @bitCast(u128, product);
+ return @as(u128, @bitCast(product));
},
}
}
@@ -129,28 +129,28 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
const product = asm (
\\ pmull2 %[out].1q, %[x].2d, %[y].2d
: [out] "=w" (-> @Vector(2, u64)),
- : [x] "w" (@bitCast(@Vector(2, u64), x)),
- [y] "w" (@bitCast(@Vector(2, u64), y)),
+ : [x] "w" (@as(@Vector(2, u64), @bitCast(x))),
+ [y] "w" (@as(@Vector(2, u64), @bitCast(y))),
);
- return @bitCast(u128, product);
+ return @as(u128, @bitCast(product));
},
.lo => {
const product = asm (
\\ pmull %[out].1q, %[x].1d, %[y].1d
: [out] "=w" (-> @Vector(2, u64)),
- : [x] "w" (@bitCast(@Vector(2, u64), x)),
- [y] "w" (@bitCast(@Vector(2, u64), y)),
+ : [x] "w" (@as(@Vector(2, u64), @bitCast(x))),
+ [y] "w" (@as(@Vector(2, u64), @bitCast(y))),
);
- return @bitCast(u128, product);
+ return @as(u128, @bitCast(product));
},
.hi_lo => {
const product = asm (
\\ pmull %[out].1q, %[x].1d, %[y].1d
: [out] "=w" (-> @Vector(2, u64)),
- : [x] "w" (@bitCast(@Vector(2, u64), x >> 64)),
- [y] "w" (@bitCast(@Vector(2, u64), y)),
+ : [x] "w" (@as(@Vector(2, u64), @bitCast(x >> 64))),
+ [y] "w" (@as(@Vector(2, u64), @bitCast(y))),
);
- return @bitCast(u128, product);
+ return @as(u128, @bitCast(product));
},
}
}
@@ -167,8 +167,8 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
// Software carryless multiplication of two 64-bit integers using native 128-bit registers.
fn clmulSoft128(x_: u128, y_: u128, comptime half: Selector) u128 {
- const x = @truncate(u64, if (half == .hi or half == .hi_lo) x_ >> 64 else x_);
- const y = @truncate(u64, if (half == .hi) y_ >> 64 else y_);
+ const x = @as(u64, @truncate(if (half == .hi or half == .hi_lo) x_ >> 64 else x_));
+ const y = @as(u64, @truncate(if (half == .hi) y_ >> 64 else y_));
const x0 = x & 0x1111111111111110;
const x1 = x & 0x2222222222222220;
@@ -216,12 +216,12 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
// Software carryless multiplication of two 128-bit integers using 64-bit registers.
fn clmulSoft128_64(x_: u128, y_: u128, comptime half: Selector) u128 {
- const a = @truncate(u64, if (half == .hi or half == .hi_lo) x_ >> 64 else x_);
- const b = @truncate(u64, if (half == .hi) y_ >> 64 else y_);
- const a0 = @truncate(u32, a);
- const a1 = @truncate(u32, a >> 32);
- const b0 = @truncate(u32, b);
- const b1 = @truncate(u32, b >> 32);
+ const a = @as(u64, @truncate(if (half == .hi or half == .hi_lo) x_ >> 64 else x_));
+ const b = @as(u64, @truncate(if (half == .hi) y_ >> 64 else y_));
+ const a0 = @as(u32, @truncate(a));
+ const a1 = @as(u32, @truncate(a >> 32));
+ const b0 = @as(u32, @truncate(b));
+ const b1 = @as(u32, @truncate(b >> 32));
const lo = clmulSoft32(a0, b0);
const hi = clmulSoft32(a1, b1);
const mid = clmulSoft32(a0 ^ a1, b0 ^ b1) ^ lo ^ hi;
@@ -256,8 +256,8 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
// Multiply two 128-bit integers in GF(2^128).
inline fn clmul128(x: u128, y: u128) I256 {
if (mul_algorithm == .karatsuba) {
- const x_hi = @truncate(u64, x >> 64);
- const y_hi = @truncate(u64, y >> 64);
+ const x_hi = @as(u64, @truncate(x >> 64));
+ const y_hi = @as(u64, @truncate(y >> 64));
const r_lo = clmul(x, y, .lo);
const r_hi = clmul(x, y, .hi);
const r_mid = clmul(x ^ x_hi, y ^ y_hi, .lo) ^ r_lo ^ r_hi;
@@ -407,7 +407,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
st.pad();
mem.writeInt(u128, out[0..16], st.acc, endian);
- utils.secureZero(u8, @ptrCast([*]u8, st)[0..@sizeOf(Self)]);
+ utils.secureZero(u8, @as([*]u8, @ptrCast(st))[0..@sizeOf(Self)]);
}
/// Compute the GHASH of a message.
@@ -442,7 +442,7 @@ test "ghash2" {
var key: [16]u8 = undefined;
var i: usize = 0;
while (i < key.len) : (i += 1) {
- key[i] = @intCast(u8, i * 15 + 1);
+ key[i] = @as(u8, @intCast(i * 15 + 1));
}
const tvs = [_]struct { len: usize, hash: [:0]const u8 }{
.{ .len = 5263, .hash = "b9395f37c131cd403a327ccf82ec016a" },
@@ -461,7 +461,7 @@ test "ghash2" {
var m: [tv.len]u8 = undefined;
i = 0;
while (i < m.len) : (i += 1) {
- m[i] = @truncate(u8, i % 254 + 1);
+ m[i] = @as(u8, @truncate(i % 254 + 1));
}
var st = Ghash.init(&key);
st.update(&m);
diff --git a/lib/std/crypto/isap.zig b/lib/std/crypto/isap.zig
index 5b0da739de..1d17e32be8 100644
--- a/lib/std/crypto/isap.zig
+++ b/lib/std/crypto/isap.zig
@@ -67,7 +67,7 @@ pub const IsapA128A = struct {
var i: usize = 0;
while (i < y.len * 8 - 1) : (i += 1) {
const cur_byte_pos = i / 8;
- const cur_bit_pos = @truncate(u3, 7 - (i % 8));
+ const cur_bit_pos = @as(u3, @truncate(7 - (i % 8)));
const cur_bit = ((y[cur_byte_pos] >> cur_bit_pos) & 1) << 7;
isap.st.addByte(cur_bit, 0);
isap.st.permuteR(1);
diff --git a/lib/std/crypto/keccak_p.zig b/lib/std/crypto/keccak_p.zig
index ddc9b1b847..d8130bc87a 100644
--- a/lib/std/crypto/keccak_p.zig
+++ b/lib/std/crypto/keccak_p.zig
@@ -33,7 +33,7 @@ pub fn KeccakF(comptime f: u11) type {
0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008,
};
var rc: [max_rounds]T = undefined;
- for (&rc, RC64[0..max_rounds]) |*t, c| t.* = @truncate(T, c);
+ for (&rc, RC64[0..max_rounds]) |*t, c| t.* = @as(T, @truncate(c));
break :rc rc;
};
@@ -75,7 +75,7 @@ pub fn KeccakF(comptime f: u11) type {
/// XOR a byte into the state at a given offset.
pub fn addByte(self: *Self, byte: u8, offset: usize) void {
- const z = @sizeOf(T) * @truncate(math.Log2Int(T), offset % @sizeOf(T));
+ const z = @sizeOf(T) * @as(math.Log2Int(T), @truncate(offset % @sizeOf(T)));
self.st[offset / @sizeOf(T)] ^= @as(T, byte) << z;
}
diff --git a/lib/std/crypto/kyber_d00.zig b/lib/std/crypto/kyber_d00.zig
index 3cb0f02c0d..390ff8e7f2 100644
--- a/lib/std/crypto/kyber_d00.zig
+++ b/lib/std/crypto/kyber_d00.zig
@@ -579,7 +579,7 @@ test "invNTTReductions bounds" {
if (j < 0) {
break;
}
- xs[@intCast(usize, j)] = 1;
+ xs[@as(usize, @intCast(j))] = 1;
}
}
}
@@ -615,7 +615,7 @@ fn invertMod(a: anytype, p: @TypeOf(a)) @TypeOf(a) {
// Reduce mod q for testing.
fn modQ32(x: i32) i16 {
- var y = @intCast(i16, @rem(x, @as(i32, Q)));
+ var y = @as(i16, @intCast(@rem(x, @as(i32, Q))));
if (y < 0) {
y += Q;
}
@@ -638,7 +638,7 @@ fn montReduce(x: i32) i16 {
// Note that x q' might be as big as 2³² and could overflow the int32
// multiplication in the last line. However for any int32s a and b,
// we have int32(int64(a)*int64(b)) = int32(a*b) and so the result is ok.
- const m = @truncate(i16, @truncate(i32, x *% qInv));
+ const m = @as(i16, @truncate(@as(i32, @truncate(x *% qInv))));
// Note that x - m q is divisible by R; indeed modulo R we have
//
@@ -652,7 +652,7 @@ fn montReduce(x: i32) i16 {
// and as both -2¹⁵ q ≤ m q, x < 2¹⁵ q, we have
// -2¹⁶ q ≤ x - m q < 2¹⁶ q and so -q ≤ (x - m q) / R < q as desired.
const yR = x - @as(i32, m) * @as(i32, Q);
- return @bitCast(i16, @truncate(u16, @bitCast(u32, yR) >> 16));
+ return @as(i16, @bitCast(@as(u16, @truncate(@as(u32, @bitCast(yR)) >> 16))));
}
test "Test montReduce" {
@@ -676,7 +676,7 @@ fn feToMont(x: i16) i16 {
test "Test feToMont" {
var x: i32 = -(1 << 15);
while (x < 1 << 15) : (x += 1) {
- const y = feToMont(@intCast(i16, x));
+ const y = feToMont(@as(i16, @intCast(x)));
try testing.expectEqual(modQ32(@as(i32, y)), modQ32(x * r_mod_q));
}
}
@@ -703,14 +703,14 @@ fn feBarrettReduce(x: i16) i16 {
// To actually compute this, note that
//
// ⌊x 20159/2²⁶⌋ = (20159 x) >> 26.
- return x -% @intCast(i16, (@as(i32, x) * 20159) >> 26) *% Q;
+ return x -% @as(i16, @intCast((@as(i32, x) * 20159) >> 26)) *% Q;
}
test "Test Barrett reduction" {
var x: i32 = -(1 << 15);
while (x < 1 << 15) : (x += 1) {
- var y1 = feBarrettReduce(@intCast(i16, x));
- const y2 = @mod(@intCast(i16, x), Q);
+ var y1 = feBarrettReduce(@as(i16, @intCast(x)));
+ const y2 = @mod(@as(i16, @intCast(x)), Q);
if (x < 0 and @rem(-x, Q) == 0) {
y1 -= Q;
}
@@ -729,9 +729,9 @@ fn csubq(x: i16) i16 {
test "Test csubq" {
var x: i32 = -29439;
while (x < 1 << 15) : (x += 1) {
- const y1 = csubq(@intCast(i16, x));
- var y2 = @intCast(i16, x);
- if (@intCast(i16, x) >= Q) {
+ const y1 = csubq(@as(i16, @intCast(x)));
+ var y2 = @as(i16, @intCast(x));
+ if (@as(i16, @intCast(x)) >= Q) {
y2 -= Q;
}
try testing.expectEqual(y1, y2);
@@ -762,7 +762,7 @@ fn computeZetas() [128]i16 {
@setEvalBranchQuota(10000);
var ret: [128]i16 = undefined;
for (&ret, 0..) |*r, i| {
- const t = @intCast(i16, mpow(@as(i32, zeta), @bitReverse(@intCast(u7, i)), Q));
+ const t = @as(i16, @intCast(mpow(@as(i32, zeta), @bitReverse(@as(u7, @intCast(i))), Q)));
r.* = csubq(feBarrettReduce(feToMont(t)));
}
return ret;
@@ -945,7 +945,7 @@ const Poly = struct {
if (i < 0) {
break;
}
- p.cs[@intCast(usize, i)] = feBarrettReduce(p.cs[@intCast(usize, i)]);
+ p.cs[@as(usize, @intCast(i))] = feBarrettReduce(p.cs[@as(usize, @intCast(i))]);
}
}
@@ -1020,8 +1020,8 @@ const Poly = struct {
// = ⌊(2ᵈ/q)x+½⌋ mod⁺ 2ᵈ
// = ⌊((x << d) + q/2) / q⌋ mod⁺ 2ᵈ
// = DIV((x << d) + q/2, q) & ((1<<d) - 1)
- const t = @intCast(u32, p.cs[in_off + i]) << d;
- in[i] = @intCast(u16, @divFloor(t + q_over_2, Q) & two_d_min_1);
+ const t = @as(u32, @intCast(p.cs[in_off + i])) << d;
+ in[i] = @as(u16, @intCast(@divFloor(t + q_over_2, Q) & two_d_min_1));
}
// Now we pack the d-bit integers from `in' into out as bytes.
@@ -1032,7 +1032,7 @@ const Poly = struct {
comptime var todo: usize = 8;
inline while (todo > 0) {
const out_shift = comptime 8 - todo;
- out[out_off + j] |= @truncate(u8, (in[i] >> in_shift) << out_shift);
+ out[out_off + j] |= @as(u8, @truncate((in[i] >> in_shift) << out_shift));
const done = comptime @min(@min(d, todo), d - in_shift);
todo -= done;
@@ -1094,7 +1094,7 @@ const Poly = struct {
// = ⌊(qx + 2ᵈ⁻¹)/2ᵈ⌋
// = (qx + (1<<(d-1))) >> d
const qx = @as(u32, out) * @as(u32, Q);
- ret.cs[out_off + i] = @intCast(i16, (qx + (1 << (d - 1))) >> d);
+ ret.cs[out_off + i] = @as(i16, @intCast((qx + (1 << (d - 1))) >> d));
}
in_off += in_batch_size;
@@ -1209,8 +1209,8 @@ const Poly = struct {
// Extract each a and b separately and set coefficient in polynomial.
inline for (0..batch_count) |j| {
const mask2 = comptime (1 << eta) - 1;
- const a = @intCast(i16, (d >> (comptime (2 * j * eta))) & mask2);
- const b = @intCast(i16, (d >> (comptime ((2 * j + 1) * eta))) & mask2);
+ const a = @as(i16, @intCast((d >> (comptime (2 * j * eta))) & mask2));
+ const b = @as(i16, @intCast((d >> (comptime ((2 * j + 1) * eta))) & mask2));
ret.cs[batch_count * i + j] = a - b;
}
}
@@ -1246,7 +1246,7 @@ const Poly = struct {
inline for (ts) |t| {
if (t < Q) {
- ret.cs[i] = @intCast(i16, t);
+ ret.cs[i] = @as(i16, @intCast(t));
i += 1;
if (i == N) {
@@ -1266,11 +1266,11 @@ const Poly = struct {
fn toBytes(p: Poly) [bytes_length]u8 {
var ret: [bytes_length]u8 = undefined;
for (0..comptime N / 2) |i| {
- const t0 = @intCast(u16, p.cs[2 * i]);
- const t1 = @intCast(u16, p.cs[2 * i + 1]);
- ret[3 * i] = @truncate(u8, t0);
- ret[3 * i + 1] = @truncate(u8, (t0 >> 8) | (t1 << 4));
- ret[3 * i + 2] = @truncate(u8, t1 >> 4);
+ const t0 = @as(u16, @intCast(p.cs[2 * i]));
+ const t1 = @as(u16, @intCast(p.cs[2 * i + 1]));
+ ret[3 * i] = @as(u8, @truncate(t0));
+ ret[3 * i + 1] = @as(u8, @truncate((t0 >> 8) | (t1 << 4)));
+ ret[3 * i + 2] = @as(u8, @truncate(t1 >> 4));
}
return ret;
}
@@ -1356,7 +1356,7 @@ fn Vec(comptime K: u8) type {
fn noise(comptime eta: u8, nonce: u8, seed: *const [32]u8) Self {
var ret: Self = undefined;
for (0..K) |i| {
- ret.ps[i] = Poly.noise(eta, nonce + @intCast(u8, i), seed);
+ ret.ps[i] = Poly.noise(eta, nonce + @as(u8, @intCast(i)), seed);
}
return ret;
}
@@ -1534,7 +1534,7 @@ test "Compression" {
test "noise" {
var seed: [32]u8 = undefined;
for (&seed, 0..) |*s, i| {
- s.* = @intCast(u8, i);
+ s.* = @as(u8, @intCast(i));
}
try testing.expectEqual(Poly.noise(3, 37, &seed).cs, .{
0, 0, 1, -1, 0, 2, 0, -1, -1, 3, 0, 1, -2, -2, 0, 1, -2,
@@ -1580,7 +1580,7 @@ test "noise" {
test "uniform sampling" {
var seed: [32]u8 = undefined;
for (&seed, 0..) |*s, i| {
- s.* = @intCast(u8, i);
+ s.* = @as(u8, @intCast(i));
}
try testing.expectEqual(Poly.uniform(seed, 1, 0).cs, .{
797, 993, 161, 6, 2608, 2385, 2096, 2661, 1676, 247, 2440,
@@ -1623,17 +1623,17 @@ test "Test inner PKE" {
var seed: [32]u8 = undefined;
var pt: [32]u8 = undefined;
for (&seed, &pt, 0..) |*s, *p, i| {
- s.* = @intCast(u8, i);
- p.* = @intCast(u8, i + 32);
+ s.* = @as(u8, @intCast(i));
+ p.* = @as(u8, @intCast(i + 32));
}
inline for (modes) |mode| {
for (0..100) |i| {
var pk: mode.InnerPk = undefined;
var sk: mode.InnerSk = undefined;
- seed[0] = @intCast(u8, i);
+ seed[0] = @as(u8, @intCast(i));
mode.innerKeyFromSeed(seed, &pk, &sk);
for (0..10) |j| {
- seed[1] = @intCast(u8, j);
+ seed[1] = @as(u8, @intCast(j));
try testing.expectEqual(sk.decrypt(&pk.encrypt(&pt, &seed)), pt);
}
}
@@ -1643,18 +1643,18 @@ test "Test inner PKE" {
test "Test happy flow" {
var seed: [64]u8 = undefined;
for (&seed, 0..) |*s, i| {
- s.* = @intCast(u8, i);
+ s.* = @as(u8, @intCast(i));
}
inline for (modes) |mode| {
for (0..100) |i| {
- seed[0] = @intCast(u8, i);
+ seed[0] = @as(u8, @intCast(i));
const kp = try mode.KeyPair.create(seed);
const sk = try mode.SecretKey.fromBytes(&kp.secret_key.toBytes());
try testing.expectEqual(sk, kp.secret_key);
const pk = try mode.PublicKey.fromBytes(&kp.public_key.toBytes());
try testing.expectEqual(pk, kp.public_key);
for (0..10) |j| {
- seed[1] = @intCast(u8, j);
+ seed[1] = @as(u8, @intCast(j));
const e = pk.encaps(seed[0..32].*);
try testing.expectEqual(e.shared_secret, try sk.decaps(&e.ciphertext));
}
@@ -1675,7 +1675,7 @@ test "NIST KAT test" {
const mode = modeHash[0];
var seed: [48]u8 = undefined;
for (&seed, 0..) |*s, i| {
- s.* = @intCast(u8, i);
+ s.* = @as(u8, @intCast(i));
}
var f = sha2.Sha256.init(.{});
const fw = f.writer();
diff --git a/lib/std/crypto/md5.zig b/lib/std/crypto/md5.zig
index bd4a78c032..b480cbcd8e 100644
--- a/lib/std/crypto/md5.zig
+++ b/lib/std/crypto/md5.zig
@@ -80,7 +80,7 @@ pub const Md5 = struct {
// Copy any remainder for next pass.
const b_slice = b[off..];
@memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
- d.buf_len += @intCast(u8, b_slice.len);
+ d.buf_len += @as(u8, @intCast(b_slice.len));
// Md5 uses the bottom 64-bits for length padding
d.total_len +%= b.len;
@@ -103,9 +103,9 @@ pub const Md5 = struct {
// Append message length.
var i: usize = 1;
var len = d.total_len >> 5;
- d.buf[56] = @intCast(u8, d.total_len & 0x1f) << 3;
+ d.buf[56] = @as(u8, @intCast(d.total_len & 0x1f)) << 3;
while (i < 8) : (i += 1) {
- d.buf[56 + i] = @intCast(u8, len & 0xff);
+ d.buf[56 + i] = @as(u8, @intCast(len & 0xff));
len >>= 8;
}
diff --git a/lib/std/crypto/pbkdf2.zig b/lib/std/crypto/pbkdf2.zig
index 115fd38b3d..2e0318369b 100644
--- a/lib/std/crypto/pbkdf2.zig
+++ b/lib/std/crypto/pbkdf2.zig
@@ -74,7 +74,7 @@ pub fn pbkdf2(dk: []u8, password: []const u8, salt: []const u8, rounds: u32, com
// block
//
- const blocks_count = @intCast(u32, std.math.divCeil(usize, dk_len, h_len) catch unreachable);
+ const blocks_count = @as(u32, @intCast(std.math.divCeil(usize, dk_len, h_len) catch unreachable));
var r = dk_len % h_len;
if (r == 0) {
r = h_len;
diff --git a/lib/std/crypto/pcurves/common.zig b/lib/std/crypto/pcurves/common.zig
index 5d41bc190a..edc437517c 100644
--- a/lib/std/crypto/pcurves/common.zig
+++ b/lib/std/crypto/pcurves/common.zig
@@ -120,7 +120,7 @@ pub fn Field(comptime params: FieldParams) type {
/// Return true if the element is odd.
pub fn isOdd(fe: Fe) bool {
const s = fe.toBytes(.Little);
- return @truncate(u1, s[0]) != 0;
+ return @as(u1, @truncate(s[0])) != 0;
}
/// Conditionally replace a field element with `a` if `c` is positive.
@@ -179,7 +179,7 @@ pub fn Field(comptime params: FieldParams) type {
var x: T = n;
var t = a;
while (true) {
- if (@truncate(u1, x) != 0) fe = fe.mul(t);
+ if (@as(u1, @truncate(x)) != 0) fe = fe.mul(t);
x >>= 1;
if (x == 0) break;
t = t.sq();
@@ -233,7 +233,7 @@ pub fn Field(comptime params: FieldParams) type {
}
var v_opp: Limbs = undefined;
fiat.opp(&v_opp, v);
- fiat.selectznz(&v, @truncate(u1, f[f.len - 1] >> (@bitSizeOf(Word) - 1)), v, v_opp);
+ fiat.selectznz(&v, @as(u1, @truncate(f[f.len - 1] >> (@bitSizeOf(Word) - 1))), v, v_opp);
const precomp = blk: {
var precomp: Limbs = undefined;
diff --git a/lib/std/crypto/pcurves/p256.zig b/lib/std/crypto/pcurves/p256.zig
index a797fbce3e..668c0115b2 100644
--- a/lib/std/crypto/pcurves/p256.zig
+++ b/lib/std/crypto/pcurves/p256.zig
@@ -318,7 +318,7 @@ pub const P256 = struct {
var t = P256.identityElement;
comptime var i: u8 = 1;
inline while (i < pc.len) : (i += 1) {
- t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8));
+ t.cMov(pc[i], @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8)));
}
return t;
}
@@ -326,8 +326,8 @@ pub const P256 = struct {
fn slide(s: [32]u8) [2 * 32 + 1]i8 {
var e: [2 * 32 + 1]i8 = undefined;
for (s, 0..) |x, i| {
- e[i * 2 + 0] = @as(i8, @truncate(u4, x));
- e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
+ e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x)));
+ e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4)));
}
// Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7
var carry: i8 = 0;
@@ -351,9 +351,9 @@ pub const P256 = struct {
while (true) : (pos -= 1) {
const slot = e[pos];
if (slot > 0) {
- q = q.add(pc[@intCast(usize, slot)]);
+ q = q.add(pc[@as(usize, @intCast(slot))]);
} else if (slot < 0) {
- q = q.sub(pc[@intCast(usize, -slot)]);
+ q = q.sub(pc[@as(usize, @intCast(-slot))]);
}
if (pos == 0) break;
q = q.dbl().dbl().dbl().dbl();
@@ -366,7 +366,7 @@ pub const P256 = struct {
var q = P256.identityElement;
var pos: usize = 252;
while (true) : (pos -= 4) {
- const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos)));
+ const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos)))));
if (vartime) {
if (slot != 0) {
q = q.add(pc[slot]);
@@ -445,15 +445,15 @@ pub const P256 = struct {
while (true) : (pos -= 1) {
const slot1 = e1[pos];
if (slot1 > 0) {
- q = q.add(pc1[@intCast(usize, slot1)]);
+ q = q.add(pc1[@as(usize, @intCast(slot1))]);
} else if (slot1 < 0) {
- q = q.sub(pc1[@intCast(usize, -slot1)]);
+ q = q.sub(pc1[@as(usize, @intCast(-slot1))]);
}
const slot2 = e2[pos];
if (slot2 > 0) {
- q = q.add(pc2[@intCast(usize, slot2)]);
+ q = q.add(pc2[@as(usize, @intCast(slot2))]);
} else if (slot2 < 0) {
- q = q.sub(pc2[@intCast(usize, -slot2)]);
+ q = q.sub(pc2[@as(usize, @intCast(-slot2))]);
}
if (pos == 0) break;
q = q.dbl().dbl().dbl().dbl();
diff --git a/lib/std/crypto/pcurves/p256/p256_64.zig b/lib/std/crypto/pcurves/p256/p256_64.zig
index e8ba37e845..e8dbaead33 100644
--- a/lib/std/crypto/pcurves/p256/p256_64.zig
+++ b/lib/std/crypto/pcurves/p256/p256_64.zig
@@ -119,8 +119,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
@setRuntimeSafety(mode == .Debug);
const x = @as(u128, arg1) * @as(u128, arg2);
- out1.* = @truncate(u64, x);
- out2.* = @truncate(u64, x >> 64);
+ out1.* = @as(u64, @truncate(x));
+ out2.* = @as(u64, @truncate(x >> 64));
}
/// The function cmovznzU64 is a single-word conditional move.
@@ -1355,62 +1355,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void {
const x2 = (arg1[2]);
const x3 = (arg1[1]);
const x4 = (arg1[0]);
- const x5 = @truncate(u8, (x4 & @as(u64, 0xff)));
+ const x5 = @as(u8, @truncate((x4 & @as(u64, 0xff))));
const x6 = (x4 >> 8);
- const x7 = @truncate(u8, (x6 & @as(u64, 0xff)));
+ const x7 = @as(u8, @truncate((x6 & @as(u64, 0xff))));
const x8 = (x6 >> 8);
- const x9 = @truncate(u8, (x8 & @as(u64, 0xff)));
+ const x9 = @as(u8, @truncate((x8 & @as(u64, 0xff))));
const x10 = (x8 >> 8);
- const x11 = @truncate(u8, (x10 & @as(u64, 0xff)));
+ const x11 = @as(u8, @truncate((x10 & @as(u64, 0xff))));
const x12 = (x10 >> 8);
- const x13 = @truncate(u8, (x12 & @as(u64, 0xff)));
+ const x13 = @as(u8, @truncate((x12 & @as(u64, 0xff))));
const x14 = (x12 >> 8);
- const x15 = @truncate(u8, (x14 & @as(u64, 0xff)));
+ const x15 = @as(u8, @truncate((x14 & @as(u64, 0xff))));
const x16 = (x14 >> 8);
- const x17 = @truncate(u8, (x16 & @as(u64, 0xff)));
- const x18 = @truncate(u8, (x16 >> 8));
- const x19 = @truncate(u8, (x3 & @as(u64, 0xff)));
+ const x17 = @as(u8, @truncate((x16 & @as(u64, 0xff))));
+ const x18 = @as(u8, @truncate((x16 >> 8)));
+ const x19 = @as(u8, @truncate((x3 & @as(u64, 0xff))));
const x20 = (x3 >> 8);
- const x21 = @truncate(u8, (x20 & @as(u64, 0xff)));
+ const x21 = @as(u8, @truncate((x20 & @as(u64, 0xff))));
const x22 = (x20 >> 8);
- const x23 = @truncate(u8, (x22 & @as(u64, 0xff)));
+ const x23 = @as(u8, @truncate((x22 & @as(u64, 0xff))));
const x24 = (x22 >> 8);
- const x25 = @truncate(u8, (x24 & @as(u64, 0xff)));
+ const x25 = @as(u8, @truncate((x24 & @as(u64, 0xff))));
const x26 = (x24 >> 8);
- const x27 = @truncate(u8, (x26 & @as(u64, 0xff)));
+ const x27 = @as(u8, @truncate((x26 & @as(u64, 0xff))));
const x28 = (x26 >> 8);
- const x29 = @truncate(u8, (x28 & @as(u64, 0xff)));
+ const x29 = @as(u8, @truncate((x28 & @as(u64, 0xff))));
const x30 = (x28 >> 8);
- const x31 = @truncate(u8, (x30 & @as(u64, 0xff)));
- const x32 = @truncate(u8, (x30 >> 8));
- const x33 = @truncate(u8, (x2 & @as(u64, 0xff)));
+ const x31 = @as(u8, @truncate((x30 & @as(u64, 0xff))));
+ const x32 = @as(u8, @truncate((x30 >> 8)));
+ const x33 = @as(u8, @truncate((x2 & @as(u64, 0xff))));
const x34 = (x2 >> 8);
- const x35 = @truncate(u8, (x34 & @as(u64, 0xff)));
+ const x35 = @as(u8, @truncate((x34 & @as(u64, 0xff))));
const x36 = (x34 >> 8);
- const x37 = @truncate(u8, (x36 & @as(u64, 0xff)));
+ const x37 = @as(u8, @truncate((x36 & @as(u64, 0xff))));
const x38 = (x36 >> 8);
- const x39 = @truncate(u8, (x38 & @as(u64, 0xff)));
+ const x39 = @as(u8, @truncate((x38 & @as(u64, 0xff))));
const x40 = (x38 >> 8);
- const x41 = @truncate(u8, (x40 & @as(u64, 0xff)));
+ const x41 = @as(u8, @truncate((x40 & @as(u64, 0xff))));
const x42 = (x40 >> 8);
- const x43 = @truncate(u8, (x42 & @as(u64, 0xff)));
+ const x43 = @as(u8, @truncate((x42 & @as(u64, 0xff))));
const x44 = (x42 >> 8);
- const x45 = @truncate(u8, (x44 & @as(u64, 0xff)));
- const x46 = @truncate(u8, (x44 >> 8));
- const x47 = @truncate(u8, (x1 & @as(u64, 0xff)));
+ const x45 = @as(u8, @truncate((x44 & @as(u64, 0xff))));
+ const x46 = @as(u8, @truncate((x44 >> 8)));
+ const x47 = @as(u8, @truncate((x1 & @as(u64, 0xff))));
const x48 = (x1 >> 8);
- const x49 = @truncate(u8, (x48 & @as(u64, 0xff)));
+ const x49 = @as(u8, @truncate((x48 & @as(u64, 0xff))));
const x50 = (x48 >> 8);
- const x51 = @truncate(u8, (x50 & @as(u64, 0xff)));
+ const x51 = @as(u8, @truncate((x50 & @as(u64, 0xff))));
const x52 = (x50 >> 8);
- const x53 = @truncate(u8, (x52 & @as(u64, 0xff)));
+ const x53 = @as(u8, @truncate((x52 & @as(u64, 0xff))));
const x54 = (x52 >> 8);
- const x55 = @truncate(u8, (x54 & @as(u64, 0xff)));
+ const x55 = @as(u8, @truncate((x54 & @as(u64, 0xff))));
const x56 = (x54 >> 8);
- const x57 = @truncate(u8, (x56 & @as(u64, 0xff)));
+ const x57 = @as(u8, @truncate((x56 & @as(u64, 0xff))));
const x58 = (x56 >> 8);
- const x59 = @truncate(u8, (x58 & @as(u64, 0xff)));
- const x60 = @truncate(u8, (x58 >> 8));
+ const x59 = @as(u8, @truncate((x58 & @as(u64, 0xff))));
+ const x60 = @as(u8, @truncate((x58 >> 8)));
out1[0] = x5;
out1[1] = x7;
out1[2] = x9;
@@ -1593,7 +1593,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
var x1: u64 = undefined;
var x2: u1 = undefined;
addcarryxU64(&x1, &x2, 0x0, (~arg1), @as(u64, 0x1));
- const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & @as(u64, 0x1))));
+ const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & @as(u64, 0x1)))));
var x4: u64 = undefined;
var x5: u1 = undefined;
addcarryxU64(&x4, &x5, 0x0, (~arg1), @as(u64, 0x1));
@@ -1707,7 +1707,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
cmovznzU64(&x72, x3, (arg5[2]), x66);
var x73: u64 = undefined;
cmovznzU64(&x73, x3, (arg5[3]), x68);
- const x74 = @truncate(u1, (x22 & @as(u64, 0x1)));
+ const x74 = @as(u1, @truncate((x22 & @as(u64, 0x1))));
var x75: u64 = undefined;
cmovznzU64(&x75, x74, @as(u64, 0x0), x7);
var x76: u64 = undefined;
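The fiat-crypto generated field code is migrated the same way; `mulxU64` above is the simplest case, splitting a 128-bit product into two 64-bit limbs. A small sketch of that split with inferred truncation, using assumed values rather than the generated code:

const std = @import("std");

test "splitting a 128-bit product with inferred truncation" {
    const a: u64 = 0xDEADBEEF_00000001;
    const b: u64 = 3;
    const wide = @as(u128, a) * @as(u128, b);
    const lo: u64 = @truncate(wide); // low limb, destination type from the annotation
    const hi: u64 = @truncate(wide >> 64); // high limb
    try std.testing.expectEqual(wide, (@as(u128, hi) << 64) | lo);
}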
diff --git a/lib/std/crypto/pcurves/p256/p256_scalar_64.zig b/lib/std/crypto/pcurves/p256/p256_scalar_64.zig
index ea102360cf..152c2b8787 100644
--- a/lib/std/crypto/pcurves/p256/p256_scalar_64.zig
+++ b/lib/std/crypto/pcurves/p256/p256_scalar_64.zig
@@ -119,8 +119,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
@setRuntimeSafety(mode == .Debug);
const x = @as(u128, arg1) * @as(u128, arg2);
- out1.* = @truncate(u64, x);
- out2.* = @truncate(u64, x >> 64);
+ out1.* = @as(u64, @truncate(x));
+ out2.* = @as(u64, @truncate(x >> 64));
}
/// The function cmovznzU64 is a single-word conditional move.
@@ -1559,62 +1559,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void {
const x2 = (arg1[2]);
const x3 = (arg1[1]);
const x4 = (arg1[0]);
- const x5 = @truncate(u8, (x4 & @as(u64, 0xff)));
+ const x5 = @as(u8, @truncate((x4 & @as(u64, 0xff))));
const x6 = (x4 >> 8);
- const x7 = @truncate(u8, (x6 & @as(u64, 0xff)));
+ const x7 = @as(u8, @truncate((x6 & @as(u64, 0xff))));
const x8 = (x6 >> 8);
- const x9 = @truncate(u8, (x8 & @as(u64, 0xff)));
+ const x9 = @as(u8, @truncate((x8 & @as(u64, 0xff))));
const x10 = (x8 >> 8);
- const x11 = @truncate(u8, (x10 & @as(u64, 0xff)));
+ const x11 = @as(u8, @truncate((x10 & @as(u64, 0xff))));
const x12 = (x10 >> 8);
- const x13 = @truncate(u8, (x12 & @as(u64, 0xff)));
+ const x13 = @as(u8, @truncate((x12 & @as(u64, 0xff))));
const x14 = (x12 >> 8);
- const x15 = @truncate(u8, (x14 & @as(u64, 0xff)));
+ const x15 = @as(u8, @truncate((x14 & @as(u64, 0xff))));
const x16 = (x14 >> 8);
- const x17 = @truncate(u8, (x16 & @as(u64, 0xff)));
- const x18 = @truncate(u8, (x16 >> 8));
- const x19 = @truncate(u8, (x3 & @as(u64, 0xff)));
+ const x17 = @as(u8, @truncate((x16 & @as(u64, 0xff))));
+ const x18 = @as(u8, @truncate((x16 >> 8)));
+ const x19 = @as(u8, @truncate((x3 & @as(u64, 0xff))));
const x20 = (x3 >> 8);
- const x21 = @truncate(u8, (x20 & @as(u64, 0xff)));
+ const x21 = @as(u8, @truncate((x20 & @as(u64, 0xff))));
const x22 = (x20 >> 8);
- const x23 = @truncate(u8, (x22 & @as(u64, 0xff)));
+ const x23 = @as(u8, @truncate((x22 & @as(u64, 0xff))));
const x24 = (x22 >> 8);
- const x25 = @truncate(u8, (x24 & @as(u64, 0xff)));
+ const x25 = @as(u8, @truncate((x24 & @as(u64, 0xff))));
const x26 = (x24 >> 8);
- const x27 = @truncate(u8, (x26 & @as(u64, 0xff)));
+ const x27 = @as(u8, @truncate((x26 & @as(u64, 0xff))));
const x28 = (x26 >> 8);
- const x29 = @truncate(u8, (x28 & @as(u64, 0xff)));
+ const x29 = @as(u8, @truncate((x28 & @as(u64, 0xff))));
const x30 = (x28 >> 8);
- const x31 = @truncate(u8, (x30 & @as(u64, 0xff)));
- const x32 = @truncate(u8, (x30 >> 8));
- const x33 = @truncate(u8, (x2 & @as(u64, 0xff)));
+ const x31 = @as(u8, @truncate((x30 & @as(u64, 0xff))));
+ const x32 = @as(u8, @truncate((x30 >> 8)));
+ const x33 = @as(u8, @truncate((x2 & @as(u64, 0xff))));
const x34 = (x2 >> 8);
- const x35 = @truncate(u8, (x34 & @as(u64, 0xff)));
+ const x35 = @as(u8, @truncate((x34 & @as(u64, 0xff))));
const x36 = (x34 >> 8);
- const x37 = @truncate(u8, (x36 & @as(u64, 0xff)));
+ const x37 = @as(u8, @truncate((x36 & @as(u64, 0xff))));
const x38 = (x36 >> 8);
- const x39 = @truncate(u8, (x38 & @as(u64, 0xff)));
+ const x39 = @as(u8, @truncate((x38 & @as(u64, 0xff))));
const x40 = (x38 >> 8);
- const x41 = @truncate(u8, (x40 & @as(u64, 0xff)));
+ const x41 = @as(u8, @truncate((x40 & @as(u64, 0xff))));
const x42 = (x40 >> 8);
- const x43 = @truncate(u8, (x42 & @as(u64, 0xff)));
+ const x43 = @as(u8, @truncate((x42 & @as(u64, 0xff))));
const x44 = (x42 >> 8);
- const x45 = @truncate(u8, (x44 & @as(u64, 0xff)));
- const x46 = @truncate(u8, (x44 >> 8));
- const x47 = @truncate(u8, (x1 & @as(u64, 0xff)));
+ const x45 = @as(u8, @truncate((x44 & @as(u64, 0xff))));
+ const x46 = @as(u8, @truncate((x44 >> 8)));
+ const x47 = @as(u8, @truncate((x1 & @as(u64, 0xff))));
const x48 = (x1 >> 8);
- const x49 = @truncate(u8, (x48 & @as(u64, 0xff)));
+ const x49 = @as(u8, @truncate((x48 & @as(u64, 0xff))));
const x50 = (x48 >> 8);
- const x51 = @truncate(u8, (x50 & @as(u64, 0xff)));
+ const x51 = @as(u8, @truncate((x50 & @as(u64, 0xff))));
const x52 = (x50 >> 8);
- const x53 = @truncate(u8, (x52 & @as(u64, 0xff)));
+ const x53 = @as(u8, @truncate((x52 & @as(u64, 0xff))));
const x54 = (x52 >> 8);
- const x55 = @truncate(u8, (x54 & @as(u64, 0xff)));
+ const x55 = @as(u8, @truncate((x54 & @as(u64, 0xff))));
const x56 = (x54 >> 8);
- const x57 = @truncate(u8, (x56 & @as(u64, 0xff)));
+ const x57 = @as(u8, @truncate((x56 & @as(u64, 0xff))));
const x58 = (x56 >> 8);
- const x59 = @truncate(u8, (x58 & @as(u64, 0xff)));
- const x60 = @truncate(u8, (x58 >> 8));
+ const x59 = @as(u8, @truncate((x58 & @as(u64, 0xff))));
+ const x60 = @as(u8, @truncate((x58 >> 8)));
out1[0] = x5;
out1[1] = x7;
out1[2] = x9;
@@ -1797,7 +1797,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
var x1: u64 = undefined;
var x2: u1 = undefined;
addcarryxU64(&x1, &x2, 0x0, (~arg1), @as(u64, 0x1));
- const x3 = @truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & @as(u64, 0x1)));
+ const x3 = @as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & @as(u64, 0x1))));
var x4: u64 = undefined;
var x5: u1 = undefined;
addcarryxU64(&x4, &x5, 0x0, (~arg1), @as(u64, 0x1));
@@ -1911,7 +1911,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
cmovznzU64(&x72, x3, (arg5[2]), x66);
var x73: u64 = undefined;
cmovznzU64(&x73, x3, (arg5[3]), x68);
- const x74 = @truncate(u1, (x22 & @as(u64, 0x1)));
+ const x74 = @as(u1, @truncate((x22 & @as(u64, 0x1))));
var x75: u64 = undefined;
cmovznzU64(&x75, x74, @as(u64, 0x0), x7);
var x76: u64 = undefined;
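The mulxU64 hunk above keeps the same wide-multiplication split, only respelled; a self-contained sketch of that shape (mulWide64 is an illustrative name):

const std = @import("std");

// The full 128-bit product is computed once, then narrowed twice: the low
// word is a plain @truncate, the high word is truncated after shifting.
fn mulWide64(a: u64, b: u64, lo: *u64, hi: *u64) void {
    const x = @as(u128, a) * @as(u128, b);
    lo.* = @as(u64, @truncate(x));
    hi.* = @as(u64, @truncate(x >> 64));
}

test "mulWide64 splits the product into halves" {
    var lo: u64 = undefined;
    var hi: u64 = undefined;
    mulWide64(std.math.maxInt(u64), 2, &lo, &hi);
    try std.testing.expectEqual(@as(u64, std.math.maxInt(u64) - 1), lo);
    try std.testing.expectEqual(@as(u64, 1), hi);
}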
diff --git a/lib/std/crypto/pcurves/p384.zig b/lib/std/crypto/pcurves/p384.zig
index 3d96592f50..d5afd6eb4d 100644
--- a/lib/std/crypto/pcurves/p384.zig
+++ b/lib/std/crypto/pcurves/p384.zig
@@ -318,7 +318,7 @@ pub const P384 = struct {
var t = P384.identityElement;
comptime var i: u8 = 1;
inline while (i < pc.len) : (i += 1) {
- t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8));
+ t.cMov(pc[i], @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8)));
}
return t;
}
@@ -326,8 +326,8 @@ pub const P384 = struct {
fn slide(s: [48]u8) [2 * 48 + 1]i8 {
var e: [2 * 48 + 1]i8 = undefined;
for (s, 0..) |x, i| {
- e[i * 2 + 0] = @as(i8, @truncate(u4, x));
- e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
+ e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x)));
+ e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4)));
}
// Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7
var carry: i8 = 0;
@@ -351,9 +351,9 @@ pub const P384 = struct {
while (true) : (pos -= 1) {
const slot = e[pos];
if (slot > 0) {
- q = q.add(pc[@intCast(usize, slot)]);
+ q = q.add(pc[@as(usize, @intCast(slot))]);
} else if (slot < 0) {
- q = q.sub(pc[@intCast(usize, -slot)]);
+ q = q.sub(pc[@as(usize, @intCast(-slot))]);
}
if (pos == 0) break;
q = q.dbl().dbl().dbl().dbl();
@@ -366,7 +366,7 @@ pub const P384 = struct {
var q = P384.identityElement;
var pos: usize = 380;
while (true) : (pos -= 4) {
- const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos)));
+ const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos)))));
if (vartime) {
if (slot != 0) {
q = q.add(pc[slot]);
@@ -445,15 +445,15 @@ pub const P384 = struct {
while (true) : (pos -= 1) {
const slot1 = e1[pos];
if (slot1 > 0) {
- q = q.add(pc1[@intCast(usize, slot1)]);
+ q = q.add(pc1[@as(usize, @intCast(slot1))]);
} else if (slot1 < 0) {
- q = q.sub(pc1[@intCast(usize, -slot1)]);
+ q = q.sub(pc1[@as(usize, @intCast(-slot1))]);
}
const slot2 = e2[pos];
if (slot2 > 0) {
- q = q.add(pc2[@intCast(usize, slot2)]);
+ q = q.add(pc2[@as(usize, @intCast(slot2))]);
} else if (slot2 < 0) {
- q = q.sub(pc2[@intCast(usize, -slot2)]);
+ q = q.sub(pc2[@as(usize, @intCast(-slot2))]);
}
if (pos == 0) break;
q = q.dbl().dbl().dbl().dbl();
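Two recurring respellings in the p384.zig hunks above are the nibble split in slide() and the signed-slot table index; a small sketch under the same conventions (splitNibbles, lookup, and the assert are illustrative):

const std = @import("std");

// A byte becomes two signed 4-bit digits: @truncate narrows to u4 and the
// outer @as widens to i8, mirroring e[i * 2 + 0] / e[i * 2 + 1] above.
fn splitNibbles(x: u8) [2]i8 {
    return .{
        @as(i8, @as(u4, @truncate(x))),
        @as(i8, @as(u4, @truncate(x >> 4))),
    };
}

// A strictly positive window slot becomes an index via @intCast, with the
// usize destination supplied by @as, as in pc[@as(usize, @intCast(slot))].
fn lookup(table: []const u32, slot: i8) u32 {
    std.debug.assert(slot > 0);
    return table[@as(usize, @intCast(slot))];
}

test "splitNibbles" {
    const n = splitNibbles(0xab);
    try std.testing.expectEqual(@as(i8, 11), n[0]);
    try std.testing.expectEqual(@as(i8, 10), n[1]);
}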
diff --git a/lib/std/crypto/pcurves/p384/p384_64.zig b/lib/std/crypto/pcurves/p384/p384_64.zig
index 45c12835b3..f25a7d65b5 100644
--- a/lib/std/crypto/pcurves/p384/p384_64.zig
+++ b/lib/std/crypto/pcurves/p384/p384_64.zig
@@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
@setRuntimeSafety(mode == .Debug);
const x = @as(u128, arg1) * @as(u128, arg2);
- out1.* = @truncate(u64, x);
- out2.* = @truncate(u64, x >> 64);
+ out1.* = @as(u64, @truncate(x));
+ out2.* = @as(u64, @truncate(x >> 64));
}
/// The function cmovznzU64 is a single-word conditional move.
@@ -2928,90 +2928,90 @@ pub fn toBytes(out1: *[48]u8, arg1: [6]u64) void {
const x4 = (arg1[2]);
const x5 = (arg1[1]);
const x6 = (arg1[0]);
- const x7 = @truncate(u8, (x6 & 0xff));
+ const x7 = @as(u8, @truncate((x6 & 0xff)));
const x8 = (x6 >> 8);
- const x9 = @truncate(u8, (x8 & 0xff));
+ const x9 = @as(u8, @truncate((x8 & 0xff)));
const x10 = (x8 >> 8);
- const x11 = @truncate(u8, (x10 & 0xff));
+ const x11 = @as(u8, @truncate((x10 & 0xff)));
const x12 = (x10 >> 8);
- const x13 = @truncate(u8, (x12 & 0xff));
+ const x13 = @as(u8, @truncate((x12 & 0xff)));
const x14 = (x12 >> 8);
- const x15 = @truncate(u8, (x14 & 0xff));
+ const x15 = @as(u8, @truncate((x14 & 0xff)));
const x16 = (x14 >> 8);
- const x17 = @truncate(u8, (x16 & 0xff));
+ const x17 = @as(u8, @truncate((x16 & 0xff)));
const x18 = (x16 >> 8);
- const x19 = @truncate(u8, (x18 & 0xff));
- const x20 = @truncate(u8, (x18 >> 8));
- const x21 = @truncate(u8, (x5 & 0xff));
+ const x19 = @as(u8, @truncate((x18 & 0xff)));
+ const x20 = @as(u8, @truncate((x18 >> 8)));
+ const x21 = @as(u8, @truncate((x5 & 0xff)));
const x22 = (x5 >> 8);
- const x23 = @truncate(u8, (x22 & 0xff));
+ const x23 = @as(u8, @truncate((x22 & 0xff)));
const x24 = (x22 >> 8);
- const x25 = @truncate(u8, (x24 & 0xff));
+ const x25 = @as(u8, @truncate((x24 & 0xff)));
const x26 = (x24 >> 8);
- const x27 = @truncate(u8, (x26 & 0xff));
+ const x27 = @as(u8, @truncate((x26 & 0xff)));
const x28 = (x26 >> 8);
- const x29 = @truncate(u8, (x28 & 0xff));
+ const x29 = @as(u8, @truncate((x28 & 0xff)));
const x30 = (x28 >> 8);
- const x31 = @truncate(u8, (x30 & 0xff));
+ const x31 = @as(u8, @truncate((x30 & 0xff)));
const x32 = (x30 >> 8);
- const x33 = @truncate(u8, (x32 & 0xff));
- const x34 = @truncate(u8, (x32 >> 8));
- const x35 = @truncate(u8, (x4 & 0xff));
+ const x33 = @as(u8, @truncate((x32 & 0xff)));
+ const x34 = @as(u8, @truncate((x32 >> 8)));
+ const x35 = @as(u8, @truncate((x4 & 0xff)));
const x36 = (x4 >> 8);
- const x37 = @truncate(u8, (x36 & 0xff));
+ const x37 = @as(u8, @truncate((x36 & 0xff)));
const x38 = (x36 >> 8);
- const x39 = @truncate(u8, (x38 & 0xff));
+ const x39 = @as(u8, @truncate((x38 & 0xff)));
const x40 = (x38 >> 8);
- const x41 = @truncate(u8, (x40 & 0xff));
+ const x41 = @as(u8, @truncate((x40 & 0xff)));
const x42 = (x40 >> 8);
- const x43 = @truncate(u8, (x42 & 0xff));
+ const x43 = @as(u8, @truncate((x42 & 0xff)));
const x44 = (x42 >> 8);
- const x45 = @truncate(u8, (x44 & 0xff));
+ const x45 = @as(u8, @truncate((x44 & 0xff)));
const x46 = (x44 >> 8);
- const x47 = @truncate(u8, (x46 & 0xff));
- const x48 = @truncate(u8, (x46 >> 8));
- const x49 = @truncate(u8, (x3 & 0xff));
+ const x47 = @as(u8, @truncate((x46 & 0xff)));
+ const x48 = @as(u8, @truncate((x46 >> 8)));
+ const x49 = @as(u8, @truncate((x3 & 0xff)));
const x50 = (x3 >> 8);
- const x51 = @truncate(u8, (x50 & 0xff));
+ const x51 = @as(u8, @truncate((x50 & 0xff)));
const x52 = (x50 >> 8);
- const x53 = @truncate(u8, (x52 & 0xff));
+ const x53 = @as(u8, @truncate((x52 & 0xff)));
const x54 = (x52 >> 8);
- const x55 = @truncate(u8, (x54 & 0xff));
+ const x55 = @as(u8, @truncate((x54 & 0xff)));
const x56 = (x54 >> 8);
- const x57 = @truncate(u8, (x56 & 0xff));
+ const x57 = @as(u8, @truncate((x56 & 0xff)));
const x58 = (x56 >> 8);
- const x59 = @truncate(u8, (x58 & 0xff));
+ const x59 = @as(u8, @truncate((x58 & 0xff)));
const x60 = (x58 >> 8);
- const x61 = @truncate(u8, (x60 & 0xff));
- const x62 = @truncate(u8, (x60 >> 8));
- const x63 = @truncate(u8, (x2 & 0xff));
+ const x61 = @as(u8, @truncate((x60 & 0xff)));
+ const x62 = @as(u8, @truncate((x60 >> 8)));
+ const x63 = @as(u8, @truncate((x2 & 0xff)));
const x64 = (x2 >> 8);
- const x65 = @truncate(u8, (x64 & 0xff));
+ const x65 = @as(u8, @truncate((x64 & 0xff)));
const x66 = (x64 >> 8);
- const x67 = @truncate(u8, (x66 & 0xff));
+ const x67 = @as(u8, @truncate((x66 & 0xff)));
const x68 = (x66 >> 8);
- const x69 = @truncate(u8, (x68 & 0xff));
+ const x69 = @as(u8, @truncate((x68 & 0xff)));
const x70 = (x68 >> 8);
- const x71 = @truncate(u8, (x70 & 0xff));
+ const x71 = @as(u8, @truncate((x70 & 0xff)));
const x72 = (x70 >> 8);
- const x73 = @truncate(u8, (x72 & 0xff));
+ const x73 = @as(u8, @truncate((x72 & 0xff)));
const x74 = (x72 >> 8);
- const x75 = @truncate(u8, (x74 & 0xff));
- const x76 = @truncate(u8, (x74 >> 8));
- const x77 = @truncate(u8, (x1 & 0xff));
+ const x75 = @as(u8, @truncate((x74 & 0xff)));
+ const x76 = @as(u8, @truncate((x74 >> 8)));
+ const x77 = @as(u8, @truncate((x1 & 0xff)));
const x78 = (x1 >> 8);
- const x79 = @truncate(u8, (x78 & 0xff));
+ const x79 = @as(u8, @truncate((x78 & 0xff)));
const x80 = (x78 >> 8);
- const x81 = @truncate(u8, (x80 & 0xff));
+ const x81 = @as(u8, @truncate((x80 & 0xff)));
const x82 = (x80 >> 8);
- const x83 = @truncate(u8, (x82 & 0xff));
+ const x83 = @as(u8, @truncate((x82 & 0xff)));
const x84 = (x82 >> 8);
- const x85 = @truncate(u8, (x84 & 0xff));
+ const x85 = @as(u8, @truncate((x84 & 0xff)));
const x86 = (x84 >> 8);
- const x87 = @truncate(u8, (x86 & 0xff));
+ const x87 = @as(u8, @truncate((x86 & 0xff)));
const x88 = (x86 >> 8);
- const x89 = @truncate(u8, (x88 & 0xff));
- const x90 = @truncate(u8, (x88 >> 8));
+ const x89 = @as(u8, @truncate((x88 & 0xff)));
+ const x90 = @as(u8, @truncate((x88 >> 8)));
out1[0] = x7;
out1[1] = x9;
out1[2] = x11;
@@ -3246,7 +3246,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[
var x1: u64 = undefined;
var x2: u1 = undefined;
addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1);
- const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1)));
+ const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1))));
var x4: u64 = undefined;
var x5: u1 = undefined;
addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1);
@@ -3408,7 +3408,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[
cmovznzU64(&x102, x3, (arg5[4]), x94);
var x103: u64 = undefined;
cmovznzU64(&x103, x3, (arg5[5]), x96);
- const x104 = @truncate(u1, (x28 & 0x1));
+ const x104 = @as(u1, @truncate((x28 & 0x1)));
var x105: u64 = undefined;
cmovznzU64(&x105, x104, 0x0, x7);
var x106: u64 = undefined;
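The divstep hunks above combine a sign bit and a parity bit, both extracted as u1 values; a reduced sketch (negAndOdd and its argument names are illustrative):

const std = @import("std");

// Bit 63 of one word is ANDed with bit 0 of another; each single bit is
// obtained by shifting/masking and then narrowing to u1 via @truncate.
fn negAndOdd(d: u64, f0: u64) u1 {
    return @as(u1, @truncate(d >> 63)) & @as(u1, @truncate(f0 & 1));
}

test "negAndOdd" {
    try std.testing.expectEqual(@as(u1, 1), negAndOdd(1 << 63, 3));
    try std.testing.expectEqual(@as(u1, 0), negAndOdd(1 << 63, 2));
    try std.testing.expectEqual(@as(u1, 0), negAndOdd(0, 3));
}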
diff --git a/lib/std/crypto/pcurves/p384/p384_scalar_64.zig b/lib/std/crypto/pcurves/p384/p384_scalar_64.zig
index 0ce7727148..fc787ba7b9 100644
--- a/lib/std/crypto/pcurves/p384/p384_scalar_64.zig
+++ b/lib/std/crypto/pcurves/p384/p384_scalar_64.zig
@@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
@setRuntimeSafety(mode == .Debug);
const x = @as(u128, arg1) * @as(u128, arg2);
- out1.* = @truncate(u64, x);
- out2.* = @truncate(u64, x >> 64);
+ out1.* = @as(u64, @truncate(x));
+ out2.* = @as(u64, @truncate(x >> 64));
}
/// The function cmovznzU64 is a single-word conditional move.
@@ -2982,90 +2982,90 @@ pub fn toBytes(out1: *[48]u8, arg1: [6]u64) void {
const x4 = (arg1[2]);
const x5 = (arg1[1]);
const x6 = (arg1[0]);
- const x7 = @truncate(u8, (x6 & 0xff));
+ const x7 = @as(u8, @truncate((x6 & 0xff)));
const x8 = (x6 >> 8);
- const x9 = @truncate(u8, (x8 & 0xff));
+ const x9 = @as(u8, @truncate((x8 & 0xff)));
const x10 = (x8 >> 8);
- const x11 = @truncate(u8, (x10 & 0xff));
+ const x11 = @as(u8, @truncate((x10 & 0xff)));
const x12 = (x10 >> 8);
- const x13 = @truncate(u8, (x12 & 0xff));
+ const x13 = @as(u8, @truncate((x12 & 0xff)));
const x14 = (x12 >> 8);
- const x15 = @truncate(u8, (x14 & 0xff));
+ const x15 = @as(u8, @truncate((x14 & 0xff)));
const x16 = (x14 >> 8);
- const x17 = @truncate(u8, (x16 & 0xff));
+ const x17 = @as(u8, @truncate((x16 & 0xff)));
const x18 = (x16 >> 8);
- const x19 = @truncate(u8, (x18 & 0xff));
- const x20 = @truncate(u8, (x18 >> 8));
- const x21 = @truncate(u8, (x5 & 0xff));
+ const x19 = @as(u8, @truncate((x18 & 0xff)));
+ const x20 = @as(u8, @truncate((x18 >> 8)));
+ const x21 = @as(u8, @truncate((x5 & 0xff)));
const x22 = (x5 >> 8);
- const x23 = @truncate(u8, (x22 & 0xff));
+ const x23 = @as(u8, @truncate((x22 & 0xff)));
const x24 = (x22 >> 8);
- const x25 = @truncate(u8, (x24 & 0xff));
+ const x25 = @as(u8, @truncate((x24 & 0xff)));
const x26 = (x24 >> 8);
- const x27 = @truncate(u8, (x26 & 0xff));
+ const x27 = @as(u8, @truncate((x26 & 0xff)));
const x28 = (x26 >> 8);
- const x29 = @truncate(u8, (x28 & 0xff));
+ const x29 = @as(u8, @truncate((x28 & 0xff)));
const x30 = (x28 >> 8);
- const x31 = @truncate(u8, (x30 & 0xff));
+ const x31 = @as(u8, @truncate((x30 & 0xff)));
const x32 = (x30 >> 8);
- const x33 = @truncate(u8, (x32 & 0xff));
- const x34 = @truncate(u8, (x32 >> 8));
- const x35 = @truncate(u8, (x4 & 0xff));
+ const x33 = @as(u8, @truncate((x32 & 0xff)));
+ const x34 = @as(u8, @truncate((x32 >> 8)));
+ const x35 = @as(u8, @truncate((x4 & 0xff)));
const x36 = (x4 >> 8);
- const x37 = @truncate(u8, (x36 & 0xff));
+ const x37 = @as(u8, @truncate((x36 & 0xff)));
const x38 = (x36 >> 8);
- const x39 = @truncate(u8, (x38 & 0xff));
+ const x39 = @as(u8, @truncate((x38 & 0xff)));
const x40 = (x38 >> 8);
- const x41 = @truncate(u8, (x40 & 0xff));
+ const x41 = @as(u8, @truncate((x40 & 0xff)));
const x42 = (x40 >> 8);
- const x43 = @truncate(u8, (x42 & 0xff));
+ const x43 = @as(u8, @truncate((x42 & 0xff)));
const x44 = (x42 >> 8);
- const x45 = @truncate(u8, (x44 & 0xff));
+ const x45 = @as(u8, @truncate((x44 & 0xff)));
const x46 = (x44 >> 8);
- const x47 = @truncate(u8, (x46 & 0xff));
- const x48 = @truncate(u8, (x46 >> 8));
- const x49 = @truncate(u8, (x3 & 0xff));
+ const x47 = @as(u8, @truncate((x46 & 0xff)));
+ const x48 = @as(u8, @truncate((x46 >> 8)));
+ const x49 = @as(u8, @truncate((x3 & 0xff)));
const x50 = (x3 >> 8);
- const x51 = @truncate(u8, (x50 & 0xff));
+ const x51 = @as(u8, @truncate((x50 & 0xff)));
const x52 = (x50 >> 8);
- const x53 = @truncate(u8, (x52 & 0xff));
+ const x53 = @as(u8, @truncate((x52 & 0xff)));
const x54 = (x52 >> 8);
- const x55 = @truncate(u8, (x54 & 0xff));
+ const x55 = @as(u8, @truncate((x54 & 0xff)));
const x56 = (x54 >> 8);
- const x57 = @truncate(u8, (x56 & 0xff));
+ const x57 = @as(u8, @truncate((x56 & 0xff)));
const x58 = (x56 >> 8);
- const x59 = @truncate(u8, (x58 & 0xff));
+ const x59 = @as(u8, @truncate((x58 & 0xff)));
const x60 = (x58 >> 8);
- const x61 = @truncate(u8, (x60 & 0xff));
- const x62 = @truncate(u8, (x60 >> 8));
- const x63 = @truncate(u8, (x2 & 0xff));
+ const x61 = @as(u8, @truncate((x60 & 0xff)));
+ const x62 = @as(u8, @truncate((x60 >> 8)));
+ const x63 = @as(u8, @truncate((x2 & 0xff)));
const x64 = (x2 >> 8);
- const x65 = @truncate(u8, (x64 & 0xff));
+ const x65 = @as(u8, @truncate((x64 & 0xff)));
const x66 = (x64 >> 8);
- const x67 = @truncate(u8, (x66 & 0xff));
+ const x67 = @as(u8, @truncate((x66 & 0xff)));
const x68 = (x66 >> 8);
- const x69 = @truncate(u8, (x68 & 0xff));
+ const x69 = @as(u8, @truncate((x68 & 0xff)));
const x70 = (x68 >> 8);
- const x71 = @truncate(u8, (x70 & 0xff));
+ const x71 = @as(u8, @truncate((x70 & 0xff)));
const x72 = (x70 >> 8);
- const x73 = @truncate(u8, (x72 & 0xff));
+ const x73 = @as(u8, @truncate((x72 & 0xff)));
const x74 = (x72 >> 8);
- const x75 = @truncate(u8, (x74 & 0xff));
- const x76 = @truncate(u8, (x74 >> 8));
- const x77 = @truncate(u8, (x1 & 0xff));
+ const x75 = @as(u8, @truncate((x74 & 0xff)));
+ const x76 = @as(u8, @truncate((x74 >> 8)));
+ const x77 = @as(u8, @truncate((x1 & 0xff)));
const x78 = (x1 >> 8);
- const x79 = @truncate(u8, (x78 & 0xff));
+ const x79 = @as(u8, @truncate((x78 & 0xff)));
const x80 = (x78 >> 8);
- const x81 = @truncate(u8, (x80 & 0xff));
+ const x81 = @as(u8, @truncate((x80 & 0xff)));
const x82 = (x80 >> 8);
- const x83 = @truncate(u8, (x82 & 0xff));
+ const x83 = @as(u8, @truncate((x82 & 0xff)));
const x84 = (x82 >> 8);
- const x85 = @truncate(u8, (x84 & 0xff));
+ const x85 = @as(u8, @truncate((x84 & 0xff)));
const x86 = (x84 >> 8);
- const x87 = @truncate(u8, (x86 & 0xff));
+ const x87 = @as(u8, @truncate((x86 & 0xff)));
const x88 = (x86 >> 8);
- const x89 = @truncate(u8, (x88 & 0xff));
- const x90 = @truncate(u8, (x88 >> 8));
+ const x89 = @as(u8, @truncate((x88 & 0xff)));
+ const x90 = @as(u8, @truncate((x88 >> 8)));
out1[0] = x7;
out1[1] = x9;
out1[2] = x11;
@@ -3300,7 +3300,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[
var x1: u64 = undefined;
var x2: u1 = undefined;
addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1);
- const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1)));
+ const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1))));
var x4: u64 = undefined;
var x5: u1 = undefined;
addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1);
@@ -3462,7 +3462,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[
cmovznzU64(&x102, x3, (arg5[4]), x94);
var x103: u64 = undefined;
cmovznzU64(&x103, x3, (arg5[5]), x96);
- const x104 = @truncate(u1, (x28 & 0x1));
+ const x104 = @as(u1, @truncate((x28 & 0x1)));
var x105: u64 = undefined;
cmovznzU64(&x105, x104, 0x0, x7);
var x106: u64 = undefined;
diff --git a/lib/std/crypto/pcurves/secp256k1.zig b/lib/std/crypto/pcurves/secp256k1.zig
index f0b086f974..cd7f1faf75 100644
--- a/lib/std/crypto/pcurves/secp256k1.zig
+++ b/lib/std/crypto/pcurves/secp256k1.zig
@@ -67,8 +67,8 @@ pub const Secp256k1 = struct {
const t1 = math.mulWide(u256, k, 21949224512762693861512883645436906316123769664773102907882521278123970637873);
const t2 = math.mulWide(u256, k, 103246583619904461035481197785446227098457807945486720222659797044629401272177);
- const c1 = @truncate(u128, t1 >> 384) + @truncate(u1, t1 >> 383);
- const c2 = @truncate(u128, t2 >> 384) + @truncate(u1, t2 >> 383);
+ const c1 = @as(u128, @truncate(t1 >> 384)) + @as(u1, @truncate(t1 >> 383));
+ const c2 = @as(u128, @truncate(t2 >> 384)) + @as(u1, @truncate(t2 >> 383));
var buf: [32]u8 = undefined;
@@ -346,7 +346,7 @@ pub const Secp256k1 = struct {
var t = Secp256k1.identityElement;
comptime var i: u8 = 1;
inline while (i < pc.len) : (i += 1) {
- t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8));
+ t.cMov(pc[i], @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8)));
}
return t;
}
@@ -354,8 +354,8 @@ pub const Secp256k1 = struct {
fn slide(s: [32]u8) [2 * 32 + 1]i8 {
var e: [2 * 32 + 1]i8 = undefined;
for (s, 0..) |x, i| {
- e[i * 2 + 0] = @as(i8, @truncate(u4, x));
- e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
+ e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x)));
+ e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4)));
}
// Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7
var carry: i8 = 0;
@@ -379,9 +379,9 @@ pub const Secp256k1 = struct {
while (true) : (pos -= 1) {
const slot = e[pos];
if (slot > 0) {
- q = q.add(pc[@intCast(usize, slot)]);
+ q = q.add(pc[@as(usize, @intCast(slot))]);
} else if (slot < 0) {
- q = q.sub(pc[@intCast(usize, -slot)]);
+ q = q.sub(pc[@as(usize, @intCast(-slot))]);
}
if (pos == 0) break;
q = q.dbl().dbl().dbl().dbl();
@@ -394,7 +394,7 @@ pub const Secp256k1 = struct {
var q = Secp256k1.identityElement;
var pos: usize = 252;
while (true) : (pos -= 4) {
- const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos)));
+ const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos)))));
if (vartime) {
if (slot != 0) {
q = q.add(pc[slot]);
@@ -482,15 +482,15 @@ pub const Secp256k1 = struct {
while (true) : (pos -= 1) {
const slot1 = e1[pos];
if (slot1 > 0) {
- q = q.add(pc1[@intCast(usize, slot1)]);
+ q = q.add(pc1[@as(usize, @intCast(slot1))]);
} else if (slot1 < 0) {
- q = q.sub(pc1[@intCast(usize, -slot1)]);
+ q = q.sub(pc1[@as(usize, @intCast(-slot1))]);
}
const slot2 = e2[pos];
if (slot2 > 0) {
- q = q.add(pc2[@intCast(usize, slot2)]);
+ q = q.add(pc2[@as(usize, @intCast(slot2))]);
} else if (slot2 < 0) {
- q = q.sub(pc2[@intCast(usize, -slot2)]);
+ q = q.sub(pc2[@as(usize, @intCast(-slot2))]);
}
if (pos == 0) break;
q = q.dbl().dbl().dbl().dbl();
@@ -523,15 +523,15 @@ pub const Secp256k1 = struct {
while (true) : (pos -= 1) {
const slot1 = e1[pos];
if (slot1 > 0) {
- q = q.add(pc1[@intCast(usize, slot1)]);
+ q = q.add(pc1[@as(usize, @intCast(slot1))]);
} else if (slot1 < 0) {
- q = q.sub(pc1[@intCast(usize, -slot1)]);
+ q = q.sub(pc1[@as(usize, @intCast(-slot1))]);
}
const slot2 = e2[pos];
if (slot2 > 0) {
- q = q.add(pc2[@intCast(usize, slot2)]);
+ q = q.add(pc2[@as(usize, @intCast(slot2))]);
} else if (slot2 < 0) {
- q = q.sub(pc2[@intCast(usize, -slot2)]);
+ q = q.sub(pc2[@as(usize, @intCast(-slot2))]);
}
if (pos == 0) break;
q = q.dbl().dbl().dbl().dbl();
diff --git a/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig b/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig
index 5643ea88d5..ae3e97c619 100644
--- a/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig
+++ b/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig
@@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
@setRuntimeSafety(mode == .Debug);
const x = @as(u128, arg1) * @as(u128, arg2);
- out1.* = @truncate(u64, x);
- out2.* = @truncate(u64, x >> 64);
+ out1.* = @as(u64, @truncate(x));
+ out2.* = @as(u64, @truncate(x >> 64));
}
/// The function cmovznzU64 is a single-word conditional move.
@@ -1488,62 +1488,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void {
const x2 = (arg1[2]);
const x3 = (arg1[1]);
const x4 = (arg1[0]);
- const x5 = @truncate(u8, (x4 & 0xff));
+ const x5 = @as(u8, @truncate((x4 & 0xff)));
const x6 = (x4 >> 8);
- const x7 = @truncate(u8, (x6 & 0xff));
+ const x7 = @as(u8, @truncate((x6 & 0xff)));
const x8 = (x6 >> 8);
- const x9 = @truncate(u8, (x8 & 0xff));
+ const x9 = @as(u8, @truncate((x8 & 0xff)));
const x10 = (x8 >> 8);
- const x11 = @truncate(u8, (x10 & 0xff));
+ const x11 = @as(u8, @truncate((x10 & 0xff)));
const x12 = (x10 >> 8);
- const x13 = @truncate(u8, (x12 & 0xff));
+ const x13 = @as(u8, @truncate((x12 & 0xff)));
const x14 = (x12 >> 8);
- const x15 = @truncate(u8, (x14 & 0xff));
+ const x15 = @as(u8, @truncate((x14 & 0xff)));
const x16 = (x14 >> 8);
- const x17 = @truncate(u8, (x16 & 0xff));
- const x18 = @truncate(u8, (x16 >> 8));
- const x19 = @truncate(u8, (x3 & 0xff));
+ const x17 = @as(u8, @truncate((x16 & 0xff)));
+ const x18 = @as(u8, @truncate((x16 >> 8)));
+ const x19 = @as(u8, @truncate((x3 & 0xff)));
const x20 = (x3 >> 8);
- const x21 = @truncate(u8, (x20 & 0xff));
+ const x21 = @as(u8, @truncate((x20 & 0xff)));
const x22 = (x20 >> 8);
- const x23 = @truncate(u8, (x22 & 0xff));
+ const x23 = @as(u8, @truncate((x22 & 0xff)));
const x24 = (x22 >> 8);
- const x25 = @truncate(u8, (x24 & 0xff));
+ const x25 = @as(u8, @truncate((x24 & 0xff)));
const x26 = (x24 >> 8);
- const x27 = @truncate(u8, (x26 & 0xff));
+ const x27 = @as(u8, @truncate((x26 & 0xff)));
const x28 = (x26 >> 8);
- const x29 = @truncate(u8, (x28 & 0xff));
+ const x29 = @as(u8, @truncate((x28 & 0xff)));
const x30 = (x28 >> 8);
- const x31 = @truncate(u8, (x30 & 0xff));
- const x32 = @truncate(u8, (x30 >> 8));
- const x33 = @truncate(u8, (x2 & 0xff));
+ const x31 = @as(u8, @truncate((x30 & 0xff)));
+ const x32 = @as(u8, @truncate((x30 >> 8)));
+ const x33 = @as(u8, @truncate((x2 & 0xff)));
const x34 = (x2 >> 8);
- const x35 = @truncate(u8, (x34 & 0xff));
+ const x35 = @as(u8, @truncate((x34 & 0xff)));
const x36 = (x34 >> 8);
- const x37 = @truncate(u8, (x36 & 0xff));
+ const x37 = @as(u8, @truncate((x36 & 0xff)));
const x38 = (x36 >> 8);
- const x39 = @truncate(u8, (x38 & 0xff));
+ const x39 = @as(u8, @truncate((x38 & 0xff)));
const x40 = (x38 >> 8);
- const x41 = @truncate(u8, (x40 & 0xff));
+ const x41 = @as(u8, @truncate((x40 & 0xff)));
const x42 = (x40 >> 8);
- const x43 = @truncate(u8, (x42 & 0xff));
+ const x43 = @as(u8, @truncate((x42 & 0xff)));
const x44 = (x42 >> 8);
- const x45 = @truncate(u8, (x44 & 0xff));
- const x46 = @truncate(u8, (x44 >> 8));
- const x47 = @truncate(u8, (x1 & 0xff));
+ const x45 = @as(u8, @truncate((x44 & 0xff)));
+ const x46 = @as(u8, @truncate((x44 >> 8)));
+ const x47 = @as(u8, @truncate((x1 & 0xff)));
const x48 = (x1 >> 8);
- const x49 = @truncate(u8, (x48 & 0xff));
+ const x49 = @as(u8, @truncate((x48 & 0xff)));
const x50 = (x48 >> 8);
- const x51 = @truncate(u8, (x50 & 0xff));
+ const x51 = @as(u8, @truncate((x50 & 0xff)));
const x52 = (x50 >> 8);
- const x53 = @truncate(u8, (x52 & 0xff));
+ const x53 = @as(u8, @truncate((x52 & 0xff)));
const x54 = (x52 >> 8);
- const x55 = @truncate(u8, (x54 & 0xff));
+ const x55 = @as(u8, @truncate((x54 & 0xff)));
const x56 = (x54 >> 8);
- const x57 = @truncate(u8, (x56 & 0xff));
+ const x57 = @as(u8, @truncate((x56 & 0xff)));
const x58 = (x56 >> 8);
- const x59 = @truncate(u8, (x58 & 0xff));
- const x60 = @truncate(u8, (x58 >> 8));
+ const x59 = @as(u8, @truncate((x58 & 0xff)));
+ const x60 = @as(u8, @truncate((x58 >> 8)));
out1[0] = x5;
out1[1] = x7;
out1[2] = x9;
@@ -1726,7 +1726,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
var x1: u64 = undefined;
var x2: u1 = undefined;
addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1);
- const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1)));
+ const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1))));
var x4: u64 = undefined;
var x5: u1 = undefined;
addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1);
@@ -1840,7 +1840,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
cmovznzU64(&x72, x3, (arg5[2]), x66);
var x73: u64 = undefined;
cmovznzU64(&x73, x3, (arg5[3]), x68);
- const x74 = @truncate(u1, (x22 & 0x1));
+ const x74 = @as(u1, @truncate((x22 & 0x1)));
var x75: u64 = undefined;
cmovznzU64(&x75, x74, 0x0, x7);
var x76: u64 = undefined;
diff --git a/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig b/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig
index aca1bd3063..12c833bb33 100644
--- a/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig
+++ b/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig
@@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
@setRuntimeSafety(mode == .Debug);
const x = @as(u128, arg1) * @as(u128, arg2);
- out1.* = @truncate(u64, x);
- out2.* = @truncate(u64, x >> 64);
+ out1.* = @as(u64, @truncate(x));
+ out2.* = @as(u64, @truncate(x >> 64));
}
/// The function cmovznzU64 is a single-word conditional move.
@@ -1548,62 +1548,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void {
const x2 = (arg1[2]);
const x3 = (arg1[1]);
const x4 = (arg1[0]);
- const x5 = @truncate(u8, (x4 & 0xff));
+ const x5 = @as(u8, @truncate((x4 & 0xff)));
const x6 = (x4 >> 8);
- const x7 = @truncate(u8, (x6 & 0xff));
+ const x7 = @as(u8, @truncate((x6 & 0xff)));
const x8 = (x6 >> 8);
- const x9 = @truncate(u8, (x8 & 0xff));
+ const x9 = @as(u8, @truncate((x8 & 0xff)));
const x10 = (x8 >> 8);
- const x11 = @truncate(u8, (x10 & 0xff));
+ const x11 = @as(u8, @truncate((x10 & 0xff)));
const x12 = (x10 >> 8);
- const x13 = @truncate(u8, (x12 & 0xff));
+ const x13 = @as(u8, @truncate((x12 & 0xff)));
const x14 = (x12 >> 8);
- const x15 = @truncate(u8, (x14 & 0xff));
+ const x15 = @as(u8, @truncate((x14 & 0xff)));
const x16 = (x14 >> 8);
- const x17 = @truncate(u8, (x16 & 0xff));
- const x18 = @truncate(u8, (x16 >> 8));
- const x19 = @truncate(u8, (x3 & 0xff));
+ const x17 = @as(u8, @truncate((x16 & 0xff)));
+ const x18 = @as(u8, @truncate((x16 >> 8)));
+ const x19 = @as(u8, @truncate((x3 & 0xff)));
const x20 = (x3 >> 8);
- const x21 = @truncate(u8, (x20 & 0xff));
+ const x21 = @as(u8, @truncate((x20 & 0xff)));
const x22 = (x20 >> 8);
- const x23 = @truncate(u8, (x22 & 0xff));
+ const x23 = @as(u8, @truncate((x22 & 0xff)));
const x24 = (x22 >> 8);
- const x25 = @truncate(u8, (x24 & 0xff));
+ const x25 = @as(u8, @truncate((x24 & 0xff)));
const x26 = (x24 >> 8);
- const x27 = @truncate(u8, (x26 & 0xff));
+ const x27 = @as(u8, @truncate((x26 & 0xff)));
const x28 = (x26 >> 8);
- const x29 = @truncate(u8, (x28 & 0xff));
+ const x29 = @as(u8, @truncate((x28 & 0xff)));
const x30 = (x28 >> 8);
- const x31 = @truncate(u8, (x30 & 0xff));
- const x32 = @truncate(u8, (x30 >> 8));
- const x33 = @truncate(u8, (x2 & 0xff));
+ const x31 = @as(u8, @truncate((x30 & 0xff)));
+ const x32 = @as(u8, @truncate((x30 >> 8)));
+ const x33 = @as(u8, @truncate((x2 & 0xff)));
const x34 = (x2 >> 8);
- const x35 = @truncate(u8, (x34 & 0xff));
+ const x35 = @as(u8, @truncate((x34 & 0xff)));
const x36 = (x34 >> 8);
- const x37 = @truncate(u8, (x36 & 0xff));
+ const x37 = @as(u8, @truncate((x36 & 0xff)));
const x38 = (x36 >> 8);
- const x39 = @truncate(u8, (x38 & 0xff));
+ const x39 = @as(u8, @truncate((x38 & 0xff)));
const x40 = (x38 >> 8);
- const x41 = @truncate(u8, (x40 & 0xff));
+ const x41 = @as(u8, @truncate((x40 & 0xff)));
const x42 = (x40 >> 8);
- const x43 = @truncate(u8, (x42 & 0xff));
+ const x43 = @as(u8, @truncate((x42 & 0xff)));
const x44 = (x42 >> 8);
- const x45 = @truncate(u8, (x44 & 0xff));
- const x46 = @truncate(u8, (x44 >> 8));
- const x47 = @truncate(u8, (x1 & 0xff));
+ const x45 = @as(u8, @truncate((x44 & 0xff)));
+ const x46 = @as(u8, @truncate((x44 >> 8)));
+ const x47 = @as(u8, @truncate((x1 & 0xff)));
const x48 = (x1 >> 8);
- const x49 = @truncate(u8, (x48 & 0xff));
+ const x49 = @as(u8, @truncate((x48 & 0xff)));
const x50 = (x48 >> 8);
- const x51 = @truncate(u8, (x50 & 0xff));
+ const x51 = @as(u8, @truncate((x50 & 0xff)));
const x52 = (x50 >> 8);
- const x53 = @truncate(u8, (x52 & 0xff));
+ const x53 = @as(u8, @truncate((x52 & 0xff)));
const x54 = (x52 >> 8);
- const x55 = @truncate(u8, (x54 & 0xff));
+ const x55 = @as(u8, @truncate((x54 & 0xff)));
const x56 = (x54 >> 8);
- const x57 = @truncate(u8, (x56 & 0xff));
+ const x57 = @as(u8, @truncate((x56 & 0xff)));
const x58 = (x56 >> 8);
- const x59 = @truncate(u8, (x58 & 0xff));
- const x60 = @truncate(u8, (x58 >> 8));
+ const x59 = @as(u8, @truncate((x58 & 0xff)));
+ const x60 = @as(u8, @truncate((x58 >> 8)));
out1[0] = x5;
out1[1] = x7;
out1[2] = x9;
@@ -1786,7 +1786,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
var x1: u64 = undefined;
var x2: u1 = undefined;
addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1);
- const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1)));
+ const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1))));
var x4: u64 = undefined;
var x5: u1 = undefined;
addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1);
@@ -1900,7 +1900,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
cmovznzU64(&x72, x3, (arg5[2]), x66);
var x73: u64 = undefined;
cmovznzU64(&x73, x3, (arg5[3]), x68);
- const x74 = @truncate(u1, (x22 & 0x1));
+ const x74 = @as(u1, @truncate((x22 & 0x1)));
var x75: u64 = undefined;
cmovznzU64(&x75, x74, 0x0, x7);
var x76: u64 = undefined;
diff --git a/lib/std/crypto/phc_encoding.zig b/lib/std/crypto/phc_encoding.zig
index 1eeee39a5a..fecd7f1239 100644
--- a/lib/std/crypto/phc_encoding.zig
+++ b/lib/std/crypto/phc_encoding.zig
@@ -193,7 +193,7 @@ pub fn serialize(params: anytype, str: []u8) Error![]const u8 {
pub fn calcSize(params: anytype) usize {
var buf = io.countingWriter(io.null_writer);
serializeTo(params, buf.writer()) catch unreachable;
- return @intCast(usize, buf.bytes_written);
+ return @as(usize, @intCast(buf.bytes_written));
}
fn serializeTo(params: anytype, out: anytype) !void {
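The calcSize hunk above counts serialized bytes with a counting writer over a null writer and then narrows the 64-bit count; a sketch of the same shape with an arbitrary payload (serializedSize and writing a raw byte slice are illustrative stand-ins for serializeTo):

const std = @import("std");
const io = std.io;

// Writing into countingWriter(null_writer) discards the bytes but tracks
// how many were written; bytes_written is u64, hence the @intCast to usize.
fn serializedSize(bytes: []const u8) usize {
    var counter = io.countingWriter(io.null_writer);
    counter.writer().writeAll(bytes) catch unreachable;
    return @as(usize, @intCast(counter.bytes_written));
}

test "serializedSize counts bytes" {
    try std.testing.expectEqual(@as(usize, 5), serializedSize("hello"));
}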
diff --git a/lib/std/crypto/poly1305.zig b/lib/std/crypto/poly1305.zig
index 51e1c2ab24..5bcb75169d 100644
--- a/lib/std/crypto/poly1305.zig
+++ b/lib/std/crypto/poly1305.zig
@@ -76,12 +76,12 @@ pub const Poly1305 = struct {
const m1 = h1r0 +% h0r1;
const m2 = h2r0 +% h1r1;
- const t0 = @truncate(u64, m0);
- v = @addWithOverflow(@truncate(u64, m1), @truncate(u64, m0 >> 64));
+ const t0 = @as(u64, @truncate(m0));
+ v = @addWithOverflow(@as(u64, @truncate(m1)), @as(u64, @truncate(m0 >> 64)));
const t1 = v[0];
- v = add(@truncate(u64, m2), @truncate(u64, m1 >> 64), v[1]);
+ v = add(@as(u64, @truncate(m2)), @as(u64, @truncate(m1 >> 64)), v[1]);
const t2 = v[0];
- v = add(@truncate(u64, m3), @truncate(u64, m2 >> 64), v[1]);
+ v = add(@as(u64, @truncate(m3)), @as(u64, @truncate(m2 >> 64)), v[1]);
const t3 = v[0];
// Partial reduction
@@ -98,9 +98,9 @@ pub const Poly1305 = struct {
h1 = v[0];
h2 +%= v[1];
const cc = (cclo | (@as(u128, cchi) << 64)) >> 2;
- v = @addWithOverflow(h0, @truncate(u64, cc));
+ v = @addWithOverflow(h0, @as(u64, @truncate(cc)));
h0 = v[0];
- v = add(h1, @truncate(u64, cc >> 64), v[1]);
+ v = add(h1, @as(u64, @truncate(cc >> 64)), v[1]);
h1 = v[0];
h2 +%= v[1];
}
@@ -185,7 +185,7 @@ pub const Poly1305 = struct {
mem.writeIntLittle(u64, out[0..8], st.h[0]);
mem.writeIntLittle(u64, out[8..16], st.h[1]);
- utils.secureZero(u8, @ptrCast([*]u8, st)[0..@sizeOf(Poly1305)]);
+ utils.secureZero(u8, @as([*]u8, @ptrCast(st))[0..@sizeOf(Poly1305)]);
}
pub fn create(out: *[mac_length]u8, msg: []const u8, key: *const [key_length]u8) void {
diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig
index c8a639ad0b..231f941099 100644
--- a/lib/std/crypto/salsa20.zig
+++ b/lib/std/crypto/salsa20.zig
@@ -337,8 +337,8 @@ pub fn Salsa(comptime rounds: comptime_int) type {
var d: [4]u32 = undefined;
d[0] = mem.readIntLittle(u32, nonce[0..4]);
d[1] = mem.readIntLittle(u32, nonce[4..8]);
- d[2] = @truncate(u32, counter);
- d[3] = @truncate(u32, counter >> 32);
+ d[2] = @as(u32, @truncate(counter));
+ d[3] = @as(u32, @truncate(counter >> 32));
SalsaImpl(rounds).salsaXor(out, in, keyToWords(key), d);
}
};
diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig
index 97dd9b95d0..8745a3b34e 100644
--- a/lib/std/crypto/scrypt.zig
+++ b/lib/std/crypto/scrypt.zig
@@ -73,11 +73,11 @@ fn salsaXor(tmp: *align(16) [16]u32, in: []align(16) const u32, out: []align(16)
}
fn blockMix(tmp: *align(16) [16]u32, in: []align(16) const u32, out: []align(16) u32, r: u30) void {
- blockCopy(tmp, @alignCast(16, in[(2 * r - 1) * 16 ..]), 1);
+ blockCopy(tmp, @alignCast(in[(2 * r - 1) * 16 ..]), 1);
var i: usize = 0;
while (i < 2 * r) : (i += 2) {
- salsaXor(tmp, @alignCast(16, in[i * 16 ..]), @alignCast(16, out[i * 8 ..]));
- salsaXor(tmp, @alignCast(16, in[i * 16 + 16 ..]), @alignCast(16, out[i * 8 + r * 16 ..]));
+ salsaXor(tmp, @alignCast(in[i * 16 ..]), @alignCast(out[i * 8 ..]));
+ salsaXor(tmp, @alignCast(in[i * 16 + 16 ..]), @alignCast(out[i * 8 + r * 16 ..]));
}
}
@@ -87,8 +87,8 @@ fn integerify(b: []align(16) const u32, r: u30) u64 {
}
fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16) u32) void {
- var x = @alignCast(16, xy[0 .. 32 * r]);
- var y = @alignCast(16, xy[32 * r ..]);
+ var x: []align(16) u32 = @alignCast(xy[0 .. 32 * r]);
+ var y: []align(16) u32 = @alignCast(xy[32 * r ..]);
for (x, 0..) |*v1, j| {
v1.* = mem.readIntSliceLittle(u32, b[4 * j ..]);
@@ -97,21 +97,21 @@ fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16)
var tmp: [16]u32 align(16) = undefined;
var i: usize = 0;
while (i < n) : (i += 2) {
- blockCopy(@alignCast(16, v[i * (32 * r) ..]), x, 2 * r);
+ blockCopy(@alignCast(v[i * (32 * r) ..]), x, 2 * r);
blockMix(&tmp, x, y, r);
- blockCopy(@alignCast(16, v[(i + 1) * (32 * r) ..]), y, 2 * r);
+ blockCopy(@alignCast(v[(i + 1) * (32 * r) ..]), y, 2 * r);
blockMix(&tmp, y, x, r);
}
i = 0;
while (i < n) : (i += 2) {
- var j = @intCast(usize, integerify(x, r) & (n - 1));
- blockXor(x, @alignCast(16, v[j * (32 * r) ..]), 2 * r);
+ var j = @as(usize, @intCast(integerify(x, r) & (n - 1)));
+ blockXor(x, @alignCast(v[j * (32 * r) ..]), 2 * r);
blockMix(&tmp, x, y, r);
- j = @intCast(usize, integerify(y, r) & (n - 1));
- blockXor(y, @alignCast(16, v[j * (32 * r) ..]), 2 * r);
+ j = @as(usize, @intCast(integerify(y, r) & (n - 1)));
+ blockXor(y, @alignCast(v[j * (32 * r) ..]), 2 * r);
blockMix(&tmp, y, x, r);
}
@@ -147,12 +147,12 @@ pub const Params = struct {
const r: u30 = 8;
if (ops < mem_limit / 32) {
const max_n = ops / (r * 4);
- return Self{ .r = r, .p = 1, .ln = @intCast(u6, math.log2(max_n)) };
+ return Self{ .r = r, .p = 1, .ln = @as(u6, @intCast(math.log2(max_n))) };
} else {
- const max_n = mem_limit / (@intCast(usize, r) * 128);
- const ln = @intCast(u6, math.log2(max_n));
+ const max_n = mem_limit / (@as(usize, @intCast(r)) * 128);
+ const ln = @as(u6, @intCast(math.log2(max_n)));
const max_rp = @min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln));
- return Self{ .r = r, .p = @intCast(u30, max_rp / @as(u64, r)), .ln = ln };
+ return Self{ .r = r, .p = @as(u30, @intCast(max_rp / @as(u64, r))), .ln = ln };
}
}
};
@@ -185,7 +185,7 @@ pub fn kdf(
const n64 = @as(u64, 1) << params.ln;
if (n64 > max_size) return KdfError.WeakParameters;
- const n = @intCast(usize, n64);
+ const n = @as(usize, @intCast(n64));
if (@as(u64, params.r) * @as(u64, params.p) >= 1 << 30 or
params.r > max_int / 128 / @as(u64, params.p) or
params.r > max_int / 256 or
@@ -201,7 +201,7 @@ pub fn kdf(
try pwhash.pbkdf2(dk, password, salt, 1, HmacSha256);
var i: u32 = 0;
while (i < params.p) : (i += 1) {
- smix(@alignCast(16, dk[i * 128 * params.r ..]), params.r, n, v, xy);
+ smix(@alignCast(dk[i * 128 * params.r ..]), params.r, n, v, xy);
}
try pwhash.pbkdf2(derived_key, password, dk, 1, HmacSha256);
}
@@ -309,7 +309,7 @@ const crypt_format = struct {
pub fn calcSize(params: anytype) usize {
var buf = io.countingWriter(io.null_writer);
serializeTo(params, buf.writer()) catch unreachable;
- return @intCast(usize, buf.bytes_written);
+ return @as(usize, @intCast(buf.bytes_written));
}
fn serializeTo(params: anytype, out: anytype) !void {
@@ -343,7 +343,7 @@ const crypt_format = struct {
fn intEncode(dst: []u8, src: anytype) void {
var n = src;
for (dst) |*x| {
- x.* = map64[@truncate(u6, n)];
+ x.* = map64[@as(u6, @truncate(n))];
n = math.shr(@TypeOf(src), n, 6);
}
}
@@ -352,7 +352,7 @@ const crypt_format = struct {
var v: T = 0;
for (src, 0..) |x, i| {
const vi = mem.indexOfScalar(u8, &map64, x) orelse return EncodingError.InvalidEncoding;
- v |= @intCast(T, vi) << @intCast(math.Log2Int(T), i * 6);
+ v |= @as(T, @intCast(vi)) << @as(math.Log2Int(T), @intCast(i * 6));
}
return v;
}
@@ -366,10 +366,10 @@ const crypt_format = struct {
const leftover = src[i * 4 ..];
var v: u24 = 0;
for (leftover, 0..) |_, j| {
- v |= @as(u24, try intDecode(u6, leftover[j..][0..1])) << @intCast(u5, j * 6);
+ v |= @as(u24, try intDecode(u6, leftover[j..][0..1])) << @as(u5, @intCast(j * 6));
}
for (dst[i * 3 ..], 0..) |*x, j| {
- x.* = @truncate(u8, v >> @intCast(u5, j * 8));
+ x.* = @as(u8, @truncate(v >> @as(u5, @intCast(j * 8))));
}
}
@@ -382,7 +382,7 @@ const crypt_format = struct {
const leftover = src[i * 3 ..];
var v: u24 = 0;
for (leftover, 0..) |x, j| {
- v |= @as(u24, x) << @intCast(u5, j * 8);
+ v |= @as(u24, x) << @as(u5, @intCast(j * 8));
}
intEncode(dst[i * 4 ..], v);
}
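The scrypt hunks above drop the alignment argument from @alignCast; the target alignment now has to be recoverable from the result type, either from a parameter or, as in smix above, from an explicit slice annotation. A reduced sketch (secondHalf is an illustrative name; the u32/align(16) choice mirrors smix):

// Slicing from a runtime offset loses the align(16) guarantee, so it is
// reasserted; the alignment comes from the annotated result type, not from
// an argument to @alignCast.
fn secondHalf(xy: []align(16) u32, r: usize) []align(16) u32 {
    const y: []align(16) u32 = @alignCast(xy[32 * r ..]);
    return y;
}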
diff --git a/lib/std/crypto/sha1.zig b/lib/std/crypto/sha1.zig
index 1f5f3eaae2..82e23e0647 100644
--- a/lib/std/crypto/sha1.zig
+++ b/lib/std/crypto/sha1.zig
@@ -75,7 +75,7 @@ pub const Sha1 = struct {
// Copy any remainder for next pass.
@memcpy(d.buf[d.buf_len..][0 .. b.len - off], b[off..]);
- d.buf_len += @intCast(u8, b[off..].len);
+ d.buf_len += @as(u8, @intCast(b[off..].len));
d.total_len += b.len;
}
@@ -97,9 +97,9 @@ pub const Sha1 = struct {
// Append message length.
var i: usize = 1;
var len = d.total_len >> 5;
- d.buf[63] = @intCast(u8, d.total_len & 0x1f) << 3;
+ d.buf[63] = @as(u8, @intCast(d.total_len & 0x1f)) << 3;
while (i < 8) : (i += 1) {
- d.buf[63 - i] = @intCast(u8, len & 0xff);
+ d.buf[63 - i] = @as(u8, @intCast(len & 0xff));
len >>= 8;
}
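The sha1 final() hunk above keeps @intCast rather than @truncate for the length bytes, and the migration preserves that distinction: @intCast asserts the value fits in the destination, while @truncate silently drops high bits. A condensed sketch of the same tail encoding (appendLength is an illustrative name):

// total_len & 0x1f and len & 0xff always fit in u8, so @intCast is the
// semantically precise choice; the u8 is supplied by the surrounding @as.
fn appendLength(buf: *[64]u8, total_len: u64) void {
    buf[63] = @as(u8, @intCast(total_len & 0x1f)) << 3;
    var len = total_len >> 5;
    var i: usize = 1;
    while (i < 8) : (i += 1) {
        buf[63 - i] = @as(u8, @intCast(len & 0xff));
        len >>= 8;
    }
}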
diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig
index bd5a7cc5d4..ce543d3906 100644
--- a/lib/std/crypto/sha2.zig
+++ b/lib/std/crypto/sha2.zig
@@ -132,7 +132,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
// Copy any remainder for next pass.
const b_slice = b[off..];
@memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
- d.buf_len += @intCast(u8, b[off..].len);
+ d.buf_len += @as(u8, @intCast(b[off..].len));
d.total_len += b.len;
}
@@ -159,9 +159,9 @@ fn Sha2x32(comptime params: Sha2Params32) type {
// Append message length.
var i: usize = 1;
var len = d.total_len >> 5;
- d.buf[63] = @intCast(u8, d.total_len & 0x1f) << 3;
+ d.buf[63] = @as(u8, @intCast(d.total_len & 0x1f)) << 3;
while (i < 8) : (i += 1) {
- d.buf[63 - i] = @intCast(u8, len & 0xff);
+ d.buf[63 - i] = @as(u8, @intCast(len & 0xff));
len >>= 8;
}
@@ -194,7 +194,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
fn round(d: *Self, b: *const [64]u8) void {
var s: [64]u32 align(16) = undefined;
- for (@ptrCast(*align(1) const [16]u32, b), 0..) |*elem, i| {
+ for (@as(*align(1) const [16]u32, @ptrCast(b)), 0..) |*elem, i| {
s[i] = mem.readIntBig(u32, mem.asBytes(elem));
}
@@ -203,7 +203,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
.aarch64 => if (builtin.zig_backend != .stage2_c and comptime std.Target.aarch64.featureSetHas(builtin.cpu.features, .sha2)) {
var x: v4u32 = d.s[0..4].*;
var y: v4u32 = d.s[4..8].*;
- const s_v = @ptrCast(*[16]v4u32, &s);
+ const s_v = @as(*[16]v4u32, @ptrCast(&s));
comptime var k: u8 = 0;
inline while (k < 16) : (k += 1) {
@@ -241,7 +241,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
.x86_64 => if (builtin.zig_backend != .stage2_c and comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sha)) {
var x: v4u32 = [_]u32{ d.s[5], d.s[4], d.s[1], d.s[0] };
var y: v4u32 = [_]u32{ d.s[7], d.s[6], d.s[3], d.s[2] };
- const s_v = @ptrCast(*[16]v4u32, &s);
+ const s_v = @as(*[16]v4u32, @ptrCast(&s));
comptime var k: u8 = 0;
inline while (k < 16) : (k += 1) {
@@ -273,7 +273,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
: [x] "=x" (-> v4u32),
: [_] "0" (x),
[y] "x" (y),
- [_] "{xmm0}" (@bitCast(v4u32, @bitCast(u128, w) >> 64)),
+ [_] "{xmm0}" (@as(v4u32, @bitCast(@as(u128, @bitCast(w)) >> 64))),
);
}
@@ -624,7 +624,7 @@ fn Sha2x64(comptime params: Sha2Params64) type {
// Copy any remainder for next pass.
const b_slice = b[off..];
@memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
- d.buf_len += @intCast(u8, b[off..].len);
+ d.buf_len += @as(u8, @intCast(b[off..].len));
d.total_len += b.len;
}
@@ -651,9 +651,9 @@ fn Sha2x64(comptime params: Sha2Params64) type {
// Append message length.
var i: usize = 1;
var len = d.total_len >> 5;
- d.buf[127] = @intCast(u8, d.total_len & 0x1f) << 3;
+ d.buf[127] = @as(u8, @intCast(d.total_len & 0x1f)) << 3;
while (i < 16) : (i += 1) {
- d.buf[127 - i] = @intCast(u8, len & 0xff);
+ d.buf[127 - i] = @as(u8, @intCast(len & 0xff));
len >>= 8;
}
diff --git a/lib/std/crypto/siphash.zig b/lib/std/crypto/siphash.zig
index 70f4f2fd53..4399587397 100644
--- a/lib/std/crypto/siphash.zig
+++ b/lib/std/crypto/siphash.zig
@@ -83,13 +83,13 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
@call(.always_inline, round, .{ self, blob });
}
- self.msg_len +%= @truncate(u8, b.len);
+ self.msg_len +%= @as(u8, @truncate(b.len));
}
fn final(self: *Self, b: []const u8) T {
std.debug.assert(b.len < 8);
- self.msg_len +%= @truncate(u8, b.len);
+ self.msg_len +%= @as(u8, @truncate(b.len));
var buf = [_]u8{0} ** 8;
@memcpy(buf[0..b.len], b);
@@ -202,7 +202,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
const b_slice = b[off + aligned_len ..];
@memcpy(self.buf[self.buf_len..][0..b_slice.len], b_slice);
- self.buf_len += @intCast(u8, b_slice.len);
+ self.buf_len += @as(u8, @intCast(b_slice.len));
}
pub fn peek(self: Self) [mac_length]u8 {
@@ -329,7 +329,7 @@ test "siphash64-2-4 sanity" {
var buffer: [64]u8 = undefined;
for (vectors, 0..) |vector, i| {
- buffer[i] = @intCast(u8, i);
+ buffer[i] = @as(u8, @intCast(i));
var out: [siphash.mac_length]u8 = undefined;
siphash.create(&out, buffer[0..i], test_key);
@@ -409,7 +409,7 @@ test "siphash128-2-4 sanity" {
var buffer: [64]u8 = undefined;
for (vectors, 0..) |vector, i| {
- buffer[i] = @intCast(u8, i);
+ buffer[i] = @as(u8, @intCast(i));
var out: [siphash.mac_length]u8 = undefined;
siphash.create(&out, buffer[0..i], test_key[0..]);
@@ -420,7 +420,7 @@ test "siphash128-2-4 sanity" {
test "iterative non-divisible update" {
var buf: [1024]u8 = undefined;
for (&buf, 0..) |*e, i| {
- e.* = @truncate(u8, i);
+ e.* = @as(u8, @truncate(i));
}
const key = "0x128dad08f12307";
diff --git a/lib/std/crypto/tlcsprng.zig b/lib/std/crypto/tlcsprng.zig
index 54a30cfaba..344da9745d 100644
--- a/lib/std/crypto/tlcsprng.zig
+++ b/lib/std/crypto/tlcsprng.zig
@@ -102,7 +102,7 @@ fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
wipe_mem = mem.asBytes(&S.buf);
}
}
- const ctx = @ptrCast(*Context, wipe_mem.ptr);
+ const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
switch (ctx.init_state) {
.uninitialized => {
@@ -158,7 +158,7 @@ fn childAtForkHandler() callconv(.C) void {
}
fn fillWithCsprng(buffer: []u8) void {
- const ctx = @ptrCast(*Context, wipe_mem.ptr);
+ const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
return ctx.rng.fill(buffer);
}
@@ -174,7 +174,7 @@ fn initAndFill(buffer: []u8) void {
// the `std.options.cryptoRandomSeed` function is provided.
std.options.cryptoRandomSeed(&seed);
- const ctx = @ptrCast(*Context, wipe_mem.ptr);
+ const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
ctx.rng = Rng.init(seed);
std.crypto.utils.secureZero(u8, &seed);
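The tlcsprng hunks above reinterpret a raw byte buffer as a *Context with @ptrCast, the pointer type now supplied by @as; a minimal sketch in which the Context struct is a stand-in for the real one and the alignment is made explicit so the cast is unconditionally valid:

const std = @import("std");

const Context = struct { counter: u32 };

// The many-item u8 pointer is reinterpreted as a single *Context; the
// alignment guarantee is carried by the parameter type, not by the cast.
fn asContext(wipe_mem: []align(@alignOf(Context)) u8) *Context {
    std.debug.assert(wipe_mem.len >= @sizeOf(Context));
    return @as(*Context, @ptrCast(wipe_mem.ptr));
}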
diff --git a/lib/std/crypto/tls.zig b/lib/std/crypto/tls.zig
index 4c03c48973..eb5a6b4c1a 100644
--- a/lib/std/crypto/tls.zig
+++ b/lib/std/crypto/tls.zig
@@ -371,12 +371,12 @@ pub fn hkdfExpandLabel(
const tls13 = "tls13 ";
var buf: [2 + 1 + tls13.len + max_label_len + 1 + max_context_len]u8 = undefined;
mem.writeIntBig(u16, buf[0..2], len);
- buf[2] = @intCast(u8, tls13.len + label.len);
+ buf[2] = @as(u8, @intCast(tls13.len + label.len));
buf[3..][0..tls13.len].* = tls13.*;
var i: usize = 3 + tls13.len;
@memcpy(buf[i..][0..label.len], label);
i += label.len;
- buf[i] = @intCast(u8, context.len);
+ buf[i] = @as(u8, @intCast(context.len));
i += 1;
@memcpy(buf[i..][0..context.len], context);
i += context.len;
@@ -411,24 +411,24 @@ pub inline fn enum_array(comptime E: type, comptime tags: []const E) [2 + @sizeO
assert(@sizeOf(E) == 2);
var result: [tags.len * 2]u8 = undefined;
for (tags, 0..) |elem, i| {
- result[i * 2] = @truncate(u8, @intFromEnum(elem) >> 8);
- result[i * 2 + 1] = @truncate(u8, @intFromEnum(elem));
+ result[i * 2] = @as(u8, @truncate(@intFromEnum(elem) >> 8));
+ result[i * 2 + 1] = @as(u8, @truncate(@intFromEnum(elem)));
}
return array(2, result);
}
pub inline fn int2(x: u16) [2]u8 {
return .{
- @truncate(u8, x >> 8),
- @truncate(u8, x),
+ @as(u8, @truncate(x >> 8)),
+ @as(u8, @truncate(x)),
};
}
pub inline fn int3(x: u24) [3]u8 {
return .{
- @truncate(u8, x >> 16),
- @truncate(u8, x >> 8),
- @truncate(u8, x),
+ @as(u8, @truncate(x >> 16)),
+ @as(u8, @truncate(x >> 8)),
+ @as(u8, @truncate(x)),
};
}
@@ -513,7 +513,7 @@ pub const Decoder = struct {
.Enum => |info| {
const int = d.decode(info.tag_type);
if (info.is_exhaustive) @compileError("exhaustive enum cannot be used");
- return @enumFromInt(T, int);
+ return @as(T, @enumFromInt(int));
},
else => @compileError("unsupported type: " ++ @typeName(T)),
}
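Two patterns from the tls.zig hunks above: enum decoding with @enumFromInt (destination taken from @as, valid here because the enum is non-exhaustive, as the decode() branch checks) and the big-endian u16 split in int2. The enum below is a reduced stand-in, not the real tls.ContentType:

const std = @import("std");

const ContentType = enum(u8) { alert = 21, handshake = 22, application_data = 23, _ };

// Any byte maps to a tag because the enum is non-exhaustive.
fn contentType(byte: u8) ContentType {
    return @as(ContentType, @enumFromInt(byte));
}

// Same body as int2 above: high byte first, each half narrowed by @truncate.
fn int2(x: u16) [2]u8 {
    return .{ @as(u8, @truncate(x >> 8)), @as(u8, @truncate(x)) };
}

test "int2 is big-endian" {
    const b = int2(0x1234);
    try std.testing.expectEqual(@as(u8, 0x12), b[0]);
    try std.testing.expectEqual(@as(u8, 0x34), b[1]);
}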
diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig
index 94ecf0d3ef..6d5bb86fed 100644
--- a/lib/std/crypto/tls/Client.zig
+++ b/lib/std/crypto/tls/Client.zig
@@ -140,7 +140,7 @@ pub fn InitError(comptime Stream: type) type {
///
/// `host` is only borrowed during this function call.
pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) InitError(@TypeOf(stream))!Client {
- const host_len = @intCast(u16, host.len);
+ const host_len = @as(u16, @intCast(host.len));
var random_buffer: [128]u8 = undefined;
crypto.random.bytes(&random_buffer);
@@ -194,7 +194,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
int2(host_len);
const extensions_header =
- int2(@intCast(u16, extensions_payload.len + host_len)) ++
+ int2(@as(u16, @intCast(extensions_payload.len + host_len))) ++
extensions_payload;
const legacy_compression_methods = 0x0100;
@@ -209,13 +209,13 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
const out_handshake =
[_]u8{@intFromEnum(tls.HandshakeType.client_hello)} ++
- int3(@intCast(u24, client_hello.len + host_len)) ++
+ int3(@as(u24, @intCast(client_hello.len + host_len))) ++
client_hello;
const plaintext_header = [_]u8{
@intFromEnum(tls.ContentType.handshake),
0x03, 0x01, // legacy_record_version
- } ++ int2(@intCast(u16, out_handshake.len + host_len)) ++ out_handshake;
+ } ++ int2(@as(u16, @intCast(out_handshake.len + host_len))) ++ out_handshake;
{
var iovecs = [_]std.os.iovec_const{
@@ -457,7 +457,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
const auth_tag = record_decoder.array(P.AEAD.tag_length).*;
const V = @Vector(P.AEAD.nonce_length, u8);
const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8);
- const operand: V = pad ++ @bitCast([8]u8, big(read_seq));
+ const operand: V = pad ++ @as([8]u8, @bitCast(big(read_seq)));
read_seq += 1;
const nonce = @as(V, p.server_handshake_iv) ^ operand;
P.AEAD.decrypt(cleartext, ciphertext, auth_tag, record_header, nonce, p.server_handshake_key) catch
@@ -466,7 +466,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
},
};
- const inner_ct = @enumFromInt(tls.ContentType, cleartext[cleartext.len - 1]);
+ const inner_ct = @as(tls.ContentType, @enumFromInt(cleartext[cleartext.len - 1]));
if (inner_ct != .handshake) return error.TlsUnexpectedMessage;
var ctd = tls.Decoder.fromTheirSlice(cleartext[0 .. cleartext.len - 1]);
@@ -520,7 +520,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
const subject_cert: Certificate = .{
.buffer = certd.buf,
- .index = @intCast(u32, certd.idx),
+ .index = @as(u32, @intCast(certd.idx)),
};
const subject = try subject_cert.parse();
if (cert_index == 0) {
@@ -534,7 +534,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
if (pub_key.len > main_cert_pub_key_buf.len)
return error.CertificatePublicKeyInvalid;
@memcpy(main_cert_pub_key_buf[0..pub_key.len], pub_key);
- main_cert_pub_key_len = @intCast(@TypeOf(main_cert_pub_key_len), pub_key.len);
+ main_cert_pub_key_len = @as(@TypeOf(main_cert_pub_key_len), @intCast(pub_key.len));
} else {
try prev_cert.verify(subject, now_sec);
}
@@ -679,7 +679,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
.write_seq = 0,
.partial_cleartext_idx = 0,
.partial_ciphertext_idx = 0,
- .partial_ciphertext_end = @intCast(u15, leftover.len),
+ .partial_ciphertext_end = @as(u15, @intCast(leftover.len)),
.received_close_notify = false,
.application_cipher = app_cipher,
.partially_read_buffer = undefined,
@@ -797,11 +797,11 @@ fn prepareCiphertextRecord(
const overhead_len = tls.record_header_len + P.AEAD.tag_length + 1;
const close_notify_alert_reserved = tls.close_notify_alert.len + overhead_len;
while (true) {
- const encrypted_content_len = @intCast(u16, @min(
+ const encrypted_content_len = @as(u16, @intCast(@min(
@min(bytes.len - bytes_i, max_ciphertext_len - 1),
ciphertext_buf.len - close_notify_alert_reserved -
overhead_len - ciphertext_end,
- ));
+ )));
if (encrypted_content_len == 0) return .{
.iovec_end = iovec_end,
.ciphertext_end = ciphertext_end,
@@ -826,7 +826,7 @@ fn prepareCiphertextRecord(
const auth_tag = ciphertext_buf[ciphertext_end..][0..P.AEAD.tag_length];
ciphertext_end += auth_tag.len;
const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8);
- const operand: V = pad ++ @bitCast([8]u8, big(c.write_seq));
+ const operand: V = pad ++ @as([8]u8, @bitCast(big(c.write_seq)));
c.write_seq += 1; // TODO send key_update on overflow
const nonce = @as(V, p.client_iv) ^ operand;
P.AEAD.encrypt(ciphertext, auth_tag, cleartext, ad, nonce, p.client_key);
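The nonce derivation in the hunk above turns a sequence number into 8 bytes via @bitCast, with the [8]u8 destination supplied by @as; a reduced sketch that assumes the big() helper used above is a byte swap to big-endian, spelled here with std.mem.nativeToBig:

const std = @import("std");

// Reinterpret the 8 bytes of a u64 as an array; the value is byte-swapped
// first so the result is big-endian regardless of host endianness.
fn seqBytes(seq: u64) [8]u8 {
    const be = std.mem.nativeToBig(u64, seq);
    return @as([8]u8, @bitCast(be));
}

test "seqBytes is big-endian" {
    const b = seqBytes(1);
    try std.testing.expectEqual(@as(u8, 0), b[0]);
    try std.testing.expectEqual(@as(u8, 1), b[7]);
}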
@@ -920,7 +920,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
// Give away the buffered cleartext we have, if any.
const partial_cleartext = c.partially_read_buffer[c.partial_cleartext_idx..c.partial_ciphertext_idx];
if (partial_cleartext.len > 0) {
- const amt = @intCast(u15, vp.put(partial_cleartext));
+ const amt = @as(u15, @intCast(vp.put(partial_cleartext)));
c.partial_cleartext_idx += amt;
if (c.partial_cleartext_idx == c.partial_ciphertext_idx and
@@ -1037,7 +1037,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
in = 0;
continue;
}
- const ct = @enumFromInt(tls.ContentType, frag[in]);
+ const ct = @as(tls.ContentType, @enumFromInt(frag[in]));
in += 1;
const legacy_version = mem.readIntBig(u16, frag[in..][0..2]);
in += 2;
@@ -1070,8 +1070,8 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
switch (ct) {
.alert => {
if (in + 2 > frag.len) return error.TlsDecodeError;
- const level = @enumFromInt(tls.AlertLevel, frag[in]);
- const desc = @enumFromInt(tls.AlertDescription, frag[in + 1]);
+ const level = @as(tls.AlertLevel, @enumFromInt(frag[in]));
+ const desc = @as(tls.AlertDescription, @enumFromInt(frag[in + 1]));
_ = level;
try desc.toError();
@@ -1089,7 +1089,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
in += ciphertext_len;
const auth_tag = frag[in..][0..P.AEAD.tag_length].*;
const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8);
- const operand: V = pad ++ @bitCast([8]u8, big(c.read_seq));
+ const operand: V = pad ++ @as([8]u8, @bitCast(big(c.read_seq)));
const nonce: [P.AEAD.nonce_length]u8 = @as(V, p.server_iv) ^ operand;
const out_buf = vp.peek();
const cleartext_buf = if (ciphertext.len <= out_buf.len)
@@ -1105,11 +1105,11 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
c.read_seq = try std.math.add(u64, c.read_seq, 1);
- const inner_ct = @enumFromInt(tls.ContentType, cleartext[cleartext.len - 1]);
+ const inner_ct = @as(tls.ContentType, @enumFromInt(cleartext[cleartext.len - 1]));
switch (inner_ct) {
.alert => {
- const level = @enumFromInt(tls.AlertLevel, cleartext[0]);
- const desc = @enumFromInt(tls.AlertDescription, cleartext[1]);
+ const level = @as(tls.AlertLevel, @enumFromInt(cleartext[0]));
+ const desc = @as(tls.AlertDescription, @enumFromInt(cleartext[1]));
if (desc == .close_notify) {
c.received_close_notify = true;
c.partial_ciphertext_end = c.partial_ciphertext_idx;
@@ -1124,7 +1124,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
.handshake => {
var ct_i: usize = 0;
while (true) {
- const handshake_type = @enumFromInt(tls.HandshakeType, cleartext[ct_i]);
+ const handshake_type = @as(tls.HandshakeType, @enumFromInt(cleartext[ct_i]));
ct_i += 1;
const handshake_len = mem.readIntBig(u24, cleartext[ct_i..][0..3]);
ct_i += 3;
@@ -1148,7 +1148,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
}
c.read_seq = 0;
- switch (@enumFromInt(tls.KeyUpdateRequest, handshake[0])) {
+ switch (@as(tls.KeyUpdateRequest, @enumFromInt(handshake[0]))) {
.update_requested => {
switch (c.application_cipher) {
inline else => |*p| {
@@ -1186,13 +1186,13 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
c.partially_read_buffer[c.partial_ciphertext_idx..][0..msg.len],
msg,
);
- c.partial_ciphertext_idx = @intCast(@TypeOf(c.partial_ciphertext_idx), c.partial_ciphertext_idx + msg.len);
+ c.partial_ciphertext_idx = @as(@TypeOf(c.partial_ciphertext_idx), @intCast(c.partial_ciphertext_idx + msg.len));
} else {
const amt = vp.put(msg);
if (amt < msg.len) {
const rest = msg[amt..];
c.partial_cleartext_idx = 0;
- c.partial_ciphertext_idx = @intCast(@TypeOf(c.partial_ciphertext_idx), rest.len);
+ c.partial_ciphertext_idx = @as(@TypeOf(c.partial_ciphertext_idx), @intCast(rest.len));
@memcpy(c.partially_read_buffer[0..rest.len], rest);
}
}
@@ -1220,12 +1220,12 @@ fn finishRead(c: *Client, frag: []const u8, in: usize, out: usize) usize {
const saved_buf = frag[in..];
if (c.partial_ciphertext_idx > c.partial_cleartext_idx) {
// There is cleartext at the beginning already which we need to preserve.
- c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), c.partial_ciphertext_idx + saved_buf.len);
+ c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(c.partial_ciphertext_idx + saved_buf.len));
@memcpy(c.partially_read_buffer[c.partial_ciphertext_idx..][0..saved_buf.len], saved_buf);
} else {
c.partial_cleartext_idx = 0;
c.partial_ciphertext_idx = 0;
- c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), saved_buf.len);
+ c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(saved_buf.len));
@memcpy(c.partially_read_buffer[0..saved_buf.len], saved_buf);
}
return out;
@@ -1235,14 +1235,14 @@ fn finishRead(c: *Client, frag: []const u8, in: usize, out: usize) usize {
fn finishRead2(c: *Client, first: []const u8, frag1: []const u8, out: usize) usize {
if (c.partial_ciphertext_idx > c.partial_cleartext_idx) {
// There is cleartext at the beginning already which we need to preserve.
- c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), c.partial_ciphertext_idx + first.len + frag1.len);
+ c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(c.partial_ciphertext_idx + first.len + frag1.len));
// TODO: eliminate this call to copyForwards
std.mem.copyForwards(u8, c.partially_read_buffer[c.partial_ciphertext_idx..][0..first.len], first);
@memcpy(c.partially_read_buffer[c.partial_ciphertext_idx + first.len ..][0..frag1.len], frag1);
} else {
c.partial_cleartext_idx = 0;
c.partial_ciphertext_idx = 0;
- c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), first.len + frag1.len);
+ c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(first.len + frag1.len));
// TODO: eliminate this call to copyForwards
std.mem.copyForwards(u8, c.partially_read_buffer[0..first.len], first);
@memcpy(c.partially_read_buffer[first.len..][0..frag1.len], frag1);
diff --git a/lib/std/crypto/utils.zig b/lib/std/crypto/utils.zig
index 14a235e418..ab1b6eab6a 100644
--- a/lib/std/crypto/utils.zig
+++ b/lib/std/crypto/utils.zig
@@ -24,7 +24,7 @@ pub fn timingSafeEql(comptime T: type, a: T, b: T) bool {
const s = @typeInfo(C).Int.bits;
const Cu = std.meta.Int(.unsigned, s);
const Cext = std.meta.Int(.unsigned, s + 1);
- return @bitCast(bool, @truncate(u1, (@as(Cext, @bitCast(Cu, acc)) -% 1) >> s));
+ return @as(bool, @bitCast(@as(u1, @truncate((@as(Cext, @as(Cu, @bitCast(acc))) -% 1) >> s))));
},
.Vector => |info| {
const C = info.child;
@@ -35,7 +35,7 @@ pub fn timingSafeEql(comptime T: type, a: T, b: T) bool {
const s = @typeInfo(C).Int.bits;
const Cu = std.meta.Int(.unsigned, s);
const Cext = std.meta.Int(.unsigned, s + 1);
- return @bitCast(bool, @truncate(u1, (@as(Cext, @bitCast(Cu, acc)) -% 1) >> s));
+ return @as(bool, @bitCast(@as(u1, @truncate((@as(Cext, @as(Cu, @bitCast(acc))) -% 1) >> s))));
},
else => {
@compileError("Only arrays and vectors can be compared");
@@ -60,14 +60,14 @@ pub fn timingSafeCompare(comptime T: type, a: []const T, b: []const T, endian: E
i -= 1;
const x1 = a[i];
const x2 = b[i];
- gt |= @truncate(T, (@as(Cext, x2) -% @as(Cext, x1)) >> bits) & eq;
- eq &= @truncate(T, (@as(Cext, (x2 ^ x1)) -% 1) >> bits);
+ gt |= @as(T, @truncate((@as(Cext, x2) -% @as(Cext, x1)) >> bits)) & eq;
+ eq &= @as(T, @truncate((@as(Cext, (x2 ^ x1)) -% 1) >> bits));
}
} else {
for (a, 0..) |x1, i| {
const x2 = b[i];
- gt |= @truncate(T, (@as(Cext, x2) -% @as(Cext, x1)) >> bits) & eq;
- eq &= @truncate(T, (@as(Cext, (x2 ^ x1)) -% 1) >> bits);
+ gt |= @as(T, @truncate((@as(Cext, x2) -% @as(Cext, x1)) >> bits)) & eq;
+ eq &= @as(T, @truncate((@as(Cext, (x2 ^ x1)) -% 1) >> bits));
}
}
if (gt != 0) {
@@ -102,7 +102,7 @@ pub fn timingSafeAdd(comptime T: type, a: []const T, b: []const T, result: []T,
carry = ov1[1] | ov2[1];
}
}
- return @bitCast(bool, carry);
+ return @as(bool, @bitCast(carry));
}
/// Subtract two integers serialized as arrays of the same size, in constant time.
@@ -129,7 +129,7 @@ pub fn timingSafeSub(comptime T: type, a: []const T, b: []const T, result: []T,
borrow = ov1[1] | ov2[1];
}
}
- return @bitCast(bool, borrow);
+ return @as(bool, @bitCast(borrow));
}
/// Sets a slice to zeroes.
diff --git a/lib/std/cstr.zig b/lib/std/cstr.zig
index 9bd98a72b7..0888edf10d 100644
--- a/lib/std/cstr.zig
+++ b/lib/std/cstr.zig
@@ -89,12 +89,12 @@ pub const NullTerminated2DArray = struct {
return NullTerminated2DArray{
.allocator = allocator,
.byte_count = byte_count,
- .ptr = @ptrCast(?[*:null]?[*:0]u8, buf.ptr),
+ .ptr = @as(?[*:null]?[*:0]u8, @ptrCast(buf.ptr)),
};
}
pub fn deinit(self: *NullTerminated2DArray) void {
- const buf = @ptrCast([*]u8, self.ptr);
+ const buf = @as([*]u8, @ptrCast(self.ptr));
self.allocator.free(buf[0..self.byte_count]);
}
};
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index e0726d5444..44f6ce1367 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -460,8 +460,8 @@ pub const StackIterator = struct {
// We are unable to determine validity of memory for freestanding targets
if (native_os == .freestanding) return true;
- const aligned_address = address & ~@intCast(usize, (mem.page_size - 1));
- const aligned_memory = @ptrFromInt([*]align(mem.page_size) u8, aligned_address)[0..mem.page_size];
+ const aligned_address = address & ~@as(usize, @intCast((mem.page_size - 1)));
+ const aligned_memory = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_address))[0..mem.page_size];
if (native_os != .windows) {
if (native_os != .wasi) {
@@ -511,7 +511,7 @@ pub const StackIterator = struct {
if (fp == 0 or !mem.isAligned(fp, @alignOf(usize)) or !isValidMemory(fp))
return null;
- const new_fp = math.add(usize, @ptrFromInt(*const usize, fp).*, fp_bias) catch return null;
+ const new_fp = math.add(usize, @as(*const usize, @ptrFromInt(fp)).*, fp_bias) catch return null;
// Sanity check: the stack grows down thus all the parent frames must be
// be at addresses that are greater (or equal) than the previous one.
@@ -520,9 +520,9 @@ pub const StackIterator = struct {
if (new_fp != 0 and new_fp < self.fp)
return null;
- const new_pc = @ptrFromInt(
+ const new_pc = @as(
*const usize,
- math.add(usize, fp, pc_offset) catch return null,
+ @ptrFromInt(math.add(usize, fp, pc_offset) catch return null),
).*;
self.fp = new_fp;
@@ -555,10 +555,10 @@ pub fn writeCurrentStackTrace(
pub noinline fn walkStackWindows(addresses: []usize) usize {
if (builtin.cpu.arch == .x86) {
// RtlVirtualUnwind doesn't exist on x86
- return windows.ntdll.RtlCaptureStackBackTrace(0, addresses.len, @ptrCast(**anyopaque, addresses.ptr), null);
+ return windows.ntdll.RtlCaptureStackBackTrace(0, addresses.len, @as(**anyopaque, @ptrCast(addresses.ptr)), null);
}
- const tib = @ptrCast(*const windows.NT_TIB, &windows.teb().Reserved1);
+ const tib = @as(*const windows.NT_TIB, @ptrCast(&windows.teb().Reserved1));
var context: windows.CONTEXT = std.mem.zeroes(windows.CONTEXT);
windows.ntdll.RtlCaptureContext(&context);
@@ -584,7 +584,7 @@ pub noinline fn walkStackWindows(addresses: []usize) usize {
);
} else {
// leaf function
- context.setIp(@ptrFromInt(*u64, current_regs.sp).*);
+ context.setIp(@as(*u64, @ptrFromInt(current_regs.sp)).*);
context.setSp(current_regs.sp + @sizeOf(usize));
}
@@ -734,7 +734,7 @@ fn printLineInfo(
if (printLineFromFile(out_stream, li)) {
if (li.column > 0) {
// The caret already takes one char
- const space_needed = @intCast(usize, li.column - 1);
+ const space_needed = @as(usize, @intCast(li.column - 1));
try out_stream.writeByteNTimes(' ', space_needed);
try tty_config.setColor(out_stream, .green);
@@ -883,7 +883,7 @@ fn chopSlice(ptr: []const u8, offset: u64, size: u64) error{Overflow}![]const u8
pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugInfo {
nosuspend {
const mapped_mem = try mapWholeFile(elf_file);
- const hdr = @ptrCast(*const elf.Ehdr, &mapped_mem[0]);
+ const hdr = @as(*const elf.Ehdr, @ptrCast(&mapped_mem[0]));
if (!mem.eql(u8, hdr.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
if (hdr.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;
@@ -896,14 +896,13 @@ pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugIn
const shoff = hdr.e_shoff;
const str_section_off = shoff + @as(u64, hdr.e_shentsize) * @as(u64, hdr.e_shstrndx);
- const str_shdr = @ptrCast(
- *const elf.Shdr,
- @alignCast(@alignOf(elf.Shdr), &mapped_mem[math.cast(usize, str_section_off) orelse return error.Overflow]),
- );
+ const str_shdr: *const elf.Shdr = @ptrCast(@alignCast(
+ &mapped_mem[math.cast(usize, str_section_off) orelse return error.Overflow],
+ ));
const header_strings = mapped_mem[str_shdr.sh_offset .. str_shdr.sh_offset + str_shdr.sh_size];
- const shdrs = @ptrCast(
+ const shdrs = @as(
[*]const elf.Shdr,
- @alignCast(@alignOf(elf.Shdr), &mapped_mem[shoff]),
+ @ptrCast(@alignCast(&mapped_mem[shoff])),
)[0..hdr.e_shnum];
var opt_debug_info: ?[]const u8 = null;
@@ -982,10 +981,7 @@ pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugIn
fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugInfo {
const mapped_mem = try mapWholeFile(macho_file);
- const hdr = @ptrCast(
- *const macho.mach_header_64,
- @alignCast(@alignOf(macho.mach_header_64), mapped_mem.ptr),
- );
+ const hdr: *const macho.mach_header_64 = @ptrCast(@alignCast(mapped_mem.ptr));
if (hdr.magic != macho.MH_MAGIC_64)
return error.InvalidDebugInfo;
@@ -998,9 +994,9 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn
else => {},
} else return error.MissingDebugInfo;
- const syms = @ptrCast(
+ const syms = @as(
[*]const macho.nlist_64,
- @alignCast(@alignOf(macho.nlist_64), &mapped_mem[symtab.symoff]),
+ @ptrCast(@alignCast(&mapped_mem[symtab.symoff])),
)[0..symtab.nsyms];
const strings = mapped_mem[symtab.stroff..][0 .. symtab.strsize - 1 :0];
@@ -1055,7 +1051,7 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn
},
.fun_strx => {
state = .fun_size;
- last_sym.size = @intCast(u32, sym.n_value);
+ last_sym.size = @as(u32, @intCast(sym.n_value));
},
else => return error.InvalidDebugInfo,
}
@@ -1283,10 +1279,10 @@ pub const DebugInfo = struct {
var it = macho.LoadCommandIterator{
.ncmds = header.ncmds,
- .buffer = @alignCast(@alignOf(u64), @ptrFromInt(
+ .buffer = @alignCast(@as(
[*]u8,
- @intFromPtr(header) + @sizeOf(macho.mach_header_64),
- ))[0..header.sizeofcmds],
+ @ptrFromInt(@intFromPtr(header) + @sizeOf(macho.mach_header_64)),
+ )[0..header.sizeofcmds]),
};
while (it.next()) |cmd| switch (cmd.cmd()) {
.SEGMENT_64 => {
@@ -1332,7 +1328,7 @@ pub const DebugInfo = struct {
return obj_di;
}
- const mapped_module = @ptrFromInt([*]const u8, module.base_address)[0..module.size];
+ const mapped_module = @as([*]const u8, @ptrFromInt(module.base_address))[0..module.size];
const obj_di = try self.allocator.create(ModuleDebugInfo);
errdefer self.allocator.destroy(obj_di);
@@ -1465,10 +1461,7 @@ pub const ModuleDebugInfo = switch (native_os) {
const o_file = try fs.cwd().openFile(o_file_path, .{ .intended_io_mode = .blocking });
const mapped_mem = try mapWholeFile(o_file);
- const hdr = @ptrCast(
- *const macho.mach_header_64,
- @alignCast(@alignOf(macho.mach_header_64), mapped_mem.ptr),
- );
+ const hdr: *const macho.mach_header_64 = @ptrCast(@alignCast(mapped_mem.ptr));
if (hdr.magic != std.macho.MH_MAGIC_64)
return error.InvalidDebugInfo;
@@ -1487,21 +1480,18 @@ pub const ModuleDebugInfo = switch (native_os) {
if (segcmd == null or symtabcmd == null) return error.MissingDebugInfo;
// Parse symbols
- const strtab = @ptrCast(
+ const strtab = @as(
[*]const u8,
- &mapped_mem[symtabcmd.?.stroff],
+ @ptrCast(&mapped_mem[symtabcmd.?.stroff]),
)[0 .. symtabcmd.?.strsize - 1 :0];
- const symtab = @ptrCast(
+ const symtab = @as(
[*]const macho.nlist_64,
- @alignCast(
- @alignOf(macho.nlist_64),
- &mapped_mem[symtabcmd.?.symoff],
- ),
+ @ptrCast(@alignCast(&mapped_mem[symtabcmd.?.symoff])),
)[0..symtabcmd.?.nsyms];
// TODO handle tentative (common) symbols
var addr_table = std.StringHashMap(u64).init(allocator);
- try addr_table.ensureTotalCapacity(@intCast(u32, symtab.len));
+ try addr_table.ensureTotalCapacity(@as(u32, @intCast(symtab.len)));
for (symtab) |sym| {
if (sym.n_strx == 0) continue;
if (sym.undf() or sym.tentative() or sym.abs()) continue;
@@ -1943,49 +1933,49 @@ fn dumpSegfaultInfoPosix(sig: i32, addr: usize, ctx_ptr: ?*const anyopaque) void
switch (native_arch) {
.x86 => {
- const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
- const ip = @intCast(usize, ctx.mcontext.gregs[os.REG.EIP]);
- const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.EBP]);
+ const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
+ const ip = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EIP]));
+ const bp = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EBP]));
dumpStackTraceFromBase(bp, ip);
},
.x86_64 => {
- const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+ const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
const ip = switch (native_os) {
- .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]),
- .freebsd => @intCast(usize, ctx.mcontext.rip),
- .openbsd => @intCast(usize, ctx.sc_rip),
- .macos => @intCast(usize, ctx.mcontext.ss.rip),
+ .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RIP])),
+ .freebsd => @as(usize, @intCast(ctx.mcontext.rip)),
+ .openbsd => @as(usize, @intCast(ctx.sc_rip)),
+ .macos => @as(usize, @intCast(ctx.mcontext.ss.rip)),
else => unreachable,
};
const bp = switch (native_os) {
- .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]),
- .openbsd => @intCast(usize, ctx.sc_rbp),
- .freebsd => @intCast(usize, ctx.mcontext.rbp),
- .macos => @intCast(usize, ctx.mcontext.ss.rbp),
+ .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RBP])),
+ .openbsd => @as(usize, @intCast(ctx.sc_rbp)),
+ .freebsd => @as(usize, @intCast(ctx.mcontext.rbp)),
+ .macos => @as(usize, @intCast(ctx.mcontext.ss.rbp)),
else => unreachable,
};
dumpStackTraceFromBase(bp, ip);
},
.arm => {
- const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
- const ip = @intCast(usize, ctx.mcontext.arm_pc);
- const bp = @intCast(usize, ctx.mcontext.arm_fp);
+ const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
+ const ip = @as(usize, @intCast(ctx.mcontext.arm_pc));
+ const bp = @as(usize, @intCast(ctx.mcontext.arm_fp));
dumpStackTraceFromBase(bp, ip);
},
.aarch64 => {
- const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+ const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
const ip = switch (native_os) {
- .macos => @intCast(usize, ctx.mcontext.ss.pc),
- .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.PC]),
- .freebsd => @intCast(usize, ctx.mcontext.gpregs.elr),
- else => @intCast(usize, ctx.mcontext.pc),
+ .macos => @as(usize, @intCast(ctx.mcontext.ss.pc)),
+ .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.PC])),
+ .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.elr)),
+ else => @as(usize, @intCast(ctx.mcontext.pc)),
};
// x29 is the ABI-designated frame pointer
const bp = switch (native_os) {
- .macos => @intCast(usize, ctx.mcontext.ss.fp),
- .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.FP]),
- .freebsd => @intCast(usize, ctx.mcontext.gpregs.x[os.REG.FP]),
- else => @intCast(usize, ctx.mcontext.regs[29]),
+ .macos => @as(usize, @intCast(ctx.mcontext.ss.fp)),
+ .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.FP])),
+ .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.x[os.REG.FP])),
+ else => @as(usize, @intCast(ctx.mcontext.regs[29])),
};
dumpStackTraceFromBase(bp, ip);
},
diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig
index 4de08b25d7..aa1ac6959f 100644
--- a/lib/std/dwarf.zig
+++ b/lib/std/dwarf.zig
@@ -462,7 +462,7 @@ const LineNumberProgram = struct {
});
return debug.LineInfo{
- .line = if (self.prev_line >= 0) @intCast(u64, self.prev_line) else 0,
+ .line = if (self.prev_line >= 0) @as(u64, @intCast(self.prev_line)) else 0,
.column = self.prev_column,
.file_name = file_name,
};
@@ -533,7 +533,7 @@ fn parseFormValueConstant(in_stream: anytype, signed: bool, endian: std.builtin.
-1 => blk: {
if (signed) {
const x = try nosuspend leb.readILEB128(i64, in_stream);
- break :blk @bitCast(u64, x);
+ break :blk @as(u64, @bitCast(x));
} else {
const x = try nosuspend leb.readULEB128(u64, in_stream);
break :blk x;
@@ -939,12 +939,12 @@ pub const DwarfInfo = struct {
.Const => |c| try c.asUnsignedLe(),
.RangeListOffset => |idx| off: {
if (compile_unit.is_64) {
- const offset_loc = @intCast(usize, compile_unit.rnglists_base + 8 * idx);
+ const offset_loc = @as(usize, @intCast(compile_unit.rnglists_base + 8 * idx));
if (offset_loc + 8 > debug_ranges.len) return badDwarf();
const offset = mem.readInt(u64, debug_ranges[offset_loc..][0..8], di.endian);
break :off compile_unit.rnglists_base + offset;
} else {
- const offset_loc = @intCast(usize, compile_unit.rnglists_base + 4 * idx);
+ const offset_loc = @as(usize, @intCast(compile_unit.rnglists_base + 4 * idx));
if (offset_loc + 4 > debug_ranges.len) return badDwarf();
const offset = mem.readInt(u32, debug_ranges[offset_loc..][0..4], di.endian);
break :off compile_unit.rnglists_base + offset;
@@ -1134,7 +1134,7 @@ pub const DwarfInfo = struct {
),
};
if (attr.form_id == FORM.implicit_const) {
- result.attrs.items[i].value.Const.payload = @bitCast(u64, attr.payload);
+ result.attrs.items[i].value.Const.payload = @as(u64, @bitCast(attr.payload));
}
}
return result;
@@ -1438,7 +1438,7 @@ pub const DwarfInfo = struct {
const addr_size = debug_addr[compile_unit.addr_base - 2];
const seg_size = debug_addr[compile_unit.addr_base - 1];
- const byte_offset = @intCast(usize, compile_unit.addr_base + (addr_size + seg_size) * index);
+ const byte_offset = @as(usize, @intCast(compile_unit.addr_base + (addr_size + seg_size) * index));
if (byte_offset + addr_size > debug_addr.len) return badDwarf();
return switch (addr_size) {
1 => debug_addr[byte_offset],
diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig
index 38c5de9cad..3342ac3f6d 100644
--- a/lib/std/dynamic_library.zig
+++ b/lib/std/dynamic_library.zig
@@ -71,18 +71,18 @@ pub fn linkmap_iterator(phdrs: []elf.Phdr) !LinkMap.Iterator {
while (_DYNAMIC[i].d_tag != elf.DT_NULL) : (i += 1) {
switch (_DYNAMIC[i].d_tag) {
elf.DT_DEBUG => {
- const ptr = @ptrFromInt(?*RDebug, _DYNAMIC[i].d_val);
+ const ptr = @as(?*RDebug, @ptrFromInt(_DYNAMIC[i].d_val));
if (ptr) |r_debug| {
if (r_debug.r_version != 1) return error.InvalidExe;
break :init r_debug.r_map;
}
},
elf.DT_PLTGOT => {
- const ptr = @ptrFromInt(?[*]usize, _DYNAMIC[i].d_val);
+ const ptr = @as(?[*]usize, @ptrFromInt(_DYNAMIC[i].d_val));
if (ptr) |got_table| {
// The address to the link_map structure is stored in
// the second slot
- break :init @ptrFromInt(?*LinkMap, got_table[1]);
+ break :init @as(?*LinkMap, @ptrFromInt(got_table[1]));
}
},
else => {},
@@ -132,7 +132,7 @@ pub const ElfDynLib = struct {
);
defer os.munmap(file_bytes);
- const eh = @ptrCast(*elf.Ehdr, file_bytes.ptr);
+ const eh = @as(*elf.Ehdr, @ptrCast(file_bytes.ptr));
if (!mem.eql(u8, eh.e_ident[0..4], elf.MAGIC)) return error.NotElfFile;
if (eh.e_type != elf.ET.DYN) return error.NotDynamicLibrary;
@@ -149,10 +149,10 @@ pub const ElfDynLib = struct {
i += 1;
ph_addr += eh.e_phentsize;
}) {
- const ph = @ptrFromInt(*elf.Phdr, ph_addr);
+ const ph = @as(*elf.Phdr, @ptrFromInt(ph_addr));
switch (ph.p_type) {
elf.PT_LOAD => virt_addr_end = @max(virt_addr_end, ph.p_vaddr + ph.p_memsz),
- elf.PT_DYNAMIC => maybe_dynv = @ptrFromInt([*]usize, elf_addr + ph.p_offset),
+ elf.PT_DYNAMIC => maybe_dynv = @as([*]usize, @ptrFromInt(elf_addr + ph.p_offset)),
else => {},
}
}
@@ -180,7 +180,7 @@ pub const ElfDynLib = struct {
i += 1;
ph_addr += eh.e_phentsize;
}) {
- const ph = @ptrFromInt(*elf.Phdr, ph_addr);
+ const ph = @as(*elf.Phdr, @ptrFromInt(ph_addr));
switch (ph.p_type) {
elf.PT_LOAD => {
// The VirtAddr may not be page-aligned; in such case there will be
@@ -188,7 +188,7 @@ pub const ElfDynLib = struct {
const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1);
const extra_bytes = (base + ph.p_vaddr) - aligned_addr;
const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size);
- const ptr = @ptrFromInt([*]align(mem.page_size) u8, aligned_addr);
+ const ptr = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_addr));
const prot = elfToMmapProt(ph.p_flags);
if ((ph.p_flags & elf.PF_W) == 0) {
// If it does not need write access, it can be mapped from the fd.
@@ -228,11 +228,11 @@ pub const ElfDynLib = struct {
while (dynv[i] != 0) : (i += 2) {
const p = base + dynv[i + 1];
switch (dynv[i]) {
- elf.DT_STRTAB => maybe_strings = @ptrFromInt([*:0]u8, p),
- elf.DT_SYMTAB => maybe_syms = @ptrFromInt([*]elf.Sym, p),
- elf.DT_HASH => maybe_hashtab = @ptrFromInt([*]os.Elf_Symndx, p),
- elf.DT_VERSYM => maybe_versym = @ptrFromInt([*]u16, p),
- elf.DT_VERDEF => maybe_verdef = @ptrFromInt(*elf.Verdef, p),
+ elf.DT_STRTAB => maybe_strings = @as([*:0]u8, @ptrFromInt(p)),
+ elf.DT_SYMTAB => maybe_syms = @as([*]elf.Sym, @ptrFromInt(p)),
+ elf.DT_HASH => maybe_hashtab = @as([*]os.Elf_Symndx, @ptrFromInt(p)),
+ elf.DT_VERSYM => maybe_versym = @as([*]u16, @ptrFromInt(p)),
+ elf.DT_VERDEF => maybe_verdef = @as(*elf.Verdef, @ptrFromInt(p)),
else => {},
}
}
@@ -261,7 +261,7 @@ pub const ElfDynLib = struct {
pub fn lookup(self: *ElfDynLib, comptime T: type, name: [:0]const u8) ?T {
if (self.lookupAddress("", name)) |symbol| {
- return @ptrFromInt(T, symbol);
+ return @as(T, @ptrFromInt(symbol));
} else {
return null;
}
@@ -276,8 +276,8 @@ pub const ElfDynLib = struct {
var i: usize = 0;
while (i < self.hashtab[1]) : (i += 1) {
- if (0 == (@as(u32, 1) << @intCast(u5, self.syms[i].st_info & 0xf) & OK_TYPES)) continue;
- if (0 == (@as(u32, 1) << @intCast(u5, self.syms[i].st_info >> 4) & OK_BINDS)) continue;
+ if (0 == (@as(u32, 1) << @as(u5, @intCast(self.syms[i].st_info & 0xf)) & OK_TYPES)) continue;
+ if (0 == (@as(u32, 1) << @as(u5, @intCast(self.syms[i].st_info >> 4)) & OK_BINDS)) continue;
if (0 == self.syms[i].st_shndx) continue;
if (!mem.eql(u8, name, mem.sliceTo(self.strings + self.syms[i].st_name, 0))) continue;
if (maybe_versym) |versym| {
@@ -301,15 +301,15 @@ pub const ElfDynLib = struct {
fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*:0]u8) bool {
var def = def_arg;
- const vsym = @bitCast(u32, vsym_arg) & 0x7fff;
+ const vsym = @as(u32, @bitCast(vsym_arg)) & 0x7fff;
while (true) {
if (0 == (def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym)
break;
if (def.vd_next == 0)
return false;
- def = @ptrFromInt(*elf.Verdef, @intFromPtr(def) + def.vd_next);
+ def = @as(*elf.Verdef, @ptrFromInt(@intFromPtr(def) + def.vd_next));
}
- const aux = @ptrFromInt(*elf.Verdaux, @intFromPtr(def) + def.vd_aux);
+ const aux = @as(*elf.Verdaux, @ptrFromInt(@intFromPtr(def) + def.vd_aux));
return mem.eql(u8, vername, mem.sliceTo(strings + aux.vda_name, 0));
}
@@ -347,7 +347,7 @@ pub const WindowsDynLib = struct {
pub fn lookup(self: *WindowsDynLib, comptime T: type, name: [:0]const u8) ?T {
if (windows.kernel32.GetProcAddress(self.dll, name.ptr)) |addr| {
- return @ptrCast(T, @alignCast(@alignOf(@typeInfo(T).Pointer.child), addr));
+ return @as(T, @ptrCast(@alignCast(addr)));
} else {
return null;
}
@@ -381,7 +381,7 @@ pub const DlDynlib = struct {
// dlsym (and other dl-functions) secretly take shadow parameter - return address on stack
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66826
if (@call(.never_tail, system.dlsym, .{ self.handle, name.ptr })) |symbol| {
- return @ptrCast(T, @alignCast(@alignOf(@typeInfo(T).Pointer.child), symbol));
+ return @as(T, @ptrCast(@alignCast(symbol)));
} else {
return null;
}
diff --git a/lib/std/elf.zig b/lib/std/elf.zig
index 9a71f73e05..d464d7d12b 100644
--- a/lib/std/elf.zig
+++ b/lib/std/elf.zig
@@ -434,8 +434,8 @@ pub const Header = struct {
}
pub fn parse(hdr_buf: *align(@alignOf(Elf64_Ehdr)) const [@sizeOf(Elf64_Ehdr)]u8) !Header {
- const hdr32 = @ptrCast(*const Elf32_Ehdr, hdr_buf);
- const hdr64 = @ptrCast(*const Elf64_Ehdr, hdr_buf);
+ const hdr32 = @as(*const Elf32_Ehdr, @ptrCast(hdr_buf));
+ const hdr64 = @as(*const Elf64_Ehdr, @ptrCast(hdr_buf));
if (!mem.eql(u8, hdr32.e_ident[0..4], MAGIC)) return error.InvalidElfMagic;
if (hdr32.e_ident[EI_VERSION] != 1) return error.InvalidElfVersion;
@@ -454,7 +454,7 @@ pub const Header = struct {
const machine = if (need_bswap) blk: {
const value = @intFromEnum(hdr32.e_machine);
- break :blk @enumFromInt(EM, @byteSwap(value));
+ break :blk @as(EM, @enumFromInt(@byteSwap(value)));
} else hdr32.e_machine;
return @as(Header, .{
@@ -725,10 +725,10 @@ pub const Elf32_Sym = extern struct {
st_shndx: Elf32_Section,
pub inline fn st_type(self: @This()) u4 {
- return @truncate(u4, self.st_info);
+ return @as(u4, @truncate(self.st_info));
}
pub inline fn st_bind(self: @This()) u4 {
- return @truncate(u4, self.st_info >> 4);
+ return @as(u4, @truncate(self.st_info >> 4));
}
};
pub const Elf64_Sym = extern struct {
@@ -740,10 +740,10 @@ pub const Elf64_Sym = extern struct {
st_size: Elf64_Xword,
pub inline fn st_type(self: @This()) u4 {
- return @truncate(u4, self.st_info);
+ return @as(u4, @truncate(self.st_info));
}
pub inline fn st_bind(self: @This()) u4 {
- return @truncate(u4, self.st_info >> 4);
+ return @as(u4, @truncate(self.st_info >> 4));
}
};
pub const Elf32_Syminfo = extern struct {
@@ -759,10 +759,10 @@ pub const Elf32_Rel = extern struct {
r_info: Elf32_Word,
pub inline fn r_sym(self: @This()) u24 {
- return @truncate(u24, self.r_info >> 8);
+ return @as(u24, @truncate(self.r_info >> 8));
}
pub inline fn r_type(self: @This()) u8 {
- return @truncate(u8, self.r_info);
+ return @as(u8, @truncate(self.r_info));
}
};
pub const Elf64_Rel = extern struct {
@@ -770,10 +770,10 @@ pub const Elf64_Rel = extern struct {
r_info: Elf64_Xword,
pub inline fn r_sym(self: @This()) u32 {
- return @truncate(u32, self.r_info >> 32);
+ return @as(u32, @truncate(self.r_info >> 32));
}
pub inline fn r_type(self: @This()) u32 {
- return @truncate(u32, self.r_info);
+ return @as(u32, @truncate(self.r_info));
}
};
pub const Elf32_Rela = extern struct {
@@ -782,10 +782,10 @@ pub const Elf32_Rela = extern struct {
r_addend: Elf32_Sword,
pub inline fn r_sym(self: @This()) u24 {
- return @truncate(u24, self.r_info >> 8);
+ return @as(u24, @truncate(self.r_info >> 8));
}
pub inline fn r_type(self: @This()) u8 {
- return @truncate(u8, self.r_info);
+ return @as(u8, @truncate(self.r_info));
}
};
pub const Elf64_Rela = extern struct {
@@ -794,10 +794,10 @@ pub const Elf64_Rela = extern struct {
r_addend: Elf64_Sxword,
pub inline fn r_sym(self: @This()) u32 {
- return @truncate(u32, self.r_info >> 32);
+ return @as(u32, @truncate(self.r_info >> 32));
}
pub inline fn r_type(self: @This()) u32 {
- return @truncate(u32, self.r_info);
+ return @as(u32, @truncate(self.r_info));
}
};
pub const Elf32_Dyn = extern struct {
diff --git a/lib/std/enums.zig b/lib/std/enums.zig
index a5ceebc9b1..9931b1d7c1 100644
--- a/lib/std/enums.zig
+++ b/lib/std/enums.zig
@@ -16,7 +16,7 @@ pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_def
fields = fields ++ &[_]StructField{.{
.name = field.name,
.type = Data,
- .default_value = if (field_default) |d| @ptrCast(?*const anyopaque, &d) else null,
+ .default_value = if (field_default) |d| @as(?*const anyopaque, @ptrCast(&d)) else null,
.is_comptime = false,
.alignment = if (@sizeOf(Data) > 0) @alignOf(Data) else 0,
}};
@@ -61,7 +61,7 @@ test tagName {
const E = enum(u8) { a, b, _ };
try testing.expect(tagName(E, .a) != null);
try testing.expectEqualStrings("a", tagName(E, .a).?);
- try testing.expect(tagName(E, @enumFromInt(E, 42)) == null);
+ try testing.expect(tagName(E, @as(E, @enumFromInt(42))) == null);
}
/// Determines the length of a direct-mapped enum array, indexed by
@@ -156,7 +156,7 @@ pub fn directEnumArrayDefault(
var result: [len]Data = if (default) |d| [_]Data{d} ** len else undefined;
inline for (@typeInfo(@TypeOf(init_values)).Struct.fields) |f| {
const enum_value = @field(E, f.name);
- const index = @intCast(usize, @intFromEnum(enum_value));
+ const index = @as(usize, @intCast(@intFromEnum(enum_value)));
result[index] = @field(init_values, f.name);
}
return result;
@@ -341,7 +341,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
var self = initWithCount(0);
inline for (@typeInfo(E).Enum.fields) |field| {
const c = @field(init_counts, field.name);
- const key = @enumFromInt(E, field.value);
+ const key = @as(E, @enumFromInt(field.value));
self.counts.set(key, c);
}
return self;
@@ -412,7 +412,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
/// asserts operation will not overflow any key.
pub fn addSetAssertSafe(self: *Self, other: Self) void {
inline for (@typeInfo(E).Enum.fields) |field| {
- const key = @enumFromInt(E, field.value);
+ const key = @as(E, @enumFromInt(field.value));
self.addAssertSafe(key, other.getCount(key));
}
}
@@ -420,7 +420,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
/// Increases the all key counts by given multiset.
pub fn addSet(self: *Self, other: Self) error{Overflow}!void {
inline for (@typeInfo(E).Enum.fields) |field| {
- const key = @enumFromInt(E, field.value);
+ const key = @as(E, @enumFromInt(field.value));
try self.add(key, other.getCount(key));
}
}
@@ -430,7 +430,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
/// then that key will have a key count of zero.
pub fn removeSet(self: *Self, other: Self) void {
inline for (@typeInfo(E).Enum.fields) |field| {
- const key = @enumFromInt(E, field.value);
+ const key = @as(E, @enumFromInt(field.value));
self.remove(key, other.getCount(key));
}
}
@@ -439,7 +439,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
/// given multiset.
pub fn eql(self: Self, other: Self) bool {
inline for (@typeInfo(E).Enum.fields) |field| {
- const key = @enumFromInt(E, field.value);
+ const key = @as(E, @enumFromInt(field.value));
if (self.getCount(key) != other.getCount(key)) {
return false;
}
@@ -451,7 +451,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
/// equal to the given multiset.
pub fn subsetOf(self: Self, other: Self) bool {
inline for (@typeInfo(E).Enum.fields) |field| {
- const key = @enumFromInt(E, field.value);
+ const key = @as(E, @enumFromInt(field.value));
if (self.getCount(key) > other.getCount(key)) {
return false;
}
@@ -463,7 +463,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
/// equal to the given multiset.
pub fn supersetOf(self: Self, other: Self) bool {
inline for (@typeInfo(E).Enum.fields) |field| {
- const key = @enumFromInt(E, field.value);
+ const key = @as(E, @enumFromInt(field.value));
if (self.getCount(key) < other.getCount(key)) {
return false;
}
@@ -1281,10 +1281,10 @@ test "std.enums.ensureIndexer" {
pub const Key = u32;
pub const count: usize = 8;
pub fn indexOf(k: Key) usize {
- return @intCast(usize, k);
+ return @as(usize, @intCast(k));
}
pub fn keyForIndex(index: usize) Key {
- return @intCast(Key, index);
+ return @as(Key, @intCast(index));
}
});
}
@@ -1323,14 +1323,14 @@ pub fn EnumIndexer(comptime E: type) type {
pub const Key = E;
pub const count = fields_len;
pub fn indexOf(e: E) usize {
- return @intCast(usize, @intFromEnum(e) - min);
+ return @as(usize, @intCast(@intFromEnum(e) - min));
}
pub fn keyForIndex(i: usize) E {
// TODO fix addition semantics. This calculation
// gives up some safety to avoid artificially limiting
// the range of signed enum values to max_isize.
- const enum_value = if (min < 0) @bitCast(isize, i) +% min else i + min;
- return @enumFromInt(E, @intCast(std.meta.Tag(E), enum_value));
+ const enum_value = if (min < 0) @as(isize, @bitCast(i)) +% min else i + min;
+ return @as(E, @enumFromInt(@as(std.meta.Tag(E), @intCast(enum_value))));
}
};
}
diff --git a/lib/std/event/lock.zig b/lib/std/event/lock.zig
index 9da3943d5d..8608298c29 100644
--- a/lib/std/event/lock.zig
+++ b/lib/std/event/lock.zig
@@ -55,7 +55,7 @@ pub const Lock = struct {
const head = switch (self.head) {
UNLOCKED => unreachable,
LOCKED => null,
- else => @ptrFromInt(*Waiter, self.head),
+ else => @as(*Waiter, @ptrFromInt(self.head)),
};
if (head) |h| {
@@ -102,7 +102,7 @@ pub const Lock = struct {
break :blk null;
},
else => {
- const waiter = @ptrFromInt(*Waiter, self.lock.head);
+ const waiter = @as(*Waiter, @ptrFromInt(self.lock.head));
self.lock.head = if (waiter.next == null) LOCKED else @intFromPtr(waiter.next);
if (waiter.next) |next|
next.tail = waiter.tail;
@@ -130,7 +130,7 @@ test "std.event.Lock" {
var lock = Lock{};
testLock(&lock);
- const expected_result = [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
+ const expected_result = [1]i32{3 * @as(i32, @intCast(shared_test_data.len))} ** shared_test_data.len;
try testing.expectEqualSlices(i32, &expected_result, &shared_test_data);
}
fn testLock(lock: *Lock) void {
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index 7eec26a2b1..b5021a5378 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -556,7 +556,7 @@ pub const Loop = struct {
self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.IN);
},
.macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_READ, os.system.EV_ONESHOT);
+ self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_READ, os.system.EV_ONESHOT);
},
else => @compileError("Unsupported OS"),
}
@@ -568,7 +568,7 @@ pub const Loop = struct {
self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.OUT);
},
.macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_WRITE, os.system.EV_ONESHOT);
+ self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_WRITE, os.system.EV_ONESHOT);
},
else => @compileError("Unsupported OS"),
}
@@ -580,8 +580,8 @@ pub const Loop = struct {
self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.OUT | os.linux.EPOLL.IN);
},
.macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
- self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_READ, os.system.EV_ONESHOT);
- self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_WRITE, os.system.EV_ONESHOT);
+ self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_READ, os.system.EV_ONESHOT);
+ self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_WRITE, os.system.EV_ONESHOT);
},
else => @compileError("Unsupported OS"),
}
@@ -1415,7 +1415,7 @@ pub const Loop = struct {
var events: [1]os.linux.epoll_event = undefined;
const count = os.epoll_wait(self.os_data.epollfd, events[0..], -1);
for (events[0..count]) |ev| {
- const resume_node = @ptrFromInt(*ResumeNode, ev.data.ptr);
+ const resume_node = @as(*ResumeNode, @ptrFromInt(ev.data.ptr));
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
@@ -1439,7 +1439,7 @@ pub const Loop = struct {
const empty_kevs = &[0]os.Kevent{};
const count = os.kevent(self.os_data.kqfd, empty_kevs, eventlist[0..], null) catch unreachable;
for (eventlist[0..count]) |ev| {
- const resume_node = @ptrFromInt(*ResumeNode, ev.udata);
+ const resume_node = @as(*ResumeNode, @ptrFromInt(ev.udata));
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig
index c19330d5a9..47ddf74fd5 100644
--- a/lib/std/event/rwlock.zig
+++ b/lib/std/event/rwlock.zig
@@ -223,7 +223,7 @@ test "std.event.RwLock" {
_ = testLock(std.heap.page_allocator, &lock);
- const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
+ const expected_result = [1]i32{shared_it_count * @as(i32, @intCast(shared_test_data.len))} ** shared_test_data.len;
try testing.expectEqualSlices(i32, expected_result, shared_test_data);
}
fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void {
@@ -244,12 +244,12 @@ fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void {
}
for (write_nodes) |*write_node| {
- const casted = @ptrCast(*const @Frame(writeRunner), write_node.data);
+ const casted = @as(*const @Frame(writeRunner), @ptrCast(write_node.data));
await casted;
allocator.destroy(casted);
}
for (read_nodes) |*read_node| {
- const casted = @ptrCast(*const @Frame(readRunner), read_node.data);
+ const casted = @as(*const @Frame(readRunner), @ptrCast(read_node.data));
await casted;
allocator.destroy(casted);
}
@@ -287,6 +287,6 @@ fn readRunner(lock: *RwLock) callconv(.Async) void {
defer handle.release();
try testing.expect(shared_test_index == 0);
- try testing.expect(shared_test_data[i] == @intCast(i32, shared_count));
+ try testing.expect(shared_test_data[i] == @as(i32, @intCast(shared_count)));
}
}
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index d983aba369..7af21c86df 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -396,7 +396,7 @@ pub const ArgState = struct {
}
// Mark this argument as used
- self.used_args |= @as(ArgSetType, 1) << @intCast(u5, next_index);
+ self.used_args |= @as(ArgSetType, 1) << @as(u5, @intCast(next_index));
return next_index;
}
};
@@ -1056,7 +1056,7 @@ pub fn formatFloatScientific(
options: FormatOptions,
writer: anytype,
) !void {
- var x = @floatCast(f64, value);
+ var x = @as(f64, @floatCast(value));
// Errol doesn't handle these special cases.
if (math.signbit(x)) {
@@ -1167,9 +1167,9 @@ pub fn formatFloatHexadecimal(
const exponent_mask = (1 << exponent_bits) - 1;
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
- const as_bits = @bitCast(TU, value);
+ const as_bits = @as(TU, @bitCast(value));
var mantissa = as_bits & mantissa_mask;
- var exponent: i32 = @truncate(u16, (as_bits >> mantissa_bits) & exponent_mask);
+ var exponent: i32 = @as(u16, @truncate((as_bits >> mantissa_bits) & exponent_mask));
const is_denormal = exponent == 0 and mantissa != 0;
const is_zero = exponent == 0 and mantissa == 0;
@@ -1218,7 +1218,7 @@ pub fn formatFloatHexadecimal(
// Drop the excess bits.
mantissa >>= 2;
// Restore the alignment.
- mantissa <<= @intCast(math.Log2Int(TU), (mantissa_digits - precision) * 4);
+ mantissa <<= @as(math.Log2Int(TU), @intCast((mantissa_digits - precision) * 4));
const overflow = mantissa & (1 << 1 + mantissa_digits * 4) != 0;
// Prefer a normalized result in case of overflow.
@@ -1296,7 +1296,7 @@ pub fn formatFloatDecimal(
errol.roundToPrecision(&float_decimal, precision, errol.RoundMode.Decimal);
// exp < 0 means the leading is always 0 as errol result is normalized.
- var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0;
+ var num_digits_whole = if (float_decimal.exp > 0) @as(usize, @intCast(float_decimal.exp)) else 0;
// the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this.
var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len);
@@ -1325,7 +1325,7 @@ pub fn formatFloatDecimal(
// Zero-fill until we reach significant digits or run out of precision.
if (float_decimal.exp <= 0) {
- const zero_digit_count = @intCast(usize, -float_decimal.exp);
+ const zero_digit_count = @as(usize, @intCast(-float_decimal.exp));
const zeros_to_print = @min(zero_digit_count, precision);
var i: usize = 0;
@@ -1354,7 +1354,7 @@ pub fn formatFloatDecimal(
}
} else {
// exp < 0 means the leading is always 0 as errol result is normalized.
- var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0;
+ var num_digits_whole = if (float_decimal.exp > 0) @as(usize, @intCast(float_decimal.exp)) else 0;
// the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this.
var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len);
@@ -1380,7 +1380,7 @@ pub fn formatFloatDecimal(
// Zero-fill until we reach significant digits or run out of precision.
if (float_decimal.exp < 0) {
- const zero_digit_count = @intCast(usize, -float_decimal.exp);
+ const zero_digit_count = @as(usize, @intCast(-float_decimal.exp));
var i: usize = 0;
while (i < zero_digit_count) : (i += 1) {
@@ -1423,21 +1423,21 @@ pub fn formatInt(
if (base == 10) {
while (a >= 100) : (a = @divTrunc(a, 100)) {
index -= 2;
- buf[index..][0..2].* = digits2(@intCast(usize, a % 100));
+ buf[index..][0..2].* = digits2(@as(usize, @intCast(a % 100)));
}
if (a < 10) {
index -= 1;
- buf[index] = '0' + @intCast(u8, a);
+ buf[index] = '0' + @as(u8, @intCast(a));
} else {
index -= 2;
- buf[index..][0..2].* = digits2(@intCast(usize, a));
+ buf[index..][0..2].* = digits2(@as(usize, @intCast(a)));
}
} else {
while (true) {
const digit = a % base;
index -= 1;
- buf[index] = digitToChar(@intCast(u8, digit), case);
+ buf[index] = digitToChar(@as(u8, @intCast(digit)), case);
a /= base;
if (a == 0) break;
}
@@ -1595,10 +1595,10 @@ test "fmtDuration" {
fn formatDurationSigned(ns: i64, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
if (ns < 0) {
- const data = FormatDurationData{ .ns = @intCast(u64, -ns), .negative = true };
+ const data = FormatDurationData{ .ns = @as(u64, @intCast(-ns)), .negative = true };
try formatDuration(data, fmt, options, writer);
} else {
- const data = FormatDurationData{ .ns = @intCast(u64, ns) };
+ const data = FormatDurationData{ .ns = @as(u64, @intCast(ns)) };
try formatDuration(data, fmt, options, writer);
}
}
@@ -1846,7 +1846,7 @@ fn parseWithSign(
// The first digit of a negative number.
// Consider parsing "-4" as an i3.
// This should work, but positive 4 overflows i3, so we can't cast the digit to T and subtract.
- x = math.cast(T, -@intCast(i8, digit)) orelse return error.Overflow;
+ x = math.cast(T, -@as(i8, @intCast(digit))) orelse return error.Overflow;
continue;
}
x = try add(T, x, math.cast(T, digit) orelse return error.Overflow);
@@ -2099,7 +2099,7 @@ test "optional" {
try expectFmt("optional: null\n", "optional: {?}\n", .{value});
}
{
- const value = @ptrFromInt(?*i32, 0xf000d000);
+ const value = @as(?*i32, @ptrFromInt(0xf000d000));
try expectFmt("optional: *i32@f000d000\n", "optional: {*}\n", .{value});
}
}
@@ -2218,7 +2218,7 @@ test "slice" {
}
{
var runtime_zero: usize = 0;
- const value = @ptrFromInt([*]align(1) const []const u8, 0xdeadbeef)[runtime_zero..runtime_zero];
+ const value = @as([*]align(1) const []const u8, @ptrFromInt(0xdeadbeef))[runtime_zero..runtime_zero];
try expectFmt("slice: []const u8@deadbeef\n", "slice: {*}\n", .{value});
}
{
@@ -2248,17 +2248,17 @@ test "escape non-printable" {
test "pointer" {
{
- const value = @ptrFromInt(*align(1) i32, 0xdeadbeef);
+ const value = @as(*align(1) i32, @ptrFromInt(0xdeadbeef));
try expectFmt("pointer: i32@deadbeef\n", "pointer: {}\n", .{value});
try expectFmt("pointer: i32@deadbeef\n", "pointer: {*}\n", .{value});
}
const FnPtr = *align(1) const fn () void;
{
- const value = @ptrFromInt(FnPtr, 0xdeadbeef);
+ const value = @as(FnPtr, @ptrFromInt(0xdeadbeef));
try expectFmt("pointer: fn() void@deadbeef\n", "pointer: {}\n", .{value});
}
{
- const value = @ptrFromInt(FnPtr, 0xdeadbeef);
+ const value = @as(FnPtr, @ptrFromInt(0xdeadbeef));
try expectFmt("pointer: fn() void@deadbeef\n", "pointer: {}\n", .{value});
}
}
@@ -2267,12 +2267,12 @@ test "cstr" {
try expectFmt(
"cstr: Test C\n",
"cstr: {s}\n",
- .{@ptrCast([*c]const u8, "Test C")},
+ .{@as([*c]const u8, @ptrCast("Test C"))},
);
try expectFmt(
"cstr: Test C\n",
"cstr: {s:10}\n",
- .{@ptrCast([*c]const u8, "Test C")},
+ .{@as([*c]const u8, @ptrCast("Test C"))},
);
}
@@ -2360,11 +2360,11 @@ test "non-exhaustive enum" {
};
try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.One\n", "enum: {}\n", .{Enum.One});
try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.Two\n", "enum: {}\n", .{Enum.Two});
- try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(4660)\n", "enum: {}\n", .{@enumFromInt(Enum, 0x1234)});
+ try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(4660)\n", "enum: {}\n", .{@as(Enum, @enumFromInt(0x1234))});
try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.One\n", "enum: {x}\n", .{Enum.One});
try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.Two\n", "enum: {x}\n", .{Enum.Two});
try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.Two\n", "enum: {X}\n", .{Enum.Two});
- try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(1234)\n", "enum: {x}\n", .{@enumFromInt(Enum, 0x1234)});
+ try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(1234)\n", "enum: {x}\n", .{@as(Enum, @enumFromInt(0x1234))});
}
test "float.scientific" {
@@ -2376,11 +2376,11 @@ test "float.scientific" {
test "float.scientific.precision" {
try expectFmt("f64: 1.40971e-42", "f64: {e:.5}", .{@as(f64, 1.409706e-42)});
- try expectFmt("f64: 1.00000e-09", "f64: {e:.5}", .{@as(f64, @bitCast(f32, @as(u32, 814313563)))});
- try expectFmt("f64: 7.81250e-03", "f64: {e:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1006632960)))});
+ try expectFmt("f64: 1.00000e-09", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 814313563))))});
+ try expectFmt("f64: 7.81250e-03", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1006632960))))});
// libc rounds 1.000005e+05 to 1.00000e+05 but zig does 1.00001e+05.
// In fact, libc doesn't round a lot of 5 cases up when one past the precision point.
- try expectFmt("f64: 1.00001e+05", "f64: {e:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1203982400)))});
+ try expectFmt("f64: 1.00001e+05", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1203982400))))});
}
test "float.special" {
@@ -2472,22 +2472,22 @@ test "float.decimal" {
}
test "float.libc.sanity" {
- try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 916964781)))});
- try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 925353389)))});
- try expectFmt("f64: 0.10000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1036831278)))});
- try expectFmt("f64: 1.00000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1065353133)))});
- try expectFmt("f64: 10.00000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1092616192)))});
+ try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 916964781))))});
+ try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 925353389))))});
+ try expectFmt("f64: 0.10000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1036831278))))});
+ try expectFmt("f64: 1.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1065353133))))});
+ try expectFmt("f64: 10.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1092616192))))});
// libc differences
//
// This is 0.015625 exactly according to gdb. We thus round down,
// however glibc rounds up for some reason. This occurs for all
// floats of the form x.yyyy25 on a precision point.
- try expectFmt("f64: 0.01563", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1015021568)))});
+ try expectFmt("f64: 0.01563", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1015021568))))});
// errol3 rounds to ... 630 but libc rounds to ...632. Grisu3
// also rounds to 630 so I'm inclined to believe libc is not
// optimal here.
- try expectFmt("f64: 18014400656965630.00000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1518338049)))});
+ try expectFmt("f64: 18014400656965630.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1518338049))))});
}
test "custom" {
diff --git a/lib/std/fmt/errol.zig b/lib/std/fmt/errol.zig
index b438733589..af686d6448 100644
--- a/lib/std/fmt/errol.zig
+++ b/lib/std/fmt/errol.zig
@@ -29,11 +29,11 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro
switch (mode) {
RoundMode.Decimal => {
if (float_decimal.exp >= 0) {
- round_digit = precision + @intCast(usize, float_decimal.exp);
+ round_digit = precision + @as(usize, @intCast(float_decimal.exp));
} else {
// if a small negative exp, then adjust we need to offset by the number
// of leading zeros that will occur.
- const min_exp_required = @intCast(usize, -float_decimal.exp);
+ const min_exp_required = @as(usize, @intCast(-float_decimal.exp));
if (precision > min_exp_required) {
round_digit = precision - min_exp_required;
}
@@ -59,7 +59,7 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro
float_decimal.exp += 1;
// Re-size the buffer to use the reserved leading byte.
- const one_before = @ptrFromInt([*]u8, @intFromPtr(&float_decimal.digits[0]) - 1);
+ const one_before = @as([*]u8, @ptrFromInt(@intFromPtr(&float_decimal.digits[0]) - 1));
float_decimal.digits = one_before[0 .. float_decimal.digits.len + 1];
float_decimal.digits[0] = '1';
return;
@@ -80,7 +80,7 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro
/// Corrected Errol3 double to ASCII conversion.
pub fn errol3(value: f64, buffer: []u8) FloatDecimal {
- const bits = @bitCast(u64, value);
+ const bits = @as(u64, @bitCast(value));
const i = tableLowerBound(bits);
if (i < enum3.len and enum3[i] == bits) {
const data = enum3_data[i];
@@ -113,16 +113,16 @@ fn errolSlow(val: f64, buffer: []u8) FloatDecimal {
// normalize the midpoint
const e = math.frexp(val).exponent;
- var exp = @intFromFloat(i16, @floor(307 + @floatFromInt(f64, e) * 0.30103));
+ var exp = @as(i16, @intFromFloat(@floor(307 + @as(f64, @floatFromInt(e)) * 0.30103)));
if (exp < 20) {
exp = 20;
- } else if (@intCast(usize, exp) >= lookup_table.len) {
- exp = @intCast(i16, lookup_table.len - 1);
+ } else if (@as(usize, @intCast(exp)) >= lookup_table.len) {
+ exp = @as(i16, @intCast(lookup_table.len - 1));
}
- var mid = lookup_table[@intCast(usize, exp)];
+ var mid = lookup_table[@as(usize, @intCast(exp))];
mid = hpProd(mid, val);
- const lten = lookup_table[@intCast(usize, exp)].val;
+ const lten = lookup_table[@as(usize, @intCast(exp))].val;
exp -= 307;
@@ -171,25 +171,25 @@ fn errolSlow(val: f64, buffer: []u8) FloatDecimal {
var buf_index: usize = 0;
const bound = buffer.len - 1;
while (buf_index < bound) {
- var hdig = @intFromFloat(u8, @floor(high.val));
- if ((high.val == @floatFromInt(f64, hdig)) and (high.off < 0)) hdig -= 1;
+ var hdig = @as(u8, @intFromFloat(@floor(high.val)));
+ if ((high.val == @as(f64, @floatFromInt(hdig))) and (high.off < 0)) hdig -= 1;
- var ldig = @intFromFloat(u8, @floor(low.val));
- if ((low.val == @floatFromInt(f64, ldig)) and (low.off < 0)) ldig -= 1;
+ var ldig = @as(u8, @intFromFloat(@floor(low.val)));
+ if ((low.val == @as(f64, @floatFromInt(ldig))) and (low.off < 0)) ldig -= 1;
if (ldig != hdig) break;
buffer[buf_index] = hdig + '0';
buf_index += 1;
- high.val -= @floatFromInt(f64, hdig);
- low.val -= @floatFromInt(f64, ldig);
+ high.val -= @as(f64, @floatFromInt(hdig));
+ low.val -= @as(f64, @floatFromInt(ldig));
hpMul10(&high);
hpMul10(&low);
}
const tmp = (high.val + low.val) / 2.0;
- var mdig = @intFromFloat(u8, @floor(tmp + 0.5));
- if ((@floatFromInt(f64, mdig) - tmp) == 0.5 and (mdig & 0x1) != 0) mdig -= 1;
+ var mdig = @as(u8, @intFromFloat(@floor(tmp + 0.5)));
+ if ((@as(f64, @floatFromInt(mdig)) - tmp) == 0.5 and (mdig & 0x1) != 0) mdig -= 1;
buffer[buf_index] = mdig + '0';
buf_index += 1;
@@ -248,9 +248,9 @@ fn split(val: f64, hi: *f64, lo: *f64) void {
}
fn gethi(in: f64) f64 {
- const bits = @bitCast(u64, in);
+ const bits = @as(u64, @bitCast(in));
const new_bits = bits & 0xFFFFFFFFF8000000;
- return @bitCast(f64, new_bits);
+ return @as(f64, @bitCast(new_bits));
}
/// Normalize the number by factoring in the error.
@@ -303,21 +303,21 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal {
assert((val > 9.007199254740992e15) and val < (3.40282366920938e38));
- var mid = @intFromFloat(u128, val);
+ var mid = @as(u128, @intFromFloat(val));
var low: u128 = mid - fpeint((fpnext(val) - val) / 2.0);
var high: u128 = mid + fpeint((val - fpprev(val)) / 2.0);
- if (@bitCast(u64, val) & 0x1 != 0) {
+ if (@as(u64, @bitCast(val)) & 0x1 != 0) {
high -= 1;
} else {
low -= 1;
}
- var l64 = @intCast(u64, low % pow19);
- const lf = @intCast(u64, (low / pow19) % pow19);
+ var l64 = @as(u64, @intCast(low % pow19));
+ const lf = @as(u64, @intCast((low / pow19) % pow19));
- var h64 = @intCast(u64, high % pow19);
- const hf = @intCast(u64, (high / pow19) % pow19);
+ var h64 = @as(u64, @intCast(high % pow19));
+ const hf = @as(u64, @intCast((high / pow19) % pow19));
if (lf != hf) {
l64 = lf;
@@ -333,7 +333,7 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal {
x *= 10;
}
}
- const m64 = @truncate(u64, @divTrunc(mid, x));
+ const m64 = @as(u64, @truncate(@divTrunc(mid, x)));
if (lf != hf) mi += 19;
@@ -349,7 +349,7 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal {
return FloatDecimal{
.digits = buffer[0..buf_index],
- .exp = @intCast(i32, buf_index) + mi,
+ .exp = @as(i32, @intCast(buf_index)) + mi,
};
}
@@ -360,33 +360,33 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal {
fn errolFixed(val: f64, buffer: []u8) FloatDecimal {
assert((val >= 16.0) and (val < 9.007199254740992e15));
- const u = @intFromFloat(u64, val);
- const n = @floatFromInt(f64, u);
+ const u = @as(u64, @intFromFloat(val));
+ const n = @as(f64, @floatFromInt(u));
var mid = val - n;
var lo = ((fpprev(val) - n) + mid) / 2.0;
var hi = ((fpnext(val) - n) + mid) / 2.0;
var buf_index = u64toa(u, buffer);
- var exp = @intCast(i32, buf_index);
+ var exp = @as(i32, @intCast(buf_index));
var j = buf_index;
buffer[j] = 0;
if (mid != 0.0) {
while (mid != 0.0) {
lo *= 10.0;
- const ldig = @intFromFloat(i32, lo);
- lo -= @floatFromInt(f64, ldig);
+ const ldig = @as(i32, @intFromFloat(lo));
+ lo -= @as(f64, @floatFromInt(ldig));
mid *= 10.0;
- const mdig = @intFromFloat(i32, mid);
- mid -= @floatFromInt(f64, mdig);
+ const mdig = @as(i32, @intFromFloat(mid));
+ mid -= @as(f64, @floatFromInt(mdig));
hi *= 10.0;
- const hdig = @intFromFloat(i32, hi);
- hi -= @floatFromInt(f64, hdig);
+ const hdig = @as(i32, @intFromFloat(hi));
+ hi -= @as(f64, @floatFromInt(hdig));
- buffer[j] = @intCast(u8, mdig + '0');
+ buffer[j] = @as(u8, @intCast(mdig + '0'));
j += 1;
if (hdig != ldig or j > 50) break;
@@ -413,11 +413,11 @@ fn errolFixed(val: f64, buffer: []u8) FloatDecimal {
}
fn fpnext(val: f64) f64 {
- return @bitCast(f64, @bitCast(u64, val) +% 1);
+ return @as(f64, @bitCast(@as(u64, @bitCast(val)) +% 1));
}
fn fpprev(val: f64) f64 {
- return @bitCast(f64, @bitCast(u64, val) -% 1);
+ return @as(f64, @bitCast(@as(u64, @bitCast(val)) -% 1));
}
pub const c_digits_lut = [_]u8{
@@ -453,7 +453,7 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
var buf_index: usize = 0;
if (value < kTen8) {
- const v = @intCast(u32, value);
+ const v = @as(u32, @intCast(value));
if (v < 10000) {
const d1: u32 = (v / 100) << 1;
const d2: u32 = (v % 100) << 1;
@@ -508,8 +508,8 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
buf_index += 1;
}
} else if (value < kTen16) {
- const v0: u32 = @intCast(u32, value / kTen8);
- const v1: u32 = @intCast(u32, value % kTen8);
+ const v0: u32 = @as(u32, @intCast(value / kTen8));
+ const v1: u32 = @as(u32, @intCast(value % kTen8));
const b0: u32 = v0 / 10000;
const c0: u32 = v0 % 10000;
@@ -579,11 +579,11 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
buffer[buf_index] = c_digits_lut[d8 + 1];
buf_index += 1;
} else {
- const a = @intCast(u32, value / kTen16); // 1 to 1844
+ const a = @as(u32, @intCast(value / kTen16)); // 1 to 1844
value %= kTen16;
if (a < 10) {
- buffer[buf_index] = '0' + @intCast(u8, a);
+ buffer[buf_index] = '0' + @as(u8, @intCast(a));
buf_index += 1;
} else if (a < 100) {
const i: u32 = a << 1;
@@ -592,7 +592,7 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
buffer[buf_index] = c_digits_lut[i + 1];
buf_index += 1;
} else if (a < 1000) {
- buffer[buf_index] = '0' + @intCast(u8, a / 100);
+ buffer[buf_index] = '0' + @as(u8, @intCast(a / 100));
buf_index += 1;
const i: u32 = (a % 100) << 1;
@@ -613,8 +613,8 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
buf_index += 1;
}
- const v0 = @intCast(u32, value / kTen8);
- const v1 = @intCast(u32, value % kTen8);
+ const v0 = @as(u32, @intCast(value / kTen8));
+ const v1 = @as(u32, @intCast(value % kTen8));
const b0: u32 = v0 / 10000;
const c0: u32 = v0 % 10000;
@@ -672,10 +672,10 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
}
fn fpeint(from: f64) u128 {
- const bits = @bitCast(u64, from);
+ const bits = @as(u64, @bitCast(from));
assert((bits & ((1 << 52) - 1)) == 0);
- return @as(u128, 1) << @truncate(u7, (bits >> 52) -% 1023);
+ return @as(u128, 1) << @as(u7, @truncate((bits >> 52) -% 1023));
}
/// Given two different integers with the same length in terms of the number
diff --git a/lib/std/fmt/parse_float.zig b/lib/std/fmt/parse_float.zig
index b14fe5ca3c..98fbe28032 100644
--- a/lib/std/fmt/parse_float.zig
+++ b/lib/std/fmt/parse_float.zig
@@ -78,7 +78,7 @@ test "fmt.parseFloat nan and inf" {
inline for ([_]type{ f16, f32, f64, f128 }) |T| {
const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
- try expectEqual(@bitCast(Z, try parseFloat(T, "nAn")), @bitCast(Z, std.math.nan(T)));
+ try expectEqual(@as(Z, @bitCast(try parseFloat(T, "nAn"))), @as(Z, @bitCast(std.math.nan(T))));
try expectEqual(try parseFloat(T, "inF"), std.math.inf(T));
try expectEqual(try parseFloat(T, "-INF"), -std.math.inf(T));
}
diff --git a/lib/std/fmt/parse_float/common.zig b/lib/std/fmt/parse_float/common.zig
index c1b34a081b..8dba3b4498 100644
--- a/lib/std/fmt/parse_float/common.zig
+++ b/lib/std/fmt/parse_float/common.zig
@@ -32,7 +32,7 @@ pub fn BiasedFp(comptime T: type) type {
pub fn toFloat(self: Self, comptime FloatT: type, negative: bool) FloatT {
var word = self.f;
- word |= @intCast(MantissaT, self.e) << std.math.floatMantissaBits(FloatT);
+ word |= @as(MantissaT, @intCast(self.e)) << std.math.floatMantissaBits(FloatT);
var f = floatFromUnsigned(FloatT, MantissaT, word);
if (negative) f = -f;
return f;
@@ -42,10 +42,10 @@ pub fn BiasedFp(comptime T: type) type {
pub fn floatFromUnsigned(comptime T: type, comptime MantissaT: type, v: MantissaT) T {
return switch (T) {
- f16 => @bitCast(f16, @truncate(u16, v)),
- f32 => @bitCast(f32, @truncate(u32, v)),
- f64 => @bitCast(f64, @truncate(u64, v)),
- f128 => @bitCast(f128, v),
+ f16 => @as(f16, @bitCast(@as(u16, @truncate(v)))),
+ f32 => @as(f32, @bitCast(@as(u32, @truncate(v)))),
+ f64 => @as(f64, @bitCast(@as(u64, @truncate(v)))),
+ f128 => @as(f128, @bitCast(v)),
else => unreachable,
};
}
diff --git a/lib/std/fmt/parse_float/convert_eisel_lemire.zig b/lib/std/fmt/parse_float/convert_eisel_lemire.zig
index 5c49553a14..6831a308ea 100644
--- a/lib/std/fmt/parse_float/convert_eisel_lemire.zig
+++ b/lib/std/fmt/parse_float/convert_eisel_lemire.zig
@@ -36,7 +36,7 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) {
}
// Normalize our significant digits, so the most-significant bit is set.
- const lz = @clz(@bitCast(u64, w));
+ const lz = @clz(@as(u64, @bitCast(w)));
w = math.shl(u64, w, lz);
const r = computeProductApprox(q, w, float_info.mantissa_explicit_bits + 3);
@@ -62,9 +62,9 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) {
}
}
- const upper_bit = @intCast(i32, r.hi >> 63);
- var mantissa = math.shr(u64, r.hi, upper_bit + 64 - @intCast(i32, float_info.mantissa_explicit_bits) - 3);
- var power2 = power(@intCast(i32, q)) + upper_bit - @intCast(i32, lz) - float_info.minimum_exponent;
+ const upper_bit = @as(i32, @intCast(r.hi >> 63));
+ var mantissa = math.shr(u64, r.hi, upper_bit + 64 - @as(i32, @intCast(float_info.mantissa_explicit_bits)) - 3);
+ var power2 = power(@as(i32, @intCast(q))) + upper_bit - @as(i32, @intCast(lz)) - float_info.minimum_exponent;
if (power2 <= 0) {
if (-power2 + 1 >= 64) {
// Have more than 64 bits below the minimum exponent, must be 0.
@@ -93,7 +93,7 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) {
q >= float_info.min_exponent_round_to_even and
q <= float_info.max_exponent_round_to_even and
mantissa & 3 == 1 and
- math.shl(u64, mantissa, (upper_bit + 64 - @intCast(i32, float_info.mantissa_explicit_bits) - 3)) == r.hi)
+ math.shl(u64, mantissa, (upper_bit + 64 - @as(i32, @intCast(float_info.mantissa_explicit_bits)) - 3)) == r.hi)
{
// Zero the lowest bit, so we don't round up.
mantissa &= ~@as(u64, 1);
@@ -139,8 +139,8 @@ const U128 = struct {
pub fn mul(a: u64, b: u64) U128 {
const x = @as(u128, a) * b;
return .{
- .hi = @truncate(u64, x >> 64),
- .lo = @truncate(u64, x),
+ .hi = @as(u64, @truncate(x >> 64)),
+ .lo = @as(u64, @truncate(x)),
};
}
};
@@ -161,7 +161,7 @@ fn computeProductApprox(q: i64, w: u64, comptime precision: usize) U128 {
// 5^q < 2^64, then the multiplication always provides an exact value.
// That means whenever we need to round ties to even, we always have
// an exact value.
- const index = @intCast(usize, q - @intCast(i64, eisel_lemire_smallest_power_of_five));
+ const index = @as(usize, @intCast(q - @as(i64, @intCast(eisel_lemire_smallest_power_of_five))));
const pow5 = eisel_lemire_table_powers_of_five_128[index];
// Only need one multiplication as long as there is 1 zero but
diff --git a/lib/std/fmt/parse_float/convert_fast.zig b/lib/std/fmt/parse_float/convert_fast.zig
index 2124e436ab..a148d3946f 100644
--- a/lib/std/fmt/parse_float/convert_fast.zig
+++ b/lib/std/fmt/parse_float/convert_fast.zig
@@ -108,19 +108,19 @@ pub fn convertFast(comptime T: type, n: Number(T)) ?T {
var value: T = 0;
if (n.exponent <= info.max_exponent_fast_path) {
// normal fast path
- value = @floatFromInt(T, n.mantissa);
+ value = @as(T, @floatFromInt(n.mantissa));
value = if (n.exponent < 0)
- value / fastPow10(T, @intCast(usize, -n.exponent))
+ value / fastPow10(T, @as(usize, @intCast(-n.exponent)))
else
- value * fastPow10(T, @intCast(usize, n.exponent));
+ value * fastPow10(T, @as(usize, @intCast(n.exponent)));
} else {
// disguised fast path
const shift = n.exponent - info.max_exponent_fast_path;
- const mantissa = math.mul(MantissaT, n.mantissa, fastIntPow10(MantissaT, @intCast(usize, shift))) catch return null;
+ const mantissa = math.mul(MantissaT, n.mantissa, fastIntPow10(MantissaT, @as(usize, @intCast(shift)))) catch return null;
if (mantissa > info.max_mantissa_fast_path) {
return null;
}
- value = @floatFromInt(T, mantissa) * fastPow10(T, info.max_exponent_fast_path);
+ value = @as(T, @floatFromInt(mantissa)) * fastPow10(T, info.max_exponent_fast_path);
}
if (n.negative) {
diff --git a/lib/std/fmt/parse_float/convert_hex.zig b/lib/std/fmt/parse_float/convert_hex.zig
index 3b3f797216..815331347c 100644
--- a/lib/std/fmt/parse_float/convert_hex.zig
+++ b/lib/std/fmt/parse_float/convert_hex.zig
@@ -81,7 +81,7 @@ pub fn convertHex(comptime T: type, n_: Number(T)) T {
}
var bits = n.mantissa & ((1 << mantissa_bits) - 1);
- bits |= @intCast(MantissaT, (n.exponent - exp_bias) & ((1 << exp_bits) - 1)) << mantissa_bits;
+ bits |= @as(MantissaT, @intCast((n.exponent - exp_bias) & ((1 << exp_bits) - 1))) << mantissa_bits;
if (n.negative) {
bits |= 1 << (mantissa_bits + exp_bits);
}
diff --git a/lib/std/fmt/parse_float/convert_slow.zig b/lib/std/fmt/parse_float/convert_slow.zig
index 225a1e208c..53cb12ef13 100644
--- a/lib/std/fmt/parse_float/convert_slow.zig
+++ b/lib/std/fmt/parse_float/convert_slow.zig
@@ -48,13 +48,13 @@ pub fn convertSlow(comptime T: type, s: []const u8) BiasedFp(T) {
var exp2: i32 = 0;
// Shift right toward (1/2 .. 1]
while (d.decimal_point > 0) {
- const n = @intCast(usize, d.decimal_point);
+ const n = @as(usize, @intCast(d.decimal_point));
const shift = getShift(n);
d.rightShift(shift);
if (d.decimal_point < -Decimal(T).decimal_point_range) {
return BiasedFp(T).zero();
}
- exp2 += @intCast(i32, shift);
+ exp2 += @as(i32, @intCast(shift));
}
// Shift left toward (1/2 .. 1]
while (d.decimal_point <= 0) {
@@ -66,7 +66,7 @@ pub fn convertSlow(comptime T: type, s: []const u8) BiasedFp(T) {
else => 1,
};
} else {
- const n = @intCast(usize, -d.decimal_point);
+ const n = @as(usize, @intCast(-d.decimal_point));
break :blk getShift(n);
}
};
@@ -74,17 +74,17 @@ pub fn convertSlow(comptime T: type, s: []const u8) BiasedFp(T) {
if (d.decimal_point > Decimal(T).decimal_point_range) {
return BiasedFp(T).inf(T);
}
- exp2 -= @intCast(i32, shift);
+ exp2 -= @as(i32, @intCast(shift));
}
// We are now in the range [1/2 .. 1] but the binary format uses [1 .. 2]
exp2 -= 1;
while (min_exponent + 1 > exp2) {
- var n = @intCast(usize, (min_exponent + 1) - exp2);
+ var n = @as(usize, @intCast((min_exponent + 1) - exp2));
if (n > max_shift) {
n = max_shift;
}
d.rightShift(n);
- exp2 += @intCast(i32, n);
+ exp2 += @as(i32, @intCast(n));
}
if (exp2 - min_exponent >= infinite_power) {
return BiasedFp(T).inf(T);
diff --git a/lib/std/fmt/parse_float/decimal.zig b/lib/std/fmt/parse_float/decimal.zig
index 5bb5fa8d5e..f8d736a065 100644
--- a/lib/std/fmt/parse_float/decimal.zig
+++ b/lib/std/fmt/parse_float/decimal.zig
@@ -114,7 +114,7 @@ pub fn Decimal(comptime T: type) type {
return math.maxInt(MantissaT);
}
- const dp = @intCast(usize, self.decimal_point);
+ const dp = @as(usize, @intCast(self.decimal_point));
var n: MantissaT = 0;
var i: usize = 0;
@@ -155,7 +155,7 @@ pub fn Decimal(comptime T: type) type {
const quotient = n / 10;
const remainder = n - (10 * quotient);
if (write_index < max_digits) {
- self.digits[write_index] = @intCast(u8, remainder);
+ self.digits[write_index] = @as(u8, @intCast(remainder));
} else if (remainder > 0) {
self.truncated = true;
}
@@ -167,7 +167,7 @@ pub fn Decimal(comptime T: type) type {
const quotient = n / 10;
const remainder = n - (10 * quotient);
if (write_index < max_digits) {
- self.digits[write_index] = @intCast(u8, remainder);
+ self.digits[write_index] = @as(u8, @intCast(remainder));
} else if (remainder > 0) {
self.truncated = true;
}
@@ -178,7 +178,7 @@ pub fn Decimal(comptime T: type) type {
if (self.num_digits > max_digits) {
self.num_digits = max_digits;
}
- self.decimal_point += @intCast(i32, num_new_digits);
+ self.decimal_point += @as(i32, @intCast(num_new_digits));
self.trim();
}
@@ -202,7 +202,7 @@ pub fn Decimal(comptime T: type) type {
}
}
- self.decimal_point -= @intCast(i32, read_index) - 1;
+ self.decimal_point -= @as(i32, @intCast(read_index)) - 1;
if (self.decimal_point < -decimal_point_range) {
self.num_digits = 0;
self.decimal_point = 0;
@@ -212,14 +212,14 @@ pub fn Decimal(comptime T: type) type {
const mask = math.shl(MantissaT, 1, shift) - 1;
while (read_index < self.num_digits) {
- const new_digit = @intCast(u8, math.shr(MantissaT, n, shift));
+ const new_digit = @as(u8, @intCast(math.shr(MantissaT, n, shift)));
n = (10 * (n & mask)) + self.digits[read_index];
read_index += 1;
self.digits[write_index] = new_digit;
write_index += 1;
}
while (n > 0) {
- const new_digit = @intCast(u8, math.shr(MantissaT, n, shift));
+ const new_digit = @as(u8, @intCast(math.shr(MantissaT, n, shift)));
n = 10 * (n & mask);
if (write_index < max_digits) {
self.digits[write_index] = new_digit;
@@ -268,7 +268,7 @@ pub fn Decimal(comptime T: type) type {
while (stream.scanDigit(10)) |digit| {
d.tryAddDigit(digit);
}
- d.decimal_point = @intCast(i32, marker) - @intCast(i32, stream.offsetTrue());
+ d.decimal_point = @as(i32, @intCast(marker)) - @as(i32, @intCast(stream.offsetTrue()));
}
if (d.num_digits != 0) {
// Ignore trailing zeros if any
@@ -284,9 +284,9 @@ pub fn Decimal(comptime T: type) type {
i -= 1;
if (i == 0) break;
}
- d.decimal_point += @intCast(i32, n_trailing_zeros);
+ d.decimal_point += @as(i32, @intCast(n_trailing_zeros));
d.num_digits -= n_trailing_zeros;
- d.decimal_point += @intCast(i32, d.num_digits);
+ d.decimal_point += @as(i32, @intCast(d.num_digits));
if (d.num_digits > max_digits) {
d.truncated = true;
d.num_digits = max_digits;
diff --git a/lib/std/fmt/parse_float/parse.zig b/lib/std/fmt/parse_float/parse.zig
index 9f6e75b29a..a31df31312 100644
--- a/lib/std/fmt/parse_float/parse.zig
+++ b/lib/std/fmt/parse_float/parse.zig
@@ -21,7 +21,7 @@ fn parse8Digits(v_: u64) u64 {
v = (v * 10) + (v >> 8); // will not overflow, fits in 63 bits
const v1 = (v & mask) *% mul1;
const v2 = ((v >> 16) & mask) *% mul2;
- return @as(u64, @truncate(u32, (v1 +% v2) >> 32));
+ return @as(u64, @as(u32, @truncate((v1 +% v2) >> 32)));
}
/// Parse digits until a non-digit character is found.
@@ -106,7 +106,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool
var mantissa: MantissaT = 0;
tryParseDigits(MantissaT, stream, &mantissa, info.base);
var int_end = stream.offsetTrue();
- var n_digits = @intCast(isize, stream.offsetTrue());
+ var n_digits = @as(isize, @intCast(stream.offsetTrue()));
// the base being 16 implies a 0x prefix, which shouldn't be included in the digit count
if (info.base == 16) n_digits -= 2;
@@ -117,8 +117,8 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool
const marker = stream.offsetTrue();
tryParseDigits(MantissaT, stream, &mantissa, info.base);
const n_after_dot = stream.offsetTrue() - marker;
- exponent = -@intCast(i64, n_after_dot);
- n_digits += @intCast(isize, n_after_dot);
+ exponent = -@as(i64, @intCast(n_after_dot));
+ n_digits += @as(isize, @intCast(n_after_dot));
}
// adjust required shift to offset mantissa for base-16 (2^4)
@@ -163,7 +163,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool
// '0' = '.' + 2
const next = stream.firstUnchecked();
if (next != '_') {
- n_digits -= @intCast(isize, next -| ('0' - 1));
+ n_digits -= @as(isize, @intCast(next -| ('0' - 1)));
} else {
stream.underscore_count += 1;
}
@@ -179,7 +179,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool
exponent = blk: {
if (mantissa >= min_n_digit_int(MantissaT, info.max_mantissa_digits)) {
// big int
- break :blk @intCast(i64, int_end) - @intCast(i64, stream.offsetTrue());
+ break :blk @as(i64, @intCast(int_end)) - @as(i64, @intCast(stream.offsetTrue()));
} else {
// the next byte must be present and be '.'
// We know this is true because we had more than 19
@@ -190,7 +190,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool
stream.advance(1);
var marker = stream.offsetTrue();
tryParseNDigits(MantissaT, stream, &mantissa, info.base, info.max_mantissa_digits);
- break :blk @intCast(i64, marker) - @intCast(i64, stream.offsetTrue());
+ break :blk @as(i64, @intCast(marker)) - @as(i64, @intCast(stream.offsetTrue()));
}
};
// add back the explicit part
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 8e828fd334..cb6ce2032e 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -373,13 +373,13 @@ pub const IterableDir = struct {
}
}
self.index = 0;
- self.end_index = @intCast(usize, rc);
+ self.end_index = @as(usize, @intCast(rc));
}
- const darwin_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]);
+ const darwin_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index]));
const next_index = self.index + darwin_entry.reclen();
self.index = next_index;
- const name = @ptrCast([*]u8, &darwin_entry.d_name)[0..darwin_entry.d_namlen];
+ const name = @as([*]u8, @ptrCast(&darwin_entry.d_name))[0..darwin_entry.d_namlen];
if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or (darwin_entry.d_ino == 0)) {
continue :start_over;
@@ -421,13 +421,13 @@ pub const IterableDir = struct {
}
if (rc == 0) return null;
self.index = 0;
- self.end_index = @intCast(usize, rc);
+ self.end_index = @as(usize, @intCast(rc));
}
- const entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]);
+ const entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index]));
const next_index = self.index + entry.reclen();
self.index = next_index;
- const name = mem.sliceTo(@ptrCast([*:0]u8, &entry.d_name), 0);
+ const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&entry.d_name)), 0);
if (mem.eql(u8, name, ".") or mem.eql(u8, name, ".."))
continue :start_over;
@@ -485,13 +485,13 @@ pub const IterableDir = struct {
}
if (rc == 0) return null;
self.index = 0;
- self.end_index = @intCast(usize, rc);
+ self.end_index = @as(usize, @intCast(rc));
}
- const bsd_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]);
+ const bsd_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index]));
const next_index = self.index + bsd_entry.reclen();
self.index = next_index;
- const name = @ptrCast([*]u8, &bsd_entry.d_name)[0..bsd_entry.d_namlen];
+ const name = @as([*]u8, @ptrCast(&bsd_entry.d_name))[0..bsd_entry.d_namlen];
const skip_zero_fileno = switch (builtin.os.tag) {
// d_fileno=0 is used to mark invalid entries or deleted files.
@@ -567,12 +567,12 @@ pub const IterableDir = struct {
}
}
self.index = 0;
- self.end_index = @intCast(usize, rc);
+ self.end_index = @as(usize, @intCast(rc));
}
- const haiku_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]);
+ const haiku_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index]));
const next_index = self.index + haiku_entry.reclen();
self.index = next_index;
- const name = mem.sliceTo(@ptrCast([*:0]u8, &haiku_entry.d_name), 0);
+ const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&haiku_entry.d_name)), 0);
if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or (haiku_entry.d_ino == 0)) {
continue :start_over;
@@ -672,11 +672,11 @@ pub const IterableDir = struct {
self.index = 0;
self.end_index = rc;
}
- const linux_entry = @ptrCast(*align(1) linux.dirent64, &self.buf[self.index]);
+ const linux_entry = @as(*align(1) linux.dirent64, @ptrCast(&self.buf[self.index]));
const next_index = self.index + linux_entry.reclen();
self.index = next_index;
- const name = mem.sliceTo(@ptrCast([*:0]u8, &linux_entry.d_name), 0);
+ const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&linux_entry.d_name)), 0);
// skip . and .. entries
if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) {
@@ -750,15 +750,14 @@ pub const IterableDir = struct {
}
}
- const aligned_ptr = @alignCast(@alignOf(w.FILE_BOTH_DIR_INFORMATION), &self.buf[self.index]);
- const dir_info = @ptrCast(*w.FILE_BOTH_DIR_INFORMATION, aligned_ptr);
+ const dir_info: *w.FILE_BOTH_DIR_INFORMATION = @ptrCast(@alignCast(&self.buf[self.index]));
if (dir_info.NextEntryOffset != 0) {
self.index += dir_info.NextEntryOffset;
} else {
self.index = self.buf.len;
}
- const name_utf16le = @ptrCast([*]u16, &dir_info.FileName)[0 .. dir_info.FileNameLength / 2];
+ const name_utf16le = @as([*]u16, @ptrCast(&dir_info.FileName))[0 .. dir_info.FileNameLength / 2];
if (mem.eql(u16, name_utf16le, &[_]u16{'.'}) or mem.eql(u16, name_utf16le, &[_]u16{ '.', '.' }))
continue;
@@ -835,7 +834,7 @@ pub const IterableDir = struct {
self.index = 0;
self.end_index = bufused;
}
- const entry = @ptrCast(*align(1) w.dirent_t, &self.buf[self.index]);
+ const entry = @as(*align(1) w.dirent_t, @ptrCast(&self.buf[self.index]));
const entry_size = @sizeOf(w.dirent_t);
const name_index = self.index + entry_size;
if (name_index + entry.d_namlen > self.end_index) {
@@ -1789,7 +1788,7 @@ pub const Dir = struct {
.fd = undefined,
};
- const path_len_bytes = @intCast(u16, mem.sliceTo(sub_path_w, 0).len * 2);
+ const path_len_bytes = @as(u16, @intCast(mem.sliceTo(sub_path_w, 0).len * 2));
var nt_name = w.UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index 0c6e8a24f7..e9448aa5d3 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -368,7 +368,7 @@ pub const File = struct {
return Stat{
.inode = st.ino,
- .size = @bitCast(u64, st.size),
+ .size = @as(u64, @bitCast(st.size)),
.mode = st.mode,
.kind = kind,
.atime = @as(i128, atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec,
@@ -398,7 +398,7 @@ pub const File = struct {
}
return Stat{
.inode = info.InternalInformation.IndexNumber,
- .size = @bitCast(u64, info.StandardInformation.EndOfFile),
+ .size = @as(u64, @bitCast(info.StandardInformation.EndOfFile)),
.mode = 0,
.kind = if (info.StandardInformation.Directory == 0) .file else .directory,
.atime = windows.fromSysTime(info.BasicInformation.LastAccessTime),
@@ -650,7 +650,7 @@ pub const File = struct {
/// Returns the size of the file
pub fn size(self: Self) u64 {
- return @intCast(u64, self.stat.size);
+ return @as(u64, @intCast(self.stat.size));
}
/// Returns a `Permissions` struct, representing the permissions on the file
@@ -855,7 +855,7 @@ pub const File = struct {
if (info.BasicInformation.FileAttributes & windows.FILE_ATTRIBUTE_REPARSE_POINT != 0) {
var reparse_buf: [windows.MAXIMUM_REPARSE_DATA_BUFFER_SIZE]u8 = undefined;
try windows.DeviceIoControl(self.handle, windows.FSCTL_GET_REPARSE_POINT, null, reparse_buf[0..]);
- const reparse_struct = @ptrCast(*const windows.REPARSE_DATA_BUFFER, @alignCast(@alignOf(windows.REPARSE_DATA_BUFFER), &reparse_buf[0]));
+ const reparse_struct: *const windows.REPARSE_DATA_BUFFER = @ptrCast(@alignCast(&reparse_buf[0]));
break :reparse_blk reparse_struct.ReparseTag;
}
break :reparse_blk 0;
@@ -864,7 +864,7 @@ pub const File = struct {
break :blk MetadataWindows{
.attributes = info.BasicInformation.FileAttributes,
.reparse_tag = reparse_tag,
- ._size = @bitCast(u64, info.StandardInformation.EndOfFile),
+ ._size = @as(u64, @bitCast(info.StandardInformation.EndOfFile)),
.access_time = windows.fromSysTime(info.BasicInformation.LastAccessTime),
.modified_time = windows.fromSysTime(info.BasicInformation.LastWriteTime),
.creation_time = windows.fromSysTime(info.BasicInformation.CreationTime),
@@ -881,16 +881,16 @@ pub const File = struct {
.NOSYS => {
const st = try os.fstat(self.handle);
- stx.mode = @intCast(u16, st.mode);
+ stx.mode = @as(u16, @intCast(st.mode));
// Hacky conversion from timespec to statx_timestamp
stx.atime = std.mem.zeroes(os.linux.statx_timestamp);
stx.atime.tv_sec = st.atim.tv_sec;
- stx.atime.tv_nsec = @intCast(u32, st.atim.tv_nsec); // Guaranteed to succeed (tv_nsec is always below 10^9)
+ stx.atime.tv_nsec = @as(u32, @intCast(st.atim.tv_nsec)); // Guaranteed to succeed (tv_nsec is always below 10^9)
stx.mtime = std.mem.zeroes(os.linux.statx_timestamp);
stx.mtime.tv_sec = st.mtim.tv_sec;
- stx.mtime.tv_nsec = @intCast(u32, st.mtim.tv_nsec);
+ stx.mtime.tv_nsec = @as(u32, @intCast(st.mtim.tv_nsec));
stx.mask = os.linux.STATX_BASIC_STATS | os.linux.STATX_MTIME;
},
@@ -1414,7 +1414,7 @@ pub const File = struct {
amt = try os.sendfile(out_fd, in_fd, offset + off, count - off, zero_iovec, trailers, flags);
off += amt;
}
- amt = @intCast(usize, off - count);
+ amt = @as(usize, @intCast(off - count));
}
var i: usize = 0;
while (i < trailers.len) {
diff --git a/lib/std/fs/get_app_data_dir.zig b/lib/std/fs/get_app_data_dir.zig
index 4f7ba9af62..2f599c3213 100644
--- a/lib/std/fs/get_app_data_dir.zig
+++ b/lib/std/fs/get_app_data_dir.zig
@@ -23,7 +23,7 @@ pub fn getAppDataDir(allocator: mem.Allocator, appname: []const u8) GetAppDataDi
&dir_path_ptr,
)) {
os.windows.S_OK => {
- defer os.windows.ole32.CoTaskMemFree(@ptrCast(*anyopaque, dir_path_ptr));
+ defer os.windows.ole32.CoTaskMemFree(@as(*anyopaque, @ptrCast(dir_path_ptr)));
const global_dir = unicode.utf16leToUtf8Alloc(allocator, mem.sliceTo(dir_path_ptr, 0)) catch |err| switch (err) {
error.UnexpectedSecondSurrogateHalf => return error.AppDataDirUnavailable,
error.ExpectedSecondSurrogateHalf => return error.AppDataDirUnavailable,
diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig
index 75c9b1df78..7ed7a75ea9 100644
--- a/lib/std/fs/wasi.zig
+++ b/lib/std/fs/wasi.zig
@@ -17,7 +17,7 @@ pub const Preopens = struct {
pub fn find(p: Preopens, name: []const u8) ?os.fd_t {
for (p.names, 0..) |elem_name, i| {
if (mem.eql(u8, elem_name, name)) {
- return @intCast(os.fd_t, i);
+ return @as(os.fd_t, @intCast(i));
}
}
return null;
@@ -34,7 +34,7 @@ pub fn preopensAlloc(gpa: Allocator) Allocator.Error!Preopens {
names.appendAssumeCapacity("stdout"); // 1
names.appendAssumeCapacity("stderr"); // 2
while (true) {
- const fd = @intCast(wasi.fd_t, names.items.len);
+ const fd = @as(wasi.fd_t, @intCast(names.items.len));
var prestat: prestat_t = undefined;
switch (wasi.fd_prestat_get(fd, &prestat)) {
.SUCCESS => {},
diff --git a/lib/std/fs/watch.zig b/lib/std/fs/watch.zig
index 0deaa86468..280c8888e6 100644
--- a/lib/std/fs/watch.zig
+++ b/lib/std/fs/watch.zig
@@ -279,7 +279,7 @@ pub fn Watch(comptime V: type) type {
while (!put.cancelled) {
kev.* = os.Kevent{
- .ident = @intCast(usize, fd),
+ .ident = @as(usize, @intCast(fd)),
.filter = os.EVFILT_VNODE,
.flags = os.EV_ADD | os.EV_ENABLE | os.EV_CLEAR | os.EV_ONESHOT |
os.NOTE_WRITE | os.NOTE_DELETE | os.NOTE_REVOKE,
@@ -487,14 +487,14 @@ pub fn Watch(comptime V: type) type {
var ptr: [*]u8 = &event_buf;
const end_ptr = ptr + bytes_transferred;
while (@intFromPtr(ptr) < @intFromPtr(end_ptr)) {
- const ev = @ptrCast(*const windows.FILE_NOTIFY_INFORMATION, ptr);
+ const ev = @as(*const windows.FILE_NOTIFY_INFORMATION, @ptrCast(ptr));
const emit = switch (ev.Action) {
windows.FILE_ACTION_REMOVED => WatchEventId.Delete,
windows.FILE_ACTION_MODIFIED => .CloseWrite,
else => null,
};
if (emit) |id| {
- const basename_ptr = @ptrCast([*]u16, ptr + @sizeOf(windows.FILE_NOTIFY_INFORMATION));
+ const basename_ptr = @as([*]u16, @ptrCast(ptr + @sizeOf(windows.FILE_NOTIFY_INFORMATION)));
const basename_utf16le = basename_ptr[0 .. ev.FileNameLength / 2];
var basename_data: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const basename = basename_data[0 .. std.unicode.utf16leToUtf8(&basename_data, basename_utf16le) catch unreachable];
@@ -510,7 +510,7 @@ pub fn Watch(comptime V: type) type {
}
if (ev.NextEntryOffset == 0) break;
- ptr = @alignCast(@alignOf(windows.FILE_NOTIFY_INFORMATION), ptr + ev.NextEntryOffset);
+ ptr = @alignCast(ptr + ev.NextEntryOffset);
}
}
}
@@ -586,10 +586,10 @@ pub fn Watch(comptime V: type) type {
var ptr: [*]u8 = &event_buf;
const end_ptr = ptr + bytes_read;
while (@intFromPtr(ptr) < @intFromPtr(end_ptr)) {
- const ev = @ptrCast(*const os.linux.inotify_event, ptr);
+ const ev = @as(*const os.linux.inotify_event, @ptrCast(ptr));
if (ev.mask & os.linux.IN_CLOSE_WRITE == os.linux.IN_CLOSE_WRITE) {
const basename_ptr = ptr + @sizeOf(os.linux.inotify_event);
- const basename = std.mem.span(@ptrCast([*:0]u8, basename_ptr));
+ const basename = std.mem.span(@as([*:0]u8, @ptrCast(basename_ptr)));
const dir = &self.os_data.wd_table.get(ev.wd).?;
if (dir.file_table.getEntry(basename)) |file_value| {
@@ -615,7 +615,7 @@ pub fn Watch(comptime V: type) type {
} else if (ev.mask & os.linux.IN_DELETE == os.linux.IN_DELETE) {
// File or directory was removed or deleted
const basename_ptr = ptr + @sizeOf(os.linux.inotify_event);
- const basename = std.mem.span(@ptrCast([*:0]u8, basename_ptr));
+ const basename = std.mem.span(@as([*:0]u8, @ptrCast(basename_ptr)));
const dir = &self.os_data.wd_table.get(ev.wd).?;
if (dir.file_table.getEntry(basename)) |file_value| {
@@ -628,7 +628,7 @@ pub fn Watch(comptime V: type) type {
}
}
- ptr = @alignCast(@alignOf(os.linux.inotify_event), ptr + @sizeOf(os.linux.inotify_event) + ev.len);
+ ptr = @alignCast(ptr + @sizeOf(os.linux.inotify_event) + ev.len);
}
}
}
diff --git a/lib/std/hash/adler.zig b/lib/std/hash/adler.zig
index 78f52b539b..200dc9aafe 100644
--- a/lib/std/hash/adler.zig
+++ b/lib/std/hash/adler.zig
@@ -118,7 +118,7 @@ test "adler32 very long with variation" {
var i: usize = 0;
while (i < result.len) : (i += 1) {
- result[i] = @truncate(u8, i);
+ result[i] = @as(u8, @truncate(i));
}
break :blk result;
diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig
index f33bd635fc..c5c6c585eb 100644
--- a/lib/std/hash/auto_hash.zig
+++ b/lib/std/hash/auto_hash.zig
@@ -92,10 +92,10 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
// Help the optimizer see that hashing an int is easy by inlining!
// TODO Check if the situation is better after #561 is resolved.
.Int => |int| switch (int.signedness) {
- .signed => hash(hasher, @bitCast(@Type(.{ .Int = .{
+ .signed => hash(hasher, @as(@Type(.{ .Int = .{
.bits = int.bits,
.signedness = .unsigned,
- } }), key), strat),
+ } }), @bitCast(key)), strat),
.unsigned => {
if (comptime meta.trait.hasUniqueRepresentation(Key)) {
@call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) });
diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig
index 62df89f0ae..699de5ceb4 100644
--- a/lib/std/hash/benchmark.zig
+++ b/lib/std/hash/benchmark.zig
@@ -122,13 +122,13 @@ pub fn benchmarkHash(comptime H: anytype, bytes: usize, allocator: std.mem.Alloc
for (0..blocks_count) |i| {
h.update(blocks[i * alignment ..][0..block_size]);
}
- const final = if (H.has_crypto_api) @truncate(u64, h.finalInt()) else h.final();
+ const final = if (H.has_crypto_api) @as(u64, @truncate(h.finalInt())) else h.final();
std.mem.doNotOptimizeAway(final);
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s));
return Result{
.hash = final,
@@ -152,7 +152,7 @@ pub fn benchmarkHashSmallKeys(comptime H: anytype, key_size: usize, bytes: usize
const final = blk: {
if (H.init_u8s) |init| {
if (H.has_crypto_api) {
- break :blk @truncate(u64, H.ty.toInt(small_key, init[0..H.ty.key_length]));
+ break :blk @as(u64, @truncate(H.ty.toInt(small_key, init[0..H.ty.key_length])));
} else {
break :blk H.ty.hash(init, small_key);
}
@@ -166,8 +166,8 @@ pub fn benchmarkHashSmallKeys(comptime H: anytype, key_size: usize, bytes: usize
}
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s));
std.mem.doNotOptimizeAway(sum);
diff --git a/lib/std/hash/cityhash.zig b/lib/std/hash/cityhash.zig
index d0884b135f..8040c99b84 100644
--- a/lib/std/hash/cityhash.zig
+++ b/lib/std/hash/cityhash.zig
@@ -2,7 +2,7 @@ const std = @import("std");
inline fn offsetPtr(ptr: [*]const u8, offset: usize) [*]const u8 {
// ptr + offset doesn't work at comptime so we need this instead.
- return @ptrCast([*]const u8, &ptr[offset]);
+ return @as([*]const u8, @ptrCast(&ptr[offset]));
}
fn fetch32(ptr: [*]const u8, offset: usize) u32 {
@@ -49,18 +49,18 @@ pub const CityHash32 = struct {
}
fn hash32Len0To4(str: []const u8) u32 {
- const len: u32 = @truncate(u32, str.len);
+ const len: u32 = @as(u32, @truncate(str.len));
var b: u32 = 0;
var c: u32 = 9;
for (str) |v| {
- b = b *% c1 +% @bitCast(u32, @intCast(i32, @bitCast(i8, v)));
+ b = b *% c1 +% @as(u32, @bitCast(@as(i32, @intCast(@as(i8, @bitCast(v))))));
c ^= b;
}
return fmix(mur(b, mur(len, c)));
}
fn hash32Len5To12(str: []const u8) u32 {
- var a: u32 = @truncate(u32, str.len);
+ var a: u32 = @as(u32, @truncate(str.len));
var b: u32 = a *% 5;
var c: u32 = 9;
const d: u32 = b;
@@ -73,7 +73,7 @@ pub const CityHash32 = struct {
}
fn hash32Len13To24(str: []const u8) u32 {
- const len: u32 = @truncate(u32, str.len);
+ const len: u32 = @as(u32, @truncate(str.len));
const a: u32 = fetch32(str.ptr, (str.len >> 1) - 4);
const b: u32 = fetch32(str.ptr, 4);
const c: u32 = fetch32(str.ptr, str.len - 8);
@@ -95,7 +95,7 @@ pub const CityHash32 = struct {
}
}
- const len: u32 = @truncate(u32, str.len);
+ const len: u32 = @as(u32, @truncate(str.len));
var h: u32 = len;
var g: u32 = c1 *% len;
var f: u32 = g;
@@ -220,9 +220,9 @@ pub const CityHash64 = struct {
const a: u8 = str[0];
const b: u8 = str[str.len >> 1];
const c: u8 = str[str.len - 1];
- const y: u32 = @intCast(u32, a) +% (@intCast(u32, b) << 8);
- const z: u32 = @truncate(u32, str.len) +% (@intCast(u32, c) << 2);
- return shiftmix(@intCast(u64, y) *% k2 ^ @intCast(u64, z) *% k0) *% k2;
+ const y: u32 = @as(u32, @intCast(a)) +% (@as(u32, @intCast(b)) << 8);
+ const z: u32 = @as(u32, @truncate(str.len)) +% (@as(u32, @intCast(c)) << 2);
+ return shiftmix(@as(u64, @intCast(y)) *% k2 ^ @as(u64, @intCast(z)) *% k0) *% k2;
}
return k2;
}
@@ -309,7 +309,7 @@ pub const CityHash64 = struct {
var w: WeakPair = weakHashLen32WithSeeds(offsetPtr(str.ptr, str.len - 32), y +% k1, x);
x = x *% k1 +% fetch64(str.ptr, 0);
- len = (len - 1) & ~@intCast(u64, 63);
+ len = (len - 1) & ~@as(u64, @intCast(63));
var ptr: [*]const u8 = str.ptr;
while (true) {
@@ -353,19 +353,19 @@ fn SMHasherTest(comptime hash_fn: anytype) u32 {
var i: u32 = 0;
while (i < 256) : (i += 1) {
- key[i] = @intCast(u8, i);
+ key[i] = @as(u8, @intCast(i));
var h: HashResult = hash_fn(key[0..i], 256 - i);
// comptime can't really do reinterpret casting yet,
// so we need to write the bytes manually.
for (hashes_bytes[i * @sizeOf(HashResult) ..][0..@sizeOf(HashResult)]) |*byte| {
- byte.* = @truncate(u8, h);
+ byte.* = @as(u8, @truncate(h));
h = h >> 8;
}
}
- return @truncate(u32, hash_fn(&hashes_bytes, 0));
+ return @as(u32, @truncate(hash_fn(&hashes_bytes, 0)));
}
fn CityHash32hashIgnoreSeed(str: []const u8, seed: u32) u32 {
diff --git a/lib/std/hash/crc.zig b/lib/std/hash/crc.zig
index da250af1bf..3e1e458ffc 100644
--- a/lib/std/hash/crc.zig
+++ b/lib/std/hash/crc.zig
@@ -65,7 +65,7 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
}
inline fn tableEntry(index: I) I {
- return lookup_table[@intCast(u8, index & 0xFF)];
+ return lookup_table[@as(u8, @intCast(index & 0xFF))];
}
pub fn update(self: *Self, bytes: []const u8) void {
@@ -95,7 +95,7 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
if (!algorithm.reflect_output) {
c >>= @bitSizeOf(I) - @bitSizeOf(W);
}
- return @intCast(W, c ^ algorithm.xor_output);
+ return @as(W, @intCast(c ^ algorithm.xor_output));
}
pub fn hash(bytes: []const u8) W {
@@ -125,7 +125,7 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type {
var tables: [8][256]u32 = undefined;
for (&tables[0], 0..) |*e, i| {
- var crc = @intCast(u32, i);
+ var crc = @as(u32, @intCast(i));
var j: usize = 0;
while (j < 8) : (j += 1) {
if (crc & 1 == 1) {
@@ -142,7 +142,7 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type {
var crc = tables[0][i];
var j: usize = 1;
while (j < 8) : (j += 1) {
- const index = @truncate(u8, crc);
+ const index = @as(u8, @truncate(crc));
crc = tables[0][index] ^ (crc >> 8);
tables[j][i] = crc;
}
@@ -170,14 +170,14 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type {
lookup_tables[1][p[6]] ^
lookup_tables[2][p[5]] ^
lookup_tables[3][p[4]] ^
- lookup_tables[4][@truncate(u8, self.crc >> 24)] ^
- lookup_tables[5][@truncate(u8, self.crc >> 16)] ^
- lookup_tables[6][@truncate(u8, self.crc >> 8)] ^
- lookup_tables[7][@truncate(u8, self.crc >> 0)];
+ lookup_tables[4][@as(u8, @truncate(self.crc >> 24))] ^
+ lookup_tables[5][@as(u8, @truncate(self.crc >> 16))] ^
+ lookup_tables[6][@as(u8, @truncate(self.crc >> 8))] ^
+ lookup_tables[7][@as(u8, @truncate(self.crc >> 0))];
}
while (i < input.len) : (i += 1) {
- const index = @truncate(u8, self.crc) ^ input[i];
+ const index = @as(u8, @truncate(self.crc)) ^ input[i];
self.crc = (self.crc >> 8) ^ lookup_tables[0][index];
}
}
@@ -218,7 +218,7 @@ pub fn Crc32SmallWithPoly(comptime poly: Polynomial) type {
var table: [16]u32 = undefined;
for (&table, 0..) |*e, i| {
- var crc = @intCast(u32, i * 16);
+ var crc = @as(u32, @intCast(i * 16));
var j: usize = 0;
while (j < 8) : (j += 1) {
if (crc & 1 == 1) {
@@ -241,8 +241,8 @@ pub fn Crc32SmallWithPoly(comptime poly: Polynomial) type {
pub fn update(self: *Self, input: []const u8) void {
for (input) |b| {
- self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 0))] ^ (self.crc >> 4);
- self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 4))] ^ (self.crc >> 4);
+ self.crc = lookup_table[@as(u4, @truncate(self.crc ^ (b >> 0)))] ^ (self.crc >> 4);
+ self.crc = lookup_table[@as(u4, @truncate(self.crc ^ (b >> 4)))] ^ (self.crc >> 4);
}
}
diff --git a/lib/std/hash/murmur.zig b/lib/std/hash/murmur.zig
index 753439a4cf..bd433874ed 100644
--- a/lib/std/hash/murmur.zig
+++ b/lib/std/hash/murmur.zig
@@ -14,9 +14,9 @@ pub const Murmur2_32 = struct {
pub fn hashWithSeed(str: []const u8, seed: u32) u32 {
const m: u32 = 0x5bd1e995;
- const len = @truncate(u32, str.len);
+ const len = @as(u32, @truncate(str.len));
var h1: u32 = seed ^ len;
- for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| {
+ for (@as([*]align(1) const u32, @ptrCast(str.ptr))[0..(len >> 2)]) |v| {
var k1: u32 = v;
if (native_endian == .Big)
k1 = @byteSwap(k1);
@@ -29,13 +29,13 @@ pub const Murmur2_32 = struct {
const offset = len & 0xfffffffc;
const rest = len & 3;
if (rest >= 3) {
- h1 ^= @intCast(u32, str[offset + 2]) << 16;
+ h1 ^= @as(u32, @intCast(str[offset + 2])) << 16;
}
if (rest >= 2) {
- h1 ^= @intCast(u32, str[offset + 1]) << 8;
+ h1 ^= @as(u32, @intCast(str[offset + 1])) << 8;
}
if (rest >= 1) {
- h1 ^= @intCast(u32, str[offset + 0]);
+ h1 ^= @as(u32, @intCast(str[offset + 0]));
h1 *%= m;
}
h1 ^= h1 >> 13;
@@ -73,12 +73,12 @@ pub const Murmur2_32 = struct {
const len: u32 = 8;
var h1: u32 = seed ^ len;
var k1: u32 = undefined;
- k1 = @truncate(u32, v) *% m;
+ k1 = @as(u32, @truncate(v)) *% m;
k1 ^= k1 >> 24;
k1 *%= m;
h1 *%= m;
h1 ^= k1;
- k1 = @truncate(u32, v >> 32) *% m;
+ k1 = @as(u32, @truncate(v >> 32)) *% m;
k1 ^= k1 >> 24;
k1 *%= m;
h1 *%= m;
@@ -100,7 +100,7 @@ pub const Murmur2_64 = struct {
pub fn hashWithSeed(str: []const u8, seed: u64) u64 {
const m: u64 = 0xc6a4a7935bd1e995;
var h1: u64 = seed ^ (@as(u64, str.len) *% m);
- for (@ptrCast([*]align(1) const u64, str.ptr)[0 .. str.len / 8]) |v| {
+ for (@as([*]align(1) const u64, @ptrCast(str.ptr))[0 .. str.len / 8]) |v| {
var k1: u64 = v;
if (native_endian == .Big)
k1 = @byteSwap(k1);
@@ -114,7 +114,7 @@ pub const Murmur2_64 = struct {
const offset = str.len - rest;
if (rest > 0) {
var k1: u64 = 0;
- @memcpy(@ptrCast([*]u8, &k1)[0..rest], str[offset..]);
+ @memcpy(@as([*]u8, @ptrCast(&k1))[0..rest], str[offset..]);
if (native_endian == .Big)
k1 = @byteSwap(k1);
h1 ^= k1;
@@ -178,9 +178,9 @@ pub const Murmur3_32 = struct {
pub fn hashWithSeed(str: []const u8, seed: u32) u32 {
const c1: u32 = 0xcc9e2d51;
const c2: u32 = 0x1b873593;
- const len = @truncate(u32, str.len);
+ const len = @as(u32, @truncate(str.len));
var h1: u32 = seed;
- for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| {
+ for (@as([*]align(1) const u32, @ptrCast(str.ptr))[0..(len >> 2)]) |v| {
var k1: u32 = v;
if (native_endian == .Big)
k1 = @byteSwap(k1);
@@ -197,13 +197,13 @@ pub const Murmur3_32 = struct {
const offset = len & 0xfffffffc;
const rest = len & 3;
if (rest == 3) {
- k1 ^= @intCast(u32, str[offset + 2]) << 16;
+ k1 ^= @as(u32, @intCast(str[offset + 2])) << 16;
}
if (rest >= 2) {
- k1 ^= @intCast(u32, str[offset + 1]) << 8;
+ k1 ^= @as(u32, @intCast(str[offset + 1])) << 8;
}
if (rest >= 1) {
- k1 ^= @intCast(u32, str[offset + 0]);
+ k1 ^= @as(u32, @intCast(str[offset + 0]));
k1 *%= c1;
k1 = rotl32(k1, 15);
k1 *%= c2;
@@ -255,14 +255,14 @@ pub const Murmur3_32 = struct {
const len: u32 = 8;
var h1: u32 = seed;
var k1: u32 = undefined;
- k1 = @truncate(u32, v) *% c1;
+ k1 = @as(u32, @truncate(v)) *% c1;
k1 = rotl32(k1, 15);
k1 *%= c2;
h1 ^= k1;
h1 = rotl32(h1, 13);
h1 *%= 5;
h1 +%= 0xe6546b64;
- k1 = @truncate(u32, v >> 32) *% c1;
+ k1 = @as(u32, @truncate(v >> 32)) *% c1;
k1 = rotl32(k1, 15);
k1 *%= c2;
h1 ^= k1;
@@ -286,15 +286,15 @@ fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
var i: u32 = 0;
while (i < 256) : (i += 1) {
- key[i] = @truncate(u8, i);
+ key[i] = @as(u8, @truncate(i));
var h = hash_fn(key[0..i], 256 - i);
if (native_endian == .Big)
h = @byteSwap(h);
- @memcpy(hashes[i * hashbytes ..][0..hashbytes], @ptrCast([*]u8, &h));
+ @memcpy(hashes[i * hashbytes ..][0..hashbytes], @as([*]u8, @ptrCast(&h)));
}
- return @truncate(u32, hash_fn(&hashes, 0));
+ return @as(u32, @truncate(hash_fn(&hashes, 0)));
}
test "murmur2_32" {
@@ -307,8 +307,8 @@ test "murmur2_32" {
v0le = @byteSwap(v0le);
v1le = @byteSwap(v1le);
}
- try testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_32.hashUint32(v0));
- try testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_32.hashUint64(v1));
+ try testing.expectEqual(Murmur2_32.hash(@as([*]u8, @ptrCast(&v0le))[0..4]), Murmur2_32.hashUint32(v0));
+ try testing.expectEqual(Murmur2_32.hash(@as([*]u8, @ptrCast(&v1le))[0..8]), Murmur2_32.hashUint64(v1));
}
test "murmur2_64" {
@@ -321,8 +321,8 @@ test "murmur2_64" {
v0le = @byteSwap(v0le);
v1le = @byteSwap(v1le);
}
- try testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_64.hashUint32(v0));
- try testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_64.hashUint64(v1));
+ try testing.expectEqual(Murmur2_64.hash(@as([*]u8, @ptrCast(&v0le))[0..4]), Murmur2_64.hashUint32(v0));
+ try testing.expectEqual(Murmur2_64.hash(@as([*]u8, @ptrCast(&v1le))[0..8]), Murmur2_64.hashUint64(v1));
}
test "murmur3_32" {
@@ -335,6 +335,6 @@ test "murmur3_32" {
v0le = @byteSwap(v0le);
v1le = @byteSwap(v1le);
}
- try testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur3_32.hashUint32(v0));
- try testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur3_32.hashUint64(v1));
+ try testing.expectEqual(Murmur3_32.hash(@as([*]u8, @ptrCast(&v0le))[0..4]), Murmur3_32.hashUint32(v0));
+ try testing.expectEqual(Murmur3_32.hash(@as([*]u8, @ptrCast(&v1le))[0..8]), Murmur3_32.hashUint64(v1));
}
diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig
index 3573745444..aced3be66e 100644
--- a/lib/std/hash/wyhash.zig
+++ b/lib/std/hash/wyhash.zig
@@ -132,8 +132,8 @@ pub const Wyhash = struct {
inline fn mum(a: *u64, b: *u64) void {
const x = @as(u128, a.*) *% b.*;
- a.* = @truncate(u64, x);
- b.* = @truncate(u64, x >> 64);
+ a.* = @as(u64, @truncate(x));
+ b.* = @as(u64, @truncate(x >> 64));
}
inline fn mix(a_: u64, b_: u64) u64 {
@@ -252,7 +252,7 @@ test "test ensure idempotent final call" {
test "iterative non-divisible update" {
var buf: [8192]u8 = undefined;
for (&buf, 0..) |*e, i| {
- e.* = @truncate(u8, i);
+ e.* = @as(u8, @truncate(i));
}
const seed = 0x128dad08f;
diff --git a/lib/std/hash/xxhash.zig b/lib/std/hash/xxhash.zig
index 3122406488..f1d1da429d 100644
--- a/lib/std/hash/xxhash.zig
+++ b/lib/std/hash/xxhash.zig
@@ -212,7 +212,7 @@ pub const XxHash32 = struct {
rotl(u32, self.acc3, 12) +% rotl(u32, self.acc4, 18);
}
- acc = acc +% @intCast(u32, self.byte_count) +% @intCast(u32, self.buf_len);
+ acc = acc +% @as(u32, @intCast(self.byte_count)) +% @as(u32, @intCast(self.buf_len));
var pos: usize = 0;
while (pos + 4 <= self.buf_len) : (pos += 4) {
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index 4f1639cd60..0afe6f9643 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -101,7 +101,7 @@ pub const StringIndexContext = struct {
}
pub fn hash(self: @This(), x: u32) u64 {
- const x_slice = mem.sliceTo(@ptrCast([*:0]const u8, self.bytes.items.ptr) + x, 0);
+ const x_slice = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.bytes.items.ptr)) + x, 0);
return hashString(x_slice);
}
};
@@ -110,7 +110,7 @@ pub const StringIndexAdapter = struct {
bytes: *std.ArrayListUnmanaged(u8),
pub fn eql(self: @This(), a_slice: []const u8, b: u32) bool {
- const b_slice = mem.sliceTo(@ptrCast([*:0]const u8, self.bytes.items.ptr) + b, 0);
+ const b_slice = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.bytes.items.ptr)) + b, 0);
return mem.eql(u8, a_slice, b_slice);
}
@@ -777,25 +777,25 @@ pub fn HashMapUnmanaged(
fingerprint: FingerPrint = free,
used: u1 = 0,
- const slot_free = @bitCast(u8, Metadata{ .fingerprint = free });
- const slot_tombstone = @bitCast(u8, Metadata{ .fingerprint = tombstone });
+ const slot_free = @as(u8, @bitCast(Metadata{ .fingerprint = free }));
+ const slot_tombstone = @as(u8, @bitCast(Metadata{ .fingerprint = tombstone }));
pub fn isUsed(self: Metadata) bool {
return self.used == 1;
}
pub fn isTombstone(self: Metadata) bool {
- return @bitCast(u8, self) == slot_tombstone;
+ return @as(u8, @bitCast(self)) == slot_tombstone;
}
pub fn isFree(self: Metadata) bool {
- return @bitCast(u8, self) == slot_free;
+ return @as(u8, @bitCast(self)) == slot_free;
}
pub fn takeFingerprint(hash: Hash) FingerPrint {
const hash_bits = @typeInfo(Hash).Int.bits;
const fp_bits = @typeInfo(FingerPrint).Int.bits;
- return @truncate(FingerPrint, hash >> (hash_bits - fp_bits));
+ return @as(FingerPrint, @truncate(hash >> (hash_bits - fp_bits)));
}
pub fn fill(self: *Metadata, fp: FingerPrint) void {
@@ -899,7 +899,7 @@ pub fn HashMapUnmanaged(
}
fn capacityForSize(size: Size) Size {
- var new_cap = @truncate(u32, (@as(u64, size) * 100) / max_load_percentage + 1);
+ var new_cap = @as(u32, @truncate((@as(u64, size) * 100) / max_load_percentage + 1));
new_cap = math.ceilPowerOfTwo(u32, new_cap) catch unreachable;
return new_cap;
}
@@ -927,7 +927,7 @@ pub fn HashMapUnmanaged(
if (self.metadata) |_| {
self.initMetadatas();
self.size = 0;
- self.available = @truncate(u32, (self.capacity() * max_load_percentage) / 100);
+ self.available = @as(u32, @truncate((self.capacity() * max_load_percentage) / 100));
}
}
@@ -942,7 +942,7 @@ pub fn HashMapUnmanaged(
}
fn header(self: *const Self) *Header {
- return @ptrCast(*Header, @ptrCast([*]Header, @alignCast(@alignOf(Header), self.metadata.?)) - 1);
+ return @ptrCast(@as([*]Header, @ptrCast(@alignCast(self.metadata.?))) - 1);
}
fn keys(self: *const Self) [*]K {
@@ -1033,7 +1033,7 @@ pub fn HashMapUnmanaged(
const hash = ctx.hash(key);
const mask = self.capacity() - 1;
- var idx = @truncate(usize, hash & mask);
+ var idx = @as(usize, @truncate(hash & mask));
var metadata = self.metadata.? + idx;
while (metadata[0].isUsed()) {
@@ -1147,7 +1147,7 @@ pub fn HashMapUnmanaged(
const fingerprint = Metadata.takeFingerprint(hash);
// Don't loop indefinitely when there are no empty slots.
var limit = self.capacity();
- var idx = @truncate(usize, hash & mask);
+ var idx = @as(usize, @truncate(hash & mask));
var metadata = self.metadata.? + idx;
while (!metadata[0].isFree() and limit != 0) {
@@ -1325,7 +1325,7 @@ pub fn HashMapUnmanaged(
const mask = self.capacity() - 1;
const fingerprint = Metadata.takeFingerprint(hash);
var limit = self.capacity();
- var idx = @truncate(usize, hash & mask);
+ var idx = @as(usize, @truncate(hash & mask));
var first_tombstone_idx: usize = self.capacity(); // invalid index
var metadata = self.metadata.? + idx;
@@ -1450,7 +1450,7 @@ pub fn HashMapUnmanaged(
}
fn initMetadatas(self: *Self) void {
- @memset(@ptrCast([*]u8, self.metadata.?)[0 .. @sizeOf(Metadata) * self.capacity()], 0);
+ @memset(@as([*]u8, @ptrCast(self.metadata.?))[0 .. @sizeOf(Metadata) * self.capacity()], 0);
}
// This counts the number of occupied slots (not counting tombstones), which is
@@ -1458,7 +1458,7 @@ pub fn HashMapUnmanaged(
fn load(self: *const Self) Size {
const max_load = (self.capacity() * max_load_percentage) / 100;
assert(max_load >= self.available);
- return @truncate(Size, max_load - self.available);
+ return @as(Size, @truncate(max_load - self.available));
}
fn growIfNeeded(self: *Self, allocator: Allocator, new_count: Size, ctx: Context) Allocator.Error!void {
@@ -1480,7 +1480,7 @@ pub fn HashMapUnmanaged(
const new_cap = capacityForSize(self.size);
try other.allocate(allocator, new_cap);
other.initMetadatas();
- other.available = @truncate(u32, (new_cap * max_load_percentage) / 100);
+ other.available = @as(u32, @truncate((new_cap * max_load_percentage) / 100));
var i: Size = 0;
var metadata = self.metadata.?;
@@ -1515,7 +1515,7 @@ pub fn HashMapUnmanaged(
defer map.deinit(allocator);
try map.allocate(allocator, new_cap);
map.initMetadatas();
- map.available = @truncate(u32, (new_cap * max_load_percentage) / 100);
+ map.available = @as(u32, @truncate((new_cap * max_load_percentage) / 100));
if (self.size != 0) {
const old_capacity = self.capacity();
@@ -1558,15 +1558,15 @@ pub fn HashMapUnmanaged(
const metadata = ptr + @sizeOf(Header);
- const hdr = @ptrFromInt(*Header, ptr);
+ const hdr = @as(*Header, @ptrFromInt(ptr));
if (@sizeOf([*]V) != 0) {
- hdr.values = @ptrFromInt([*]V, ptr + vals_start);
+ hdr.values = @as([*]V, @ptrFromInt(ptr + vals_start));
}
if (@sizeOf([*]K) != 0) {
- hdr.keys = @ptrFromInt([*]K, ptr + keys_start);
+ hdr.keys = @as([*]K, @ptrFromInt(ptr + keys_start));
}
hdr.capacity = new_capacity;
- self.metadata = @ptrFromInt([*]Metadata, metadata);
+ self.metadata = @as([*]Metadata, @ptrFromInt(metadata));
}
fn deallocate(self: *Self, allocator: Allocator) void {
@@ -1589,7 +1589,7 @@ pub fn HashMapUnmanaged(
const total_size = std.mem.alignForward(usize, vals_end, max_align);
- const slice = @ptrFromInt([*]align(max_align) u8, @intFromPtr(self.header()))[0..total_size];
+ const slice = @as([*]align(max_align) u8, @ptrFromInt(@intFromPtr(self.header())))[0..total_size];
allocator.free(slice);
self.metadata = null;
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index fd5b0754fe..d04f959345 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -61,11 +61,11 @@ const CAllocator = struct {
pub const supports_posix_memalign = @hasDecl(c, "posix_memalign");
fn getHeader(ptr: [*]u8) *[*]u8 {
- return @ptrFromInt(*[*]u8, @intFromPtr(ptr) - @sizeOf(usize));
+ return @as(*[*]u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize)));
}
fn alignedAlloc(len: usize, log2_align: u8) ?[*]u8 {
- const alignment = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_align);
+ const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
if (supports_posix_memalign) {
// The posix_memalign only accepts alignment values that are a
// multiple of the pointer size
@@ -75,13 +75,13 @@ const CAllocator = struct {
if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0)
return null;
- return @ptrCast([*]u8, aligned_ptr);
+ return @as([*]u8, @ptrCast(aligned_ptr));
}
// Thin wrapper around regular malloc, overallocate to account for
// alignment padding and store the original malloc()'ed pointer before
// the aligned address.
- var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null);
+ var unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null));
const unaligned_addr = @intFromPtr(unaligned_ptr);
const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment);
var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
@@ -195,7 +195,7 @@ fn rawCAlloc(
// type in C that is size 8 and has 16 byte alignment, so the alignment may
// be 8 bytes rather than 16. Similarly if only 1 byte is requested, malloc
// is allowed to return a 1-byte aligned pointer.
- return @ptrCast(?[*]u8, c.malloc(len));
+ return @as(?[*]u8, @ptrCast(c.malloc(len)));
}
fn rawCResize(
@@ -283,7 +283,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
}
fn getRecordPtr(buf: []u8) *align(1) usize {
- return @ptrFromInt(*align(1) usize, @intFromPtr(buf.ptr) + buf.len);
+ return @as(*align(1) usize, @ptrFromInt(@intFromPtr(buf.ptr) + buf.len));
}
fn alloc(
@@ -293,9 +293,9 @@ pub const HeapAllocator = switch (builtin.os.tag) {
return_address: usize,
) ?[*]u8 {
_ = return_address;
- const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx));
+ const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
- const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
+ const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
const amt = n + ptr_align - 1 + @sizeOf(usize);
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
const heap_handle = optional_heap_handle orelse blk: {
@@ -308,7 +308,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null;
const root_addr = @intFromPtr(ptr);
const aligned_addr = mem.alignForward(usize, root_addr, ptr_align);
- const buf = @ptrFromInt([*]u8, aligned_addr)[0..n];
+ const buf = @as([*]u8, @ptrFromInt(aligned_addr))[0..n];
getRecordPtr(buf).* = root_addr;
return buf.ptr;
}
@@ -322,7 +322,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
) bool {
_ = log2_buf_align;
_ = return_address;
- const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx));
+ const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
const root_addr = getRecordPtr(buf).*;
const align_offset = @intFromPtr(buf.ptr) - root_addr;
@@ -330,10 +330,10 @@ pub const HeapAllocator = switch (builtin.os.tag) {
const new_ptr = os.windows.kernel32.HeapReAlloc(
self.heap_handle.?,
os.windows.HEAP_REALLOC_IN_PLACE_ONLY,
- @ptrFromInt(*anyopaque, root_addr),
+ @as(*anyopaque, @ptrFromInt(root_addr)),
amt,
) orelse return false;
- assert(new_ptr == @ptrFromInt(*anyopaque, root_addr));
+ assert(new_ptr == @as(*anyopaque, @ptrFromInt(root_addr)));
getRecordPtr(buf.ptr[0..new_size]).* = root_addr;
return true;
}
@@ -346,8 +346,8 @@ pub const HeapAllocator = switch (builtin.os.tag) {
) void {
_ = log2_buf_align;
_ = return_address;
- const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx));
- os.windows.HeapFree(self.heap_handle.?, 0, @ptrFromInt(*anyopaque, getRecordPtr(buf).*));
+ const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
+ os.windows.HeapFree(self.heap_handle.?, 0, @as(*anyopaque, @ptrFromInt(getRecordPtr(buf).*)));
}
},
else => @compileError("Unsupported OS"),
@@ -415,9 +415,9 @@ pub const FixedBufferAllocator = struct {
}
fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
- const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
+ const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
_ = ra;
- const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
+ const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null;
const adjusted_index = self.end_index + adjust_off;
const new_end_index = adjusted_index + n;
@@ -433,7 +433,7 @@ pub const FixedBufferAllocator = struct {
new_size: usize,
return_address: usize,
) bool {
- const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
+ const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
_ = log2_buf_align;
_ = return_address;
assert(self.ownsSlice(buf)); // sanity check
@@ -462,7 +462,7 @@ pub const FixedBufferAllocator = struct {
log2_buf_align: u8,
return_address: usize,
) void {
- const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
+ const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
_ = log2_buf_align;
_ = return_address;
assert(self.ownsSlice(buf)); // sanity check
@@ -473,9 +473,9 @@ pub const FixedBufferAllocator = struct {
}
fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
- const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
+ const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
_ = ra;
- const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
+ const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
while (true) {
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;
@@ -537,7 +537,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
log2_ptr_align: u8,
ra: usize,
) ?[*]u8 {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, log2_ptr_align, ra) orelse
return self.fallback_allocator.rawAlloc(len, log2_ptr_align, ra);
}
@@ -549,7 +549,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
new_len: usize,
ra: usize,
) bool {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, log2_buf_align, new_len, ra);
} else {
@@ -563,7 +563,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
log2_buf_align: u8,
ra: usize,
) void {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, log2_buf_align, ra);
} else {
@@ -728,14 +728,14 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void {
try testing.expect(slice.len == 100);
for (slice, 0..) |*item, i| {
item.* = try allocator.create(i32);
- item.*.* = @intCast(i32, i);
+ item.*.* = @as(i32, @intCast(i));
}
slice = try allocator.realloc(slice, 20000);
try testing.expect(slice.len == 20000);
for (slice[0..100], 0..) |item, i| {
- try testing.expect(item.* == @intCast(i32, i));
+ try testing.expect(item.* == @as(i32, @intCast(i)));
allocator.destroy(item);
}
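
The allocator hunks above are all the same mechanical rewrite: the destination type moves out of the cast builtin and into the result location (a typed const or field), while casts with no result location get wrapped in @as. A minimal standalone sketch of the pattern, using a hypothetical Foo context type rather than anything from this diff:

    const std = @import("std");

    const Foo = struct { count: usize = 0 };

    fn allocLike(ctx: *anyopaque, log2_ptr_align: u8) usize {
        // Old: const self = @ptrCast(*Foo, @alignCast(@alignOf(Foo), ctx));
        // New: both casts take their result type from the annotated destination.
        const self: *Foo = @ptrCast(@alignCast(ctx));
        self.count += 1;
        // The shift amount has no annotated destination, so the cast keeps its
        // @as wrapper, as in the hunks above.
        return @as(usize, 1) << @as(std.math.Log2Int(usize), @intCast(log2_ptr_align));
    }

    test "inferred-destination pointer and int casts" {
        var foo = Foo{};
        try std.testing.expectEqual(@as(usize, 8), allocLike(&foo, 3));
        try std.testing.expectEqual(@as(usize, 1), foo.count);
    }
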
diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig
index 12a0bdcf30..3e92aa5eec 100644
--- a/lib/std/heap/PageAllocator.zig
+++ b/lib/std/heap/PageAllocator.zig
@@ -27,7 +27,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
w.MEM_COMMIT | w.MEM_RESERVE,
w.PAGE_READWRITE,
) catch return null;
- return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, addr));
+ return @ptrCast(addr);
}
const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .Unordered);
@@ -40,7 +40,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
0,
) catch return null;
assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size));
- const new_hint = @alignCast(mem.page_size, slice.ptr + aligned_len);
+ const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len);
_ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
return slice.ptr;
}
@@ -66,7 +66,7 @@ fn resize(
// For shrinking that is not releasing, we will only
// decommit the pages not needed anymore.
w.VirtualFree(
- @ptrFromInt(*anyopaque, new_addr_end),
+ @as(*anyopaque, @ptrFromInt(new_addr_end)),
old_addr_end - new_addr_end,
w.MEM_DECOMMIT,
);
@@ -85,9 +85,9 @@ fn resize(
return true;
if (new_size_aligned < buf_aligned_len) {
- const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned);
+ const ptr = buf_unaligned.ptr + new_size_aligned;
// TODO: if the next_mmap_addr_hint is within the unmapped range, update it
- os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
+ os.munmap(@alignCast(ptr[0 .. buf_aligned_len - new_size_aligned]));
return true;
}
@@ -104,7 +104,6 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v
os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE);
} else {
const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size);
- const ptr = @alignCast(mem.page_size, slice.ptr);
- os.munmap(ptr[0..buf_aligned_len]);
+ os.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
}
}
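
In the PageAllocator hunks above, @alignCast drops its explicit alignment argument; the required alignment now comes from the type expected at the use site (there, the parameter type of os.munmap). A rough standalone illustration with a hypothetical sumAligned helper:

    const std = @import("std");

    fn sumAligned(words: []align(@alignOf(u64)) const u8) u64 {
        var total: u64 = 0;
        // The guaranteed alignment lets the bytes be reinterpreted as u64 words.
        for (std.mem.bytesAsSlice(u64, words)) |w| total += w;
        return total;
    }

    test "alignment inferred from the parameter type" {
        const storage: [16]u8 align(@alignOf(u64)) = [_]u8{1} ** 16;
        const bytes: []const u8 = &storage;
        // Old: sumAligned(@alignCast(@alignOf(u64), bytes));
        // New: the target alignment is read off sumAligned's parameter type.
        try std.testing.expectEqual(@as(u64, 0x0202020202020202), sumAligned(@alignCast(bytes)));
    }
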
diff --git a/lib/std/heap/ThreadSafeAllocator.zig b/lib/std/heap/ThreadSafeAllocator.zig
index fe10eb2fdb..12bb095b30 100644
--- a/lib/std/heap/ThreadSafeAllocator.zig
+++ b/lib/std/heap/ThreadSafeAllocator.zig
@@ -15,7 +15,7 @@ pub fn allocator(self: *ThreadSafeAllocator) Allocator {
}
fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
- const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx));
+ const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
self.mutex.lock();
defer self.mutex.unlock();
@@ -23,7 +23,7 @@ fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
}
fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
- const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx));
+ const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
self.mutex.lock();
defer self.mutex.unlock();
@@ -32,7 +32,7 @@ fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_ad
}
fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
- const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx));
+ const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
self.mutex.lock();
defer self.mutex.unlock();
diff --git a/lib/std/heap/WasmAllocator.zig b/lib/std/heap/WasmAllocator.zig
index e3e436fd2b..60051b688a 100644
--- a/lib/std/heap/WasmAllocator.zig
+++ b/lib/std/heap/WasmAllocator.zig
@@ -47,7 +47,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*
_ = ctx;
_ = return_address;
// Make room for the freelist next pointer.
- const alignment = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_align);
+ const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
const actual_len = @max(len +| @sizeOf(usize), alignment);
const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null;
const class = math.log2(slot_size) - min_class;
@@ -55,7 +55,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*
const addr = a: {
const top_free_ptr = frees[class];
if (top_free_ptr != 0) {
- const node = @ptrFromInt(*usize, top_free_ptr + (slot_size - @sizeOf(usize)));
+ const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size - @sizeOf(usize))));
frees[class] = node.*;
break :a top_free_ptr;
}
@@ -74,11 +74,11 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*
break :a next_addr;
}
};
- return @ptrFromInt([*]u8, addr);
+ return @as([*]u8, @ptrFromInt(addr));
}
const bigpages_needed = bigPagesNeeded(actual_len);
const addr = allocBigPages(bigpages_needed);
- return @ptrFromInt([*]u8, addr);
+ return @as([*]u8, @ptrFromInt(addr));
}
fn resize(
@@ -92,7 +92,7 @@ fn resize(
_ = return_address;
// We don't want to move anything from one size class to another, but we
// can recover bytes in between powers of two.
- const buf_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_buf_align);
+ const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align);
const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align);
const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len);
@@ -117,20 +117,20 @@ fn free(
) void {
_ = ctx;
_ = return_address;
- const buf_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_buf_align);
+ const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
const actual_len = @max(buf.len + @sizeOf(usize), buf_align);
const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len);
const class = math.log2(slot_size) - min_class;
const addr = @intFromPtr(buf.ptr);
if (class < size_class_count) {
- const node = @ptrFromInt(*usize, addr + (slot_size - @sizeOf(usize)));
+ const node = @as(*usize, @ptrFromInt(addr + (slot_size - @sizeOf(usize))));
node.* = frees[class];
frees[class] = addr;
} else {
const bigpages_needed = bigPagesNeeded(actual_len);
const pow2_pages = math.ceilPowerOfTwoAssert(usize, bigpages_needed);
const big_slot_size_bytes = pow2_pages * bigpage_size;
- const node = @ptrFromInt(*usize, addr + (big_slot_size_bytes - @sizeOf(usize)));
+ const node = @as(*usize, @ptrFromInt(addr + (big_slot_size_bytes - @sizeOf(usize))));
const big_class = math.log2(pow2_pages);
node.* = big_frees[big_class];
big_frees[big_class] = addr;
@@ -148,14 +148,14 @@ fn allocBigPages(n: usize) usize {
const top_free_ptr = big_frees[class];
if (top_free_ptr != 0) {
- const node = @ptrFromInt(*usize, top_free_ptr + (slot_size_bytes - @sizeOf(usize)));
+ const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size_bytes - @sizeOf(usize))));
big_frees[class] = node.*;
return top_free_ptr;
}
const page_index = @wasmMemoryGrow(0, pow2_pages * pages_per_bigpage);
if (page_index <= 0) return 0;
- const addr = @intCast(u32, page_index) * wasm.page_size;
+ const addr = @as(u32, @intCast(page_index)) * wasm.page_size;
return addr;
}
diff --git a/lib/std/heap/WasmPageAllocator.zig b/lib/std/heap/WasmPageAllocator.zig
index c77164ee2d..8f484c52f6 100644
--- a/lib/std/heap/WasmPageAllocator.zig
+++ b/lib/std/heap/WasmPageAllocator.zig
@@ -40,7 +40,7 @@ const FreeBlock = struct {
fn getBit(self: FreeBlock, idx: usize) PageStatus {
const bit_offset = 0;
- return @enumFromInt(PageStatus, Io.get(mem.sliceAsBytes(self.data), idx, bit_offset));
+ return @as(PageStatus, @enumFromInt(Io.get(mem.sliceAsBytes(self.data), idx, bit_offset)));
}
fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
@@ -63,7 +63,7 @@ const FreeBlock = struct {
fn useRecycled(self: FreeBlock, num_pages: usize, log2_align: u8) usize {
@setCold(true);
for (self.data, 0..) |segment, i| {
- const spills_into_next = @bitCast(i128, segment) < 0;
+ const spills_into_next = @as(i128, @bitCast(segment)) < 0;
const has_enough_bits = @popCount(segment) >= num_pages;
if (!spills_into_next and !has_enough_bits) continue;
@@ -109,7 +109,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
if (len > maxInt(usize) - (mem.page_size - 1)) return null;
const page_count = nPages(len);
const page_idx = allocPages(page_count, log2_align) catch return null;
- return @ptrFromInt([*]u8, page_idx * mem.page_size);
+ return @as([*]u8, @ptrFromInt(page_idx * mem.page_size));
}
fn allocPages(page_count: usize, log2_align: u8) !usize {
@@ -129,7 +129,7 @@ fn allocPages(page_count: usize, log2_align: u8) !usize {
const next_page_addr = next_page_idx * mem.page_size;
const aligned_addr = mem.alignForwardLog2(next_page_addr, log2_align);
const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size);
- const result = @wasmMemoryGrow(0, @intCast(u32, drop_page_count + page_count));
+ const result = @wasmMemoryGrow(0, @as(u32, @intCast(drop_page_count + page_count)));
if (result <= 0)
return error.OutOfMemory;
assert(result == next_page_idx);
@@ -137,7 +137,7 @@ fn allocPages(page_count: usize, log2_align: u8) !usize {
if (drop_page_count > 0) {
freePages(next_page_idx, aligned_page_idx);
}
- return @intCast(usize, aligned_page_idx);
+ return @as(usize, @intCast(aligned_page_idx));
}
fn freePages(start: usize, end: usize) void {
@@ -151,7 +151,7 @@ fn freePages(start: usize, end: usize) void {
// TODO: would it be better if we use the first page instead?
new_end -= 1;
- extended.data = @ptrFromInt([*]u128, new_end * mem.page_size)[0 .. mem.page_size / @sizeOf(u128)];
+ extended.data = @as([*]u128, @ptrFromInt(new_end * mem.page_size))[0 .. mem.page_size / @sizeOf(u128)];
// Since this is the first page being freed and we consume it, assume *nothing* is free.
@memset(extended.data, PageStatus.none_free);
}
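
The Wasm allocator hunks above round-trip between addresses and pointers; with the new builtins, @ptrFromInt gets its pointer type from the destination and keeps an @as wrapper only where no destination exists. A small standalone sketch, not taken from the diff:

    const std = @import("std");

    test "pointer/integer round trip with inferred result types" {
        var slot: usize = 0;
        const addr = @intFromPtr(&slot);
        // Old: const node = @ptrFromInt(*usize, addr);
        // New: the annotated destination supplies the pointer type.
        const node: *usize = @ptrFromInt(addr);
        node.* = 42;
        try std.testing.expectEqual(@as(usize, 42), slot);
    }
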
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index a8d6641d8d..d547987f63 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -48,7 +48,7 @@ pub const ArenaAllocator = struct {
// this has to occur before the free because the free frees node
const next_it = node.next;
const align_bits = std.math.log2_int(usize, @alignOf(BufNode));
- const alloc_buf = @ptrCast([*]u8, node)[0..node.data];
+ const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
it = next_it;
}
@@ -128,7 +128,7 @@ pub const ArenaAllocator = struct {
const next_it = node.next;
if (next_it == null)
break node;
- const alloc_buf = @ptrCast([*]u8, node)[0..node.data];
+ const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
it = next_it;
} else null;
@@ -140,7 +140,7 @@ pub const ArenaAllocator = struct {
// perfect, no need to invoke the child_allocator
if (first_node.data == total_size)
return true;
- const first_alloc_buf = @ptrCast([*]u8, first_node)[0..first_node.data];
+ const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data];
if (self.child_allocator.rawResize(first_alloc_buf, align_bits, total_size, @returnAddress())) {
// successful resize
first_node.data = total_size;
@@ -151,7 +151,7 @@ pub const ArenaAllocator = struct {
return false;
};
self.child_allocator.rawFree(first_alloc_buf, align_bits, @returnAddress());
- const node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), new_ptr));
+ const node: *BufNode = @ptrCast(@alignCast(new_ptr));
node.* = .{ .data = total_size };
self.state.buffer_list.first = node;
}
@@ -166,7 +166,7 @@ pub const ArenaAllocator = struct {
const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode));
const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse
return null;
- const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), ptr));
+ const buf_node: *BufNode = @ptrCast(@alignCast(ptr));
buf_node.* = .{ .data = len };
self.state.buffer_list.prepend(buf_node);
self.state.end_index = 0;
@@ -174,16 +174,16 @@ pub const ArenaAllocator = struct {
}
fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
- const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx));
+ const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
_ = ra;
- const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
+ const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
var cur_node = if (self.state.buffer_list.first) |first_node|
first_node
else
(self.createNode(0, n + ptr_align) orelse return null);
while (true) {
- const cur_alloc_buf = @ptrCast([*]u8, cur_node)[0..cur_node.data];
+ const cur_alloc_buf = @as([*]u8, @ptrCast(cur_node))[0..cur_node.data];
const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..];
const addr = @intFromPtr(cur_buf.ptr) + self.state.end_index;
const adjusted_addr = mem.alignForward(usize, addr, ptr_align);
@@ -208,12 +208,12 @@ pub const ArenaAllocator = struct {
}
fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
- const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx));
+ const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
_ = log2_buf_align;
_ = ret_addr;
const cur_node = self.state.buffer_list.first orelse return false;
- const cur_buf = @ptrCast([*]u8, cur_node)[@sizeOf(BufNode)..cur_node.data];
+ const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data];
if (@intFromPtr(cur_buf.ptr) + self.state.end_index != @intFromPtr(buf.ptr) + buf.len) {
// It's not the most recent allocation, so it cannot be expanded,
// but it's fine if they want to make it smaller.
@@ -235,10 +235,10 @@ pub const ArenaAllocator = struct {
_ = log2_buf_align;
_ = ret_addr;
- const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx));
+ const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
const cur_node = self.state.buffer_list.first orelse return;
- const cur_buf = @ptrCast([*]u8, cur_node)[@sizeOf(BufNode)..cur_node.data];
+ const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data];
if (@intFromPtr(cur_buf.ptr) + self.state.end_index == @intFromPtr(buf.ptr) + buf.len) {
self.state.end_index -= buf.len;
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 98375c850e..11f7d9dd27 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -250,7 +250,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
used_count: SlotIndex,
fn usedBits(bucket: *BucketHeader, index: usize) *u8 {
- return @ptrFromInt(*u8, @intFromPtr(bucket) + @sizeOf(BucketHeader) + index);
+ return @as(*u8, @ptrFromInt(@intFromPtr(bucket) + @sizeOf(BucketHeader) + index));
}
fn stackTracePtr(
@@ -259,10 +259,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
slot_index: SlotIndex,
trace_kind: TraceKind,
) *[stack_n]usize {
- const start_ptr = @ptrCast([*]u8, bucket) + bucketStackFramesStart(size_class);
+ const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketStackFramesStart(size_class);
const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
@intFromEnum(trace_kind) * @as(usize, one_trace_size);
- return @ptrCast(*[stack_n]usize, @alignCast(@alignOf(usize), addr));
+ return @ptrCast(@alignCast(addr));
}
fn captureStackTrace(
@@ -338,9 +338,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (used_byte != 0) {
var bit_index: u3 = 0;
while (true) : (bit_index += 1) {
- const is_used = @truncate(u1, used_byte >> bit_index) != 0;
+ const is_used = @as(u1, @truncate(used_byte >> bit_index)) != 0;
if (is_used) {
- const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index);
+ const slot_index = @as(SlotIndex, @intCast(used_bits_byte * 8 + bit_index));
const stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc);
const addr = bucket.page + slot_index * size_class;
log.err("memory address 0x{x} leaked: {}", .{
@@ -361,7 +361,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
var leaks = false;
for (self.buckets, 0..) |optional_bucket, bucket_i| {
const first_bucket = optional_bucket orelse continue;
- const size_class = @as(usize, 1) << @intCast(math.Log2Int(usize), bucket_i);
+ const size_class = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(bucket_i));
const used_bits_count = usedBitsCount(size_class);
var bucket = first_bucket;
while (true) {
@@ -385,7 +385,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
fn freeBucket(self: *Self, bucket: *BucketHeader, size_class: usize) void {
const bucket_size = bucketSize(size_class);
- const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size];
+ const bucket_slice = @as([*]align(@alignOf(BucketHeader)) u8, @ptrCast(bucket))[0..bucket_size];
self.backing_allocator.free(bucket_slice);
}
@@ -444,7 +444,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
self.small_allocations.deinit(self.backing_allocator);
}
self.* = undefined;
- return @enumFromInt(Check, @intFromBool(leaks));
+ return @as(Check, @enumFromInt(@intFromBool(leaks)));
}
fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
@@ -496,7 +496,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
bucket.alloc_cursor += 1;
var used_bits_byte = bucket.usedBits(slot_index / 8);
- const used_bit_index: u3 = @intCast(u3, slot_index % 8); // TODO cast should be unnecessary
+ const used_bit_index: u3 = @as(u3, @intCast(slot_index % 8)); // TODO cast should be unnecessary
used_bits_byte.* |= (@as(u8, 1) << used_bit_index);
bucket.used_count += 1;
bucket.captureStackTrace(trace_addr, size_class, slot_index, .alloc);
@@ -667,8 +667,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
new_size: usize,
ret_addr: usize,
) bool {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
- const log2_old_align = @intCast(Allocator.Log2Align, log2_old_align_u8);
+ const self: *Self = @ptrCast(@alignCast(ctx));
+ const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8));
self.mutex.lock();
defer self.mutex.unlock();
@@ -704,11 +704,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
};
const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page);
- const slot_index = @intCast(SlotIndex, byte_offset / size_class);
+ const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class));
const used_byte_index = slot_index / 8;
- const used_bit_index = @intCast(u3, slot_index % 8);
+ const used_bit_index = @as(u3, @intCast(slot_index % 8));
const used_byte = bucket.usedBits(used_byte_index);
- const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
+ const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
if (!is_used) {
if (config.safety) {
reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
@@ -739,8 +739,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
if (log2_old_align != entry.value_ptr.log2_ptr_align) {
log.err("Allocation alignment {d} does not match resize alignment {d}. Allocation: {} Resize: {}", .{
- @as(usize, 1) << @intCast(math.Log2Int(usize), entry.value_ptr.log2_ptr_align),
- @as(usize, 1) << @intCast(math.Log2Int(usize), log2_old_align),
+ @as(usize, 1) << @as(math.Log2Int(usize), @intCast(entry.value_ptr.log2_ptr_align)),
+ @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)),
bucketStackTrace(bucket, size_class, slot_index, .alloc),
free_stack_trace,
});
@@ -786,8 +786,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
log2_old_align_u8: u8,
ret_addr: usize,
) void {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
- const log2_old_align = @intCast(Allocator.Log2Align, log2_old_align_u8);
+ const self: *Self = @ptrCast(@alignCast(ctx));
+ const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8));
self.mutex.lock();
defer self.mutex.unlock();
@@ -825,11 +825,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return;
};
const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page);
- const slot_index = @intCast(SlotIndex, byte_offset / size_class);
+ const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class));
const used_byte_index = slot_index / 8;
- const used_bit_index = @intCast(u3, slot_index % 8);
+ const used_bit_index = @as(u3, @intCast(slot_index % 8));
const used_byte = bucket.usedBits(used_byte_index);
- const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
+ const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
if (!is_used) {
if (config.safety) {
reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
@@ -861,8 +861,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
if (log2_old_align != entry.value_ptr.log2_ptr_align) {
log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
- @as(usize, 1) << @intCast(math.Log2Int(usize), entry.value_ptr.log2_ptr_align),
- @as(usize, 1) << @intCast(math.Log2Int(usize), log2_old_align),
+ @as(usize, 1) << @as(math.Log2Int(usize), @intCast(entry.value_ptr.log2_ptr_align)),
+ @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)),
bucketStackTrace(bucket, size_class, slot_index, .alloc),
free_stack_trace,
});
@@ -896,7 +896,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
} else {
// move alloc_cursor to end so we can tell size_class later
const slot_count = @divExact(page_size, size_class);
- bucket.alloc_cursor = @truncate(SlotIndex, slot_count);
+ bucket.alloc_cursor = @as(SlotIndex, @truncate(slot_count));
if (self.empty_buckets) |prev_bucket| {
// empty_buckets is ordered newest to oldest through prev so that if
// config.never_unmap is false and backing_allocator reuses freed memory
@@ -936,11 +936,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
self.mutex.lock();
defer self.mutex.unlock();
if (!self.isAllocationAllowed(len)) return null;
- return allocInner(self, len, @intCast(Allocator.Log2Align, log2_ptr_align), ret_addr) catch return null;
+ return allocInner(self, len, @as(Allocator.Log2Align, @intCast(log2_ptr_align)), ret_addr) catch return null;
}
fn allocInner(
@@ -949,7 +949,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
log2_ptr_align: Allocator.Log2Align,
ret_addr: usize,
) Allocator.Error![*]u8 {
- const new_aligned_size = @max(len, @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align));
+ const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)));
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse
@@ -1002,7 +1002,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const bucket_size = bucketSize(size_class);
const bucket_bytes = try self.backing_allocator.alignedAlloc(u8, @alignOf(BucketHeader), bucket_size);
- const ptr = @ptrCast(*BucketHeader, bucket_bytes.ptr);
+ const ptr = @as(*BucketHeader, @ptrCast(bucket_bytes.ptr));
ptr.* = BucketHeader{
.prev = ptr,
.next = ptr,
diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig
index b2d83c416b..b5c86c9beb 100644
--- a/lib/std/heap/log_to_writer_allocator.zig
+++ b/lib/std/heap/log_to_writer_allocator.zig
@@ -34,7 +34,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
log2_ptr_align: u8,
ra: usize,
) ?[*]u8 {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
self.writer.print("alloc : {}", .{len}) catch {};
const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
if (result != null) {
@@ -52,7 +52,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
new_len: usize,
ra: usize,
) bool {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
if (new_len <= buf.len) {
self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
} else {
@@ -77,7 +77,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
log2_buf_align: u8,
ra: usize,
) void {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
self.writer.print("free : {}\n", .{buf.len}) catch {};
self.parent_allocator.rawFree(buf, log2_buf_align, ra);
}
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index 0d32b5405e..6924a284e3 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -59,7 +59,7 @@ pub fn ScopedLoggingAllocator(
log2_ptr_align: u8,
ra: usize,
) ?[*]u8 {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
if (result != null) {
logHelper(
@@ -84,7 +84,7 @@ pub fn ScopedLoggingAllocator(
new_len: usize,
ra: usize,
) bool {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
if (self.parent_allocator.rawResize(buf, log2_buf_align, new_len, ra)) {
if (new_len <= buf.len) {
logHelper(
@@ -118,7 +118,7 @@ pub fn ScopedLoggingAllocator(
log2_buf_align: u8,
ra: usize,
) void {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
self.parent_allocator.rawFree(buf, log2_buf_align, ra);
logHelper(success_log_level, "free - len: {}", .{buf.len});
}
diff --git a/lib/std/heap/memory_pool.zig b/lib/std/heap/memory_pool.zig
index 3fc7dfbfca..b56a15d006 100644
--- a/lib/std/heap/memory_pool.zig
+++ b/lib/std/heap/memory_pool.zig
@@ -70,7 +70,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
var i: usize = 0;
while (i < initial_size) : (i += 1) {
const raw_mem = try pool.allocNew();
- const free_node = @ptrCast(NodePtr, raw_mem);
+ const free_node = @as(NodePtr, @ptrCast(raw_mem));
free_node.* = Node{
.next = pool.free_list,
};
@@ -106,11 +106,11 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
pool.free_list = item.next;
break :blk item;
} else if (pool_options.growable)
- @ptrCast(NodePtr, try pool.allocNew())
+ @as(NodePtr, @ptrCast(try pool.allocNew()))
else
return error.OutOfMemory;
- const ptr = @ptrCast(ItemPtr, node);
+ const ptr = @as(ItemPtr, @ptrCast(node));
ptr.* = undefined;
return ptr;
}
@@ -120,7 +120,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
pub fn destroy(pool: *Pool, ptr: ItemPtr) void {
ptr.* = undefined;
- const node = @ptrCast(NodePtr, ptr);
+ const node = @as(NodePtr, @ptrCast(ptr));
node.* = Node{
.next = pool.free_list,
};
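
The memory-pool and HTTP hunks around here show where the @as wrapper survives: the mechanical rewrite keeps @as(T, @intCast(...)) in expression position, while a typed destination such as a const annotation is enough on its own. Standalone sketch with hypothetical values, not from the diff:

    const std = @import("std");

    test "narrowing casts with and without a destination type" {
        const nread: usize = 300;
        // A typed destination supplies the result type, so the bare builtin suffices:
        const read_end: u16 = @intCast(nread);
        // expectEqual's anytype parameter provides no destination type, so @as stays:
        try std.testing.expectEqual(@as(u16, 300), read_end);
    }
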
diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig
index 942ff4904d..db576e72b2 100644
--- a/lib/std/http/Client.zig
+++ b/lib/std/http/Client.zig
@@ -187,7 +187,7 @@ pub const Connection = struct {
const nread = try conn.rawReadAtLeast(conn.read_buf[0..], 1);
if (nread == 0) return error.EndOfStream;
conn.read_start = 0;
- conn.read_end = @intCast(u16, nread);
+ conn.read_end = @as(u16, @intCast(nread));
}
pub fn peek(conn: *Connection) []const u8 {
@@ -208,8 +208,8 @@ pub const Connection = struct {
if (available_read > available_buffer) { // partially read buffered data
@memcpy(buffer[out_index..], conn.read_buf[conn.read_start..conn.read_end][0..available_buffer]);
- out_index += @intCast(u16, available_buffer);
- conn.read_start += @intCast(u16, available_buffer);
+ out_index += @as(u16, @intCast(available_buffer));
+ conn.read_start += @as(u16, @intCast(available_buffer));
break;
} else if (available_read > 0) { // fully read buffered data
@@ -343,7 +343,7 @@ pub const Response = struct {
else => return error.HttpHeadersInvalid,
};
if (first_line[8] != ' ') return error.HttpHeadersInvalid;
- const status = @enumFromInt(http.Status, parseInt3(first_line[9..12].*));
+ const status = @as(http.Status, @enumFromInt(parseInt3(first_line[9..12].*)));
const reason = mem.trimLeft(u8, first_line[12..], " ");
res.version = version;
@@ -415,7 +415,7 @@ pub const Response = struct {
}
inline fn int64(array: *const [8]u8) u64 {
- return @bitCast(u64, array.*);
+ return @as(u64, @bitCast(array.*));
}
fn parseInt3(nnn: @Vector(3, u8)) u10 {
@@ -649,7 +649,7 @@ pub const Request = struct {
try req.connection.?.data.fill();
const nchecked = try req.response.parser.checkCompleteHead(req.client.allocator, req.connection.?.data.peek());
- req.connection.?.data.drop(@intCast(u16, nchecked));
+ req.connection.?.data.drop(@as(u16, @intCast(nchecked)));
if (req.response.parser.state.isContent()) break;
}
@@ -768,7 +768,7 @@ pub const Request = struct {
try req.connection.?.data.fill();
const nchecked = try req.response.parser.checkCompleteHead(req.client.allocator, req.connection.?.data.peek());
- req.connection.?.data.drop(@intCast(u16, nchecked));
+ req.connection.?.data.drop(@as(u16, @intCast(nchecked)));
}
if (has_trail) {
diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig
index fe57b5735d..8c8661ee21 100644
--- a/lib/std/http/Server.zig
+++ b/lib/std/http/Server.zig
@@ -46,7 +46,7 @@ pub const Connection = struct {
const nread = try conn.rawReadAtLeast(conn.read_buf[0..], 1);
if (nread == 0) return error.EndOfStream;
conn.read_start = 0;
- conn.read_end = @intCast(u16, nread);
+ conn.read_end = @as(u16, @intCast(nread));
}
pub fn peek(conn: *Connection) []const u8 {
@@ -67,8 +67,8 @@ pub const Connection = struct {
if (available_read > available_buffer) { // partially read buffered data
@memcpy(buffer[out_index..], conn.read_buf[conn.read_start..conn.read_end][0..available_buffer]);
- out_index += @intCast(u16, available_buffer);
- conn.read_start += @intCast(u16, available_buffer);
+ out_index += @as(u16, @intCast(available_buffer));
+ conn.read_start += @as(u16, @intCast(available_buffer));
break;
} else if (available_read > 0) { // fully read buffered data
@@ -268,7 +268,7 @@ pub const Request = struct {
}
inline fn int64(array: *const [8]u8) u64 {
- return @bitCast(u64, array.*);
+ return @as(u64, @bitCast(array.*));
}
method: http.Method,
@@ -493,7 +493,7 @@ pub const Response = struct {
try res.connection.fill();
const nchecked = try res.request.parser.checkCompleteHead(res.allocator, res.connection.peek());
- res.connection.drop(@intCast(u16, nchecked));
+ res.connection.drop(@as(u16, @intCast(nchecked)));
if (res.request.parser.state.isContent()) break;
}
@@ -560,7 +560,7 @@ pub const Response = struct {
try res.connection.fill();
const nchecked = try res.request.parser.checkCompleteHead(res.allocator, res.connection.peek());
- res.connection.drop(@intCast(u16, nchecked));
+ res.connection.drop(@as(u16, @intCast(nchecked)));
}
if (has_trail) {
diff --git a/lib/std/http/protocol.zig b/lib/std/http/protocol.zig
index 6bafb08483..604267bf16 100644
--- a/lib/std/http/protocol.zig
+++ b/lib/std/http/protocol.zig
@@ -83,7 +83,7 @@ pub const HeadersParser = struct {
/// first byte of content is located at `bytes[result]`.
pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 {
const vector_len: comptime_int = comptime @max(std.simd.suggestVectorSize(u8) orelse 1, 8);
- const len = @intCast(u32, bytes.len);
+ const len = @as(u32, @intCast(bytes.len));
var index: u32 = 0;
while (true) {
@@ -182,8 +182,8 @@ pub const HeadersParser = struct {
const chunk = bytes[index..][0..vector_len];
const v: Vector = chunk.*;
- const matches_r = @bitCast(BitVector, v == @splat(vector_len, @as(u8, '\r')));
- const matches_n = @bitCast(BitVector, v == @splat(vector_len, @as(u8, '\n')));
+ const matches_r = @as(BitVector, @bitCast(v == @splat(vector_len, @as(u8, '\r'))));
+ const matches_n = @as(BitVector, @bitCast(v == @splat(vector_len, @as(u8, '\n'))));
const matches_or: SizeVector = matches_r | matches_n;
const matches = @reduce(.Add, matches_or);
@@ -234,7 +234,7 @@ pub const HeadersParser = struct {
},
4...vector_len => {
inline for (0..vector_len - 3) |i_usize| {
- const i = @truncate(u32, i_usize);
+ const i = @as(u32, @truncate(i_usize));
const b32 = int32(chunk[i..][0..4]);
const b16 = intShift(u16, b32);
@@ -405,10 +405,10 @@ pub const HeadersParser = struct {
/// If the amount returned is less than `bytes.len`, you may assume that the parser is in the `chunk_data` state
/// and that the first byte of the chunk is at `bytes[result]`.
pub fn findChunkedLen(r: *HeadersParser, bytes: []const u8) u32 {
- const len = @intCast(u32, bytes.len);
+ const len = @as(u32, @intCast(bytes.len));
for (bytes[0..], 0..) |c, i| {
- const index = @intCast(u32, i);
+ const index = @as(u32, @intCast(i));
switch (r.state) {
.chunk_data_suffix => switch (c) {
'\r' => r.state = .chunk_data_suffix_r,
@@ -529,7 +529,7 @@ pub const HeadersParser = struct {
try conn.fill();
const nread = @min(conn.peek().len, data_avail);
- conn.drop(@intCast(u16, nread));
+ conn.drop(@as(u16, @intCast(nread)));
r.next_chunk_length -= nread;
if (r.next_chunk_length == 0) r.done = true;
@@ -538,7 +538,7 @@ pub const HeadersParser = struct {
} else {
const out_avail = buffer.len;
- const can_read = @intCast(usize, @min(data_avail, out_avail));
+ const can_read = @as(usize, @intCast(@min(data_avail, out_avail)));
const nread = try conn.read(buffer[0..can_read]);
r.next_chunk_length -= nread;
@@ -551,7 +551,7 @@ pub const HeadersParser = struct {
try conn.fill();
const i = r.findChunkedLen(conn.peek());
- conn.drop(@intCast(u16, i));
+ conn.drop(@as(u16, @intCast(i)));
switch (r.state) {
.invalid => return error.HttpChunkInvalid,
@@ -579,10 +579,10 @@ pub const HeadersParser = struct {
try conn.fill();
const nread = @min(conn.peek().len, data_avail);
- conn.drop(@intCast(u16, nread));
+ conn.drop(@as(u16, @intCast(nread)));
r.next_chunk_length -= nread;
} else if (out_avail > 0) {
- const can_read = @intCast(usize, @min(data_avail, out_avail));
+ const can_read: usize = @intCast(@min(data_avail, out_avail));
const nread = try conn.read(buffer[out_index..][0..can_read]);
r.next_chunk_length -= nread;
out_index += nread;
@@ -601,21 +601,21 @@ pub const HeadersParser = struct {
};
inline fn int16(array: *const [2]u8) u16 {
- return @bitCast(u16, array.*);
+ return @as(u16, @bitCast(array.*));
}
inline fn int24(array: *const [3]u8) u24 {
- return @bitCast(u24, array.*);
+ return @as(u24, @bitCast(array.*));
}
inline fn int32(array: *const [4]u8) u32 {
- return @bitCast(u32, array.*);
+ return @as(u32, @bitCast(array.*));
}
inline fn intShift(comptime T: type, x: anytype) T {
switch (@import("builtin").cpu.arch.endian()) {
- .Little => return @truncate(T, x >> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T))),
- .Big => return @truncate(T, x),
+ .Little => return @as(T, @truncate(x >> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T)))),
+ .Big => return @as(T, @truncate(x)),
}
}
@@ -634,7 +634,7 @@ const MockBufferedConnection = struct {
const nread = try conn.conn.read(conn.buf[0..]);
if (nread == 0) return error.EndOfStream;
conn.start = 0;
- conn.end = @truncate(u16, nread);
+ conn.end = @as(u16, @truncate(nread));
}
pub fn peek(conn: *MockBufferedConnection) []const u8 {
@@ -652,7 +652,7 @@ const MockBufferedConnection = struct {
const left = buffer.len - out_index;
if (available > 0) {
- const can_read = @truncate(u16, @min(available, left));
+ const can_read = @as(u16, @truncate(@min(available, left)));
@memcpy(buffer[out_index..][0..can_read], conn.buf[conn.start..][0..can_read]);
out_index += can_read;
@@ -705,8 +705,8 @@ test "HeadersParser.findHeadersEnd" {
for (0..36) |i| {
r = HeadersParser.initDynamic(0);
- try std.testing.expectEqual(@intCast(u32, i), r.findHeadersEnd(data[0..i]));
- try std.testing.expectEqual(@intCast(u32, 35 - i), r.findHeadersEnd(data[i..]));
+ try std.testing.expectEqual(@as(u32, @intCast(i)), r.findHeadersEnd(data[0..i]));
+ try std.testing.expectEqual(@as(u32, @intCast(35 - i)), r.findHeadersEnd(data[i..]));
}
}
@@ -761,7 +761,7 @@ test "HeadersParser.read length" {
try conn.fill();
const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek());
- conn.drop(@intCast(u16, nchecked));
+ conn.drop(@as(u16, @intCast(nchecked)));
if (r.state.isContent()) break;
}
@@ -792,7 +792,7 @@ test "HeadersParser.read chunked" {
try conn.fill();
const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek());
- conn.drop(@intCast(u16, nchecked));
+ conn.drop(@as(u16, @intCast(nchecked)));
if (r.state.isContent()) break;
}
@@ -822,7 +822,7 @@ test "HeadersParser.read chunked trailer" {
try conn.fill();
const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek());
- conn.drop(@intCast(u16, nchecked));
+ conn.drop(@as(u16, @intCast(nchecked)));
if (r.state.isContent()) break;
}
@@ -837,7 +837,7 @@ test "HeadersParser.read chunked trailer" {
try conn.fill();
const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek());
- conn.drop(@intCast(u16, nchecked));
+ conn.drop(@as(u16, @intCast(nchecked)));
if (r.state.isContent()) break;
}
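
The HTTP parser hunks above migrate @enumFromInt, @bitCast and @truncate under the same convention. A standalone example with a hypothetical Status enum (not the real http.Status):

    const std = @import("std");

    const Status = enum(u10) { ok = 200, not_found = 404 };

    test "enum and bit casts with inferred destination" {
        // Old: @enumFromInt(Status, 200) and @bitCast(u64, array)
        const s: Status = @enumFromInt(200);
        const bytes = [8]u8{ 1, 1, 1, 1, 1, 1, 1, 1 };
        const word: u64 = @bitCast(bytes);
        try std.testing.expect(s == .ok);
        try std.testing.expectEqual(@as(u64, 0x0101010101010101), word);
    }
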
diff --git a/lib/std/io.zig b/lib/std/io.zig
index f2804a3107..e7a4476c0f 100644
--- a/lib/std/io.zig
+++ b/lib/std/io.zig
@@ -275,7 +275,7 @@ pub fn Poller(comptime StreamEnum: type) type {
)) {
.pending => {
self.windows.active.handles_buf[self.windows.active.count] = handle;
- self.windows.active.stream_map[self.windows.active.count] = @enumFromInt(StreamEnum, i);
+ self.windows.active.stream_map[self.windows.active.count] = @as(StreamEnum, @enumFromInt(i));
self.windows.active.count += 1;
},
.closed => {}, // don't add to the wait_objects list
diff --git a/lib/std/io/bit_reader.zig b/lib/std/io/bit_reader.zig
index 4bdb0b9194..7ea2ff5009 100644
--- a/lib/std/io/bit_reader.zig
+++ b/lib/std/io/bit_reader.zig
@@ -60,7 +60,7 @@ pub fn BitReader(comptime endian: std.builtin.Endian, comptime ReaderType: type)
var out_buffer = @as(Buf, 0);
if (self.bit_count > 0) {
- const n = if (self.bit_count >= bits) @intCast(u3, bits) else self.bit_count;
+ const n = if (self.bit_count >= bits) @as(u3, @intCast(bits)) else self.bit_count;
const shift = u7_bit_count - n;
switch (endian) {
.Big => {
@@ -88,45 +88,45 @@ pub fn BitReader(comptime endian: std.builtin.Endian, comptime ReaderType: type)
while (out_bits.* < bits) {
const n = bits - out_bits.*;
const next_byte = self.forward_reader.readByte() catch |err| switch (err) {
- error.EndOfStream => return @intCast(U, out_buffer),
+ error.EndOfStream => return @as(U, @intCast(out_buffer)),
else => |e| return e,
};
switch (endian) {
.Big => {
if (n >= u8_bit_count) {
- out_buffer <<= @intCast(u3, u8_bit_count - 1);
+ out_buffer <<= @as(u3, @intCast(u8_bit_count - 1));
out_buffer <<= 1;
out_buffer |= @as(Buf, next_byte);
out_bits.* += u8_bit_count;
continue;
}
- const shift = @intCast(u3, u8_bit_count - n);
- out_buffer <<= @intCast(BufShift, n);
+ const shift = @as(u3, @intCast(u8_bit_count - n));
+ out_buffer <<= @as(BufShift, @intCast(n));
out_buffer |= @as(Buf, next_byte >> shift);
out_bits.* += n;
- self.bit_buffer = @truncate(u7, next_byte << @intCast(u3, n - 1));
+ self.bit_buffer = @as(u7, @truncate(next_byte << @as(u3, @intCast(n - 1))));
self.bit_count = shift;
},
.Little => {
if (n >= u8_bit_count) {
- out_buffer |= @as(Buf, next_byte) << @intCast(BufShift, out_bits.*);
+ out_buffer |= @as(Buf, next_byte) << @as(BufShift, @intCast(out_bits.*));
out_bits.* += u8_bit_count;
continue;
}
- const shift = @intCast(u3, u8_bit_count - n);
+ const shift = @as(u3, @intCast(u8_bit_count - n));
const value = (next_byte << shift) >> shift;
- out_buffer |= @as(Buf, value) << @intCast(BufShift, out_bits.*);
+ out_buffer |= @as(Buf, value) << @as(BufShift, @intCast(out_bits.*));
out_bits.* += n;
- self.bit_buffer = @truncate(u7, next_byte >> @intCast(u3, n));
+ self.bit_buffer = @as(u7, @truncate(next_byte >> @as(u3, @intCast(n))));
self.bit_count = shift;
},
}
}
- return @intCast(U, out_buffer);
+ return @as(U, @intCast(out_buffer));
}
pub fn alignToByte(self: *Self) void {
diff --git a/lib/std/io/bit_writer.zig b/lib/std/io/bit_writer.zig
index 0be2e7ab08..ef8f007264 100644
--- a/lib/std/io/bit_writer.zig
+++ b/lib/std/io/bit_writer.zig
@@ -47,27 +47,27 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type)
const Buf = std.meta.Int(.unsigned, buf_bit_count);
const BufShift = math.Log2Int(Buf);
- const buf_value = @intCast(Buf, value);
+ const buf_value = @as(Buf, @intCast(value));
- const high_byte_shift = @intCast(BufShift, buf_bit_count - u8_bit_count);
+ const high_byte_shift = @as(BufShift, @intCast(buf_bit_count - u8_bit_count));
var in_buffer = switch (endian) {
- .Big => buf_value << @intCast(BufShift, buf_bit_count - bits),
+ .Big => buf_value << @as(BufShift, @intCast(buf_bit_count - bits)),
.Little => buf_value,
};
var in_bits = bits;
if (self.bit_count > 0) {
const bits_remaining = u8_bit_count - self.bit_count;
- const n = @intCast(u3, if (bits_remaining > bits) bits else bits_remaining);
+ const n = @as(u3, @intCast(if (bits_remaining > bits) bits else bits_remaining));
switch (endian) {
.Big => {
- const shift = @intCast(BufShift, high_byte_shift + self.bit_count);
- const v = @intCast(u8, in_buffer >> shift);
+ const shift = @as(BufShift, @intCast(high_byte_shift + self.bit_count));
+ const v = @as(u8, @intCast(in_buffer >> shift));
self.bit_buffer |= v;
in_buffer <<= n;
},
.Little => {
- const v = @truncate(u8, in_buffer) << @intCast(u3, self.bit_count);
+ const v = @as(u8, @truncate(in_buffer)) << @as(u3, @intCast(self.bit_count));
self.bit_buffer |= v;
in_buffer >>= n;
},
@@ -87,15 +87,15 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type)
while (in_bits >= u8_bit_count) {
switch (endian) {
.Big => {
- const v = @intCast(u8, in_buffer >> high_byte_shift);
+ const v = @as(u8, @intCast(in_buffer >> high_byte_shift));
try self.forward_writer.writeByte(v);
- in_buffer <<= @intCast(u3, u8_bit_count - 1);
+ in_buffer <<= @as(u3, @intCast(u8_bit_count - 1));
in_buffer <<= 1;
},
.Little => {
- const v = @truncate(u8, in_buffer);
+ const v = @as(u8, @truncate(in_buffer));
try self.forward_writer.writeByte(v);
- in_buffer >>= @intCast(u3, u8_bit_count - 1);
+ in_buffer >>= @as(u3, @intCast(u8_bit_count - 1));
in_buffer >>= 1;
},
}
@@ -103,10 +103,10 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type)
}
if (in_bits > 0) {
- self.bit_count = @intCast(u4, in_bits);
+ self.bit_count = @as(u4, @intCast(in_bits));
self.bit_buffer = switch (endian) {
- .Big => @truncate(u8, in_buffer >> high_byte_shift),
- .Little => @truncate(u8, in_buffer),
+ .Big => @as(u8, @truncate(in_buffer >> high_byte_shift)),
+ .Little => @as(u8, @truncate(in_buffer)),
};
}
}
diff --git a/lib/std/io/c_writer.zig b/lib/std/io/c_writer.zig
index 62c73d3714..ee87a28dc6 100644
--- a/lib/std/io/c_writer.zig
+++ b/lib/std/io/c_writer.zig
@@ -13,7 +13,7 @@ pub fn cWriter(c_file: *std.c.FILE) CWriter {
fn cWriterWrite(c_file: *std.c.FILE, bytes: []const u8) std.fs.File.WriteError!usize {
const amt_written = std.c.fwrite(bytes.ptr, 1, bytes.len, c_file);
if (amt_written >= 0) return amt_written;
- switch (@enumFromInt(os.E, std.c._errno().*)) {
+ switch (@as(os.E, @enumFromInt(std.c._errno().*))) {
.SUCCESS => unreachable,
.INVAL => unreachable,
.FAULT => unreachable,
diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig
index abdca56d3c..4dde51838b 100644
--- a/lib/std/io/reader.zig
+++ b/lib/std/io/reader.zig
@@ -246,7 +246,7 @@ pub fn Reader(
/// Same as `readByte` except the returned byte is signed.
pub fn readByteSigned(self: Self) (Error || error{EndOfStream})!i8 {
- return @bitCast(i8, try self.readByte());
+ return @as(i8, @bitCast(try self.readByte()));
}
/// Reads exactly `num_bytes` bytes and returns as an array.
diff --git a/lib/std/json/scanner.zig b/lib/std/json/scanner.zig
index 4fb7c1da01..274faba2ff 100644
--- a/lib/std/json/scanner.zig
+++ b/lib/std/json/scanner.zig
@@ -193,7 +193,7 @@ pub const TokenType = enum {
/// to get meaningful information from this.
pub const Diagnostics = struct {
line_number: u64 = 1,
- line_start_cursor: usize = @bitCast(usize, @as(isize, -1)), // Start just "before" the input buffer to get a 1-based column for line 1.
+ line_start_cursor: usize = @as(usize, @bitCast(@as(isize, -1))), // Start just "before" the input buffer to get a 1-based column for line 1.
total_bytes_before_current_input: u64 = 0,
cursor_pointer: *const usize = undefined,
@@ -1719,7 +1719,7 @@ const BitStack = struct {
pub fn push(self: *@This(), b: u1) Allocator.Error!void {
const byte_index = self.bit_len >> 3;
- const bit_index = @intCast(u3, self.bit_len & 7);
+ const bit_index = @as(u3, @intCast(self.bit_len & 7));
if (self.bytes.items.len <= byte_index) {
try self.bytes.append(0);
@@ -1733,8 +1733,8 @@ const BitStack = struct {
pub fn peek(self: *const @This()) u1 {
const byte_index = (self.bit_len - 1) >> 3;
- const bit_index = @intCast(u3, (self.bit_len - 1) & 7);
- return @intCast(u1, (self.bytes.items[byte_index] >> bit_index) & 1);
+ const bit_index = @as(u3, @intCast((self.bit_len - 1) & 7));
+ return @as(u1, @intCast((self.bytes.items[byte_index] >> bit_index) & 1));
}
pub fn pop(self: *@This()) u1 {
diff --git a/lib/std/json/static.zig b/lib/std/json/static.zig
index fd3d12d73a..f1926660f3 100644
--- a/lib/std/json/static.zig
+++ b/lib/std/json/static.zig
@@ -442,7 +442,7 @@ fn internalParse(
}
if (ptrInfo.sentinel) |some| {
- const sentinel_value = @ptrCast(*align(1) const ptrInfo.child, some).*;
+ const sentinel_value = @as(*align(1) const ptrInfo.child, @ptrCast(some)).*;
return try arraylist.toOwnedSliceSentinel(sentinel_value);
}
@@ -456,7 +456,7 @@ fn internalParse(
// Use our own array list so we can append the sentinel.
var value_list = ArrayList(u8).init(allocator);
_ = try source.allocNextIntoArrayList(&value_list, .alloc_always);
- return try value_list.toOwnedSliceSentinel(@ptrCast(*const u8, sentinel_ptr).*);
+ return try value_list.toOwnedSliceSentinel(@as(*const u8, @ptrCast(sentinel_ptr)).*);
}
if (ptrInfo.is_const) {
switch (try source.nextAllocMax(allocator, .alloc_if_needed, options.max_value_len.?)) {
@@ -518,8 +518,8 @@ fn internalParseFromValue(
},
.Float, .ComptimeFloat => {
switch (source) {
- .float => |f| return @floatCast(T, f),
- .integer => |i| return @floatFromInt(T, i),
+ .float => |f| return @as(T, @floatCast(f)),
+ .integer => |i| return @as(T, @floatFromInt(i)),
.number_string, .string => |s| return std.fmt.parseFloat(T, s),
else => return error.UnexpectedToken,
}
@@ -530,12 +530,12 @@ fn internalParseFromValue(
if (@round(f) != f) return error.InvalidNumber;
if (f > std.math.maxInt(T)) return error.Overflow;
if (f < std.math.minInt(T)) return error.Overflow;
- return @intFromFloat(T, f);
+ return @as(T, @intFromFloat(f));
},
.integer => |i| {
if (i > std.math.maxInt(T)) return error.Overflow;
if (i < std.math.minInt(T)) return error.Overflow;
- return @intCast(T, i);
+ return @as(T, @intCast(i));
},
.number_string, .string => |s| {
return sliceToInt(T, s);
@@ -686,7 +686,7 @@ fn internalParseFromValue(
switch (source) {
.array => |array| {
const r = if (ptrInfo.sentinel) |sentinel_ptr|
- try allocator.allocSentinel(ptrInfo.child, array.items.len, @ptrCast(*align(1) const ptrInfo.child, sentinel_ptr).*)
+ try allocator.allocSentinel(ptrInfo.child, array.items.len, @as(*align(1) const ptrInfo.child, @ptrCast(sentinel_ptr)).*)
else
try allocator.alloc(ptrInfo.child, array.items.len);
@@ -701,7 +701,7 @@ fn internalParseFromValue(
// Dynamic length string.
const r = if (ptrInfo.sentinel) |sentinel_ptr|
- try allocator.allocSentinel(ptrInfo.child, s.len, @ptrCast(*align(1) const ptrInfo.child, sentinel_ptr).*)
+ try allocator.allocSentinel(ptrInfo.child, s.len, @as(*align(1) const ptrInfo.child, @ptrCast(sentinel_ptr)).*)
else
try allocator.alloc(ptrInfo.child, s.len);
@memcpy(r[0..], s);
@@ -743,7 +743,7 @@ fn sliceToInt(comptime T: type, slice: []const u8) !T {
const float = try std.fmt.parseFloat(f128, slice);
if (@round(float) != float) return error.InvalidNumber;
if (float > std.math.maxInt(T) or float < std.math.minInt(T)) return error.Overflow;
- return @intCast(T, @intFromFloat(i128, float));
+ return @as(T, @intCast(@as(i128, @intFromFloat(float))));
}
fn sliceToEnum(comptime T: type, slice: []const u8) !T {
@@ -759,7 +759,7 @@ fn fillDefaultStructValues(comptime T: type, r: *T, fields_seen: *[@typeInfo(T).
inline for (@typeInfo(T).Struct.fields, 0..) |field, i| {
if (!fields_seen[i]) {
if (field.default_value) |default_ptr| {
- const default = @ptrCast(*align(1) const field.type, default_ptr).*;
+ const default = @as(*align(1) const field.type, @ptrCast(default_ptr)).*;
@field(r, field.name) = default;
} else {
return error.MissingField;
diff --git a/lib/std/json/stringify.zig b/lib/std/json/stringify.zig
index 6d10e95330..5de5db54b9 100644
--- a/lib/std/json/stringify.zig
+++ b/lib/std/json/stringify.zig
@@ -78,8 +78,8 @@ fn outputUnicodeEscape(
assert(codepoint <= 0x10FFFF);
// To escape an extended character that is not in the Basic Multilingual Plane,
// the character is represented as a 12-character sequence, encoding the UTF-16 surrogate pair.
- const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800;
- const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00;
+ const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800;
+ const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00;
try out_stream.writeAll("\\u");
try std.fmt.formatIntValue(high, "x", std.fmt.FormatOptions{ .width = 4, .fill = '0' }, out_stream);
try out_stream.writeAll("\\u");
diff --git a/lib/std/json/write_stream.zig b/lib/std/json/write_stream.zig
index 760bad13fd..3a2750f5a1 100644
--- a/lib/std/json/write_stream.zig
+++ b/lib/std/json/write_stream.zig
@@ -176,8 +176,8 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
.ComptimeInt => {
return self.emitNumber(@as(std.math.IntFittingRange(value, value), value));
},
- .Float, .ComptimeFloat => if (@floatCast(f64, value) == value) {
- try self.stream.print("{}", .{@floatCast(f64, value)});
+ .Float, .ComptimeFloat => if (@as(f64, @floatCast(value)) == value) {
+ try self.stream.print("{}", .{@as(f64, @floatCast(value))});
self.popState();
return;
},
@@ -294,7 +294,7 @@ test "json write stream" {
fn getJsonObject(allocator: std.mem.Allocator) !Value {
var value = Value{ .object = ObjectMap.init(allocator) };
- try value.object.put("one", Value{ .integer = @intCast(i64, 1) });
+ try value.object.put("one", Value{ .integer = @as(i64, @intCast(1)) });
try value.object.put("two", Value{ .float = 2.0 });
return value;
}
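
The JSON hunks above treat the float builtins the same way: @floatCast, @floatFromInt and @intFromFloat keep only the operand, with the target type coming from the destination or an @as wrapper. Standalone sketch with arbitrary values:

    const std = @import("std");

    test "float casts with inferred destination" {
        const i: i64 = 3;
        // Old: @floatFromInt(f64, i) and @intFromFloat(i64, f)
        const f: f64 = @floatFromInt(i);
        const back: i64 = @intFromFloat(@round(f * 2.5));
        try std.testing.expectEqual(@as(i64, 8), back);
    }
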
diff --git a/lib/std/leb128.zig b/lib/std/leb128.zig
index 859d753a6a..33555caec5 100644
--- a/lib/std/leb128.zig
+++ b/lib/std/leb128.zig
@@ -30,17 +30,17 @@ pub fn readULEB128(comptime T: type, reader: anytype) !T {
if (value > std.math.maxInt(T)) return error.Overflow;
}
- return @truncate(T, value);
+ return @as(T, @truncate(value));
}
/// Write a single unsigned integer as unsigned LEB128 to the given writer.
pub fn writeULEB128(writer: anytype, uint_value: anytype) !void {
const T = @TypeOf(uint_value);
const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
- var value = @intCast(U, uint_value);
+ var value = @as(U, @intCast(uint_value));
while (true) {
- const byte = @truncate(u8, value & 0x7f);
+ const byte = @as(u8, @truncate(value & 0x7f));
value >>= 7;
if (value == 0) {
try writer.writeByte(byte);
@@ -71,18 +71,18 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
if (ov[1] != 0) {
// Overflow is ok so long as the sign bit is set and this is the last byte
if (byte & 0x80 != 0) return error.Overflow;
- if (@bitCast(S, ov[0]) >= 0) return error.Overflow;
+ if (@as(S, @bitCast(ov[0])) >= 0) return error.Overflow;
// and all the overflowed bits are 1
- const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
- const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
+ const remaining_shift = @as(u3, @intCast(@typeInfo(U).Int.bits - @as(u16, shift)));
+ const remaining_bits = @as(i8, @bitCast(byte | 0x80)) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
} else {
// If we don't overflow and this is the last byte and the number being decoded
// is negative, check that the remaining bits are 1
- if ((byte & 0x80 == 0) and (@bitCast(S, ov[0]) < 0)) {
- const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
- const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
+ if ((byte & 0x80 == 0) and (@as(S, @bitCast(ov[0])) < 0)) {
+ const remaining_shift = @as(u3, @intCast(@typeInfo(U).Int.bits - @as(u16, shift)));
+ const remaining_bits = @as(i8, @bitCast(byte | 0x80)) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
}
}
@@ -92,7 +92,7 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
const needs_sign_ext = group + 1 < max_group;
if (byte & 0x40 != 0 and needs_sign_ext) {
const ones = @as(S, -1);
- value |= @bitCast(U, ones) << (shift + 7);
+ value |= @as(U, @bitCast(ones)) << (shift + 7);
}
break;
}
@@ -100,13 +100,13 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
return error.Overflow;
}
- const result = @bitCast(S, value);
+ const result = @as(S, @bitCast(value));
// Only applies if we extended to i8
if (S != T) {
if (result > std.math.maxInt(T) or result < std.math.minInt(T)) return error.Overflow;
}
- return @truncate(T, result);
+ return @as(T, @truncate(result));
}
/// Write a single signed integer as signed LEB128 to the given writer.
@@ -115,11 +115,11 @@ pub fn writeILEB128(writer: anytype, int_value: anytype) !void {
const S = if (@typeInfo(T).Int.bits < 8) i8 else T;
const U = std.meta.Int(.unsigned, @typeInfo(S).Int.bits);
- var value = @intCast(S, int_value);
+ var value = @as(S, @intCast(int_value));
while (true) {
- const uvalue = @bitCast(U, value);
- const byte = @truncate(u8, uvalue);
+ const uvalue = @as(U, @bitCast(value));
+ const byte = @as(u8, @truncate(uvalue));
value >>= 6;
if (value == -1 or value == 0) {
try writer.writeByte(byte & 0x7F);
@@ -141,15 +141,15 @@ pub fn writeILEB128(writer: anytype, int_value: anytype) !void {
pub fn writeUnsignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(.unsigned, l * 7)) void {
const T = @TypeOf(int);
const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
- var value = @intCast(U, int);
+ var value = @as(U, @intCast(int));
comptime var i = 0;
inline while (i < (l - 1)) : (i += 1) {
- const byte = @truncate(u8, value) | 0b1000_0000;
+ const byte = @as(u8, @truncate(value)) | 0b1000_0000;
value >>= 7;
ptr[i] = byte;
}
- ptr[i] = @truncate(u8, value);
+ ptr[i] = @as(u8, @truncate(value));
}
test "writeUnsignedFixed" {
@@ -245,7 +245,7 @@ test "deserialize signed LEB128" {
try testing.expect((try test_read_ileb128(i16, "\xff\xff\x7f")) == -1);
try testing.expect((try test_read_ileb128(i32, "\xff\xff\xff\xff\x7f")) == -1);
try testing.expect((try test_read_ileb128(i32, "\x80\x80\x80\x80\x78")) == -0x80000000);
- try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @bitCast(i64, @intCast(u64, 0x8000000000000000)));
+ try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @as(i64, @bitCast(@as(u64, @intCast(0x8000000000000000)))));
try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x40")) == -0x4000000000000000);
try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == -0x8000000000000000);
@@ -356,7 +356,7 @@ test "serialize unsigned LEB128" {
const max = std.math.maxInt(T);
var i = @as(std.meta.Int(.unsigned, @typeInfo(T).Int.bits + 1), min);
- while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
+ while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i)));
}
}
@@ -374,6 +374,6 @@ test "serialize signed LEB128" {
const max = std.math.maxInt(T);
var i = @as(std.meta.Int(.signed, @typeInfo(T).Int.bits + 1), min);
- while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
+ while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i)));
}
}
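For reference, a usage sketch of the two functions patched above, assuming they are reachable as std.leb (as in this tree); 624485 is the usual ULEB128 example, encoding to three bytes of seven payload bits each:

    const std = @import("std");

    test "ULEB128 round trip" {
        var buf: [16]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);

        // 624485 -> 0xE5 0x8E 0x26: low seven bits first, the high bit set
        // on every byte except the last.
        try std.leb.writeULEB128(fbs.writer(), @as(u32, 624485));
        try std.testing.expectEqualSlices(u8, &[_]u8{ 0xE5, 0x8E, 0x26 }, fbs.getWritten());

        fbs.reset();
        try std.testing.expectEqual(@as(u32, 624485), try std.leb.readULEB128(u32, fbs.reader()));
    }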
diff --git a/lib/std/macho.zig b/lib/std/macho.zig
index 8bddd67023..1b886e2d90 100644
--- a/lib/std/macho.zig
+++ b/lib/std/macho.zig
@@ -787,7 +787,7 @@ pub const section_64 = extern struct {
}
pub fn @"type"(sect: section_64) u8 {
- return @truncate(u8, sect.flags & 0xff);
+ return @as(u8, @truncate(sect.flags & 0xff));
}
pub fn attrs(sect: section_64) u32 {
@@ -1870,7 +1870,7 @@ pub const LoadCommandIterator = struct {
pub fn cast(lc: LoadCommand, comptime Cmd: type) ?Cmd {
if (lc.data.len < @sizeOf(Cmd)) return null;
- return @ptrCast(*const Cmd, @alignCast(@alignOf(Cmd), &lc.data[0])).*;
+ return @as(*const Cmd, @ptrCast(@alignCast(&lc.data[0]))).*;
}
/// Asserts LoadCommand is of type segment_command_64.
@@ -1878,9 +1878,9 @@ pub const LoadCommandIterator = struct {
const segment_lc = lc.cast(segment_command_64).?;
if (segment_lc.nsects == 0) return &[0]section_64{};
const data = lc.data[@sizeOf(segment_command_64)..];
- const sections = @ptrCast(
+ const sections = @as(
[*]const section_64,
- @alignCast(@alignOf(section_64), &data[0]),
+ @ptrCast(@alignCast(&data[0])),
)[0..segment_lc.nsects];
return sections;
}
@@ -1903,16 +1903,16 @@ pub const LoadCommandIterator = struct {
pub fn next(it: *LoadCommandIterator) ?LoadCommand {
if (it.index >= it.ncmds) return null;
- const hdr = @ptrCast(
+ const hdr = @as(
*const load_command,
- @alignCast(@alignOf(load_command), &it.buffer[0]),
+ @ptrCast(@alignCast(&it.buffer[0])),
).*;
const cmd = LoadCommand{
.hdr = hdr,
.data = it.buffer[0..hdr.cmdsize],
};
- it.buffer = @alignCast(@alignOf(u64), it.buffer[hdr.cmdsize..]);
+ it.buffer = @alignCast(it.buffer[hdr.cmdsize..]);
it.index += 1;
return cmd;
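The pattern above, @as(*const Cmd, @ptrCast(@alignCast(&data[0]))).*, now names the destination pointer type once and lets both builtins infer it. A minimal sketch of the same shape on an arbitrary byte buffer; readStruct is a hypothetical helper, not part of the patch:

    const std = @import("std");

    fn readStruct(comptime T: type, bytes: []align(@alignOf(T)) const u8) T {
        std.debug.assert(bytes.len >= @sizeOf(T));
        // The destination type *const T is written once; @ptrCast and
        // @alignCast both pick it up from the @as result type.
        return @as(*const T, @ptrCast(@alignCast(&bytes[0]))).*;
    }

    test "readStruct reads back the same representation" {
        const Pair = extern struct { a: u16, b: u16 };
        const original = Pair{ .a = 0x1234, .b = 0x5678 };
        const copy = readStruct(Pair, std.mem.asBytes(&original));
        try std.testing.expect(copy.a == original.a and copy.b == original.b);
    }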
diff --git a/lib/std/math.zig b/lib/std/math.zig
index c7d354f787..2a6c24bcb4 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -85,31 +85,31 @@ pub const inf_f128 = @compileError("Deprecated: use `inf(f128)` instead");
pub const epsilon = @compileError("Deprecated: use `floatEps` instead");
pub const nan_u16 = @as(u16, 0x7C01);
-pub const nan_f16 = @bitCast(f16, nan_u16);
+pub const nan_f16 = @as(f16, @bitCast(nan_u16));
pub const qnan_u16 = @as(u16, 0x7E00);
-pub const qnan_f16 = @bitCast(f16, qnan_u16);
+pub const qnan_f16 = @as(f16, @bitCast(qnan_u16));
pub const nan_u32 = @as(u32, 0x7F800001);
-pub const nan_f32 = @bitCast(f32, nan_u32);
+pub const nan_f32 = @as(f32, @bitCast(nan_u32));
pub const qnan_u32 = @as(u32, 0x7FC00000);
-pub const qnan_f32 = @bitCast(f32, qnan_u32);
+pub const qnan_f32 = @as(f32, @bitCast(qnan_u32));
pub const nan_u64 = @as(u64, 0x7FF << 52) | 1;
-pub const nan_f64 = @bitCast(f64, nan_u64);
+pub const nan_f64 = @as(f64, @bitCast(nan_u64));
pub const qnan_u64 = @as(u64, 0x7ff8000000000000);
-pub const qnan_f64 = @bitCast(f64, qnan_u64);
+pub const qnan_f64 = @as(f64, @bitCast(qnan_u64));
pub const nan_f80 = make_f80(F80{ .fraction = 0xA000000000000000, .exp = 0x7fff });
pub const qnan_f80 = make_f80(F80{ .fraction = 0xC000000000000000, .exp = 0x7fff });
pub const nan_u128 = @as(u128, 0x7fff0000000000000000000000000001);
-pub const nan_f128 = @bitCast(f128, nan_u128);
+pub const nan_f128 = @as(f128, @bitCast(nan_u128));
pub const qnan_u128 = @as(u128, 0x7fff8000000000000000000000000000);
-pub const qnan_f128 = @bitCast(f128, qnan_u128);
+pub const qnan_f128 = @as(f128, @bitCast(qnan_u128));
pub const nan = @import("math/nan.zig").nan;
pub const snan = @import("math/nan.zig").snan;
@@ -508,10 +508,10 @@ pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
const C = @typeInfo(T).Vector.child;
const len = @typeInfo(T).Vector.len;
if (abs_shift_amt >= @typeInfo(C).Int.bits) return @splat(len, @as(C, 0));
- break :blk @splat(len, @intCast(Log2Int(C), abs_shift_amt));
+ break :blk @splat(len, @as(Log2Int(C), @intCast(abs_shift_amt)));
} else {
if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0;
- break :blk @intCast(Log2Int(T), abs_shift_amt);
+ break :blk @as(Log2Int(T), @intCast(abs_shift_amt));
}
};
@@ -552,10 +552,10 @@ pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
const C = @typeInfo(T).Vector.child;
const len = @typeInfo(T).Vector.len;
if (abs_shift_amt >= @typeInfo(C).Int.bits) return @splat(len, @as(C, 0));
- break :blk @splat(len, @intCast(Log2Int(C), abs_shift_amt));
+ break :blk @splat(len, @as(Log2Int(C), @intCast(abs_shift_amt)));
} else {
if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0;
- break :blk @intCast(Log2Int(T), abs_shift_amt);
+ break :blk @as(Log2Int(T), @intCast(abs_shift_amt));
}
};
@@ -596,7 +596,7 @@ pub fn rotr(comptime T: type, x: T, r: anytype) T {
if (@typeInfo(C).Int.signedness == .signed) {
@compileError("cannot rotate signed integers");
}
- const ar = @intCast(Log2Int(C), @mod(r, @typeInfo(C).Int.bits));
+ const ar = @as(Log2Int(C), @intCast(@mod(r, @typeInfo(C).Int.bits)));
return (x >> @splat(@typeInfo(T).Vector.len, ar)) | (x << @splat(@typeInfo(T).Vector.len, 1 + ~ar));
} else if (@typeInfo(T).Int.signedness == .signed) {
@compileError("cannot rotate signed integer");
@@ -604,7 +604,7 @@ pub fn rotr(comptime T: type, x: T, r: anytype) T {
if (T == u0) return 0;
if (isPowerOfTwo(@typeInfo(T).Int.bits)) {
- const ar = @intCast(Log2Int(T), @mod(r, @typeInfo(T).Int.bits));
+ const ar = @as(Log2Int(T), @intCast(@mod(r, @typeInfo(T).Int.bits)));
return x >> ar | x << (1 +% ~ar);
} else {
const ar = @mod(r, @typeInfo(T).Int.bits);
@@ -640,7 +640,7 @@ pub fn rotl(comptime T: type, x: T, r: anytype) T {
if (@typeInfo(C).Int.signedness == .signed) {
@compileError("cannot rotate signed integers");
}
- const ar = @intCast(Log2Int(C), @mod(r, @typeInfo(C).Int.bits));
+ const ar = @as(Log2Int(C), @intCast(@mod(r, @typeInfo(C).Int.bits)));
return (x << @splat(@typeInfo(T).Vector.len, ar)) | (x >> @splat(@typeInfo(T).Vector.len, 1 +% ~ar));
} else if (@typeInfo(T).Int.signedness == .signed) {
@compileError("cannot rotate signed integer");
@@ -648,7 +648,7 @@ pub fn rotl(comptime T: type, x: T, r: anytype) T {
if (T == u0) return 0;
if (isPowerOfTwo(@typeInfo(T).Int.bits)) {
- const ar = @intCast(Log2Int(T), @mod(r, @typeInfo(T).Int.bits));
+ const ar = @as(Log2Int(T), @intCast(@mod(r, @typeInfo(T).Int.bits)));
return x << ar | x >> 1 +% ~ar;
} else {
const ar = @mod(r, @typeInfo(T).Int.bits);
@@ -1029,9 +1029,9 @@ pub fn absCast(x: anytype) switch (@typeInfo(@TypeOf(x))) {
if (int_info.signedness == .unsigned) return x;
const Uint = std.meta.Int(.unsigned, int_info.bits);
if (x < 0) {
- return ~@bitCast(Uint, x +% -1);
+ return ~@as(Uint, @bitCast(x +% -1));
} else {
- return @intCast(Uint, x);
+ return @as(Uint, @intCast(x));
}
},
else => unreachable,
@@ -1056,7 +1056,7 @@ pub fn negateCast(x: anytype) !std.meta.Int(.signed, @bitSizeOf(@TypeOf(x))) {
if (x == -minInt(int)) return minInt(int);
- return -@intCast(int, x);
+ return -@as(int, @intCast(x));
}
test "negateCast" {
@@ -1080,7 +1080,7 @@ pub fn cast(comptime T: type, x: anytype) ?T {
} else if ((is_comptime or minInt(@TypeOf(x)) < minInt(T)) and x < minInt(T)) {
return null;
} else {
- return @intCast(T, x);
+ return @as(T, @intCast(x));
}
}
@@ -1102,13 +1102,19 @@ test "cast" {
pub const AlignCastError = error{UnalignedMemory};
+fn AlignCastResult(comptime alignment: u29, comptime Ptr: type) type {
+ var ptr_info = @typeInfo(Ptr);
+ ptr_info.Pointer.alignment = alignment;
+ return @Type(ptr_info);
+}
+
/// Align cast a pointer but return an error if it's the wrong alignment
-pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!@TypeOf(@alignCast(alignment, ptr)) {
+pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!AlignCastResult(alignment, @TypeOf(ptr)) {
const addr = @intFromPtr(ptr);
if (addr % alignment != 0) {
return error.UnalignedMemory;
}
- return @alignCast(alignment, ptr);
+ return @alignCast(ptr);
}
/// Asserts `int > 0`.
@@ -1172,7 +1178,7 @@ pub inline fn floor(value: anytype) @TypeOf(value) {
pub fn floorPowerOfTwo(comptime T: type, value: T) T {
const uT = std.meta.Int(.unsigned, @typeInfo(T).Int.bits);
if (value <= 0) return 0;
- return @as(T, 1) << log2_int(uT, @intCast(uT, value));
+ return @as(T, 1) << log2_int(uT, @as(uT, @intCast(value)));
}
test "floorPowerOfTwo" {
@@ -1211,7 +1217,7 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(@typeInfo(
assert(value != 0);
const PromotedType = std.meta.Int(@typeInfo(T).Int.signedness, @typeInfo(T).Int.bits + 1);
const ShiftType = std.math.Log2Int(PromotedType);
- return @as(PromotedType, 1) << @intCast(ShiftType, @typeInfo(T).Int.bits - @clz(value - 1));
+ return @as(PromotedType, 1) << @as(ShiftType, @intCast(@typeInfo(T).Int.bits - @clz(value - 1)));
}
/// Returns the next power of two (if the value is not already a power of two).
@@ -1227,7 +1233,7 @@ pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) {
if (overflowBit & x != 0) {
return error.Overflow;
}
- return @intCast(T, x);
+ return @as(T, @intCast(x));
}
/// Returns the next power of two (if the value is not already a power
@@ -1277,7 +1283,7 @@ pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
if (@typeInfo(T) != .Int or @typeInfo(T).Int.signedness != .unsigned)
@compileError("log2_int requires an unsigned integer, found " ++ @typeName(T));
assert(x != 0);
- return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(x));
+ return @as(Log2Int(T), @intCast(@typeInfo(T).Int.bits - 1 - @clz(x)));
}
/// Return the log base 2 of integer value x, rounding up to the
@@ -1311,8 +1317,8 @@ pub fn lossyCast(comptime T: type, value: anytype) T {
switch (@typeInfo(T)) {
.Float => {
switch (@typeInfo(@TypeOf(value))) {
- .Int => return @floatFromInt(T, value),
- .Float => return @floatCast(T, value),
+ .Int => return @as(T, @floatFromInt(value)),
+ .Float => return @as(T, @floatCast(value)),
.ComptimeInt => return @as(T, value),
.ComptimeFloat => return @as(T, value),
else => @compileError("bad type"),
@@ -1326,7 +1332,7 @@ pub fn lossyCast(comptime T: type, value: anytype) T {
} else if (value <= minInt(T)) {
return @as(T, minInt(T));
} else {
- return @intCast(T, value);
+ return @as(T, @intCast(value));
}
},
.Float, .ComptimeFloat => {
@@ -1335,7 +1341,7 @@ pub fn lossyCast(comptime T: type, value: anytype) T {
} else if (value <= minInt(T)) {
return @as(T, minInt(T));
} else {
- return @intFromFloat(T, value);
+ return @as(T, @intFromFloat(value));
}
},
else => @compileError("bad type"),
@@ -1594,7 +1600,7 @@ test "compare between signed and unsigned" {
try testing.expect(compare(@as(u8, 255), .gt, @as(i9, -1)));
try testing.expect(!compare(@as(u8, 255), .lte, @as(i9, -1)));
try testing.expect(compare(@as(u8, 1), .lt, @as(u8, 2)));
- try testing.expect(@bitCast(u8, @as(i8, -1)) == @as(u8, 255));
+ try testing.expect(@as(u8, @bitCast(@as(i8, -1))) == @as(u8, 255));
try testing.expect(!compare(@as(u8, 255), .eq, @as(i8, -1)));
try testing.expect(compare(@as(u8, 1), .eq, @as(u8, 1)));
}
@@ -1624,7 +1630,7 @@ test "order.compare" {
test "compare.reverse" {
inline for (@typeInfo(CompareOperator).Enum.fields) |op_field| {
- const op = @enumFromInt(CompareOperator, op_field.value);
+ const op = @as(CompareOperator, @enumFromInt(op_field.value));
try testing.expect(compare(2, op, 3) == compare(3, op.reverse(), 2));
try testing.expect(compare(3, op, 3) == compare(3, op.reverse(), 3));
try testing.expect(compare(4, op, 3) == compare(3, op.reverse(), 4));
@@ -1646,10 +1652,10 @@ pub inline fn boolMask(comptime MaskInt: type, value: bool) MaskInt {
if (MaskInt == u1) return @intFromBool(value);
if (MaskInt == i1) {
// The @as here is a workaround for #7950
- return @bitCast(i1, @as(u1, @intFromBool(value)));
+ return @as(i1, @bitCast(@as(u1, @intFromBool(value))));
}
- return -%@intCast(MaskInt, @intFromBool(value));
+ return -%@as(MaskInt, @intCast(@intFromBool(value)));
}
test "boolMask" {
@@ -1680,7 +1686,7 @@ test "boolMask" {
/// Return the mod of `num` with the smallest integer type
pub fn comptimeMod(num: anytype, comptime denom: comptime_int) IntFittingRange(0, denom - 1) {
- return @intCast(IntFittingRange(0, denom - 1), @mod(num, denom));
+ return @as(IntFittingRange(0, denom - 1), @intCast(@mod(num, denom)));
}
pub const F80 = struct {
@@ -1690,14 +1696,14 @@ pub const F80 = struct {
pub fn make_f80(repr: F80) f80 {
const int = (@as(u80, repr.exp) << 64) | repr.fraction;
- return @bitCast(f80, int);
+ return @as(f80, @bitCast(int));
}
pub fn break_f80(x: f80) F80 {
- const int = @bitCast(u80, x);
+ const int = @as(u80, @bitCast(x));
return .{
- .fraction = @truncate(u64, int),
- .exp = @truncate(u16, int >> 64),
+ .fraction = @as(u64, @truncate(int)),
+ .exp = @as(u16, @truncate(int >> 64)),
};
}
@@ -1709,7 +1715,7 @@ pub inline fn sign(i: anytype) @TypeOf(i) {
const T = @TypeOf(i);
return switch (@typeInfo(T)) {
.Int, .ComptimeInt => @as(T, @intFromBool(i > 0)) - @as(T, @intFromBool(i < 0)),
- .Float, .ComptimeFloat => @floatFromInt(T, @intFromBool(i > 0)) - @floatFromInt(T, @intFromBool(i < 0)),
+ .Float, .ComptimeFloat => @as(T, @floatFromInt(@intFromBool(i > 0))) - @as(T, @floatFromInt(@intFromBool(i < 0))),
.Vector => |vinfo| blk: {
switch (@typeInfo(vinfo.child)) {
.Int, .Float => {
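Most of the constants above are now written as @as(FloatType, @bitCast(bits)). A small sketch checking two of the f32 bit patterns against the classifier functions:

    const std = @import("std");

    test "float constants from bit patterns" {
        const inf_bits: u32 = 0x7F800000; // all-ones exponent, zero mantissa
        const qnan_bits: u32 = 0x7FC00000; // all-ones exponent, top mantissa bit set
        try std.testing.expect(std.math.isInf(@as(f32, @bitCast(inf_bits))));
        try std.testing.expect(std.math.isNan(@as(f32, @bitCast(qnan_bits))));
    }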
diff --git a/lib/std/math/acos.zig b/lib/std/math/acos.zig
index e88bed7227..1a29ca7b54 100644
--- a/lib/std/math/acos.zig
+++ b/lib/std/math/acos.zig
@@ -36,7 +36,7 @@ fn acos32(x: f32) f32 {
const pio2_hi = 1.5707962513e+00;
const pio2_lo = 7.5497894159e-08;
- const hx: u32 = @bitCast(u32, x);
+ const hx: u32 = @as(u32, @bitCast(x));
const ix: u32 = hx & 0x7FFFFFFF;
// |x| >= 1 or nan
@@ -72,8 +72,8 @@ fn acos32(x: f32) f32 {
// x > 0.5
const z = (1.0 - x) * 0.5;
const s = @sqrt(z);
- const jx = @bitCast(u32, s);
- const df = @bitCast(f32, jx & 0xFFFFF000);
+ const jx = @as(u32, @bitCast(s));
+ const df = @as(f32, @bitCast(jx & 0xFFFFF000));
const c = (z - df * df) / (s + df);
const w = r32(z) * s + c;
return 2 * (df + w);
@@ -100,13 +100,13 @@ fn acos64(x: f64) f64 {
const pio2_hi: f64 = 1.57079632679489655800e+00;
const pio2_lo: f64 = 6.12323399573676603587e-17;
- const ux = @bitCast(u64, x);
- const hx = @intCast(u32, ux >> 32);
+ const ux = @as(u64, @bitCast(x));
+ const hx = @as(u32, @intCast(ux >> 32));
const ix = hx & 0x7FFFFFFF;
// |x| >= 1 or nan
if (ix >= 0x3FF00000) {
- const lx = @intCast(u32, ux & 0xFFFFFFFF);
+ const lx = @as(u32, @intCast(ux & 0xFFFFFFFF));
// acos(1) = 0, acos(-1) = pi
if ((ix - 0x3FF00000) | lx == 0) {
@@ -141,8 +141,8 @@ fn acos64(x: f64) f64 {
// x > 0.5
const z = (1.0 - x) * 0.5;
const s = @sqrt(z);
- const jx = @bitCast(u64, s);
- const df = @bitCast(f64, jx & 0xFFFFFFFF00000000);
+ const jx = @as(u64, @bitCast(s));
+ const df = @as(f64, @bitCast(jx & 0xFFFFFFFF00000000));
const c = (z - df * df) / (s + df);
const w = r64(z) * s + c;
return 2 * (df + w);
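acos64 and the other double-precision routines in this series all start by splitting the f64 into a high word (sign, exponent, top mantissa bits) and a low word. A standalone sketch of that split with the new casts; words is an illustrative name only:

    const std = @import("std");

    fn words(x: f64) struct { hi: u32, lo: u32 } {
        const ux = @as(u64, @bitCast(x));
        return .{
            .hi = @as(u32, @intCast(ux >> 32)), // sign, exponent, high mantissa
            .lo = @as(u32, @truncate(ux)), // low 32 mantissa bits
        };
    }

    test "high/low word split" {
        const w = words(1.0);
        // 1.0 is 0x3FF0000000000000: biased exponent 0x3FF, zero mantissa.
        try std.testing.expect(w.hi == 0x3FF00000 and w.lo == 0);
    }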
diff --git a/lib/std/math/acosh.zig b/lib/std/math/acosh.zig
index a78130d2ef..0c6de9933e 100644
--- a/lib/std/math/acosh.zig
+++ b/lib/std/math/acosh.zig
@@ -24,7 +24,7 @@ pub fn acosh(x: anytype) @TypeOf(x) {
// acosh(x) = log(x + sqrt(x * x - 1))
fn acosh32(x: f32) f32 {
- const u = @bitCast(u32, x);
+ const u = @as(u32, @bitCast(x));
const i = u & 0x7FFFFFFF;
// |x| < 2, invalid if x < 1 or nan
@@ -42,7 +42,7 @@ fn acosh32(x: f32) f32 {
}
fn acosh64(x: f64) f64 {
- const u = @bitCast(u64, x);
+ const u = @as(u64, @bitCast(x));
const e = (u >> 52) & 0x7FF;
// |x| < 2, invalid if x < 1 or nan
diff --git a/lib/std/math/asin.zig b/lib/std/math/asin.zig
index 48ad04c579..ac1d01ff55 100644
--- a/lib/std/math/asin.zig
+++ b/lib/std/math/asin.zig
@@ -36,7 +36,7 @@ fn r32(z: f32) f32 {
fn asin32(x: f32) f32 {
const pio2 = 1.570796326794896558e+00;
- const hx: u32 = @bitCast(u32, x);
+ const hx: u32 = @as(u32, @bitCast(x));
const ix: u32 = hx & 0x7FFFFFFF;
// |x| >= 1
@@ -92,13 +92,13 @@ fn asin64(x: f64) f64 {
const pio2_hi: f64 = 1.57079632679489655800e+00;
const pio2_lo: f64 = 6.12323399573676603587e-17;
- const ux = @bitCast(u64, x);
- const hx = @intCast(u32, ux >> 32);
+ const ux = @as(u64, @bitCast(x));
+ const hx = @as(u32, @intCast(ux >> 32));
const ix = hx & 0x7FFFFFFF;
// |x| >= 1 or nan
if (ix >= 0x3FF00000) {
- const lx = @intCast(u32, ux & 0xFFFFFFFF);
+ const lx = @as(u32, @intCast(ux & 0xFFFFFFFF));
// asin(1) = +-pi/2 with inexact
if ((ix - 0x3FF00000) | lx == 0) {
@@ -128,8 +128,8 @@ fn asin64(x: f64) f64 {
if (ix >= 0x3FEF3333) {
fx = pio2_hi - 2 * (s + s * r);
} else {
- const jx = @bitCast(u64, s);
- const df = @bitCast(f64, jx & 0xFFFFFFFF00000000);
+ const jx = @as(u64, @bitCast(s));
+ const df = @as(f64, @bitCast(jx & 0xFFFFFFFF00000000));
const c = (z - df * df) / (s + df);
fx = 0.5 * pio2_hi - (2 * s * r - (pio2_lo - 2 * c) - (0.5 * pio2_hi - 2 * df));
}
diff --git a/lib/std/math/asinh.zig b/lib/std/math/asinh.zig
index 65028ef5d9..13b1045bf6 100644
--- a/lib/std/math/asinh.zig
+++ b/lib/std/math/asinh.zig
@@ -26,11 +26,11 @@ pub fn asinh(x: anytype) @TypeOf(x) {
// asinh(x) = sign(x) * log(|x| + sqrt(x * x + 1)) ~= x - x^3/6 + o(x^5)
fn asinh32(x: f32) f32 {
- const u = @bitCast(u32, x);
+ const u = @as(u32, @bitCast(x));
const i = u & 0x7FFFFFFF;
const s = i >> 31;
- var rx = @bitCast(f32, i); // |x|
+ var rx = @as(f32, @bitCast(i)); // |x|
// TODO: Shouldn't need this explicit check.
if (math.isNegativeInf(x)) {
@@ -58,11 +58,11 @@ fn asinh32(x: f32) f32 {
}
fn asinh64(x: f64) f64 {
- const u = @bitCast(u64, x);
+ const u = @as(u64, @bitCast(x));
const e = (u >> 52) & 0x7FF;
const s = e >> 63;
- var rx = @bitCast(f64, u & (maxInt(u64) >> 1)); // |x|
+ var rx = @as(f64, @bitCast(u & (maxInt(u64) >> 1))); // |x|
if (math.isNegativeInf(x)) {
return x;
diff --git a/lib/std/math/atan.zig b/lib/std/math/atan.zig
index 41caae11a6..75be6ea746 100644
--- a/lib/std/math/atan.zig
+++ b/lib/std/math/atan.zig
@@ -46,7 +46,7 @@ fn atan32(x_: f32) f32 {
};
var x = x_;
- var ix: u32 = @bitCast(u32, x);
+ var ix: u32 = @as(u32, @bitCast(x));
const sign = ix >> 31;
ix &= 0x7FFFFFFF;
@@ -143,8 +143,8 @@ fn atan64(x_: f64) f64 {
};
var x = x_;
- var ux = @bitCast(u64, x);
- var ix = @intCast(u32, ux >> 32);
+ var ux = @as(u64, @bitCast(x));
+ var ix = @as(u32, @intCast(ux >> 32));
const sign = ix >> 31;
ix &= 0x7FFFFFFF;
@@ -165,7 +165,7 @@ fn atan64(x_: f64) f64 {
// |x| < 2^(-27)
if (ix < 0x3E400000) {
if (ix < 0x00100000) {
- math.doNotOptimizeAway(@floatCast(f32, x));
+ math.doNotOptimizeAway(@as(f32, @floatCast(x)));
}
return x;
}
@@ -212,7 +212,7 @@ fn atan64(x_: f64) f64 {
}
test "math.atan" {
- try expect(@bitCast(u32, atan(@as(f32, 0.2))) == @bitCast(u32, atan32(0.2)));
+ try expect(@as(u32, @bitCast(atan(@as(f32, 0.2)))) == @as(u32, @bitCast(atan32(0.2))));
try expect(atan(@as(f64, 0.2)) == atan64(0.2));
}
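The rewritten atan test compares f32 results bit-for-bit by round-tripping through u32 rather than using a tolerance. A short sketch of why the bit view differs from float comparison:

    const std = @import("std");

    test "bit-exact comparison via @bitCast" {
        const n = std.math.nan(f32);
        try std.testing.expect(n != n); // NaN never compares equal as a float...
        try std.testing.expect(@as(u32, @bitCast(n)) == @as(u32, @bitCast(n))); // ...but its bits do
    }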
diff --git a/lib/std/math/atan2.zig b/lib/std/math/atan2.zig
index b9b37e7da4..026c76b5b2 100644
--- a/lib/std/math/atan2.zig
+++ b/lib/std/math/atan2.zig
@@ -44,8 +44,8 @@ fn atan2_32(y: f32, x: f32) f32 {
return x + y;
}
- var ix = @bitCast(u32, x);
- var iy = @bitCast(u32, y);
+ var ix = @as(u32, @bitCast(x));
+ var iy = @as(u32, @bitCast(y));
// x = 1.0
if (ix == 0x3F800000) {
@@ -129,13 +129,13 @@ fn atan2_64(y: f64, x: f64) f64 {
return x + y;
}
- var ux = @bitCast(u64, x);
- var ix = @intCast(u32, ux >> 32);
- var lx = @intCast(u32, ux & 0xFFFFFFFF);
+ var ux = @as(u64, @bitCast(x));
+ var ix = @as(u32, @intCast(ux >> 32));
+ var lx = @as(u32, @intCast(ux & 0xFFFFFFFF));
- var uy = @bitCast(u64, y);
- var iy = @intCast(u32, uy >> 32);
- var ly = @intCast(u32, uy & 0xFFFFFFFF);
+ var uy = @as(u64, @bitCast(y));
+ var iy = @as(u32, @intCast(uy >> 32));
+ var ly = @as(u32, @intCast(uy & 0xFFFFFFFF));
// x = 1.0
if ((ix -% 0x3FF00000) | lx == 0) {
diff --git a/lib/std/math/atanh.zig b/lib/std/math/atanh.zig
index aed5d8bca8..58b56ac8fa 100644
--- a/lib/std/math/atanh.zig
+++ b/lib/std/math/atanh.zig
@@ -26,11 +26,11 @@ pub fn atanh(x: anytype) @TypeOf(x) {
// atanh(x) = log((1 + x) / (1 - x)) / 2 = log1p(2x / (1 - x)) / 2 ~= x + x^3 / 3 + o(x^5)
fn atanh_32(x: f32) f32 {
- const u = @bitCast(u32, x);
+ const u = @as(u32, @bitCast(x));
const i = u & 0x7FFFFFFF;
const s = u >> 31;
- var y = @bitCast(f32, i); // |x|
+ var y = @as(f32, @bitCast(i)); // |x|
if (y == 1.0) {
return math.copysign(math.inf(f32), x);
@@ -55,11 +55,11 @@ fn atanh_32(x: f32) f32 {
}
fn atanh_64(x: f64) f64 {
- const u = @bitCast(u64, x);
+ const u = @as(u64, @bitCast(x));
const e = (u >> 52) & 0x7FF;
const s = u >> 63;
- var y = @bitCast(f64, u & (maxInt(u64) >> 1)); // |x|
+ var y = @as(f64, @bitCast(u & (maxInt(u64) >> 1))); // |x|
if (y == 1.0) {
return math.copysign(math.inf(f64), x);
@@ -69,7 +69,7 @@ fn atanh_64(x: f64) f64 {
if (e < 0x3FF - 32) {
// underflow
if (e == 0) {
- math.doNotOptimizeAway(@floatCast(f32, y));
+ math.doNotOptimizeAway(@as(f32, @floatCast(y)));
}
}
// |x| < 0.5
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 846a809e05..213876ccad 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -30,7 +30,7 @@ pub fn calcLimbLen(scalar: anytype) usize {
}
const w_value = std.math.absCast(scalar);
- return @intCast(usize, @divFloor(@intCast(Limb, math.log2(w_value)), limb_bits) + 1);
+ return @as(usize, @intCast(@divFloor(@as(Limb, @intCast(math.log2(w_value))), limb_bits) + 1));
}
pub fn calcToStringLimbsBufferLen(a_len: usize, base: u8) usize {
@@ -87,8 +87,8 @@ pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
// r2 = b * c
const bc = @as(DoubleLimb, math.mulWide(Limb, b, c));
- const r2 = @truncate(Limb, bc);
- const c2 = @truncate(Limb, bc >> limb_bits);
+ const r2 = @as(Limb, @truncate(bc));
+ const c2 = @as(Limb, @truncate(bc >> limb_bits));
// ov2[0] = ov1[0] + r2
const ov2 = @addWithOverflow(ov1[0], r2);
@@ -107,8 +107,8 @@ fn subMulLimbWithBorrow(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
// r2 = b * c
const bc = @as(DoubleLimb, std.math.mulWide(Limb, b, c));
- const r2 = @truncate(Limb, bc);
- const c2 = @truncate(Limb, bc >> limb_bits);
+ const r2 = @as(Limb, @truncate(bc));
+ const c2 = @as(Limb, @truncate(bc >> limb_bits));
// ov2[0] = ov1[0] - r2
const ov2 = @subWithOverflow(ov1[0], r2);
@@ -244,7 +244,7 @@ pub const Mutable = struct {
} else {
var i: usize = 0;
while (true) : (i += 1) {
- self.limbs[i] = @truncate(Limb, w_value);
+ self.limbs[i] = @as(Limb, @truncate(w_value));
w_value >>= limb_bits;
if (w_value == 0) break;
@@ -340,7 +340,7 @@ pub const Mutable = struct {
}
const req_limbs = calcTwosCompLimbCount(bit_count);
- const bit = @truncate(Log2Limb, bit_count - 1);
+ const bit = @as(Log2Limb, @truncate(bit_count - 1));
const signmask = @as(Limb, 1) << bit; // 0b0..010..0 where 1 is the sign bit.
const mask = (signmask << 1) -% 1; // 0b0..011..1 where the leftmost 1 is the sign bit.
@@ -365,7 +365,7 @@ pub const Mutable = struct {
r.set(0);
} else {
const new_req_limbs = calcTwosCompLimbCount(bit_count - 1);
- const msb = @truncate(Log2Limb, bit_count - 2);
+ const msb = @as(Log2Limb, @truncate(bit_count - 2));
const new_signmask = @as(Limb, 1) << msb; // 0b0..010..0 where 1 is the sign bit.
const new_mask = (new_signmask << 1) -% 1; // 0b0..001..1 where the rightmost 0 is the sign bit.
@@ -1153,7 +1153,7 @@ pub const Mutable = struct {
// const msb = @truncate(Log2Limb, checkbit);
// const checkmask = (@as(Limb, 1) << msb) -% 1;
- if (a.limbs[a.limbs.len - 1] >> @truncate(Log2Limb, checkbit) != 0) {
+ if (a.limbs[a.limbs.len - 1] >> @as(Log2Limb, @truncate(checkbit)) != 0) {
// Need to saturate.
r.setTwosCompIntLimit(if (a.positive) .max else .min, signedness, bit_count);
return;
@@ -1554,7 +1554,7 @@ pub const Mutable = struct {
// Optimization for small divisor. By using a half limb we can avoid requiring DoubleLimb
// divisions in the hot code path. This may often require compiler_rt software-emulation.
if (divisor < maxInt(HalfLimb)) {
- lldiv0p5(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], @intCast(HalfLimb, divisor));
+ lldiv0p5(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], @as(HalfLimb, @intCast(divisor)));
} else {
lldiv1(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], divisor);
}
@@ -1671,7 +1671,7 @@ pub const Mutable = struct {
} else {
const q0 = (@as(DoubleLimb, x.limbs[i]) << limb_bits) | @as(DoubleLimb, x.limbs[i - 1]);
const n0 = @as(DoubleLimb, y.limbs[t]);
- q.limbs[k] = @intCast(Limb, q0 / n0);
+ q.limbs[k] = @as(Limb, @intCast(q0 / n0));
}
// 3.2
@@ -1750,7 +1750,7 @@ pub const Mutable = struct {
return;
}
- const bit = @truncate(Log2Limb, bit_count - 1);
+ const bit = @as(Log2Limb, @truncate(bit_count - 1));
const signmask = @as(Limb, 1) << bit;
const mask = (signmask << 1) -% 1;
@@ -1781,7 +1781,7 @@ pub const Mutable = struct {
return;
}
- const bit = @truncate(Log2Limb, bit_count - 1);
+ const bit = @as(Log2Limb, @truncate(bit_count - 1));
const signmask = @as(Limb, 1) << bit; // 0b0..010...0 where 1 is the sign bit.
const mask = (signmask << 1) -% 1; // 0b0..01..1 where the leftmost 1 is the sign bit.
@@ -1912,7 +1912,7 @@ pub const Mutable = struct {
.Big => buffer.len - ((total_bits + 7) / 8),
};
- const sign_bit = @as(u8, 1) << @intCast(u3, (total_bits - 1) % 8);
+ const sign_bit = @as(u8, 1) << @as(u3, @intCast((total_bits - 1) % 8));
positive = ((buffer[last_byte] & sign_bit) == 0);
}
@@ -1942,7 +1942,7 @@ pub const Mutable = struct {
.signed => b: {
const SLimb = std.meta.Int(.signed, @bitSizeOf(Limb));
const limb = mem.readVarPackedInt(SLimb, buffer, bit_index + bit_offset, bit_count - bit_index, endian, .signed);
- break :b @bitCast(Limb, limb);
+ break :b @as(Limb, @bitCast(limb));
},
};
@@ -2170,7 +2170,7 @@ pub const Const = struct {
var r: UT = 0;
if (@sizeOf(UT) <= @sizeOf(Limb)) {
- r = @intCast(UT, self.limbs[0]);
+ r = @as(UT, @intCast(self.limbs[0]));
} else {
for (self.limbs[0..self.limbs.len], 0..) |_, ri| {
const limb = self.limbs[self.limbs.len - ri - 1];
@@ -2180,10 +2180,10 @@ pub const Const = struct {
}
if (info.signedness == .unsigned) {
- return if (self.positive) @intCast(T, r) else error.NegativeIntoUnsigned;
+ return if (self.positive) @as(T, @intCast(r)) else error.NegativeIntoUnsigned;
} else {
if (self.positive) {
- return @intCast(T, r);
+ return @as(T, @intCast(r));
} else {
if (math.cast(T, r)) |ok| {
return -ok;
@@ -2292,7 +2292,7 @@ pub const Const = struct {
outer: for (self.limbs[0..self.limbs.len]) |limb| {
var shift: usize = 0;
while (shift < limb_bits) : (shift += base_shift) {
- const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & @as(Limb, base - 1));
+ const r = @as(u8, @intCast((limb >> @as(Log2Limb, @intCast(shift))) & @as(Limb, base - 1)));
const ch = std.fmt.digitToChar(r, case);
string[digits_len] = ch;
digits_len += 1;
@@ -2340,7 +2340,7 @@ pub const Const = struct {
var r_word = r.limbs[0];
var i: usize = 0;
while (i < digits_per_limb) : (i += 1) {
- const ch = std.fmt.digitToChar(@intCast(u8, r_word % base), case);
+ const ch = std.fmt.digitToChar(@as(u8, @intCast(r_word % base)), case);
r_word /= base;
string[digits_len] = ch;
digits_len += 1;
@@ -2352,7 +2352,7 @@ pub const Const = struct {
var r_word = q.limbs[0];
while (r_word != 0) {
- const ch = std.fmt.digitToChar(@intCast(u8, r_word % base), case);
+ const ch = std.fmt.digitToChar(@as(u8, @intCast(r_word % base)), case);
r_word /= base;
string[digits_len] = ch;
digits_len += 1;
@@ -3680,13 +3680,13 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
rem.* = 0;
} else if (pdiv < b) {
quo[i] = 0;
- rem.* = @truncate(Limb, pdiv);
+ rem.* = @as(Limb, @truncate(pdiv));
} else if (pdiv == b) {
quo[i] = 1;
rem.* = 0;
} else {
- quo[i] = @truncate(Limb, @divTrunc(pdiv, b));
- rem.* = @truncate(Limb, pdiv - (quo[i] *% b));
+ quo[i] = @as(Limb, @truncate(@divTrunc(pdiv, b)));
+ rem.* = @as(Limb, @truncate(pdiv - (quo[i] *% b)));
}
}
}
@@ -3719,7 +3719,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
@setRuntimeSafety(debug_safety);
assert(a.len >= 1);
- const interior_limb_shift = @truncate(Log2Limb, shift);
+ const interior_limb_shift = @as(Log2Limb, @truncate(shift));
// We only need the extra limb if the shift of the last element overflows.
// This is useful for the implementation of `shiftLeftSat`.
@@ -3741,7 +3741,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
r[dst_i] = carry | @call(.always_inline, math.shr, .{
Limb,
src_digit,
- limb_bits - @intCast(Limb, interior_limb_shift),
+ limb_bits - @as(Limb, @intCast(interior_limb_shift)),
});
carry = (src_digit << interior_limb_shift);
}
@@ -3756,7 +3756,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
assert(r.len >= a.len - (shift / limb_bits));
const limb_shift = shift / limb_bits;
- const interior_limb_shift = @truncate(Log2Limb, shift);
+ const interior_limb_shift = @as(Log2Limb, @truncate(shift));
var carry: Limb = 0;
var i: usize = 0;
@@ -3769,7 +3769,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
carry = @call(.always_inline, math.shl, .{
Limb,
src_digit,
- limb_bits - @intCast(Limb, interior_limb_shift),
+ limb_bits - @as(Limb, @intCast(interior_limb_shift)),
});
}
}
@@ -4150,7 +4150,7 @@ fn llpow(r: []Limb, a: []const Limb, b: u32, tmp_limbs: []Limb) void {
// Square the result if the current bit is zero, square and multiply by a if
// it is one.
var exp_bits = 32 - 1 - b_leading_zeros;
- var exp = b << @intCast(u5, 1 + b_leading_zeros);
+ var exp = b << @as(u5, @intCast(1 + b_leading_zeros));
var i: usize = 0;
while (i < exp_bits) : (i += 1) {
@@ -4174,9 +4174,9 @@ fn fixedIntFromSignedDoubleLimb(A: SignedDoubleLimb, storage: []Limb) Mutable {
assert(storage.len >= 2);
const A_is_positive = A >= 0;
- const Au = @intCast(DoubleLimb, if (A < 0) -A else A);
- storage[0] = @truncate(Limb, Au);
- storage[1] = @truncate(Limb, Au >> limb_bits);
+ const Au = @as(DoubleLimb, @intCast(if (A < 0) -A else A));
+ storage[0] = @as(Limb, @truncate(Au));
+ storage[1] = @as(Limb, @truncate(Au >> limb_bits));
return .{
.limbs = storage[0..2],
.positive = A_is_positive,
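addMulLimbWithCarry above splits a DoubleLimb product into a low and a high limb with two @truncate calls. The same shape with concrete 64-bit limbs:

    const std = @import("std");

    test "splitting a double-width product into limbs" {
        const a: u64 = 0xFFFF_FFFF_FFFF_FFFF;
        const b: u64 = 2;
        const wide = std.math.mulWide(u64, a, b); // u128 product
        const lo = @as(u64, @truncate(wide));
        const hi = @as(u64, @truncate(wide >> 64));
        try std.testing.expect(lo == 0xFFFF_FFFF_FFFF_FFFE);
        try std.testing.expect(hi == 1);
    }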
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index 9c3c1b6881..3eaa46d7c1 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -2898,19 +2898,19 @@ test "big int conversion write twos complement with padding" {
buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa };
m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned);
- try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaa_02030405_06070809_0a0b0c0d)) == .eq);
+ try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaa_02030405_06070809_0a0b0c0d))) == .eq);
buffer = &[_]u8{ 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned);
- try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaa_02030405_06070809_0a0b0c0d)) == .eq);
+ try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaa_02030405_06070809_0a0b0c0d))) == .eq);
buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa, 0xaa, 0xaa, 0xaa };
m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned);
- try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaaaaaaaa_02030405_06070809_0a0b0c0d)) == .eq);
+ try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaaaaaaaa_02030405_06070809_0a0b0c0d))) == .eq);
buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned);
- try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaaaaaaaa_02030405_06070809_0a0b0c0d)) == .eq);
+ try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaaaaaaaa_02030405_06070809_0a0b0c0d))) == .eq);
bit_count = 12 * 8 + 2;
@@ -3014,20 +3014,20 @@ test "big int bit reverse" {
try bitReverseTest(u96, 0x123456789abcdef111213141, 0x828c84888f7b3d591e6a2c48);
try bitReverseTest(u128, 0x123456789abcdef11121314151617181, 0x818e868a828c84888f7b3d591e6a2c48);
- try bitReverseTest(i8, @bitCast(i8, @as(u8, 0x92)), @bitCast(i8, @as(u8, 0x49)));
- try bitReverseTest(i16, @bitCast(i16, @as(u16, 0x1234)), @bitCast(i16, @as(u16, 0x2c48)));
- try bitReverseTest(i24, @bitCast(i24, @as(u24, 0x123456)), @bitCast(i24, @as(u24, 0x6a2c48)));
- try bitReverseTest(i24, @bitCast(i24, @as(u24, 0x12345f)), @bitCast(i24, @as(u24, 0xfa2c48)));
- try bitReverseTest(i24, @bitCast(i24, @as(u24, 0xf23456)), @bitCast(i24, @as(u24, 0x6a2c4f)));
- try bitReverseTest(i32, @bitCast(i32, @as(u32, 0x12345678)), @bitCast(i32, @as(u32, 0x1e6a2c48)));
- try bitReverseTest(i32, @bitCast(i32, @as(u32, 0xf2345678)), @bitCast(i32, @as(u32, 0x1e6a2c4f)));
- try bitReverseTest(i32, @bitCast(i32, @as(u32, 0x1234567f)), @bitCast(i32, @as(u32, 0xfe6a2c48)));
- try bitReverseTest(i40, @bitCast(i40, @as(u40, 0x123456789a)), @bitCast(i40, @as(u40, 0x591e6a2c48)));
- try bitReverseTest(i48, @bitCast(i48, @as(u48, 0x123456789abc)), @bitCast(i48, @as(u48, 0x3d591e6a2c48)));
- try bitReverseTest(i56, @bitCast(i56, @as(u56, 0x123456789abcde)), @bitCast(i56, @as(u56, 0x7b3d591e6a2c48)));
- try bitReverseTest(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1)), @bitCast(i64, @as(u64, 0x8f7b3d591e6a2c48)));
- try bitReverseTest(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141)), @bitCast(i96, @as(u96, 0x828c84888f7b3d591e6a2c48)));
- try bitReverseTest(i128, @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181)), @bitCast(i128, @as(u128, 0x818e868a828c84888f7b3d591e6a2c48)));
+ try bitReverseTest(i8, @as(i8, @bitCast(@as(u8, 0x92))), @as(i8, @bitCast(@as(u8, 0x49))));
+ try bitReverseTest(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x2c48))));
+ try bitReverseTest(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x6a2c48))));
+ try bitReverseTest(i24, @as(i24, @bitCast(@as(u24, 0x12345f))), @as(i24, @bitCast(@as(u24, 0xfa2c48))));
+ try bitReverseTest(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), @as(i24, @bitCast(@as(u24, 0x6a2c4f))));
+ try bitReverseTest(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x1e6a2c48))));
+ try bitReverseTest(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), @as(i32, @bitCast(@as(u32, 0x1e6a2c4f))));
+ try bitReverseTest(i32, @as(i32, @bitCast(@as(u32, 0x1234567f))), @as(i32, @bitCast(@as(u32, 0xfe6a2c48))));
+ try bitReverseTest(i40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(i40, @bitCast(@as(u40, 0x591e6a2c48))));
+ try bitReverseTest(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0x3d591e6a2c48))));
+ try bitReverseTest(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0x7b3d591e6a2c48))));
+ try bitReverseTest(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0x8f7b3d591e6a2c48))));
+ try bitReverseTest(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x828c84888f7b3d591e6a2c48))));
+ try bitReverseTest(i128, @as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181))), @as(i128, @bitCast(@as(u128, 0x818e868a828c84888f7b3d591e6a2c48))));
}
fn byteSwapTest(comptime T: type, comptime input: comptime_int, comptime expected_output: comptime_int) !void {
@@ -3063,16 +3063,16 @@ test "big int byte swap" {
try byteSwapTest(u128, 0x123456789abcdef11121314151617181, 0x8171615141312111f1debc9a78563412);
try byteSwapTest(i8, -50, -50);
- try byteSwapTest(i16, @bitCast(i16, @as(u16, 0x1234)), @bitCast(i16, @as(u16, 0x3412)));
- try byteSwapTest(i24, @bitCast(i24, @as(u24, 0x123456)), @bitCast(i24, @as(u24, 0x563412)));
- try byteSwapTest(i32, @bitCast(i32, @as(u32, 0x12345678)), @bitCast(i32, @as(u32, 0x78563412)));
- try byteSwapTest(i40, @bitCast(i40, @as(u40, 0x123456789a)), @bitCast(i40, @as(u40, 0x9a78563412)));
- try byteSwapTest(i48, @bitCast(i48, @as(u48, 0x123456789abc)), @bitCast(i48, @as(u48, 0xbc9a78563412)));
- try byteSwapTest(i56, @bitCast(i56, @as(u56, 0x123456789abcde)), @bitCast(i56, @as(u56, 0xdebc9a78563412)));
- try byteSwapTest(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1)), @bitCast(i64, @as(u64, 0xf1debc9a78563412)));
- try byteSwapTest(i88, @bitCast(i88, @as(u88, 0x123456789abcdef1112131)), @bitCast(i88, @as(u88, 0x312111f1debc9a78563412)));
- try byteSwapTest(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141)), @bitCast(i96, @as(u96, 0x41312111f1debc9a78563412)));
- try byteSwapTest(i128, @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181)), @bitCast(i128, @as(u128, 0x8171615141312111f1debc9a78563412)));
+ try byteSwapTest(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412))));
+ try byteSwapTest(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412))));
+ try byteSwapTest(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412))));
+ try byteSwapTest(i40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(i40, @bitCast(@as(u40, 0x9a78563412))));
+ try byteSwapTest(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
+ try byteSwapTest(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0xdebc9a78563412))));
+ try byteSwapTest(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412))));
+ try byteSwapTest(i88, @as(i88, @bitCast(@as(u88, 0x123456789abcdef1112131))), @as(i88, @bitCast(@as(u88, 0x312111f1debc9a78563412))));
+ try byteSwapTest(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x41312111f1debc9a78563412))));
+ try byteSwapTest(i128, @as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181))), @as(i128, @bitCast(@as(u128, 0x8171615141312111f1debc9a78563412))));
try byteSwapTest(u512, 0x80, 1 << 511);
try byteSwapTest(i512, 0x80, minInt(i512));
@@ -3080,11 +3080,11 @@ test "big int byte swap" {
try byteSwapTest(i512, -0x100, (1 << 504) - 1);
try byteSwapTest(i400, -0x100, (1 << 392) - 1);
try byteSwapTest(i400, -0x2, -(1 << 392) - 1);
- try byteSwapTest(i24, @bitCast(i24, @as(u24, 0xf23456)), 0x5634f2);
- try byteSwapTest(i24, 0x1234f6, @bitCast(i24, @as(u24, 0xf63412)));
- try byteSwapTest(i32, @bitCast(i32, @as(u32, 0xf2345678)), 0x785634f2);
- try byteSwapTest(i32, 0x123456f8, @bitCast(i32, @as(u32, 0xf8563412)));
- try byteSwapTest(i48, 0x123456789abc, @bitCast(i48, @as(u48, 0xbc9a78563412)));
+ try byteSwapTest(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), 0x5634f2);
+ try byteSwapTest(i24, 0x1234f6, @as(i24, @bitCast(@as(u24, 0xf63412))));
+ try byteSwapTest(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 0x785634f2);
+ try byteSwapTest(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412))));
+ try byteSwapTest(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
}
test "big.int mul multi-multi alias r with a and b" {
diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig
index 22f7ba183f..5313380c27 100644
--- a/lib/std/math/big/rational.zig
+++ b/lib/std/math/big/rational.zig
@@ -137,7 +137,7 @@ pub const Rational = struct {
debug.assert(@typeInfo(T) == .Float);
const UnsignedInt = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
- const f_bits = @bitCast(UnsignedInt, f);
+ const f_bits = @as(UnsignedInt, @bitCast(f));
const exponent_bits = math.floatExponentBits(T);
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
@@ -146,7 +146,7 @@ pub const Rational = struct {
const exponent_mask = (1 << exponent_bits) - 1;
const mantissa_mask = (1 << mantissa_bits) - 1;
- var exponent = @intCast(i16, (f_bits >> mantissa_bits) & exponent_mask);
+ var exponent = @as(i16, @intCast((f_bits >> mantissa_bits) & exponent_mask));
var mantissa = f_bits & mantissa_mask;
switch (exponent) {
@@ -177,9 +177,9 @@ pub const Rational = struct {
try self.q.set(1);
if (shift >= 0) {
- try self.q.shiftLeft(&self.q, @intCast(usize, shift));
+ try self.q.shiftLeft(&self.q, @as(usize, @intCast(shift)));
} else {
- try self.p.shiftLeft(&self.p, @intCast(usize, -shift));
+ try self.p.shiftLeft(&self.p, @as(usize, @intCast(-shift)));
}
try self.reduce();
@@ -210,7 +210,7 @@ pub const Rational = struct {
}
// 1. left-shift a or sub so that a/b is in [1 << msize1, 1 << (msize2 + 1)]
- var exp = @intCast(isize, self.p.bitCountTwosComp()) - @intCast(isize, self.q.bitCountTwosComp());
+ var exp = @as(isize, @intCast(self.p.bitCountTwosComp())) - @as(isize, @intCast(self.q.bitCountTwosComp()));
var a2 = try self.p.clone();
defer a2.deinit();
@@ -220,9 +220,9 @@ pub const Rational = struct {
const shift = msize2 - exp;
if (shift >= 0) {
- try a2.shiftLeft(&a2, @intCast(usize, shift));
+ try a2.shiftLeft(&a2, @as(usize, @intCast(shift)));
} else {
- try b2.shiftLeft(&b2, @intCast(usize, -shift));
+ try b2.shiftLeft(&b2, @as(usize, @intCast(-shift)));
}
// 2. compute quotient and remainder
@@ -254,8 +254,8 @@ pub const Rational = struct {
// 4. Rounding
if (emin - msize <= exp and exp <= emin) {
// denormal
- const shift1 = @intCast(math.Log2Int(BitReprType), emin - (exp - 1));
- const lost_bits = mantissa & ((@intCast(BitReprType, 1) << shift1) - 1);
+ const shift1 = @as(math.Log2Int(BitReprType), @intCast(emin - (exp - 1)));
+ const lost_bits = mantissa & ((@as(BitReprType, @intCast(1)) << shift1) - 1);
have_rem = have_rem or lost_bits != 0;
mantissa >>= shift1;
exp = 2 - ebias;
@@ -276,7 +276,7 @@ pub const Rational = struct {
}
mantissa >>= 1;
- const f = math.scalbn(@floatFromInt(T, mantissa), @intCast(i32, exp - msize1));
+ const f = math.scalbn(@as(T, @floatFromInt(mantissa)), @as(i32, @intCast(exp - msize1)));
if (math.isInf(f)) {
exact = false;
}
@@ -477,7 +477,7 @@ fn extractLowBits(a: Int, comptime T: type) T {
const t_bits = @typeInfo(T).Int.bits;
const limb_bits = @typeInfo(Limb).Int.bits;
if (t_bits <= limb_bits) {
- return @truncate(T, a.limbs[0]);
+ return @as(T, @truncate(a.limbs[0]));
} else {
var r: T = 0;
comptime var i: usize = 0;
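setFloat above pulls the biased exponent and mantissa fields out of the float's bit pattern before building the fraction. A standalone sketch of that decomposition for f64, using the std.math.float* helpers; the local names are illustrative:

    const std = @import("std");

    test "f64 exponent and mantissa fields" {
        const f: f64 = 6.5; // 1.625 * 2^2
        const bits = @as(u64, @bitCast(f));
        const mantissa_bits = std.math.floatMantissaBits(f64); // 52
        const exponent_mask: u64 = (1 << std.math.floatExponentBits(f64)) - 1; // 0x7FF
        const biased_exp = @as(i16, @intCast((bits >> mantissa_bits) & exponent_mask));
        const mantissa = bits & ((@as(u64, 1) << mantissa_bits) - 1);
        try std.testing.expect(biased_exp - 1023 == 2); // unbiased exponent
        try std.testing.expect(mantissa == 0xA_0000_0000_0000); // fraction 0.625 -> 0b101, then zeros
    }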
diff --git a/lib/std/math/cbrt.zig b/lib/std/math/cbrt.zig
index 1ff1818e8d..737757b817 100644
--- a/lib/std/math/cbrt.zig
+++ b/lib/std/math/cbrt.zig
@@ -27,7 +27,7 @@ fn cbrt32(x: f32) f32 {
const B1: u32 = 709958130; // (127 - 127.0 / 3 - 0.03306235651) * 2^23
const B2: u32 = 642849266; // (127 - 127.0 / 3 - 24 / 3 - 0.03306235651) * 2^23
- var u = @bitCast(u32, x);
+ var u = @as(u32, @bitCast(x));
var hx = u & 0x7FFFFFFF;
// cbrt(nan, inf) = itself
@@ -41,7 +41,7 @@ fn cbrt32(x: f32) f32 {
if (hx == 0) {
return x;
}
- u = @bitCast(u32, x * 0x1.0p24);
+ u = @as(u32, @bitCast(x * 0x1.0p24));
hx = u & 0x7FFFFFFF;
hx = hx / 3 + B2;
} else {
@@ -52,7 +52,7 @@ fn cbrt32(x: f32) f32 {
u |= hx;
// first step newton to 16 bits
- var t: f64 = @bitCast(f32, u);
+ var t: f64 = @as(f32, @bitCast(u));
var r: f64 = t * t * t;
t = t * (@as(f64, x) + x + r) / (x + r + r);
@@ -60,7 +60,7 @@ fn cbrt32(x: f32) f32 {
r = t * t * t;
t = t * (@as(f64, x) + x + r) / (x + r + r);
- return @floatCast(f32, t);
+ return @as(f32, @floatCast(t));
}
fn cbrt64(x: f64) f64 {
@@ -74,8 +74,8 @@ fn cbrt64(x: f64) f64 {
const P3: f64 = -0.758397934778766047437;
const P4: f64 = 0.145996192886612446982;
- var u = @bitCast(u64, x);
- var hx = @intCast(u32, u >> 32) & 0x7FFFFFFF;
+ var u = @as(u64, @bitCast(x));
+ var hx = @as(u32, @intCast(u >> 32)) & 0x7FFFFFFF;
// cbrt(nan, inf) = itself
if (hx >= 0x7FF00000) {
@@ -84,8 +84,8 @@ fn cbrt64(x: f64) f64 {
// cbrt to ~5bits
if (hx < 0x00100000) {
- u = @bitCast(u64, x * 0x1.0p54);
- hx = @intCast(u32, u >> 32) & 0x7FFFFFFF;
+ u = @as(u64, @bitCast(x * 0x1.0p54));
+ hx = @as(u32, @intCast(u >> 32)) & 0x7FFFFFFF;
// cbrt(0) is itself
if (hx == 0) {
@@ -98,7 +98,7 @@ fn cbrt64(x: f64) f64 {
u &= 1 << 63;
u |= @as(u64, hx) << 32;
- var t = @bitCast(f64, u);
+ var t = @as(f64, @bitCast(u));
// cbrt to 23 bits
// cbrt(x) = t * cbrt(x / t^3) ~= t * P(t^3 / x)
@@ -106,9 +106,9 @@ fn cbrt64(x: f64) f64 {
t = t * ((P0 + r * (P1 + r * P2)) + ((r * r) * r) * (P3 + r * P4));
// Round t away from 0 to 23 bits
- u = @bitCast(u64, t);
+ u = @as(u64, @bitCast(t));
u = (u + 0x80000000) & 0xFFFFFFFFC0000000;
- t = @bitCast(f64, u);
+ t = @as(f64, @bitCast(u));
// one step newton to 53 bits
const s = t * t;
diff --git a/lib/std/math/complex/atan.zig b/lib/std/math/complex/atan.zig
index 56c199016d..381fc43f7d 100644
--- a/lib/std/math/complex/atan.zig
+++ b/lib/std/math/complex/atan.zig
@@ -32,7 +32,7 @@ fn redupif32(x: f32) f32 {
t -= 0.5;
}
- const u = @floatFromInt(f32, @intFromFloat(i32, t));
+ const u = @as(f32, @floatFromInt(@as(i32, @intFromFloat(t))));
return ((x - u * DP1) - u * DP2) - t * DP3;
}
@@ -81,7 +81,7 @@ fn redupif64(x: f64) f64 {
t -= 0.5;
}
- const u = @floatFromInt(f64, @intFromFloat(i64, t));
+ const u = @as(f64, @floatFromInt(@as(i64, @intFromFloat(t))));
return ((x - u * DP1) - u * DP2) - t * DP3;
}
diff --git a/lib/std/math/complex/cosh.zig b/lib/std/math/complex/cosh.zig
index b3ffab5175..413279db2d 100644
--- a/lib/std/math/complex/cosh.zig
+++ b/lib/std/math/complex/cosh.zig
@@ -26,10 +26,10 @@ fn cosh32(z: Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
- const hx = @bitCast(u32, x);
+ const hx = @as(u32, @bitCast(x));
const ix = hx & 0x7fffffff;
- const hy = @bitCast(u32, y);
+ const hy = @as(u32, @bitCast(y));
const iy = hy & 0x7fffffff;
if (ix < 0x7f800000 and iy < 0x7f800000) {
@@ -89,14 +89,14 @@ fn cosh64(z: Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;
- const fx = @bitCast(u64, x);
- const hx = @intCast(u32, fx >> 32);
- const lx = @truncate(u32, fx);
+ const fx = @as(u64, @bitCast(x));
+ const hx = @as(u32, @intCast(fx >> 32));
+ const lx = @as(u32, @truncate(fx));
const ix = hx & 0x7fffffff;
- const fy = @bitCast(u64, y);
- const hy = @intCast(u32, fy >> 32);
- const ly = @truncate(u32, fy);
+ const fy = @as(u64, @bitCast(y));
+ const hy = @as(u32, @intCast(fy >> 32));
+ const ly = @as(u32, @truncate(fy));
const iy = hy & 0x7fffffff;
// nearly non-exceptional case where x, y are finite
diff --git a/lib/std/math/complex/exp.zig b/lib/std/math/complex/exp.zig
index 84ee251d0e..4644ea4be7 100644
--- a/lib/std/math/complex/exp.zig
+++ b/lib/std/math/complex/exp.zig
@@ -30,13 +30,13 @@ fn exp32(z: Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
- const hy = @bitCast(u32, y) & 0x7fffffff;
+ const hy = @as(u32, @bitCast(y)) & 0x7fffffff;
// cexp(x + i0) = exp(x) + i0
if (hy == 0) {
return Complex(f32).init(@exp(x), y);
}
- const hx = @bitCast(u32, x);
+ const hx = @as(u32, @bitCast(x));
// cexp(0 + iy) = cos(y) + isin(y)
if ((hx & 0x7fffffff) == 0) {
return Complex(f32).init(@cos(y), @sin(y));
@@ -75,18 +75,18 @@ fn exp64(z: Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;
- const fy = @bitCast(u64, y);
- const hy = @intCast(u32, (fy >> 32) & 0x7fffffff);
- const ly = @truncate(u32, fy);
+ const fy = @as(u64, @bitCast(y));
+ const hy = @as(u32, @intCast((fy >> 32) & 0x7fffffff));
+ const ly = @as(u32, @truncate(fy));
// cexp(x + i0) = exp(x) + i0
if (hy | ly == 0) {
return Complex(f64).init(@exp(x), y);
}
- const fx = @bitCast(u64, x);
- const hx = @intCast(u32, fx >> 32);
- const lx = @truncate(u32, fx);
+ const fx = @as(u64, @bitCast(x));
+ const hx = @as(u32, @intCast(fx >> 32));
+ const lx = @as(u32, @truncate(fx));
// cexp(0 + iy) = cos(y) + isin(y)
if ((hx & 0x7fffffff) | lx == 0) {
diff --git a/lib/std/math/complex/ldexp.zig b/lib/std/math/complex/ldexp.zig
index c196d4afe6..201b6305af 100644
--- a/lib/std/math/complex/ldexp.zig
+++ b/lib/std/math/complex/ldexp.zig
@@ -27,10 +27,10 @@ fn frexp_exp32(x: f32, expt: *i32) f32 {
const kln2 = 162.88958740; // k * ln2
const exp_x = @exp(x - kln2);
- const hx = @bitCast(u32, exp_x);
+ const hx = @as(u32, @bitCast(exp_x));
// TODO zig should allow this cast implicitly because it should know the value is in range
- expt.* = @intCast(i32, hx >> 23) - (0x7f + 127) + k;
- return @bitCast(f32, (hx & 0x7fffff) | ((0x7f + 127) << 23));
+ expt.* = @as(i32, @intCast(hx >> 23)) - (0x7f + 127) + k;
+ return @as(f32, @bitCast((hx & 0x7fffff) | ((0x7f + 127) << 23)));
}
fn ldexp_cexp32(z: Complex(f32), expt: i32) Complex(f32) {
@@ -39,10 +39,10 @@ fn ldexp_cexp32(z: Complex(f32), expt: i32) Complex(f32) {
const exptf = expt + ex_expt;
const half_expt1 = @divTrunc(exptf, 2);
- const scale1 = @bitCast(f32, (0x7f + half_expt1) << 23);
+ const scale1 = @as(f32, @bitCast((0x7f + half_expt1) << 23));
const half_expt2 = exptf - half_expt1;
- const scale2 = @bitCast(f32, (0x7f + half_expt2) << 23);
+ const scale2 = @as(f32, @bitCast((0x7f + half_expt2) << 23));
return Complex(f32).init(
@cos(z.im) * exp_x * scale1 * scale2,
@@ -56,14 +56,14 @@ fn frexp_exp64(x: f64, expt: *i32) f64 {
const exp_x = @exp(x - kln2);
- const fx = @bitCast(u64, exp_x);
- const hx = @intCast(u32, fx >> 32);
- const lx = @truncate(u32, fx);
+ const fx = @as(u64, @bitCast(exp_x));
+ const hx = @as(u32, @intCast(fx >> 32));
+ const lx = @as(u32, @truncate(fx));
- expt.* = @intCast(i32, hx >> 20) - (0x3ff + 1023) + k;
+ expt.* = @as(i32, @intCast(hx >> 20)) - (0x3ff + 1023) + k;
const high_word = (hx & 0xfffff) | ((0x3ff + 1023) << 20);
- return @bitCast(f64, (@as(u64, high_word) << 32) | lx);
+ return @as(f64, @bitCast((@as(u64, high_word) << 32) | lx));
}
fn ldexp_cexp64(z: Complex(f64), expt: i32) Complex(f64) {
@@ -72,10 +72,10 @@ fn ldexp_cexp64(z: Complex(f64), expt: i32) Complex(f64) {
const exptf = @as(i64, expt + ex_expt);
const half_expt1 = @divTrunc(exptf, 2);
- const scale1 = @bitCast(f64, (0x3ff + half_expt1) << (20 + 32));
+ const scale1 = @as(f64, @bitCast((0x3ff + half_expt1) << (20 + 32)));
const half_expt2 = exptf - half_expt1;
- const scale2 = @bitCast(f64, (0x3ff + half_expt2) << (20 + 32));
+ const scale2 = @as(f64, @bitCast((0x3ff + half_expt2) << (20 + 32)));
return Complex(f64).init(
@cos(z.im) * exp_x * scale1 * scale2,
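frexp_exp64 and ldexp_cexp64 above build their scale factors by writing a biased exponent straight into the top bits of an f64 (split across two factors so a large exponent cannot overflow a single one). The core trick in isolation; twoToThe is an illustrative name:

    const std = @import("std");

    // Valid for k in the normal range, roughly -1022 <= k <= 1023.
    fn twoToThe(k: i32) f64 {
        const biased = @as(u64, @intCast(k + 1023));
        return @as(f64, @bitCast(biased << 52)); // exponent field starts at bit 52
    }

    test "2^k from exponent bits" {
        try std.testing.expect(twoToThe(0) == 1.0);
        try std.testing.expect(twoToThe(3) == 8.0);
        try std.testing.expect(twoToThe(-1) == 0.5);
    }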
diff --git a/lib/std/math/complex/sinh.zig b/lib/std/math/complex/sinh.zig
index 9afb7faf30..c9ea0d04fc 100644
--- a/lib/std/math/complex/sinh.zig
+++ b/lib/std/math/complex/sinh.zig
@@ -26,10 +26,10 @@ fn sinh32(z: Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
- const hx = @bitCast(u32, x);
+ const hx = @as(u32, @bitCast(x));
const ix = hx & 0x7fffffff;
- const hy = @bitCast(u32, y);
+ const hy = @as(u32, @bitCast(y));
const iy = hy & 0x7fffffff;
if (ix < 0x7f800000 and iy < 0x7f800000) {
@@ -89,14 +89,14 @@ fn sinh64(z: Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;
- const fx = @bitCast(u64, x);
- const hx = @intCast(u32, fx >> 32);
- const lx = @truncate(u32, fx);
+ const fx = @as(u64, @bitCast(x));
+ const hx = @as(u32, @intCast(fx >> 32));
+ const lx = @as(u32, @truncate(fx));
const ix = hx & 0x7fffffff;
- const fy = @bitCast(u64, y);
- const hy = @intCast(u32, fy >> 32);
- const ly = @truncate(u32, fy);
+ const fy = @as(u64, @bitCast(y));
+ const hy = @as(u32, @intCast(fy >> 32));
+ const ly = @as(u32, @truncate(fy));
const iy = hy & 0x7fffffff;
if (ix < 0x7ff00000 and iy < 0x7ff00000) {
diff --git a/lib/std/math/complex/sqrt.zig b/lib/std/math/complex/sqrt.zig
index 456d10aa85..fe2e8e6531 100644
--- a/lib/std/math/complex/sqrt.zig
+++ b/lib/std/math/complex/sqrt.zig
@@ -58,14 +58,14 @@ fn sqrt32(z: Complex(f32)) Complex(f32) {
if (dx >= 0) {
const t = @sqrt((dx + math.hypot(f64, dx, dy)) * 0.5);
return Complex(f32).init(
- @floatCast(f32, t),
- @floatCast(f32, dy / (2.0 * t)),
+ @as(f32, @floatCast(t)),
+ @as(f32, @floatCast(dy / (2.0 * t))),
);
} else {
const t = @sqrt((-dx + math.hypot(f64, dx, dy)) * 0.5);
return Complex(f32).init(
- @floatCast(f32, @fabs(y) / (2.0 * t)),
- @floatCast(f32, math.copysign(t, y)),
+ @as(f32, @floatCast(@fabs(y) / (2.0 * t))),
+ @as(f32, @floatCast(math.copysign(t, y))),
);
}
}
diff --git a/lib/std/math/complex/tanh.zig b/lib/std/math/complex/tanh.zig
index 92e197e308..a90f141741 100644
--- a/lib/std/math/complex/tanh.zig
+++ b/lib/std/math/complex/tanh.zig
@@ -24,7 +24,7 @@ fn tanh32(z: Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
- const hx = @bitCast(u32, x);
+ const hx = @as(u32, @bitCast(x));
const ix = hx & 0x7fffffff;
if (ix >= 0x7f800000) {
@@ -32,7 +32,7 @@ fn tanh32(z: Complex(f32)) Complex(f32) {
const r = if (y == 0) y else x * y;
return Complex(f32).init(x, r);
}
- const xx = @bitCast(f32, hx - 0x40000000);
+ const xx = @as(f32, @bitCast(hx - 0x40000000));
const r = if (math.isInf(y)) y else @sin(y) * @cos(y);
return Complex(f32).init(xx, math.copysign(@as(f32, 0.0), r));
}
@@ -62,11 +62,11 @@ fn tanh64(z: Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;
- const fx = @bitCast(u64, x);
+ const fx = @as(u64, @bitCast(x));
// TODO: zig should allow this conversion implicitly because it can notice that the value necessarily
// fits in range.
- const hx = @intCast(u32, fx >> 32);
- const lx = @truncate(u32, fx);
+ const hx = @as(u32, @intCast(fx >> 32));
+ const lx = @as(u32, @truncate(fx));
const ix = hx & 0x7fffffff;
if (ix >= 0x7ff00000) {
@@ -75,7 +75,7 @@ fn tanh64(z: Complex(f64)) Complex(f64) {
return Complex(f64).init(x, r);
}
- const xx = @bitCast(f64, (@as(u64, hx - 0x40000000) << 32) | lx);
+ const xx = @as(f64, @bitCast((@as(u64, hx - 0x40000000) << 32) | lx));
const r = if (math.isInf(y)) y else @sin(y) * @cos(y);
return Complex(f64).init(xx, math.copysign(@as(f64, 0.0), r));
}
diff --git a/lib/std/math/copysign.zig b/lib/std/math/copysign.zig
index b5fd6d4d9a..3cefc0471f 100644
--- a/lib/std/math/copysign.zig
+++ b/lib/std/math/copysign.zig
@@ -7,9 +7,9 @@ pub fn copysign(magnitude: anytype, sign: @TypeOf(magnitude)) @TypeOf(magnitude)
const T = @TypeOf(magnitude);
const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const sign_bit_mask = @as(TBits, 1) << (@bitSizeOf(T) - 1);
- const mag = @bitCast(TBits, magnitude) & ~sign_bit_mask;
- const sgn = @bitCast(TBits, sign) & sign_bit_mask;
- return @bitCast(T, mag | sgn);
+ const mag = @as(TBits, @bitCast(magnitude)) & ~sign_bit_mask;
+ const sgn = @as(TBits, @bitCast(sign)) & sign_bit_mask;
+ return @as(T, @bitCast(mag | sgn));
}
test "math.copysign" {
diff --git a/lib/std/math/cosh.zig b/lib/std/math/cosh.zig
index d633f2fa0c..085d6fd2f9 100644
--- a/lib/std/math/cosh.zig
+++ b/lib/std/math/cosh.zig
@@ -29,9 +29,9 @@ pub fn cosh(x: anytype) @TypeOf(x) {
// = 1 + 0.5 * (exp(x) - 1) * (exp(x) - 1) / exp(x)
// = 1 + (x * x) / 2 + o(x^4)
fn cosh32(x: f32) f32 {
- const u = @bitCast(u32, x);
+ const u = @as(u32, @bitCast(x));
const ux = u & 0x7FFFFFFF;
- const ax = @bitCast(f32, ux);
+ const ax = @as(f32, @bitCast(ux));
// |x| < log(2)
if (ux < 0x3F317217) {
@@ -54,9 +54,9 @@ fn cosh32(x: f32) f32 {
}
fn cosh64(x: f64) f64 {
- const u = @bitCast(u64, x);
- const w = @intCast(u32, u >> 32) & (maxInt(u32) >> 1);
- const ax = @bitCast(f64, u & (maxInt(u64) >> 1));
+ const u = @as(u64, @bitCast(x));
+ const w = @as(u32, @intCast(u >> 32)) & (maxInt(u32) >> 1);
+ const ax = @as(f64, @bitCast(u & (maxInt(u64) >> 1)));
// TODO: Shouldn't need this explicit check.
if (x == 0.0) {
diff --git a/lib/std/math/expm1.zig b/lib/std/math/expm1.zig
index 5c4052db56..8192573a88 100644
--- a/lib/std/math/expm1.zig
+++ b/lib/std/math/expm1.zig
@@ -38,7 +38,7 @@ fn expm1_32(x_: f32) f32 {
const Q2: f32 = 1.5807170421e-3;
var x = x_;
- const ux = @bitCast(u32, x);
+ const ux = @as(u32, @bitCast(x));
const hx = ux & 0x7FFFFFFF;
const sign = hx >> 31;
@@ -88,8 +88,8 @@ fn expm1_32(x_: f32) f32 {
kf += 0.5;
}
- k = @intFromFloat(i32, kf);
- const t = @floatFromInt(f32, k);
+ k = @as(i32, @intFromFloat(kf));
+ const t = @as(f32, @floatFromInt(k));
hi = x - t * ln2_hi;
lo = t * ln2_lo;
}
@@ -133,7 +133,7 @@ fn expm1_32(x_: f32) f32 {
}
}
- const twopk = @bitCast(f32, @intCast(u32, (0x7F +% k) << 23));
+ const twopk = @as(f32, @bitCast(@as(u32, @intCast((0x7F +% k) << 23))));
if (k < 0 or k > 56) {
var y = x - e + 1.0;
@@ -146,7 +146,7 @@ fn expm1_32(x_: f32) f32 {
return y - 1.0;
}
- const uf = @bitCast(f32, @intCast(u32, 0x7F -% k) << 23);
+ const uf = @as(f32, @bitCast(@as(u32, @intCast(0x7F -% k)) << 23));
if (k < 23) {
return (x - e + (1 - uf)) * twopk;
} else {
@@ -169,8 +169,8 @@ fn expm1_64(x_: f64) f64 {
const Q5: f64 = -2.01099218183624371326e-07;
var x = x_;
- const ux = @bitCast(u64, x);
- const hx = @intCast(u32, ux >> 32) & 0x7FFFFFFF;
+ const ux = @as(u64, @bitCast(x));
+ const hx = @as(u32, @intCast(ux >> 32)) & 0x7FFFFFFF;
const sign = ux >> 63;
if (math.isNegativeInf(x)) {
@@ -219,8 +219,8 @@ fn expm1_64(x_: f64) f64 {
kf += 0.5;
}
- k = @intFromFloat(i32, kf);
- const t = @floatFromInt(f64, k);
+ k = @as(i32, @intFromFloat(kf));
+ const t = @as(f64, @floatFromInt(k));
hi = x - t * ln2_hi;
lo = t * ln2_lo;
}
@@ -231,7 +231,7 @@ fn expm1_64(x_: f64) f64 {
// |x| < 2^(-54)
else if (hx < 0x3C900000) {
if (hx < 0x00100000) {
- math.doNotOptimizeAway(@floatCast(f32, x));
+ math.doNotOptimizeAway(@as(f32, @floatCast(x)));
}
return x;
} else {
@@ -264,7 +264,7 @@ fn expm1_64(x_: f64) f64 {
}
}
- const twopk = @bitCast(f64, @intCast(u64, 0x3FF +% k) << 52);
+ const twopk = @as(f64, @bitCast(@as(u64, @intCast(0x3FF +% k)) << 52));
if (k < 0 or k > 56) {
var y = x - e + 1.0;
@@ -277,7 +277,7 @@ fn expm1_64(x_: f64) f64 {
return y - 1.0;
}
- const uf = @bitCast(f64, @intCast(u64, 0x3FF -% k) << 52);
+ const uf = @as(f64, @bitCast(@as(u64, @intCast(0x3FF -% k)) << 52));
if (k < 20) {
return (x - e + (1 - uf)) * twopk;
} else {
diff --git a/lib/std/math/expo2.zig b/lib/std/math/expo2.zig
index 4345233173..b451e46865 100644
--- a/lib/std/math/expo2.zig
+++ b/lib/std/math/expo2.zig
@@ -21,7 +21,7 @@ fn expo2f(x: f32) f32 {
const kln2 = 0x1.45C778p+7;
const u = (0x7F + k / 2) << 23;
- const scale = @bitCast(f32, u);
+ const scale = @as(f32, @bitCast(u));
return @exp(x - kln2) * scale * scale;
}
@@ -30,6 +30,6 @@ fn expo2d(x: f64) f64 {
const kln2 = 0x1.62066151ADD8BP+10;
const u = (0x3FF + k / 2) << 20;
- const scale = @bitCast(f64, @as(u64, u) << 32);
+ const scale = @as(f64, @bitCast(@as(u64, u) << 32));
return @exp(x - kln2) * scale * scale;
}
diff --git a/lib/std/math/float.zig b/lib/std/math/float.zig
index 768cc03285..5552ec5c9c 100644
--- a/lib/std/math/float.zig
+++ b/lib/std/math/float.zig
@@ -11,7 +11,7 @@ inline fn mantissaOne(comptime T: type) comptime_int {
inline fn reconstructFloat(comptime T: type, comptime exponent: comptime_int, comptime mantissa: comptime_int) T {
const TBits = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = @bitSizeOf(T) } });
const biased_exponent = @as(TBits, exponent + floatExponentMax(T));
- return @bitCast(T, (biased_exponent << floatMantissaBits(T)) | @as(TBits, mantissa));
+ return @as(T, @bitCast((biased_exponent << floatMantissaBits(T)) | @as(TBits, mantissa)));
}
/// Returns the number of bits in the exponent of floating point type T.
diff --git a/lib/std/math/frexp.zig b/lib/std/math/frexp.zig
index 31168d28d4..f295b959cb 100644
--- a/lib/std/math/frexp.zig
+++ b/lib/std/math/frexp.zig
@@ -38,8 +38,8 @@ pub fn frexp(x: anytype) Frexp(@TypeOf(x)) {
fn frexp32(x: f32) Frexp(f32) {
var result: Frexp(f32) = undefined;
- var y = @bitCast(u32, x);
- const e = @intCast(i32, y >> 23) & 0xFF;
+ var y = @as(u32, @bitCast(x));
+ const e = @as(i32, @intCast(y >> 23)) & 0xFF;
if (e == 0) {
if (x != 0) {
@@ -68,15 +68,15 @@ fn frexp32(x: f32) Frexp(f32) {
result.exponent = e - 0x7E;
y &= 0x807FFFFF;
y |= 0x3F000000;
- result.significand = @bitCast(f32, y);
+ result.significand = @as(f32, @bitCast(y));
return result;
}
fn frexp64(x: f64) Frexp(f64) {
var result: Frexp(f64) = undefined;
- var y = @bitCast(u64, x);
- const e = @intCast(i32, y >> 52) & 0x7FF;
+ var y = @as(u64, @bitCast(x));
+ const e = @as(i32, @intCast(y >> 52)) & 0x7FF;
if (e == 0) {
if (x != 0) {
@@ -105,15 +105,15 @@ fn frexp64(x: f64) Frexp(f64) {
result.exponent = e - 0x3FE;
y &= 0x800FFFFFFFFFFFFF;
y |= 0x3FE0000000000000;
- result.significand = @bitCast(f64, y);
+ result.significand = @as(f64, @bitCast(y));
return result;
}
fn frexp128(x: f128) Frexp(f128) {
var result: Frexp(f128) = undefined;
- var y = @bitCast(u128, x);
- const e = @intCast(i32, y >> 112) & 0x7FFF;
+ var y = @as(u128, @bitCast(x));
+ const e = @as(i32, @intCast(y >> 112)) & 0x7FFF;
if (e == 0) {
if (x != 0) {
@@ -142,7 +142,7 @@ fn frexp128(x: f128) Frexp(f128) {
result.exponent = e - 0x3FFE;
y &= 0x8000FFFFFFFFFFFFFFFFFFFFFFFFFFFF;
y |= 0x3FFE0000000000000000000000000000;
- result.significand = @bitCast(f128, y);
+ result.significand = @as(f128, @bitCast(y));
return result;
}
diff --git a/lib/std/math/hypot.zig b/lib/std/math/hypot.zig
index 981f6143fe..9fb569667b 100644
--- a/lib/std/math/hypot.zig
+++ b/lib/std/math/hypot.zig
@@ -25,8 +25,8 @@ pub fn hypot(comptime T: type, x: T, y: T) T {
}
fn hypot32(x: f32, y: f32) f32 {
- var ux = @bitCast(u32, x);
- var uy = @bitCast(u32, y);
+ var ux = @as(u32, @bitCast(x));
+ var uy = @as(u32, @bitCast(y));
ux &= maxInt(u32) >> 1;
uy &= maxInt(u32) >> 1;
@@ -36,8 +36,8 @@ fn hypot32(x: f32, y: f32) f32 {
uy = tmp;
}
- var xx = @bitCast(f32, ux);
- var yy = @bitCast(f32, uy);
+ var xx = @as(f32, @bitCast(ux));
+ var yy = @as(f32, @bitCast(uy));
if (uy == 0xFF << 23) {
return yy;
}
@@ -56,7 +56,7 @@ fn hypot32(x: f32, y: f32) f32 {
yy *= 0x1.0p-90;
}
- return z * @sqrt(@floatCast(f32, @as(f64, x) * x + @as(f64, y) * y));
+ return z * @sqrt(@as(f32, @floatCast(@as(f64, x) * x + @as(f64, y) * y)));
}
fn sq(hi: *f64, lo: *f64, x: f64) void {
@@ -69,8 +69,8 @@ fn sq(hi: *f64, lo: *f64, x: f64) void {
}
fn hypot64(x: f64, y: f64) f64 {
- var ux = @bitCast(u64, x);
- var uy = @bitCast(u64, y);
+ var ux = @as(u64, @bitCast(x));
+ var uy = @as(u64, @bitCast(y));
ux &= maxInt(u64) >> 1;
uy &= maxInt(u64) >> 1;
@@ -82,8 +82,8 @@ fn hypot64(x: f64, y: f64) f64 {
const ex = ux >> 52;
const ey = uy >> 52;
- var xx = @bitCast(f64, ux);
- var yy = @bitCast(f64, uy);
+ var xx = @as(f64, @bitCast(ux));
+ var yy = @as(f64, @bitCast(uy));
// hypot(inf, nan) == inf
if (ey == 0x7FF) {
diff --git a/lib/std/math/ilogb.zig b/lib/std/math/ilogb.zig
index 7c58be2ec5..735a2250c9 100644
--- a/lib/std/math/ilogb.zig
+++ b/lib/std/math/ilogb.zig
@@ -38,8 +38,8 @@ fn ilogbX(comptime T: type, x: T) i32 {
const absMask = signBit - 1;
- var u = @bitCast(Z, x) & absMask;
- var e = @intCast(i32, u >> significandBits);
+ var u = @as(Z, @bitCast(x)) & absMask;
+ var e = @as(i32, @intCast(u >> significandBits));
if (e == 0) {
if (u == 0) {
@@ -49,12 +49,12 @@ fn ilogbX(comptime T: type, x: T) i32 {
// offset sign bit, exponent bits, and integer bit (if present) + bias
const offset = 1 + exponentBits + @as(comptime_int, @intFromBool(T == f80)) - exponentBias;
- return offset - @intCast(i32, @clz(u));
+ return offset - @as(i32, @intCast(@clz(u)));
}
if (e == maxExponent) {
math.raiseInvalid();
- if (u > @bitCast(Z, math.inf(T))) {
+ if (u > @as(Z, @bitCast(math.inf(T)))) {
return fp_ilogbnan; // u is a NaN
} else return maxInt(i32);
}
diff --git a/lib/std/math/isfinite.zig b/lib/std/math/isfinite.zig
index 556f8a2378..36c6cdd062 100644
--- a/lib/std/math/isfinite.zig
+++ b/lib/std/math/isfinite.zig
@@ -7,7 +7,7 @@ pub fn isFinite(x: anytype) bool {
const T = @TypeOf(x);
const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const remove_sign = ~@as(TBits, 0) >> 1;
- return @bitCast(TBits, x) & remove_sign < @bitCast(TBits, math.inf(T));
+ return @as(TBits, @bitCast(x)) & remove_sign < @as(TBits, @bitCast(math.inf(T)));
}
test "math.isFinite" {
diff --git a/lib/std/math/isinf.zig b/lib/std/math/isinf.zig
index ac30470f31..9b3a0a8f4a 100644
--- a/lib/std/math/isinf.zig
+++ b/lib/std/math/isinf.zig
@@ -7,7 +7,7 @@ pub inline fn isInf(x: anytype) bool {
const T = @TypeOf(x);
const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const remove_sign = ~@as(TBits, 0) >> 1;
- return @bitCast(TBits, x) & remove_sign == @bitCast(TBits, math.inf(T));
+ return @as(TBits, @bitCast(x)) & remove_sign == @as(TBits, @bitCast(math.inf(T)));
}
/// Returns whether x is an infinity with a positive sign.
diff --git a/lib/std/math/isnormal.zig b/lib/std/math/isnormal.zig
index 08f848f5df..38b459b54e 100644
--- a/lib/std/math/isnormal.zig
+++ b/lib/std/math/isnormal.zig
@@ -15,7 +15,7 @@ pub fn isNormal(x: anytype) bool {
// The sign bit is removed because all ones would overflow into it.
// For f80, even though it has an explicit integer part stored,
// the exponent effectively takes priority if mismatching.
- const value = @bitCast(TBits, x) +% increment_exp;
+ const value = @as(TBits, @bitCast(x)) +% increment_exp;
return value & remove_sign >= (increment_exp << 1);
}
@@ -35,7 +35,7 @@ test "math.isNormal" {
try expect(!isNormal(@as(T, math.floatTrueMin(T))));
// largest subnormal
- try expect(!isNormal(@bitCast(T, ~(~@as(TBits, 0) << math.floatFractionalBits(T)))));
+ try expect(!isNormal(@as(T, @bitCast(~(~@as(TBits, 0) << math.floatFractionalBits(T))))));
// non-finite numbers
try expect(!isNormal(-math.inf(T)));
@@ -43,6 +43,6 @@ test "math.isNormal" {
try expect(!isNormal(math.nan(T)));
// overflow edge-case (described in implementation, also see #10133)
- try expect(!isNormal(@bitCast(T, ~@as(TBits, 0))));
+ try expect(!isNormal(@as(T, @bitCast(~@as(TBits, 0)))));
}
}
diff --git a/lib/std/math/ldexp.zig b/lib/std/math/ldexp.zig
index 448e94f8e5..d32a8189b6 100644
--- a/lib/std/math/ldexp.zig
+++ b/lib/std/math/ldexp.zig
@@ -16,53 +16,53 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) {
const max_biased_exponent = 2 * math.floatExponentMax(T);
const mantissa_mask = @as(TBits, (1 << mantissa_bits) - 1);
- const repr = @bitCast(TBits, x);
+ const repr = @as(TBits, @bitCast(x));
const sign_bit = repr & (1 << (exponent_bits + mantissa_bits));
if (math.isNan(x) or !math.isFinite(x))
return x;
- var exponent: i32 = @intCast(i32, (repr << 1) >> (mantissa_bits + 1));
+ var exponent: i32 = @as(i32, @intCast((repr << 1) >> (mantissa_bits + 1)));
if (exponent == 0)
exponent += (@as(i32, exponent_bits) + @intFromBool(T == f80)) - @clz(repr << 1);
if (n >= 0) {
if (n > max_biased_exponent - exponent) {
// Overflow. Return +/- inf
- return @bitCast(T, @bitCast(TBits, math.inf(T)) | sign_bit);
+ return @as(T, @bitCast(@as(TBits, @bitCast(math.inf(T))) | sign_bit));
} else if (exponent + n <= 0) {
// Result is subnormal
- return @bitCast(T, (repr << @intCast(Log2Int(TBits), n)) | sign_bit);
+ return @as(T, @bitCast((repr << @as(Log2Int(TBits), @intCast(n))) | sign_bit));
} else if (exponent <= 0) {
// Result is normal, but needs shifting
- var result = @intCast(TBits, n + exponent) << mantissa_bits;
- result |= (repr << @intCast(Log2Int(TBits), 1 - exponent)) & mantissa_mask;
- return @bitCast(T, result | sign_bit);
+ var result = @as(TBits, @intCast(n + exponent)) << mantissa_bits;
+ result |= (repr << @as(Log2Int(TBits), @intCast(1 - exponent))) & mantissa_mask;
+ return @as(T, @bitCast(result | sign_bit));
}
// Result needs no shifting
- return @bitCast(T, repr + (@intCast(TBits, n) << mantissa_bits));
+ return @as(T, @bitCast(repr + (@as(TBits, @intCast(n)) << mantissa_bits)));
} else {
if (n <= -exponent) {
if (n < -(mantissa_bits + exponent))
- return @bitCast(T, sign_bit); // Severe underflow. Return +/- 0
+ return @as(T, @bitCast(sign_bit)); // Severe underflow. Return +/- 0
// Result underflowed, we need to shift and round
- const shift = @intCast(Log2Int(TBits), @min(-n, -(exponent + n) + 1));
+ const shift = @as(Log2Int(TBits), @intCast(@min(-n, -(exponent + n) + 1)));
const exact_tie: bool = @ctz(repr) == shift - 1;
var result = repr & mantissa_mask;
if (T != f80) // Include integer bit
result |= @as(TBits, @intFromBool(exponent > 0)) << fractional_bits;
- result = @intCast(TBits, (result >> (shift - 1)));
+ result = @as(TBits, @intCast((result >> (shift - 1))));
// Round result, including round-to-even for exact ties
result = ((result + 1) >> 1) & ~@as(TBits, @intFromBool(exact_tie));
- return @bitCast(T, result | sign_bit);
+ return @as(T, @bitCast(result | sign_bit));
}
// Result is exact, and needs no shifting
- return @bitCast(T, repr - (@intCast(TBits, -n) << mantissa_bits));
+ return @as(T, @bitCast(repr - (@as(TBits, @intCast(-n)) << mantissa_bits)));
}
}
@@ -105,8 +105,8 @@ test "math.ldexp" {
// Multiplications might flush the denormals to zero, esp. at
// runtime, so we manually construct the constants here instead.
const Z = std.meta.Int(.unsigned, @bitSizeOf(T));
- const EightTimesTrueMin = @bitCast(T, @as(Z, 8));
- const TwoTimesTrueMin = @bitCast(T, @as(Z, 2));
+ const EightTimesTrueMin = @as(T, @bitCast(@as(Z, 8)));
+ const TwoTimesTrueMin = @as(T, @bitCast(@as(Z, 2)));
// subnormals -> subnormals
try expect(ldexp(math.floatTrueMin(T), 3) == EightTimesTrueMin);
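Several ldexp.zig hunks cast a runtime shift amount to Log2Int(TBits). A small illustrative helper (ours, not from ldexp.zig) showing how @intCast now picks up that destination type from a typed local instead of a first argument:

const std = @import("std");

// Old: repr >> @intCast(Log2Int(u64), n)
// New: the typed `shift` declaration supplies the result type for @intCast.
fn shiftRightBy(repr: u64, n: i32) u64 {
    std.debug.assert(n >= 0 and n < 64);
    const shift: std.math.Log2Int(u64) = @intCast(n);
    return repr >> shift;
}

test "shiftRightBy" {
    try std.testing.expectEqual(@as(u64, 0x00FF), shiftRightBy(0xFF00, 8));
}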
diff --git a/lib/std/math/log.zig b/lib/std/math/log.zig
index c1a0f5c8e4..9f27130ce1 100644
--- a/lib/std/math/log.zig
+++ b/lib/std/math/log.zig
@@ -30,12 +30,12 @@ pub fn log(comptime T: type, base: T, x: T) T {
// TODO implement integer log without using float math
.Int => |IntType| switch (IntType.signedness) {
.signed => @compileError("log not implemented for signed integers"),
- .unsigned => return @intFromFloat(T, @floor(@log(@floatFromInt(f64, x)) / @log(float_base))),
+ .unsigned => return @as(T, @intFromFloat(@floor(@log(@as(f64, @floatFromInt(x))) / @log(float_base)))),
},
.Float => {
switch (T) {
- f32 => return @floatCast(f32, @log(@as(f64, x)) / @log(float_base)),
+ f32 => return @as(f32, @floatCast(@log(@as(f64, x)) / @log(float_base))),
f64 => return @log(x) / @log(float_base),
else => @compileError("log not implemented for " ++ @typeName(T)),
}
diff --git a/lib/std/math/log10.zig b/lib/std/math/log10.zig
index 44e5a88445..785f11771c 100644
--- a/lib/std/math/log10.zig
+++ b/lib/std/math/log10.zig
@@ -49,9 +49,9 @@ pub fn log10_int(x: anytype) Log2Int(@TypeOf(x)) {
const bit_size = @typeInfo(T).Int.bits;
if (bit_size <= 8) {
- return @intCast(OutT, log10_int_u8(x));
+ return @as(OutT, @intCast(log10_int_u8(x)));
} else if (bit_size <= 16) {
- return @intCast(OutT, less_than_5(x));
+ return @as(OutT, @intCast(less_than_5(x)));
}
var val = x;
@@ -71,7 +71,7 @@ pub fn log10_int(x: anytype) Log2Int(@TypeOf(x)) {
log += 5;
}
- return @intCast(OutT, log + less_than_5(@intCast(u32, val)));
+ return @as(OutT, @intCast(log + less_than_5(@as(u32, @intCast(val)))));
}
fn pow10(comptime y: comptime_int) comptime_int {
@@ -134,7 +134,7 @@ inline fn less_than_5(x: u32) u32 {
}
fn oldlog10(x: anytype) u8 {
- return @intFromFloat(u8, @log10(@floatFromInt(f64, x)));
+ return @as(u8, @intFromFloat(@log10(@as(f64, @floatFromInt(x)))));
}
test "oldlog10 doesn't work" {
@@ -158,7 +158,7 @@ test "log10_int vs old implementation" {
inline for (int_types) |T| {
const last = @min(maxInt(T), 100_000);
for (1..last) |i| {
- const x = @intCast(T, i);
+ const x = @as(T, @intCast(i));
try testing.expectEqual(oldlog10(x), log10_int(x));
}
@@ -185,10 +185,10 @@ test "log10_int close to powers of 10" {
try testing.expectEqual(expected_max_ilog, log10_int(max_val));
for (0..(expected_max_ilog + 1)) |idx| {
- const i = @intCast(T, idx);
+ const i = @as(T, @intCast(idx));
const p: T = try math.powi(T, 10, i);
- const b = @intCast(Log2Int(T), i);
+ const b = @as(Log2Int(T), @intCast(i));
if (p >= 10) {
try testing.expectEqual(b - 1, log10_int(p - 9));
diff --git a/lib/std/math/log1p.zig b/lib/std/math/log1p.zig
index ad67955a8d..1f986a20c8 100644
--- a/lib/std/math/log1p.zig
+++ b/lib/std/math/log1p.zig
@@ -33,7 +33,7 @@ fn log1p_32(x: f32) f32 {
const Lg3: f32 = 0x91e9ee.0p-25;
const Lg4: f32 = 0xf89e26.0p-26;
- const u = @bitCast(u32, x);
+ const u = @as(u32, @bitCast(x));
var ix = u;
var k: i32 = 1;
var f: f32 = undefined;
@@ -72,9 +72,9 @@ fn log1p_32(x: f32) f32 {
if (k != 0) {
const uf = 1 + x;
- var iu = @bitCast(u32, uf);
+ var iu = @as(u32, @bitCast(uf));
iu += 0x3F800000 - 0x3F3504F3;
- k = @intCast(i32, iu >> 23) - 0x7F;
+ k = @as(i32, @intCast(iu >> 23)) - 0x7F;
// correction to avoid underflow in c / u
if (k < 25) {
@@ -86,7 +86,7 @@ fn log1p_32(x: f32) f32 {
// u into [sqrt(2)/2, sqrt(2)]
iu = (iu & 0x007FFFFF) + 0x3F3504F3;
- f = @bitCast(f32, iu) - 1;
+ f = @as(f32, @bitCast(iu)) - 1;
}
const s = f / (2.0 + f);
@@ -96,7 +96,7 @@ fn log1p_32(x: f32) f32 {
const t2 = z * (Lg1 + w * Lg3);
const R = t2 + t1;
const hfsq = 0.5 * f * f;
- const dk = @floatFromInt(f32, k);
+ const dk = @as(f32, @floatFromInt(k));
return s * (hfsq + R) + (dk * ln2_lo + c) - hfsq + f + dk * ln2_hi;
}
@@ -112,8 +112,8 @@ fn log1p_64(x: f64) f64 {
const Lg6: f64 = 1.531383769920937332e-01;
const Lg7: f64 = 1.479819860511658591e-01;
- var ix = @bitCast(u64, x);
- var hx = @intCast(u32, ix >> 32);
+ var ix = @as(u64, @bitCast(x));
+ var hx = @as(u32, @intCast(ix >> 32));
var k: i32 = 1;
var c: f64 = undefined;
var f: f64 = undefined;
@@ -150,10 +150,10 @@ fn log1p_64(x: f64) f64 {
if (k != 0) {
const uf = 1 + x;
- const hu = @bitCast(u64, uf);
- var iu = @intCast(u32, hu >> 32);
+ const hu = @as(u64, @bitCast(uf));
+ var iu = @as(u32, @intCast(hu >> 32));
iu += 0x3FF00000 - 0x3FE6A09E;
- k = @intCast(i32, iu >> 20) - 0x3FF;
+ k = @as(i32, @intCast(iu >> 20)) - 0x3FF;
// correction to avoid underflow in c / u
if (k < 54) {
@@ -166,7 +166,7 @@ fn log1p_64(x: f64) f64 {
// u into [sqrt(2)/2, sqrt(2)]
iu = (iu & 0x000FFFFF) + 0x3FE6A09E;
const iq = (@as(u64, iu) << 32) | (hu & 0xFFFFFFFF);
- f = @bitCast(f64, iq) - 1;
+ f = @as(f64, @bitCast(iq)) - 1;
}
const hfsq = 0.5 * f * f;
@@ -176,7 +176,7 @@ fn log1p_64(x: f64) f64 {
const t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
const R = t2 + t1;
- const dk = @floatFromInt(f64, k);
+ const dk = @as(f64, @floatFromInt(k));
return s * (hfsq + R) + (dk * ln2_lo + c) - hfsq + f + dk * ln2_hi;
}
diff --git a/lib/std/math/modf.zig b/lib/std/math/modf.zig
index d12c497729..b9d0083e3c 100644
--- a/lib/std/math/modf.zig
+++ b/lib/std/math/modf.zig
@@ -37,8 +37,8 @@ pub fn modf(x: anytype) modf_result(@TypeOf(x)) {
fn modf32(x: f32) modf32_result {
var result: modf32_result = undefined;
- const u = @bitCast(u32, x);
- const e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F;
+ const u = @as(u32, @bitCast(x));
+ const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
const us = u & 0x80000000;
// TODO: Shouldn't need this.
@@ -54,26 +54,26 @@ fn modf32(x: f32) modf32_result {
if (e == 0x80 and u << 9 != 0) { // nan
result.fpart = x;
} else {
- result.fpart = @bitCast(f32, us);
+ result.fpart = @as(f32, @bitCast(us));
}
return result;
}
// no integral part
if (e < 0) {
- result.ipart = @bitCast(f32, us);
+ result.ipart = @as(f32, @bitCast(us));
result.fpart = x;
return result;
}
- const mask = @as(u32, 0x007FFFFF) >> @intCast(u5, e);
+ const mask = @as(u32, 0x007FFFFF) >> @as(u5, @intCast(e));
if (u & mask == 0) {
result.ipart = x;
- result.fpart = @bitCast(f32, us);
+ result.fpart = @as(f32, @bitCast(us));
return result;
}
- const uf = @bitCast(f32, u & ~mask);
+ const uf = @as(f32, @bitCast(u & ~mask));
result.ipart = uf;
result.fpart = x - uf;
return result;
@@ -82,8 +82,8 @@ fn modf32(x: f32) modf32_result {
fn modf64(x: f64) modf64_result {
var result: modf64_result = undefined;
- const u = @bitCast(u64, x);
- const e = @intCast(i32, (u >> 52) & 0x7FF) - 0x3FF;
+ const u = @as(u64, @bitCast(x));
+ const e = @as(i32, @intCast((u >> 52) & 0x7FF)) - 0x3FF;
const us = u & (1 << 63);
if (math.isInf(x)) {
@@ -98,26 +98,26 @@ fn modf64(x: f64) modf64_result {
if (e == 0x400 and u << 12 != 0) { // nan
result.fpart = x;
} else {
- result.fpart = @bitCast(f64, us);
+ result.fpart = @as(f64, @bitCast(us));
}
return result;
}
// no integral part
if (e < 0) {
- result.ipart = @bitCast(f64, us);
+ result.ipart = @as(f64, @bitCast(us));
result.fpart = x;
return result;
}
- const mask = @as(u64, maxInt(u64) >> 12) >> @intCast(u6, e);
+ const mask = @as(u64, maxInt(u64) >> 12) >> @as(u6, @intCast(e));
if (u & mask == 0) {
result.ipart = x;
- result.fpart = @bitCast(f64, us);
+ result.fpart = @as(f64, @bitCast(us));
return result;
}
- const uf = @bitCast(f64, u & ~mask);
+ const uf = @as(f64, @bitCast(u & ~mask));
result.ipart = uf;
result.fpart = x - uf;
return result;
diff --git a/lib/std/math/pow.zig b/lib/std/math/pow.zig
index 7643e143e3..36aef966cf 100644
--- a/lib/std/math/pow.zig
+++ b/lib/std/math/pow.zig
@@ -144,7 +144,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
var xe = r2.exponent;
var x1 = r2.significand;
- var i = @intFromFloat(std.meta.Int(.signed, @typeInfo(T).Float.bits), yi);
+ var i = @as(std.meta.Int(.signed, @typeInfo(T).Float.bits), @intFromFloat(yi));
while (i != 0) : (i >>= 1) {
const overflow_shift = math.floatExponentBits(T) + 1;
if (xe < -(1 << overflow_shift) or (1 << overflow_shift) < xe) {
@@ -179,7 +179,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
fn isOddInteger(x: f64) bool {
const r = math.modf(x);
- return r.fpart == 0.0 and @intFromFloat(i64, r.ipart) & 1 == 1;
+ return r.fpart == 0.0 and @as(i64, @intFromFloat(r.ipart)) & 1 == 1;
}
test "math.pow" {
diff --git a/lib/std/math/signbit.zig b/lib/std/math/signbit.zig
index 9aab487d37..df061568b1 100644
--- a/lib/std/math/signbit.zig
+++ b/lib/std/math/signbit.zig
@@ -6,7 +6,7 @@ const expect = std.testing.expect;
pub fn signbit(x: anytype) bool {
const T = @TypeOf(x);
const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
- return @bitCast(TBits, x) >> (@bitSizeOf(T) - 1) != 0;
+ return @as(TBits, @bitCast(x)) >> (@bitSizeOf(T) - 1) != 0;
}
test "math.signbit" {
diff --git a/lib/std/math/sinh.zig b/lib/std/math/sinh.zig
index 5ec47fa3b5..0082f61d3f 100644
--- a/lib/std/math/sinh.zig
+++ b/lib/std/math/sinh.zig
@@ -29,9 +29,9 @@ pub fn sinh(x: anytype) @TypeOf(x) {
// = (exp(x) - 1 + (exp(x) - 1) / exp(x)) / 2
// = x + x^3 / 6 + o(x^5)
fn sinh32(x: f32) f32 {
- const u = @bitCast(u32, x);
+ const u = @as(u32, @bitCast(x));
const ux = u & 0x7FFFFFFF;
- const ax = @bitCast(f32, ux);
+ const ax = @as(f32, @bitCast(ux));
if (x == 0.0 or math.isNan(x)) {
return x;
@@ -60,9 +60,9 @@ fn sinh32(x: f32) f32 {
}
fn sinh64(x: f64) f64 {
- const u = @bitCast(u64, x);
- const w = @intCast(u32, u >> 32) & (maxInt(u32) >> 1);
- const ax = @bitCast(f64, u & (maxInt(u64) >> 1));
+ const u = @as(u64, @bitCast(x));
+ const w = @as(u32, @intCast(u >> 32)) & (maxInt(u32) >> 1);
+ const ax = @as(f64, @bitCast(u & (maxInt(u64) >> 1)));
if (x == 0.0 or math.isNan(x)) {
return x;
diff --git a/lib/std/math/sqrt.zig b/lib/std/math/sqrt.zig
index 926582034e..0dd5381cd9 100644
--- a/lib/std/math/sqrt.zig
+++ b/lib/std/math/sqrt.zig
@@ -57,7 +57,7 @@ fn sqrt_int(comptime T: type, value: T) Sqrt(T) {
one >>= 2;
}
- return @intCast(Sqrt(T), res);
+ return @as(Sqrt(T), @intCast(res));
}
}
diff --git a/lib/std/math/tanh.zig b/lib/std/math/tanh.zig
index dcde79a925..9c9a3e6801 100644
--- a/lib/std/math/tanh.zig
+++ b/lib/std/math/tanh.zig
@@ -29,9 +29,9 @@ pub fn tanh(x: anytype) @TypeOf(x) {
// = (exp(2x) - 1) / (exp(2x) - 1 + 2)
// = (1 - exp(-2x)) / (exp(-2x) - 1 + 2)
fn tanh32(x: f32) f32 {
- const u = @bitCast(u32, x);
+ const u = @as(u32, @bitCast(x));
const ux = u & 0x7FFFFFFF;
- const ax = @bitCast(f32, ux);
+ const ax = @as(f32, @bitCast(ux));
const sign = (u >> 31) != 0;
var t: f32 = undefined;
@@ -66,10 +66,10 @@ fn tanh32(x: f32) f32 {
}
fn tanh64(x: f64) f64 {
- const u = @bitCast(u64, x);
+ const u = @as(u64, @bitCast(x));
const ux = u & 0x7FFFFFFFFFFFFFFF;
- const w = @intCast(u32, ux >> 32);
- const ax = @bitCast(f64, ux);
+ const w = @as(u32, @intCast(ux >> 32));
+ const ax = @as(f64, @bitCast(ux));
const sign = (u >> 63) != 0;
var t: f64 = undefined;
@@ -96,7 +96,7 @@ fn tanh64(x: f64) f64 {
}
// |x| is subnormal
else {
- math.doNotOptimizeAway(@floatCast(f32, ax));
+ math.doNotOptimizeAway(@as(f32, @floatCast(ax)));
t = ax;
}
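The float-conversion builtins migrated in tanh.zig and the surrounding files follow the same rule: @floatCast, @intFromFloat, and @floatFromInt take their destination from the result type. Two hypothetical one-liners (not part of the library) illustrating the new forms:

const std = @import("std");

// Old: @intFromFloat(i32, @round(x)); the i32 return type now supplies it.
fn roundToI32(x: f64) i32 {
    return @intFromFloat(@round(x));
}

// Old: @floatCast(f32, x); the f32 return type now supplies it.
fn narrowToF32(x: f64) f32 {
    return @floatCast(x);
}

test "inferred destination types for float conversions" {
    try std.testing.expectEqual(@as(i32, 3), roundToI32(2.6));
    try std.testing.expectEqual(@as(f32, 1.5), narrowToF32(1.5));
}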
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index bbeecdda23..229bc0b63e 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -69,7 +69,7 @@ pub fn ValidationAllocator(comptime T: type) type {
ret_addr: usize,
) ?[*]u8 {
assert(n > 0);
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
const underlying = self.getUnderlyingAllocatorPtr();
const result = underlying.rawAlloc(n, log2_ptr_align, ret_addr) orelse
return null;
@@ -84,7 +84,7 @@ pub fn ValidationAllocator(comptime T: type) type {
new_len: usize,
ret_addr: usize,
) bool {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
assert(buf.len > 0);
const underlying = self.getUnderlyingAllocatorPtr();
return underlying.rawResize(buf, log2_buf_align, new_len, ret_addr);
@@ -96,7 +96,7 @@ pub fn ValidationAllocator(comptime T: type) type {
log2_buf_align: u8,
ret_addr: usize,
) void {
- const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+ const self: *Self = @ptrCast(@alignCast(ctx));
assert(buf.len > 0);
const underlying = self.getUnderlyingAllocatorPtr();
underlying.rawFree(buf, log2_buf_align, ret_addr);
@@ -169,7 +169,7 @@ test "Allocator.resize" {
var values = try testing.allocator.alloc(T, 100);
defer testing.allocator.free(values);
- for (values, 0..) |*v, i| v.* = @intCast(T, i);
+ for (values, 0..) |*v, i| v.* = @as(T, @intCast(i));
if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory;
values = values.ptr[0 .. values.len + 10];
try testing.expect(values.len == 110);
@@ -185,7 +185,7 @@ test "Allocator.resize" {
var values = try testing.allocator.alloc(T, 100);
defer testing.allocator.free(values);
- for (values, 0..) |*v, i| v.* = @floatFromInt(T, i);
+ for (values, 0..) |*v, i| v.* = @as(T, @floatFromInt(i));
if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory;
values = values.ptr[0 .. values.len + 10];
try testing.expect(values.len == 110);
@@ -233,7 +233,7 @@ pub fn zeroes(comptime T: type) T {
return @as(T, 0);
},
.Enum, .EnumLiteral => {
- return @enumFromInt(T, 0);
+ return @as(T, @enumFromInt(0));
},
.Void => {
return {};
@@ -264,7 +264,7 @@ pub fn zeroes(comptime T: type) T {
switch (ptr_info.size) {
.Slice => {
if (ptr_info.sentinel) |sentinel| {
- if (ptr_info.child == u8 and @ptrCast(*const u8, sentinel).* == 0) {
+ if (ptr_info.child == u8 and @as(*const u8, @ptrCast(sentinel)).* == 0) {
return ""; // A special case for the most common use-case: null-terminated strings.
}
@compileError("Can't set a sentinel slice to zero. This would require allocating memory.");
@@ -282,7 +282,7 @@ pub fn zeroes(comptime T: type) T {
},
.Array => |info| {
if (info.sentinel) |sentinel_ptr| {
- const sentinel = @ptrCast(*align(1) const info.child, sentinel_ptr).*;
+ const sentinel = @as(*align(1) const info.child, @ptrCast(sentinel_ptr)).*;
return [_:sentinel]info.child{zeroes(info.child)} ** info.len;
}
return [_]info.child{zeroes(info.child)} ** info.len;
@@ -456,7 +456,7 @@ pub fn zeroInit(comptime T: type, init: anytype) T {
},
}
} else if (field.default_value) |default_value_ptr| {
- const default_value = @ptrCast(*align(1) const field.type, default_value_ptr).*;
+ const default_value = @as(*align(1) const field.type, @ptrCast(default_value_ptr)).*;
@field(value, field.name) = default_value;
} else {
switch (@typeInfo(field.type)) {
@@ -709,7 +709,7 @@ pub fn span(ptr: anytype) Span(@TypeOf(ptr)) {
const l = len(ptr);
const ptr_info = @typeInfo(Result).Pointer;
if (ptr_info.sentinel) |s_ptr| {
- const s = @ptrCast(*align(1) const ptr_info.child, s_ptr).*;
+ const s = @as(*align(1) const ptr_info.child, @ptrCast(s_ptr)).*;
return ptr[0..l :s];
} else {
return ptr[0..l];
@@ -740,7 +740,7 @@ fn SliceTo(comptime T: type, comptime end: meta.Elem(T)) type {
// to find the value searched for, which is only the case if it matches
// the sentinel of the type passed.
if (array_info.sentinel) |sentinel_ptr| {
- const sentinel = @ptrCast(*align(1) const array_info.child, sentinel_ptr).*;
+ const sentinel = @as(*align(1) const array_info.child, @ptrCast(sentinel_ptr)).*;
if (end == sentinel) {
new_ptr_info.sentinel = &end;
} else {
@@ -755,7 +755,7 @@ fn SliceTo(comptime T: type, comptime end: meta.Elem(T)) type {
// to find the value searched for, which is only the case if it matches
// the sentinel of the type passed.
if (ptr_info.sentinel) |sentinel_ptr| {
- const sentinel = @ptrCast(*align(1) const ptr_info.child, sentinel_ptr).*;
+ const sentinel = @as(*align(1) const ptr_info.child, @ptrCast(sentinel_ptr)).*;
if (end == sentinel) {
new_ptr_info.sentinel = &end;
} else {
@@ -793,7 +793,7 @@ pub fn sliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) SliceTo(@Typ
const length = lenSliceTo(ptr, end);
const ptr_info = @typeInfo(Result).Pointer;
if (ptr_info.sentinel) |s_ptr| {
- const s = @ptrCast(*align(1) const ptr_info.child, s_ptr).*;
+ const s = @as(*align(1) const ptr_info.child, @ptrCast(s_ptr)).*;
return ptr[0..length :s];
} else {
return ptr[0..length];
@@ -810,11 +810,11 @@ test "sliceTo" {
try testing.expectEqualSlices(u16, array[0..2], sliceTo(&array, 3));
try testing.expectEqualSlices(u16, array[0..2], sliceTo(array[0..3], 3));
- const sentinel_ptr = @ptrCast([*:5]u16, &array);
+ const sentinel_ptr = @as([*:5]u16, @ptrCast(&array));
try testing.expectEqualSlices(u16, array[0..2], sliceTo(sentinel_ptr, 3));
try testing.expectEqualSlices(u16, array[0..4], sliceTo(sentinel_ptr, 99));
- const optional_sentinel_ptr = @ptrCast(?[*:5]u16, &array);
+ const optional_sentinel_ptr = @as(?[*:5]u16, @ptrCast(&array));
try testing.expectEqualSlices(u16, array[0..2], sliceTo(optional_sentinel_ptr, 3).?);
try testing.expectEqualSlices(u16, array[0..4], sliceTo(optional_sentinel_ptr, 99).?);
@@ -846,7 +846,7 @@ fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize {
.One => switch (@typeInfo(ptr_info.child)) {
.Array => |array_info| {
if (array_info.sentinel) |sentinel_ptr| {
- const sentinel = @ptrCast(*align(1) const array_info.child, sentinel_ptr).*;
+ const sentinel = @as(*align(1) const array_info.child, @ptrCast(sentinel_ptr)).*;
if (sentinel == end) {
return indexOfSentinel(array_info.child, end, ptr);
}
@@ -856,7 +856,7 @@ fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize {
else => {},
},
.Many => if (ptr_info.sentinel) |sentinel_ptr| {
- const sentinel = @ptrCast(*align(1) const ptr_info.child, sentinel_ptr).*;
+ const sentinel = @as(*align(1) const ptr_info.child, @ptrCast(sentinel_ptr)).*;
// We may be looking for something other than the sentinel,
// but iterating past the sentinel would be a bug so we need
// to check for both.
@@ -870,7 +870,7 @@ fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize {
},
.Slice => {
if (ptr_info.sentinel) |sentinel_ptr| {
- const sentinel = @ptrCast(*align(1) const ptr_info.child, sentinel_ptr).*;
+ const sentinel = @as(*align(1) const ptr_info.child, @ptrCast(sentinel_ptr)).*;
if (sentinel == end) {
return indexOfSentinel(ptr_info.child, sentinel, ptr);
}
@@ -893,7 +893,7 @@ test "lenSliceTo" {
try testing.expectEqual(@as(usize, 2), lenSliceTo(&array, 3));
try testing.expectEqual(@as(usize, 2), lenSliceTo(array[0..3], 3));
- const sentinel_ptr = @ptrCast([*:5]u16, &array);
+ const sentinel_ptr = @as([*:5]u16, @ptrCast(&array));
try testing.expectEqual(@as(usize, 2), lenSliceTo(sentinel_ptr, 3));
try testing.expectEqual(@as(usize, 4), lenSliceTo(sentinel_ptr, 99));
@@ -925,7 +925,7 @@ pub fn len(value: anytype) usize {
.Many => {
const sentinel_ptr = info.sentinel orelse
@compileError("invalid type given to std.mem.len: " ++ @typeName(@TypeOf(value)));
- const sentinel = @ptrCast(*align(1) const info.child, sentinel_ptr).*;
+ const sentinel = @as(*align(1) const info.child, @ptrCast(sentinel_ptr)).*;
return indexOfSentinel(info.child, sentinel, value);
},
.C => {
@@ -1331,7 +1331,7 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: Endian)
.Little => {
const ShiftType = math.Log2Int(ReturnType);
for (bytes, 0..) |b, index| {
- result = result | (@as(ReturnType, b) << @intCast(ShiftType, index * 8));
+ result = result | (@as(ReturnType, b) << @as(ShiftType, @intCast(index * 8)));
}
},
}
@@ -1359,8 +1359,8 @@ pub fn readVarPackedInt(
const Log2N = std.math.Log2Int(T);
const read_size = (bit_count + (bit_offset % 8) + 7) / 8;
- const bit_shift = @intCast(u3, bit_offset % 8);
- const pad = @intCast(Log2N, @bitSizeOf(T) - bit_count);
+ const bit_shift = @as(u3, @intCast(bit_offset % 8));
+ const pad = @as(Log2N, @intCast(@bitSizeOf(T) - bit_count));
const lowest_byte = switch (endian) {
.Big => bytes.len - (bit_offset / 8) - read_size,
@@ -1372,17 +1372,17 @@ pub fn readVarPackedInt(
// These are the same shifts/masks we perform below, but add `@truncate`/`@intCast`
// where needed since int is smaller than a byte.
const value = if (read_size == 1) b: {
- break :b @truncate(uN, read_bytes[0] >> bit_shift);
+ break :b @as(uN, @truncate(read_bytes[0] >> bit_shift));
} else b: {
const i: u1 = @intFromBool(endian == .Big);
- const head = @truncate(uN, read_bytes[i] >> bit_shift);
- const tail_shift = @intCast(Log2N, @as(u4, 8) - bit_shift);
- const tail = @truncate(uN, read_bytes[1 - i]);
+ const head = @as(uN, @truncate(read_bytes[i] >> bit_shift));
+ const tail_shift = @as(Log2N, @intCast(@as(u4, 8) - bit_shift));
+ const tail = @as(uN, @truncate(read_bytes[1 - i]));
break :b (tail << tail_shift) | head;
};
switch (signedness) {
- .signed => return @intCast(T, (@bitCast(iN, value) << pad) >> pad),
- .unsigned => return @intCast(T, (@bitCast(uN, value) << pad) >> pad),
+ .signed => return @as(T, @intCast((@as(iN, @bitCast(value)) << pad) >> pad)),
+ .unsigned => return @as(T, @intCast((@as(uN, @bitCast(value)) << pad) >> pad)),
}
}
@@ -1398,13 +1398,13 @@ pub fn readVarPackedInt(
.Little => {
int = read_bytes[0] >> bit_shift;
for (read_bytes[1..], 0..) |elem, i| {
- int |= (@as(uN, elem) << @intCast(Log2N, (8 * (i + 1) - bit_shift)));
+ int |= (@as(uN, elem) << @as(Log2N, @intCast((8 * (i + 1) - bit_shift))));
}
},
}
switch (signedness) {
- .signed => return @intCast(T, (@bitCast(iN, int) << pad) >> pad),
- .unsigned => return @intCast(T, (@bitCast(uN, int) << pad) >> pad),
+ .signed => return @as(T, @intCast((@as(iN, @bitCast(int)) << pad) >> pad)),
+ .unsigned => return @as(T, @intCast((@as(uN, @bitCast(int)) << pad) >> pad)),
}
}
@@ -1414,7 +1414,7 @@ pub fn readVarPackedInt(
/// Assumes the endianness of memory is native. This means the function can
/// simply pointer cast memory.
pub fn readIntNative(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T {
- return @ptrCast(*align(1) const T, bytes).*;
+ return @as(*align(1) const T, @ptrCast(bytes)).*;
}
/// Reads an integer from memory with bit count specified by T.
@@ -1480,10 +1480,10 @@ fn readPackedIntLittle(comptime T: type, bytes: []const u8, bit_offset: usize) T
const Log2N = std.math.Log2Int(T);
const bit_count = @as(usize, @bitSizeOf(T));
- const bit_shift = @intCast(u3, bit_offset % 8);
+ const bit_shift = @as(u3, @intCast(bit_offset % 8));
const load_size = (bit_count + 7) / 8;
- const load_tail_bits = @intCast(u3, (load_size * 8) - bit_count);
+ const load_tail_bits = @as(u3, @intCast((load_size * 8) - bit_count));
const LoadInt = std.meta.Int(.unsigned, load_size * 8);
if (bit_count == 0)
@@ -1492,13 +1492,13 @@ fn readPackedIntLittle(comptime T: type, bytes: []const u8, bit_offset: usize) T
// Read by loading a LoadInt, and then follow it up with a 1-byte read
// of the tail if bit_offset pushed us over a byte boundary.
const read_bytes = bytes[bit_offset / 8 ..];
- const val = @truncate(uN, readIntLittle(LoadInt, read_bytes[0..load_size]) >> bit_shift);
+ const val = @as(uN, @truncate(readIntLittle(LoadInt, read_bytes[0..load_size]) >> bit_shift));
if (bit_shift > load_tail_bits) {
- const tail_bits = @intCast(Log2N, bit_shift - load_tail_bits);
+ const tail_bits = @as(Log2N, @intCast(bit_shift - load_tail_bits));
const tail_byte = read_bytes[load_size];
- const tail_truncated = if (bit_count < 8) @truncate(uN, tail_byte) else @as(uN, tail_byte);
- return @bitCast(T, val | (tail_truncated << (@truncate(Log2N, bit_count) -% tail_bits)));
- } else return @bitCast(T, val);
+ const tail_truncated = if (bit_count < 8) @as(uN, @truncate(tail_byte)) else @as(uN, tail_byte);
+ return @as(T, @bitCast(val | (tail_truncated << (@as(Log2N, @truncate(bit_count)) -% tail_bits))));
+ } else return @as(T, @bitCast(val));
}
fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T {
@@ -1506,11 +1506,11 @@ fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T {
const Log2N = std.math.Log2Int(T);
const bit_count = @as(usize, @bitSizeOf(T));
- const bit_shift = @intCast(u3, bit_offset % 8);
+ const bit_shift = @as(u3, @intCast(bit_offset % 8));
const byte_count = (@as(usize, bit_shift) + bit_count + 7) / 8;
const load_size = (bit_count + 7) / 8;
- const load_tail_bits = @intCast(u3, (load_size * 8) - bit_count);
+ const load_tail_bits = @as(u3, @intCast((load_size * 8) - bit_count));
const LoadInt = std.meta.Int(.unsigned, load_size * 8);
if (bit_count == 0)
@@ -1520,12 +1520,12 @@ fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T {
// of the tail if bit_offset pushed us over a byte boundary.
const end = bytes.len - (bit_offset / 8);
const read_bytes = bytes[(end - byte_count)..end];
- const val = @truncate(uN, readIntBig(LoadInt, bytes[(end - load_size)..end][0..load_size]) >> bit_shift);
+ const val = @as(uN, @truncate(readIntBig(LoadInt, bytes[(end - load_size)..end][0..load_size]) >> bit_shift));
if (bit_shift > load_tail_bits) {
- const tail_bits = @intCast(Log2N, bit_shift - load_tail_bits);
- const tail_byte = if (bit_count < 8) @truncate(uN, read_bytes[0]) else @as(uN, read_bytes[0]);
- return @bitCast(T, val | (tail_byte << (@truncate(Log2N, bit_count) -% tail_bits)));
- } else return @bitCast(T, val);
+ const tail_bits = @as(Log2N, @intCast(bit_shift - load_tail_bits));
+ const tail_byte = if (bit_count < 8) @as(uN, @truncate(read_bytes[0])) else @as(uN, read_bytes[0]);
+ return @as(T, @bitCast(val | (tail_byte << (@as(Log2N, @truncate(bit_count)) -% tail_bits))));
+ } else return @as(T, @bitCast(val));
}
pub const readPackedIntNative = switch (native_endian) {
@@ -1605,7 +1605,7 @@ test "readIntBig and readIntLittle" {
/// This function stores in native endian, which means it is implemented as a simple
/// memory store.
pub fn writeIntNative(comptime T: type, buf: *[(@typeInfo(T).Int.bits + 7) / 8]u8, value: T) void {
- @ptrCast(*align(1) T, buf).* = value;
+ @as(*align(1) T, @ptrCast(buf)).* = value;
}
/// Writes an integer to memory, storing it in twos-complement.
@@ -1642,10 +1642,10 @@ fn writePackedIntLittle(comptime T: type, bytes: []u8, bit_offset: usize, value:
const Log2N = std.math.Log2Int(T);
const bit_count = @as(usize, @bitSizeOf(T));
- const bit_shift = @intCast(u3, bit_offset % 8);
+ const bit_shift = @as(u3, @intCast(bit_offset % 8));
const store_size = (@bitSizeOf(T) + 7) / 8;
- const store_tail_bits = @intCast(u3, (store_size * 8) - bit_count);
+ const store_tail_bits = @as(u3, @intCast((store_size * 8) - bit_count));
const StoreInt = std.meta.Int(.unsigned, store_size * 8);
if (bit_count == 0)
@@ -1656,11 +1656,11 @@ fn writePackedIntLittle(comptime T: type, bytes: []u8, bit_offset: usize, value:
const write_bytes = bytes[bit_offset / 8 ..];
const head = write_bytes[0] & ((@as(u8, 1) << bit_shift) - 1);
- var write_value = (@as(StoreInt, @bitCast(uN, value)) << bit_shift) | @intCast(StoreInt, head);
+ var write_value = (@as(StoreInt, @as(uN, @bitCast(value))) << bit_shift) | @as(StoreInt, @intCast(head));
if (bit_shift > store_tail_bits) {
- const tail_len = @intCast(Log2N, bit_shift - store_tail_bits);
- write_bytes[store_size] &= ~((@as(u8, 1) << @intCast(u3, tail_len)) - 1);
- write_bytes[store_size] |= @intCast(u8, (@bitCast(uN, value) >> (@truncate(Log2N, bit_count) -% tail_len)));
+ const tail_len = @as(Log2N, @intCast(bit_shift - store_tail_bits));
+ write_bytes[store_size] &= ~((@as(u8, 1) << @as(u3, @intCast(tail_len))) - 1);
+ write_bytes[store_size] |= @as(u8, @intCast((@as(uN, @bitCast(value)) >> (@as(Log2N, @truncate(bit_count)) -% tail_len))));
} else if (bit_shift < store_tail_bits) {
const tail_len = store_tail_bits - bit_shift;
const tail = write_bytes[store_size - 1] & (@as(u8, 0xfe) << (7 - tail_len));
@@ -1675,11 +1675,11 @@ fn writePackedIntBig(comptime T: type, bytes: []u8, bit_offset: usize, value: T)
const Log2N = std.math.Log2Int(T);
const bit_count = @as(usize, @bitSizeOf(T));
- const bit_shift = @intCast(u3, bit_offset % 8);
+ const bit_shift = @as(u3, @intCast(bit_offset % 8));
const byte_count = (bit_shift + bit_count + 7) / 8;
const store_size = (@bitSizeOf(T) + 7) / 8;
- const store_tail_bits = @intCast(u3, (store_size * 8) - bit_count);
+ const store_tail_bits = @as(u3, @intCast((store_size * 8) - bit_count));
const StoreInt = std.meta.Int(.unsigned, store_size * 8);
if (bit_count == 0)
@@ -1691,11 +1691,11 @@ fn writePackedIntBig(comptime T: type, bytes: []u8, bit_offset: usize, value: T)
const write_bytes = bytes[(end - byte_count)..end];
const head = write_bytes[byte_count - 1] & ((@as(u8, 1) << bit_shift) - 1);
- var write_value = (@as(StoreInt, @bitCast(uN, value)) << bit_shift) | @intCast(StoreInt, head);
+ var write_value = (@as(StoreInt, @as(uN, @bitCast(value))) << bit_shift) | @as(StoreInt, @intCast(head));
if (bit_shift > store_tail_bits) {
- const tail_len = @intCast(Log2N, bit_shift - store_tail_bits);
- write_bytes[0] &= ~((@as(u8, 1) << @intCast(u3, tail_len)) - 1);
- write_bytes[0] |= @intCast(u8, (@bitCast(uN, value) >> (@truncate(Log2N, bit_count) -% tail_len)));
+ const tail_len = @as(Log2N, @intCast(bit_shift - store_tail_bits));
+ write_bytes[0] &= ~((@as(u8, 1) << @as(u3, @intCast(tail_len))) - 1);
+ write_bytes[0] |= @as(u8, @intCast((@as(uN, @bitCast(value)) >> (@as(Log2N, @truncate(bit_count)) -% tail_len))));
} else if (bit_shift < store_tail_bits) {
const tail_len = store_tail_bits - bit_shift;
const tail = write_bytes[0] & (@as(u8, 0xfe) << (7 - tail_len));
@@ -1744,14 +1744,14 @@ pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
return @memset(buffer, 0);
} else if (@typeInfo(T).Int.bits == 8) {
@memset(buffer, 0);
- buffer[0] = @bitCast(u8, value);
+ buffer[0] = @as(u8, @bitCast(value));
return;
}
// TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough
const uint = std.meta.Int(.unsigned, @typeInfo(T).Int.bits);
- var bits = @bitCast(uint, value);
+ var bits = @as(uint, @bitCast(value));
for (buffer) |*b| {
- b.* = @truncate(u8, bits);
+ b.* = @as(u8, @truncate(bits));
bits >>= 8;
}
}
@@ -1768,17 +1768,17 @@ pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void {
return @memset(buffer, 0);
} else if (@typeInfo(T).Int.bits == 8) {
@memset(buffer, 0);
- buffer[buffer.len - 1] = @bitCast(u8, value);
+ buffer[buffer.len - 1] = @as(u8, @bitCast(value));
return;
}
// TODO I want to call writeIntBig here but comptime eval facilities aren't good enough
const uint = std.meta.Int(.unsigned, @typeInfo(T).Int.bits);
- var bits = @bitCast(uint, value);
+ var bits = @as(uint, @bitCast(value));
var index: usize = buffer.len;
while (index != 0) {
index -= 1;
- buffer[index] = @truncate(u8, bits);
+ buffer[index] = @as(u8, @truncate(bits));
bits >>= 8;
}
}
@@ -1822,7 +1822,7 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value
const uN = std.meta.Int(.unsigned, @bitSizeOf(T));
const Log2N = std.math.Log2Int(T);
- const bit_shift = @intCast(u3, bit_offset % 8);
+ const bit_shift = @as(u3, @intCast(bit_offset % 8));
const write_size = (bit_count + bit_shift + 7) / 8;
const lowest_byte = switch (endian) {
.Big => bytes.len - (bit_offset / 8) - write_size,
@@ -1833,8 +1833,8 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value
if (write_size == 1) {
// Single byte writes are handled specially, since we need to mask bits
// on both ends of the byte.
- const mask = (@as(u8, 0xff) >> @intCast(u3, 8 - bit_count));
- const new_bits = @intCast(u8, @bitCast(uN, value) & mask) << bit_shift;
+ const mask = (@as(u8, 0xff) >> @as(u3, @intCast(8 - bit_count)));
+ const new_bits = @as(u8, @intCast(@as(uN, @bitCast(value)) & mask)) << bit_shift;
write_bytes[0] = (write_bytes[0] & ~(mask << bit_shift)) | new_bits;
return;
}
@@ -1843,31 +1843,31 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value
// Iterate bytes forward for Little-endian, backward for Big-endian
const delta: i2 = if (endian == .Big) -1 else 1;
- const start = if (endian == .Big) @intCast(isize, write_bytes.len - 1) else 0;
+ const start = if (endian == .Big) @as(isize, @intCast(write_bytes.len - 1)) else 0;
var i: isize = start; // isize for signed index arithmetic
// Write first byte, using a mask to protect bits preceding bit_offset
const head_mask = @as(u8, 0xff) >> bit_shift;
- write_bytes[@intCast(usize, i)] &= ~(head_mask << bit_shift);
- write_bytes[@intCast(usize, i)] |= @intCast(u8, @bitCast(uN, remaining) & head_mask) << bit_shift;
- remaining >>= @intCast(Log2N, @as(u4, 8) - bit_shift);
+ write_bytes[@as(usize, @intCast(i))] &= ~(head_mask << bit_shift);
+ write_bytes[@as(usize, @intCast(i))] |= @as(u8, @intCast(@as(uN, @bitCast(remaining)) & head_mask)) << bit_shift;
+ remaining >>= @as(Log2N, @intCast(@as(u4, 8) - bit_shift));
i += delta;
// Write bytes[1..bytes.len - 1]
if (@bitSizeOf(T) > 8) {
- const loop_end = start + delta * (@intCast(isize, write_size) - 1);
+ const loop_end = start + delta * (@as(isize, @intCast(write_size)) - 1);
while (i != loop_end) : (i += delta) {
- write_bytes[@intCast(usize, i)] = @truncate(u8, @bitCast(uN, remaining));
+ write_bytes[@as(usize, @intCast(i))] = @as(u8, @truncate(@as(uN, @bitCast(remaining))));
remaining >>= 8;
}
}
// Write last byte, using a mask to protect bits following bit_offset + bit_count
- const following_bits = -%@truncate(u3, bit_shift + bit_count);
+ const following_bits = -%@as(u3, @truncate(bit_shift + bit_count));
const tail_mask = (@as(u8, 0xff) << following_bits) >> following_bits;
- write_bytes[@intCast(usize, i)] &= ~tail_mask;
- write_bytes[@intCast(usize, i)] |= @intCast(u8, @bitCast(uN, remaining) & tail_mask);
+ write_bytes[@as(usize, @intCast(i))] &= ~tail_mask;
+ write_bytes[@as(usize, @intCast(i))] |= @as(u8, @intCast(@as(uN, @bitCast(remaining)) & tail_mask));
}
test "writeIntBig and writeIntLittle" {
@@ -3799,15 +3799,14 @@ pub fn alignPointerOffset(ptr: anytype, align_to: usize) ?usize {
/// type.
pub fn alignPointer(ptr: anytype, align_to: usize) ?@TypeOf(ptr) {
const adjust_off = alignPointerOffset(ptr, align_to) orelse return null;
- const T = @TypeOf(ptr);
// Avoid the use of ptrFromInt to avoid losing the pointer provenance info.
- return @alignCast(@typeInfo(T).Pointer.alignment, ptr + adjust_off);
+ return @alignCast(ptr + adjust_off);
}
test "alignPointer" {
const S = struct {
fn checkAlign(comptime T: type, base: usize, align_to: usize, expected: usize) !void {
- var ptr = @ptrFromInt(T, base);
+ var ptr = @as(T, @ptrFromInt(base));
var aligned = alignPointer(ptr, align_to);
try testing.expectEqual(expected, @intFromPtr(aligned));
}
@@ -3854,9 +3853,7 @@ fn AsBytesReturnType(comptime P: type) type {
/// Given a pointer to a single item, returns a slice of the underlying bytes, preserving pointer attributes.
pub fn asBytes(ptr: anytype) AsBytesReturnType(@TypeOf(ptr)) {
- const P = @TypeOf(ptr);
- const T = AsBytesReturnType(P);
- return @ptrCast(T, @alignCast(meta.alignment(T), ptr));
+ return @ptrCast(@alignCast(ptr));
}
test "asBytes" {
@@ -3902,7 +3899,7 @@ test "asBytes" {
test "asBytes preserves pointer attributes" {
const inArr: u32 align(16) = 0xDEADBEEF;
- const inPtr = @ptrCast(*align(16) const volatile u32, &inArr);
+ const inPtr = @as(*align(16) const volatile u32, @ptrCast(&inArr));
const outSlice = asBytes(inPtr);
const in = @typeInfo(@TypeOf(inPtr)).Pointer;
@@ -3948,7 +3945,7 @@ fn BytesAsValueReturnType(comptime T: type, comptime B: type) type {
/// Given a pointer to an array of bytes, returns a pointer to a value of the specified type
/// backed by those bytes, preserving pointer attributes.
pub fn bytesAsValue(comptime T: type, bytes: anytype) BytesAsValueReturnType(T, @TypeOf(bytes)) {
- return @ptrCast(BytesAsValueReturnType(T, @TypeOf(bytes)), bytes);
+ return @as(BytesAsValueReturnType(T, @TypeOf(bytes)), @ptrCast(bytes));
}
test "bytesAsValue" {
@@ -3993,7 +3990,7 @@ test "bytesAsValue" {
test "bytesAsValue preserves pointer attributes" {
const inArr align(16) = [4]u8{ 0xDE, 0xAD, 0xBE, 0xEF };
- const inSlice = @ptrCast(*align(16) const volatile [4]u8, &inArr)[0..];
+ const inSlice = @as(*align(16) const volatile [4]u8, @ptrCast(&inArr))[0..];
const outPtr = bytesAsValue(u32, inSlice);
const in = @typeInfo(@TypeOf(inSlice)).Pointer;
@@ -4043,7 +4040,7 @@ pub fn bytesAsSlice(comptime T: type, bytes: anytype) BytesAsSliceReturnType(T,
const cast_target = CopyPtrAttrs(@TypeOf(bytes), .Many, T);
- return @ptrCast(cast_target, bytes)[0..@divExact(bytes.len, @sizeOf(T))];
+ return @as(cast_target, @ptrCast(bytes))[0..@divExact(bytes.len, @sizeOf(T))];
}
test "bytesAsSlice" {
@@ -4101,7 +4098,7 @@ test "bytesAsSlice with specified alignment" {
test "bytesAsSlice preserves pointer attributes" {
const inArr align(16) = [4]u8{ 0xDE, 0xAD, 0xBE, 0xEF };
- const inSlice = @ptrCast(*align(16) const volatile [4]u8, &inArr)[0..];
+ const inSlice = @as(*align(16) const volatile [4]u8, @ptrCast(&inArr))[0..];
const outSlice = bytesAsSlice(u16, inSlice);
const in = @typeInfo(@TypeOf(inSlice)).Pointer;
@@ -4133,7 +4130,7 @@ pub fn sliceAsBytes(slice: anytype) SliceAsBytesReturnType(@TypeOf(slice)) {
const cast_target = CopyPtrAttrs(Slice, .Many, u8);
- return @ptrCast(cast_target, slice)[0 .. slice.len * @sizeOf(meta.Elem(Slice))];
+ return @as(cast_target, @ptrCast(slice))[0 .. slice.len * @sizeOf(meta.Elem(Slice))];
}
test "sliceAsBytes" {
@@ -4197,7 +4194,7 @@ test "sliceAsBytes and bytesAsSlice back" {
test "sliceAsBytes preserves pointer attributes" {
const inArr align(16) = [2]u16{ 0xDEAD, 0xBEEF };
- const inSlice = @ptrCast(*align(16) const volatile [2]u16, &inArr)[0..];
+ const inSlice = @as(*align(16) const volatile [2]u16, @ptrCast(&inArr))[0..];
const outSlice = sliceAsBytes(inSlice);
const in = @typeInfo(@TypeOf(inSlice)).Pointer;
@@ -4218,7 +4215,7 @@ pub fn alignForward(comptime T: type, addr: T, alignment: T) T {
}
pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize {
- const alignment = @as(usize, 1) << @intCast(math.Log2Int(usize), log2_alignment);
+ const alignment = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_alignment));
return alignForward(usize, addr, alignment);
}
@@ -4282,7 +4279,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
/// .stage2_c doesn't support asm blocks yet, so use volatile stores instead
var deopt_target: if (builtin.zig_backend == .stage2_c) u8 else void = undefined;
fn doNotOptimizeAwayC(ptr: anytype) void {
- const dest = @ptrCast(*volatile u8, &deopt_target);
+ const dest = @as(*volatile u8, @ptrCast(&deopt_target));
for (asBytes(ptr)) |b| {
dest.* = b;
}
@@ -4433,7 +4430,7 @@ pub fn alignInBytes(bytes: []u8, comptime new_alignment: usize) ?[]align(new_ali
error.Overflow => return null,
};
const alignment_offset = begin_address_aligned - begin_address;
- return @alignCast(new_alignment, bytes[alignment_offset .. alignment_offset + new_length]);
+ return @alignCast(bytes[alignment_offset .. alignment_offset + new_length]);
}
/// Returns the largest sub-slice within the given slice that conforms to the new alignment,
@@ -4445,7 +4442,7 @@ pub fn alignInSlice(slice: anytype, comptime new_alignment: usize) ?AlignedSlice
const Element = @TypeOf(slice[0]);
const slice_length_bytes = aligned_bytes.len - (aligned_bytes.len % @sizeOf(Element));
const aligned_slice = bytesAsSlice(Element, aligned_bytes[0..slice_length_bytes]);
- return @alignCast(new_alignment, aligned_slice);
+ return @alignCast(aligned_slice);
}
test "read/write(Var)PackedInt" {
@@ -4490,8 +4487,8 @@ test "read/write(Var)PackedInt" {
for ([_]PackedType{
~@as(PackedType, 0), // all ones: -1 iN / maxInt uN
@as(PackedType, 0), // all zeros: 0 iN / 0 uN
- @bitCast(PackedType, @as(iPackedType, math.maxInt(iPackedType))), // maxInt iN
- @bitCast(PackedType, @as(iPackedType, math.minInt(iPackedType))), // maxInt iN
+ @as(PackedType, @bitCast(@as(iPackedType, math.maxInt(iPackedType)))), // maxInt iN
+ @as(PackedType, @bitCast(@as(iPackedType, math.minInt(iPackedType)))), // maxInt iN
random.int(PackedType), // random
random.int(PackedType), // random
}) |write_value| {
@@ -4502,11 +4499,11 @@ test "read/write(Var)PackedInt" {
// Read
const read_value1 = readPackedInt(PackedType, asBytes(&value), offset, native_endian);
- try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset))));
+ try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset)))))));
// Write
writePackedInt(PackedType, asBytes(&value), offset, write_value, native_endian);
- try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset))));
+ try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset)))))));
// Read again
const read_value2 = readPackedInt(PackedType, asBytes(&value), offset, native_endian);
@@ -4515,9 +4512,9 @@ test "read/write(Var)PackedInt" {
// Verify bits outside of the target integer are unmodified
const diff_bits = init_value ^ value;
if (offset != offset_at_end)
- try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0);
+ try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0);
if (offset != 0)
- try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0);
+ try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0);
}
{ // Fixed-size Read/Write (Foreign-endian)
@@ -4527,11 +4524,11 @@ test "read/write(Var)PackedInt" {
// Read
const read_value1 = readPackedInt(PackedType, asBytes(&value), offset, foreign_endian);
- try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset))));
+ try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset)))))));
// Write
writePackedInt(PackedType, asBytes(&value), offset, write_value, foreign_endian);
- try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset))));
+ try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset)))))));
// Read again
const read_value2 = readPackedInt(PackedType, asBytes(&value), offset, foreign_endian);
@@ -4540,9 +4537,9 @@ test "read/write(Var)PackedInt" {
// Verify bits outside of the target integer are unmodified
const diff_bits = init_value ^ @byteSwap(value);
if (offset != offset_at_end)
- try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0);
+ try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0);
if (offset != 0)
- try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0);
+ try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0);
}
const signedness = @typeInfo(PackedType).Int.signedness;
@@ -4559,11 +4556,11 @@ test "read/write(Var)PackedInt" {
// Read
const read_value1 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), native_endian, signedness);
- try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset))));
+ try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset)))))));
// Write
writeVarPackedInt(asBytes(&value), offset, @bitSizeOf(PackedType), @as(U, write_value), native_endian);
- try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset))));
+ try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset)))))));
// Read again
const read_value2 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), native_endian, signedness);
@@ -4572,9 +4569,9 @@ test "read/write(Var)PackedInt" {
// Verify bits outside of the target integer are unmodified
const diff_bits = init_value ^ value;
if (offset != offset_at_end)
- try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0);
+ try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0);
if (offset != 0)
- try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0);
+ try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0);
}
{ // Variable-size Read/Write (Foreign-endian)
@@ -4587,11 +4584,11 @@ test "read/write(Var)PackedInt" {
// Read
const read_value1 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), foreign_endian, signedness);
- try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset))));
+ try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset)))))));
// Write
writeVarPackedInt(asBytes(&value), offset, @bitSizeOf(PackedType), @as(U, write_value), foreign_endian);
- try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset))));
+ try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset)))))));
// Read again
const read_value2 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), foreign_endian, signedness);
@@ -4600,9 +4597,9 @@ test "read/write(Var)PackedInt" {
// Verify bits outside of the target integer are unmodified
const diff_bits = init_value ^ @byteSwap(value);
if (offset != offset_at_end)
- try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0);
+ try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0);
if (offset != 0)
- try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0);
+ try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0);
}
}
}
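Taken together, the mem.zig hunks above are one mechanical rewrite: the two-argument @intCast(T, x), @truncate(T, x) and @bitCast(T, x) become single-argument builtins that take their destination from the result location, spelled @as(T, @intCast(x)) where no annotated result exists. A minimal sketch of the pattern, assuming a Zig 0.11-era compiler; the test below is illustrative and not part of the diff:

const std = @import("std");

test "inferred-destination integer casts (sketch)" {
    const value: u32 = 0xABCD;

    // Old: @truncate(u8, value). New: the u8 comes from the declared result.
    const low: u8 = @truncate(value);
    try std.testing.expectEqual(@as(u8, 0xCD), low);

    // Shift amounts still need the log2 type; @as names the destination when
    // there is no annotated result to infer it from.
    const amount: u32 = 8;
    const high = value >> @as(u5, @intCast(amount));
    try std.testing.expectEqual(@as(u32, 0xAB), high);
}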
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index 301480f662..214a6443d2 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -101,7 +101,7 @@ pub inline fn rawFree(self: Allocator, buf: []u8, log2_buf_align: u8, ret_addr:
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
pub fn create(self: Allocator, comptime T: type) Error!*T {
- if (@sizeOf(T) == 0) return @ptrFromInt(*T, math.maxInt(usize));
+ if (@sizeOf(T) == 0) return @as(*T, @ptrFromInt(math.maxInt(usize)));
const slice = try self.allocAdvancedWithRetAddr(T, null, 1, @returnAddress());
return &slice[0];
}
@@ -112,7 +112,7 @@ pub fn destroy(self: Allocator, ptr: anytype) void {
const info = @typeInfo(@TypeOf(ptr)).Pointer;
const T = info.child;
if (@sizeOf(T) == 0) return;
- const non_const_ptr = @ptrCast([*]u8, @constCast(ptr));
+ const non_const_ptr = @as([*]u8, @ptrCast(@constCast(ptr)));
self.rawFree(non_const_ptr[0..@sizeOf(T)], math.log2(info.alignment), @returnAddress());
}
@@ -209,15 +209,15 @@ pub fn allocAdvancedWithRetAddr(
if (n == 0) {
const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), a);
- return @ptrFromInt([*]align(a) T, ptr)[0..0];
+ return @as([*]align(a) T, @ptrFromInt(ptr))[0..0];
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_ptr = self.rawAlloc(byte_count, log2a(a), return_address) orelse return Error.OutOfMemory;
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(byte_ptr[0..byte_count], undefined);
- const byte_slice = byte_ptr[0..byte_count];
- return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
+ const byte_slice: []align(a) u8 = @alignCast(byte_ptr[0..byte_count]);
+ return mem.bytesAsSlice(T, byte_slice);
}
/// Requests to modify the size of an allocation. It is guaranteed to not move
@@ -268,7 +268,7 @@ pub fn reallocAdvanced(
if (new_n == 0) {
self.free(old_mem);
const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), Slice.alignment);
- return @ptrFromInt([*]align(Slice.alignment) T, ptr)[0..0];
+ return @as([*]align(Slice.alignment) T, @ptrFromInt(ptr))[0..0];
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
@@ -276,7 +276,8 @@ pub fn reallocAdvanced(
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
if (mem.isAligned(@intFromPtr(old_byte_slice.ptr), Slice.alignment)) {
if (self.rawResize(old_byte_slice, log2a(Slice.alignment), byte_count, return_address)) {
- return mem.bytesAsSlice(T, @alignCast(Slice.alignment, old_byte_slice.ptr[0..byte_count]));
+ const new_bytes: []align(Slice.alignment) u8 = @alignCast(old_byte_slice.ptr[0..byte_count]);
+ return mem.bytesAsSlice(T, new_bytes);
}
}
@@ -288,7 +289,8 @@ pub fn reallocAdvanced(
@memset(old_byte_slice, undefined);
self.rawFree(old_byte_slice, log2a(Slice.alignment), return_address);
- return mem.bytesAsSlice(T, @alignCast(Slice.alignment, new_mem[0..byte_count]));
+ const new_bytes: []align(Slice.alignment) u8 = @alignCast(new_mem[0..byte_count]);
+ return mem.bytesAsSlice(T, new_bytes);
}
/// Free an array allocated with `alloc`. To free a single item,
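Allocator.zig's zero-size paths show the same change for @ptrFromInt: the pointer type moves out of the argument list and into @as or the return type, as in @as(*T, @ptrFromInt(math.maxInt(usize))). A minimal sketch of that spelling; danglingPtr is an assumed name, not std API:

const std = @import("std");

// Produces a well-aligned, non-null pointer that must never be dereferenced,
// in the spirit of how Allocator.create handles zero-sized types. Sketch only.
fn danglingPtr(comptime T: type) *T {
    // The destination *T is inferred from the return type.
    return @ptrFromInt(std.mem.alignBackward(usize, std.math.maxInt(usize), @alignOf(T)));
}

test "ptrFromInt with inferred result type (sketch)" {
    const p = danglingPtr(u64);
    try std.testing.expect(@intFromPtr(p) % @alignOf(u64) == 0);
}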
diff --git a/lib/std/meta.zig b/lib/std/meta.zig
index fedbd1a40d..8fe0aee9fb 100644
--- a/lib/std/meta.zig
+++ b/lib/std/meta.zig
@@ -185,18 +185,18 @@ pub fn sentinel(comptime T: type) ?Elem(T) {
switch (@typeInfo(T)) {
.Array => |info| {
const sentinel_ptr = info.sentinel orelse return null;
- return @ptrCast(*const info.child, sentinel_ptr).*;
+ return @as(*const info.child, @ptrCast(sentinel_ptr)).*;
},
.Pointer => |info| {
switch (info.size) {
.Many, .Slice => {
const sentinel_ptr = info.sentinel orelse return null;
- return @ptrCast(*align(1) const info.child, sentinel_ptr).*;
+ return @as(*align(1) const info.child, @ptrCast(sentinel_ptr)).*;
},
.One => switch (@typeInfo(info.child)) {
.Array => |array_info| {
const sentinel_ptr = array_info.sentinel orelse return null;
- return @ptrCast(*align(1) const array_info.child, sentinel_ptr).*;
+ return @as(*align(1) const array_info.child, @ptrCast(sentinel_ptr)).*;
},
else => {},
},
@@ -241,7 +241,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
.Array = .{
.len = array_info.len,
.child = array_info.child,
- .sentinel = @ptrCast(?*const anyopaque, &sentinel_val),
+ .sentinel = @as(?*const anyopaque, @ptrCast(&sentinel_val)),
},
}),
.is_allowzero = info.is_allowzero,
@@ -259,7 +259,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
.address_space = info.address_space,
.child = info.child,
.is_allowzero = info.is_allowzero,
- .sentinel = @ptrCast(?*const anyopaque, &sentinel_val),
+ .sentinel = @as(?*const anyopaque, @ptrCast(&sentinel_val)),
},
}),
else => {},
@@ -277,7 +277,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
.address_space = ptr_info.address_space,
.child = ptr_info.child,
.is_allowzero = ptr_info.is_allowzero,
- .sentinel = @ptrCast(?*const anyopaque, &sentinel_val),
+ .sentinel = @as(?*const anyopaque, @ptrCast(&sentinel_val)),
},
}),
},
@@ -929,8 +929,8 @@ test "intToEnum with error return" {
try testing.expect(intToEnum(E1, zero) catch unreachable == E1.A);
try testing.expect(intToEnum(E2, one) catch unreachable == E2.B);
try testing.expect(intToEnum(E3, zero) catch unreachable == E3.A);
- try testing.expect(intToEnum(E3, 127) catch unreachable == @enumFromInt(E3, 127));
- try testing.expect(intToEnum(E3, -128) catch unreachable == @enumFromInt(E3, -128));
+ try testing.expect(intToEnum(E3, 127) catch unreachable == @as(E3, @enumFromInt(127)));
+ try testing.expect(intToEnum(E3, -128) catch unreachable == @as(E3, @enumFromInt(-128)));
try testing.expectError(error.InvalidEnumTag, intToEnum(E1, one));
try testing.expectError(error.InvalidEnumTag, intToEnum(E3, 128));
try testing.expectError(error.InvalidEnumTag, intToEnum(E3, -129));
@@ -943,7 +943,7 @@ pub fn intToEnum(comptime EnumTag: type, tag_int: anytype) IntToEnumError!EnumTa
if (!enum_info.is_exhaustive) {
if (std.math.cast(enum_info.tag_type, tag_int)) |tag| {
- return @enumFromInt(EnumTag, tag);
+ return @as(EnumTag, @enumFromInt(tag));
}
return error.InvalidEnumTag;
}
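meta.zig's intToEnum and sentinel helpers show @enumFromInt and @ptrCast losing their type argument in the same way. A sketch for the enum case, using an assumed Color enum rather than anything from the diff:

const std = @import("std");

const Color = enum(u8) { red, green, blue };

test "enumFromInt with inferred result type (sketch)" {
    const raw: u8 = 2;
    // The annotated result supplies the destination enum type.
    const c: Color = @enumFromInt(raw);
    try std.testing.expectEqual(Color.blue, c);
    // The @as spelling used throughout the diff performs the same conversion.
    try std.testing.expect(@as(Color, @enumFromInt(raw)) == .blue);
}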
diff --git a/lib/std/meta/trailer_flags.zig b/lib/std/meta/trailer_flags.zig
index cf37fc5adf..d028f88066 100644
--- a/lib/std/meta/trailer_flags.zig
+++ b/lib/std/meta/trailer_flags.zig
@@ -72,7 +72,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
pub fn setMany(self: Self, p: [*]align(@alignOf(Fields)) u8, fields: FieldValues) void {
inline for (@typeInfo(Fields).Struct.fields, 0..) |field, i| {
if (@field(fields, field.name)) |value|
- self.set(p, @enumFromInt(FieldEnum, i), value);
+ self.set(p, @as(FieldEnum, @enumFromInt(i)), value);
}
}
@@ -89,14 +89,14 @@ pub fn TrailerFlags(comptime Fields: type) type {
if (@sizeOf(Field(field)) == 0)
return undefined;
const off = self.offset(field);
- return @ptrCast(*Field(field), @alignCast(@alignOf(Field(field)), p + off));
+ return @ptrCast(@alignCast(p + off));
}
pub fn ptrConst(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime field: FieldEnum) *const Field(field) {
if (@sizeOf(Field(field)) == 0)
return undefined;
const off = self.offset(field);
- return @ptrCast(*const Field(field), @alignCast(@alignOf(Field(field)), p + off));
+ return @ptrCast(@alignCast(p + off));
}
pub fn offset(self: Self, comptime field: FieldEnum) usize {
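trailer_flags.zig collapses @ptrCast(*Field(field), @alignCast(@alignOf(Field(field)), p + off)) to a bare @ptrCast(@alignCast(p + off)): both builtins read the declared return type, so neither the pointer type nor its alignment has to be repeated. A sketch of the same shape; fieldPtr is an assumed helper:

const std = @import("std");

// Both casts infer their destination (and its alignment) from the *const T
// return type. Illustrative helper, not from the diff.
fn fieldPtr(comptime T: type, base: [*]const u8, offset: usize) *const T {
    return @ptrCast(@alignCast(base + offset));
}

test "ptrCast and alignCast from the return type (sketch)" {
    const S = extern struct { a: u32, b: u64 };
    const s = S{ .a = 1, .b = 2 };
    const base: [*]const u8 = @ptrCast(&s);
    try std.testing.expectEqual(@as(u64, 2), fieldPtr(u64, base, @offsetOf(S, "b")).*);
}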
diff --git a/lib/std/meta/trait.zig b/lib/std/meta/trait.zig
index 3d0e0bce5d..e00fac261c 100644
--- a/lib/std/meta/trait.zig
+++ b/lib/std/meta/trait.zig
@@ -237,7 +237,7 @@ pub fn isManyItemPtr(comptime T: type) bool {
test "isManyItemPtr" {
const array = [_]u8{0} ** 10;
- const mip = @ptrCast([*]const u8, &array[0]);
+ const mip = @as([*]const u8, @ptrCast(&array[0]));
try testing.expect(isManyItemPtr(@TypeOf(mip)));
try testing.expect(!isManyItemPtr(@TypeOf(array)));
try testing.expect(!isManyItemPtr(@TypeOf(array[0..1])));
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index 26ba6cc919..ffbff62da2 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -78,7 +78,7 @@ pub fn MultiArrayList(comptime T: type) type {
const casted_ptr: [*]F = if (@sizeOf(F) == 0)
undefined
else
- @ptrCast([*]F, @alignCast(@alignOf(F), byte_ptr));
+ @ptrCast(@alignCast(byte_ptr));
return casted_ptr[0..self.len];
}
@@ -89,14 +89,14 @@ pub fn MultiArrayList(comptime T: type) type {
else => unreachable,
};
inline for (fields, 0..) |field_info, i| {
- self.items(@enumFromInt(Field, i))[index] = @field(e, field_info.name);
+ self.items(@as(Field, @enumFromInt(i)))[index] = @field(e, field_info.name);
}
}
pub fn get(self: Slice, index: usize) T {
var result: Elem = undefined;
inline for (fields, 0..) |field_info, i| {
- @field(result, field_info.name) = self.items(@enumFromInt(Field, i))[index];
+ @field(result, field_info.name) = self.items(@as(Field, @enumFromInt(i)))[index];
}
return switch (@typeInfo(T)) {
.Struct => result,
@@ -110,10 +110,9 @@ pub fn MultiArrayList(comptime T: type) type {
return .{};
}
const unaligned_ptr = self.ptrs[sizes.fields[0]];
- const aligned_ptr = @alignCast(@alignOf(Elem), unaligned_ptr);
- const casted_ptr = @ptrCast([*]align(@alignOf(Elem)) u8, aligned_ptr);
+ const aligned_ptr: [*]align(@alignOf(Elem)) u8 = @alignCast(unaligned_ptr);
return .{
- .bytes = casted_ptr,
+ .bytes = aligned_ptr,
.len = self.len,
.capacity = self.capacity,
};
@@ -294,7 +293,7 @@ pub fn MultiArrayList(comptime T: type) type {
};
const slices = self.slice();
inline for (fields, 0..) |field_info, field_index| {
- const field_slice = slices.items(@enumFromInt(Field, field_index));
+ const field_slice = slices.items(@as(Field, @enumFromInt(field_index)));
var i: usize = self.len - 1;
while (i > index) : (i -= 1) {
field_slice[i] = field_slice[i - 1];
@@ -309,7 +308,7 @@ pub fn MultiArrayList(comptime T: type) type {
pub fn swapRemove(self: *Self, index: usize) void {
const slices = self.slice();
inline for (fields, 0..) |_, i| {
- const field_slice = slices.items(@enumFromInt(Field, i));
+ const field_slice = slices.items(@as(Field, @enumFromInt(i)));
field_slice[index] = field_slice[self.len - 1];
field_slice[self.len - 1] = undefined;
}
@@ -321,7 +320,7 @@ pub fn MultiArrayList(comptime T: type) type {
pub fn orderedRemove(self: *Self, index: usize) void {
const slices = self.slice();
inline for (fields, 0..) |_, field_index| {
- const field_slice = slices.items(@enumFromInt(Field, field_index));
+ const field_slice = slices.items(@as(Field, @enumFromInt(field_index)));
var i = index;
while (i < self.len - 1) : (i += 1) {
field_slice[i] = field_slice[i + 1];
@@ -358,7 +357,7 @@ pub fn MultiArrayList(comptime T: type) type {
const self_slice = self.slice();
inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
- const field = @enumFromInt(Field, i);
+ const field = @as(Field, @enumFromInt(i));
const dest_slice = self_slice.items(field)[new_len..];
// We use memset here for more efficient codegen in safety-checked,
// valgrind-enabled builds. Otherwise the valgrind client request
@@ -379,7 +378,7 @@ pub fn MultiArrayList(comptime T: type) type {
const other_slice = other.slice();
inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
- const field = @enumFromInt(Field, i);
+ const field = @as(Field, @enumFromInt(i));
@memcpy(other_slice.items(field), self_slice.items(field));
}
}
@@ -440,7 +439,7 @@ pub fn MultiArrayList(comptime T: type) type {
const other_slice = other.slice();
inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
- const field = @enumFromInt(Field, i);
+ const field = @as(Field, @enumFromInt(i));
@memcpy(other_slice.items(field), self_slice.items(field));
}
}
@@ -459,7 +458,7 @@ pub fn MultiArrayList(comptime T: type) type {
const result_slice = result.slice();
inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
- const field = @enumFromInt(Field, i);
+ const field = @as(Field, @enumFromInt(i));
@memcpy(result_slice.items(field), self_slice.items(field));
}
}
@@ -476,7 +475,7 @@ pub fn MultiArrayList(comptime T: type) type {
pub fn swap(sc: @This(), a_index: usize, b_index: usize) void {
inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
- const field = @enumFromInt(Field, i);
+ const field = @as(Field, @enumFromInt(i));
const ptr = sc.slice.items(field);
mem.swap(field_info.type, &ptr[a_index], &ptr[b_index]);
}
@@ -592,9 +591,9 @@ test "basic usage" {
var i: usize = 0;
while (i < 6) : (i += 1) {
try list.append(ally, .{
- .a = @intCast(u32, 4 + i),
+ .a = @as(u32, @intCast(4 + i)),
.b = "whatever",
- .c = @intCast(u8, 'd' + i),
+ .c = @as(u8, @intCast('d' + i)),
});
}
@@ -791,7 +790,7 @@ test "union" {
// Add 6 more things to force a capacity increase.
for (0..6) |i| {
- try list.append(ally, .{ .a = @intCast(u32, 4 + i) });
+ try list.append(ally, .{ .a = @as(u32, @intCast(4 + i)) });
}
try testing.expectEqualSlices(
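multi_array_list.zig's conversion to a byte view uses the other common spelling for @alignCast: instead of passing the alignment, the new code routes the pointer through a local whose annotation ([*]align(@alignOf(Elem)) u8) carries it. A sketch of that idiom with an assumed helper:

const std = @import("std");

// firstWord is illustrative, not part of std; the annotation on `aligned`
// is what @alignCast takes its target alignment from.
fn firstWord(bytes: []const u8) u32 {
    const aligned: []align(@alignOf(u32)) const u8 = @alignCast(bytes);
    return std.mem.bytesAsSlice(u32, aligned)[0];
}

test "alignCast via an annotated local (sketch)" {
    const words = [_]u32{ 42, 7 };
    try std.testing.expectEqual(@as(u32, 42), firstWord(std.mem.sliceAsBytes(&words)));
}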
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 0f8ecbf21e..af291f6414 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -137,8 +137,8 @@ pub const Address = extern union {
/// on the address family.
pub fn initPosix(addr: *align(4) const os.sockaddr) Address {
switch (addr.family) {
- os.AF.INET => return Address{ .in = Ip4Address{ .sa = @ptrCast(*const os.sockaddr.in, addr).* } },
- os.AF.INET6 => return Address{ .in6 = Ip6Address{ .sa = @ptrCast(*const os.sockaddr.in6, addr).* } },
+ os.AF.INET => return Address{ .in = Ip4Address{ .sa = @as(*const os.sockaddr.in, @ptrCast(addr)).* } },
+ os.AF.INET6 => return Address{ .in6 = Ip6Address{ .sa = @as(*const os.sockaddr.in6, @ptrCast(addr)).* } },
else => unreachable,
}
}
@@ -165,8 +165,8 @@ pub const Address = extern union {
}
pub fn eql(a: Address, b: Address) bool {
- const a_bytes = @ptrCast([*]const u8, &a.any)[0..a.getOsSockLen()];
- const b_bytes = @ptrCast([*]const u8, &b.any)[0..b.getOsSockLen()];
+ const a_bytes = @as([*]const u8, @ptrCast(&a.any))[0..a.getOsSockLen()];
+ const b_bytes = @as([*]const u8, @ptrCast(&b.any))[0..b.getOsSockLen()];
return mem.eql(u8, a_bytes, b_bytes);
}
@@ -187,7 +187,7 @@ pub const Address = extern union {
// provide the full buffer size (e.g. getsockname, getpeername, recvfrom, accept).
//
// To access the path, std.mem.sliceTo(&address.un.path, 0) should be used.
- return @intCast(os.socklen_t, @sizeOf(os.sockaddr.un));
+ return @as(os.socklen_t, @intCast(@sizeOf(os.sockaddr.un)));
},
else => unreachable,
@@ -260,7 +260,7 @@ pub const Ip4Address = extern struct {
return Ip4Address{
.sa = os.sockaddr.in{
.port = mem.nativeToBig(u16, port),
- .addr = @ptrCast(*align(1) const u32, &addr).*,
+ .addr = @as(*align(1) const u32, @ptrCast(&addr)).*,
},
};
}
@@ -285,7 +285,7 @@ pub const Ip4Address = extern struct {
) !void {
if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self);
_ = options;
- const bytes = @ptrCast(*const [4]u8, &self.sa.addr);
+ const bytes = @as(*const [4]u8, @ptrCast(&self.sa.addr));
try std.fmt.format(out_stream, "{}.{}.{}.{}:{}", .{
bytes[0],
bytes[1],
@@ -354,9 +354,9 @@ pub const Ip6Address = extern struct {
if (index == 14) {
return error.InvalidEnd;
}
- ip_slice[index] = @truncate(u8, x >> 8);
+ ip_slice[index] = @as(u8, @truncate(x >> 8));
index += 1;
- ip_slice[index] = @truncate(u8, x);
+ ip_slice[index] = @as(u8, @truncate(x));
index += 1;
x = 0;
@@ -408,13 +408,13 @@ pub const Ip6Address = extern struct {
}
if (index == 14) {
- ip_slice[14] = @truncate(u8, x >> 8);
- ip_slice[15] = @truncate(u8, x);
+ ip_slice[14] = @as(u8, @truncate(x >> 8));
+ ip_slice[15] = @as(u8, @truncate(x));
return result;
} else {
- ip_slice[index] = @truncate(u8, x >> 8);
+ ip_slice[index] = @as(u8, @truncate(x >> 8));
index += 1;
- ip_slice[index] = @truncate(u8, x);
+ ip_slice[index] = @as(u8, @truncate(x));
index += 1;
@memcpy(result.sa.addr[16 - index ..][0..index], ip_slice[0..index]);
return result;
@@ -473,9 +473,9 @@ pub const Ip6Address = extern struct {
if (index == 14) {
return error.InvalidEnd;
}
- ip_slice[index] = @truncate(u8, x >> 8);
+ ip_slice[index] = @as(u8, @truncate(x >> 8));
index += 1;
- ip_slice[index] = @truncate(u8, x);
+ ip_slice[index] = @as(u8, @truncate(x));
index += 1;
x = 0;
@@ -542,13 +542,13 @@ pub const Ip6Address = extern struct {
result.sa.scope_id = resolved_scope_id;
if (index == 14) {
- ip_slice[14] = @truncate(u8, x >> 8);
- ip_slice[15] = @truncate(u8, x);
+ ip_slice[14] = @as(u8, @truncate(x >> 8));
+ ip_slice[15] = @as(u8, @truncate(x));
return result;
} else {
- ip_slice[index] = @truncate(u8, x >> 8);
+ ip_slice[index] = @as(u8, @truncate(x >> 8));
index += 1;
- ip_slice[index] = @truncate(u8, x);
+ ip_slice[index] = @as(u8, @truncate(x));
index += 1;
@memcpy(result.sa.addr[16 - index ..][0..index], ip_slice[0..index]);
return result;
@@ -597,7 +597,7 @@ pub const Ip6Address = extern struct {
});
return;
}
- const big_endian_parts = @ptrCast(*align(1) const [8]u16, &self.sa.addr);
+ const big_endian_parts = @as(*align(1) const [8]u16, @ptrCast(&self.sa.addr));
const native_endian_parts = switch (native_endian) {
.Big => big_endian_parts.*,
.Little => blk: {
@@ -668,7 +668,7 @@ fn if_nametoindex(name: []const u8) !u32 {
// TODO investigate if this needs to be integrated with evented I/O.
try os.ioctl_SIOCGIFINDEX(sockfd, &ifr);
- return @bitCast(u32, ifr.ifru.ivalue);
+ return @as(u32, @bitCast(ifr.ifru.ivalue));
}
if (comptime builtin.target.os.tag.isDarwin()) {
@@ -682,7 +682,7 @@ fn if_nametoindex(name: []const u8) !u32 {
const index = os.system.if_nametoindex(if_slice);
if (index == 0)
return error.InterfaceNotFound;
- return @bitCast(u32, index);
+ return @as(u32, @bitCast(index));
}
@compileError("std.net.if_nametoindex unimplemented for this OS");
@@ -804,8 +804,8 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get
var first = true;
while (true) {
const rc = ws2_32.getaddrinfo(name_c.ptr, port_c.ptr, &hints, &res);
- switch (@enumFromInt(os.windows.ws2_32.WinsockError, @intCast(u16, rc))) {
- @enumFromInt(os.windows.ws2_32.WinsockError, 0) => break,
+ switch (@as(os.windows.ws2_32.WinsockError, @enumFromInt(@as(u16, @intCast(rc))))) {
+ @as(os.windows.ws2_32.WinsockError, @enumFromInt(0)) => break,
.WSATRY_AGAIN => return error.TemporaryNameServerFailure,
.WSANO_RECOVERY => return error.NameServerFailure,
.WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported,
@@ -841,7 +841,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get
var i: usize = 0;
while (it) |info| : (it = info.next) {
const addr = info.addr orelse continue;
- result.addrs[i] = Address.initPosix(@alignCast(4, addr));
+ result.addrs[i] = Address.initPosix(@alignCast(addr));
if (info.canonname) |n| {
if (result.canon_name == null) {
@@ -874,7 +874,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get
};
var res: ?*os.addrinfo = null;
switch (sys.getaddrinfo(name_c.ptr, port_c.ptr, &hints, &res)) {
- @enumFromInt(sys.EAI, 0) => {},
+ @as(sys.EAI, @enumFromInt(0)) => {},
.ADDRFAMILY => return error.HostLacksNetworkAddresses,
.AGAIN => return error.TemporaryNameServerFailure,
.BADFLAGS => unreachable, // Invalid hints
@@ -908,7 +908,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get
var i: usize = 0;
while (it) |info| : (it = info.next) {
const addr = info.addr orelse continue;
- result.addrs[i] = Address.initPosix(@alignCast(4, addr));
+ result.addrs[i] = Address.initPosix(@alignCast(addr));
if (info.canonname) |n| {
if (result.canon_name == null) {
@@ -1020,7 +1020,7 @@ fn linuxLookupName(
for (addrs.items, 0..) |*addr, i| {
var key: i32 = 0;
var sa6: os.sockaddr.in6 = undefined;
- @memset(@ptrCast([*]u8, &sa6)[0..@sizeOf(os.sockaddr.in6)], 0);
+ @memset(@as([*]u8, @ptrCast(&sa6))[0..@sizeOf(os.sockaddr.in6)], 0);
var da6 = os.sockaddr.in6{
.family = os.AF.INET6,
.scope_id = addr.addr.in6.sa.scope_id,
@@ -1029,7 +1029,7 @@ fn linuxLookupName(
.addr = [1]u8{0} ** 16,
};
var sa4: os.sockaddr.in = undefined;
- @memset(@ptrCast([*]u8, &sa4)[0..@sizeOf(os.sockaddr.in)], 0);
+ @memset(@as([*]u8, @ptrCast(&sa4))[0..@sizeOf(os.sockaddr.in)], 0);
var da4 = os.sockaddr.in{
.family = os.AF.INET,
.port = 65535,
@@ -1042,18 +1042,18 @@ fn linuxLookupName(
var dalen: os.socklen_t = undefined;
if (addr.addr.any.family == os.AF.INET6) {
da6.addr = addr.addr.in6.sa.addr;
- da = @ptrCast(*os.sockaddr, &da6);
+ da = @ptrCast(&da6);
dalen = @sizeOf(os.sockaddr.in6);
- sa = @ptrCast(*os.sockaddr, &sa6);
+ sa = @ptrCast(&sa6);
salen = @sizeOf(os.sockaddr.in6);
} else {
sa6.addr[0..12].* = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff".*;
da6.addr[0..12].* = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff".*;
mem.writeIntNative(u32, da6.addr[12..], addr.addr.in.sa.addr);
da4.addr = addr.addr.in.sa.addr;
- da = @ptrCast(*os.sockaddr, &da4);
+ da = @ptrCast(&da4);
dalen = @sizeOf(os.sockaddr.in);
- sa = @ptrCast(*os.sockaddr, &sa4);
+ sa = @ptrCast(&sa4);
salen = @sizeOf(os.sockaddr.in);
}
const dpolicy = policyOf(da6.addr);
@@ -1070,7 +1070,7 @@ fn linuxLookupName(
os.getsockname(fd, sa, &salen) catch break :syscalls;
if (addr.addr.any.family == os.AF.INET) {
// TODO sa6.addr[12..16] should return *[4]u8, making this cast unnecessary.
- mem.writeIntNative(u32, @ptrCast(*[4]u8, &sa6.addr[12]), sa4.addr);
+ mem.writeIntNative(u32, @as(*[4]u8, @ptrCast(&sa6.addr[12])), sa4.addr);
}
if (dscope == @as(i32, scopeOf(sa6.addr))) key |= DAS_MATCHINGSCOPE;
if (dlabel == labelOf(sa6.addr)) key |= DAS_MATCHINGLABEL;
@@ -1079,7 +1079,7 @@ fn linuxLookupName(
key |= dprec << DAS_PREC_SHIFT;
key |= (15 - dscope) << DAS_SCOPE_SHIFT;
key |= prefixlen << DAS_PREFIX_SHIFT;
- key |= (MAXADDRS - @intCast(i32, i)) << DAS_ORDER_SHIFT;
+ key |= (MAXADDRS - @as(i32, @intCast(i))) << DAS_ORDER_SHIFT;
addr.sortkey = key;
}
mem.sort(LookupAddr, addrs.items, {}, addrCmpLessThan);
@@ -1171,7 +1171,7 @@ fn prefixMatch(s: [16]u8, d: [16]u8) u8 {
// address. However the definition of the source prefix length is
// not clear and thus this limiting is not yet implemented.
var i: u8 = 0;
- while (i < 128 and ((s[i / 8] ^ d[i / 8]) & (@as(u8, 128) >> @intCast(u3, i % 8))) == 0) : (i += 1) {}
+ while (i < 128 and ((s[i / 8] ^ d[i / 8]) & (@as(u8, 128) >> @as(u3, @intCast(i % 8)))) == 0) : (i += 1) {}
return i;
}
@@ -1577,7 +1577,7 @@ fn resMSendRc(
// Get local address and open/bind a socket
var sa: Address = undefined;
- @memset(@ptrCast([*]u8, &sa)[0..@sizeOf(Address)], 0);
+ @memset(@as([*]u8, @ptrCast(&sa))[0..@sizeOf(Address)], 0);
sa.any.family = family;
try os.bind(fd, &sa.any, sl);
@@ -1588,13 +1588,13 @@ fn resMSendRc(
}};
const retry_interval = timeout / attempts;
var next: u32 = 0;
- var t2: u64 = @bitCast(u64, std.time.milliTimestamp());
+ var t2: u64 = @as(u64, @bitCast(std.time.milliTimestamp()));
var t0 = t2;
var t1 = t2 - retry_interval;
var servfail_retry: usize = undefined;
- outer: while (t2 - t0 < timeout) : (t2 = @bitCast(u64, std.time.milliTimestamp())) {
+ outer: while (t2 - t0 < timeout) : (t2 = @as(u64, @bitCast(std.time.milliTimestamp()))) {
if (t2 - t1 >= retry_interval) {
// Query all configured nameservers in parallel
var i: usize = 0;
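net.zig's DNS retry bookkeeping shows @bitCast following suit: @as(u64, @bitCast(std.time.milliTimestamp())) reinterprets the signed timestamp as unsigned, with the destination named by @as instead of a first argument. A short sketch of that reinterpretation:

const std = @import("std");

test "bitCast between same-size integers (sketch)" {
    const signed: i64 = -1;
    // Old: @bitCast(u64, signed). New: the result type is the destination.
    const unsigned: u64 = @bitCast(signed);
    try std.testing.expectEqual(@as(u64, std.math.maxInt(u64)), unsigned);
    // The @as spelling from the diff is the same operation.
    try std.testing.expect(@as(u64, @bitCast(signed)) == std.math.maxInt(u64));
}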
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 872aeef611..2c49bd9f49 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -494,7 +494,7 @@ pub fn getrandom(buffer: []u8) GetRandomError!void {
const res = if (use_c) blk: {
const rc = std.c.getrandom(buf.ptr, buf.len, 0);
break :blk .{
- .num_read = @bitCast(usize, rc),
+ .num_read = @as(usize, @bitCast(rc)),
.err = std.c.getErrno(rc),
};
} else blk: {
@@ -608,7 +608,7 @@ pub fn abort() noreturn {
sigprocmask(SIG.UNBLOCK, &sigabrtmask, null);
// Beyond this point should be unreachable.
- @ptrFromInt(*allowzero volatile u8, 0).* = 0;
+ @as(*allowzero volatile u8, @ptrFromInt(0)).* = 0;
raise(SIG.KILL) catch {};
exit(127); // Pid 1 might not be signalled in some containers.
}
@@ -678,10 +678,10 @@ pub fn exit(status: u8) noreturn {
// exit() is only available if exitBootServices() has not been called yet.
// This call to exit should not fail, so we don't care about its return value.
if (uefi.system_table.boot_services) |bs| {
- _ = bs.exit(uefi.handle, @enumFromInt(uefi.Status, status), 0, null);
+ _ = bs.exit(uefi.handle, @as(uefi.Status, @enumFromInt(status)), 0, null);
}
// If we can't exit, reboot the system instead.
- uefi.system_table.runtime_services.resetSystem(uefi.tables.ResetType.ResetCold, @enumFromInt(uefi.Status, status), 0, null);
+ uefi.system_table.runtime_services.resetSystem(uefi.tables.ResetType.ResetCold, @as(uefi.Status, @enumFromInt(status)), 0, null);
}
system.exit(status);
}
@@ -759,7 +759,7 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
while (true) {
const rc = system.read(fd, buf.ptr, adjusted_len);
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.INTR => continue,
.INVAL => unreachable,
.FAULT => unreachable,
@@ -818,7 +818,7 @@ pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
// TODO handle the case when iov_len is too large and get rid of this @intCast
const rc = system.readv(fd, iov.ptr, iov_count);
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.INTR => continue,
.INVAL => unreachable,
.FAULT => unreachable,
@@ -892,11 +892,11 @@ pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize {
const pread_sym = if (lfs64_abi) system.pread64 else system.pread;
- const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+ const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
while (true) {
const rc = pread_sym(fd, buf.ptr, adjusted_len, ioffset);
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.INTR => continue,
.INVAL => unreachable,
.FAULT => unreachable,
@@ -929,7 +929,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
if (builtin.os.tag == .windows) {
var io_status_block: windows.IO_STATUS_BLOCK = undefined;
var eof_info = windows.FILE_END_OF_FILE_INFORMATION{
- .EndOfFile = @bitCast(windows.LARGE_INTEGER, length),
+ .EndOfFile = @as(windows.LARGE_INTEGER, @bitCast(length)),
};
const rc = windows.ntdll.NtSetInformationFile(
@@ -965,7 +965,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
while (true) {
const ftruncate_sym = if (lfs64_abi) system.ftruncate64 else system.ftruncate;
- const ilen = @bitCast(i64, length); // the OS treats this as unsigned
+ const ilen = @as(i64, @bitCast(length)); // the OS treats this as unsigned
switch (errno(ftruncate_sym(fd, ilen))) {
.SUCCESS => return,
.INTR => continue,
@@ -1001,7 +1001,7 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize {
if (have_pread_but_not_preadv) {
// We could loop here; but proper usage of `preadv` must handle partial reads anyway.
// So we simply read into the first vector only.
- if (iov.len == 0) return @intCast(usize, 0);
+ if (iov.len == 0) return @as(usize, @intCast(0));
const first = iov[0];
return pread(fd, first.iov_base[0..first.iov_len], offset);
}
@@ -1030,11 +1030,11 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize {
const preadv_sym = if (lfs64_abi) system.preadv64 else system.preadv;
- const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+ const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
while (true) {
const rc = preadv_sym(fd, iov.ptr, iov_count, ioffset);
switch (errno(rc)) {
- .SUCCESS => return @bitCast(usize, rc),
+ .SUCCESS => return @as(usize, @bitCast(rc)),
.INTR => continue,
.INVAL => unreachable,
.FAULT => unreachable,
@@ -1143,7 +1143,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
while (true) {
const rc = system.write(fd, bytes.ptr, adjusted_len);
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.INTR => continue,
.INVAL => return error.InvalidArgument,
.FAULT => unreachable,
@@ -1212,11 +1212,11 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
}
}
- const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @intCast(u31, iov.len);
+ const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @as(u31, @intCast(iov.len));
while (true) {
const rc = system.writev(fd, iov.ptr, iov_count);
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.INTR => continue,
.INVAL => return error.InvalidArgument,
.FAULT => unreachable,
@@ -1304,11 +1304,11 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
const pwrite_sym = if (lfs64_abi) system.pwrite64 else system.pwrite;
- const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+ const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
while (true) {
const rc = pwrite_sym(fd, bytes.ptr, adjusted_len, ioffset);
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.INTR => continue,
.INVAL => return error.InvalidArgument,
.FAULT => unreachable,
@@ -1390,12 +1390,12 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usiz
const pwritev_sym = if (lfs64_abi) system.pwritev64 else system.pwritev;
- const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @intCast(u31, iov.len);
- const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+ const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @as(u31, @intCast(iov.len));
+ const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
while (true) {
const rc = pwritev_sym(fd, iov.ptr, iov_count, ioffset);
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.INTR => continue,
.INVAL => return error.InvalidArgument,
.FAULT => unreachable,
@@ -1504,7 +1504,7 @@ pub fn openZ(file_path: [*:0]const u8, flags: u32, perm: mode_t) OpenError!fd_t
while (true) {
const rc = open_sym(file_path, flags, perm);
switch (errno(rc)) {
- .SUCCESS => return @intCast(fd_t, rc),
+ .SUCCESS => return @as(fd_t, @intCast(rc)),
.INTR => continue,
.FAULT => unreachable,
@@ -1653,11 +1653,11 @@ fn openOptionsFromFlagsWasi(fd: fd_t, oflag: u32) OpenError!WasiOpenOptions {
rights &= fsb_cur.fs_rights_inheriting;
return WasiOpenOptions{
- .oflags = @truncate(w.oflags_t, (oflag >> 12)) & 0xfff,
+ .oflags = @as(w.oflags_t, @truncate((oflag >> 12))) & 0xfff,
.lookup_flags = if (oflag & O.NOFOLLOW == 0) w.LOOKUP_SYMLINK_FOLLOW else 0,
.fs_rights_base = rights,
.fs_rights_inheriting = fsb_cur.fs_rights_inheriting,
- .fs_flags = @truncate(w.fdflags_t, oflag & 0xfff),
+ .fs_flags = @as(w.fdflags_t, @truncate(oflag & 0xfff)),
};
}
@@ -1717,7 +1717,7 @@ pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t)
while (true) {
const rc = openat_sym(dir_fd, file_path, flags, mode);
switch (errno(rc)) {
- .SUCCESS => return @intCast(fd_t, rc),
+ .SUCCESS => return @as(fd_t, @intCast(rc)),
.INTR => continue,
.FAULT => unreachable,
@@ -1765,7 +1765,7 @@ pub fn openatW(dir_fd: fd_t, file_path_w: []const u16, flags: u32, mode: mode_t)
pub fn dup(old_fd: fd_t) !fd_t {
const rc = system.dup(old_fd);
return switch (errno(rc)) {
- .SUCCESS => return @intCast(fd_t, rc),
+ .SUCCESS => return @as(fd_t, @intCast(rc)),
.MFILE => error.ProcessFdQuotaExceeded,
.BADF => unreachable, // invalid file descriptor
else => |err| return unexpectedErrno(err),
@@ -2024,7 +2024,7 @@ pub fn getcwd(out_buffer: []u8) GetCwdError![]u8 {
const err = if (builtin.link_libc) blk: {
const c_err = if (std.c.getcwd(out_buffer.ptr, out_buffer.len)) |_| 0 else std.c._errno().*;
- break :blk @enumFromInt(E, c_err);
+ break :blk @as(E, @enumFromInt(c_err));
} else blk: {
break :blk errno(system.getcwd(out_buffer.ptr, out_buffer.len));
};
@@ -2661,12 +2661,12 @@ pub fn renameatW(
const struct_len = @sizeOf(windows.FILE_RENAME_INFORMATION) - 1 + new_path_w.len * 2;
if (struct_len > struct_buf_len) return error.NameTooLong;
- const rename_info = @ptrCast(*windows.FILE_RENAME_INFORMATION, &rename_info_buf);
+ const rename_info = @as(*windows.FILE_RENAME_INFORMATION, @ptrCast(&rename_info_buf));
rename_info.* = .{
.ReplaceIfExists = ReplaceIfExists,
.RootDirectory = if (std.fs.path.isAbsoluteWindowsWTF16(new_path_w)) null else new_dir_fd,
- .FileNameLength = @intCast(u32, new_path_w.len * 2), // already checked error.NameTooLong
+ .FileNameLength = @as(u32, @intCast(new_path_w.len * 2)), // already checked error.NameTooLong
.FileName = undefined,
};
@memcpy(@as([*]u16, &rename_info.FileName)[0..new_path_w.len], new_path_w);
@@ -2677,7 +2677,7 @@ pub fn renameatW(
src_fd,
&io_status_block,
rename_info,
- @intCast(u32, struct_len), // already checked for error.NameTooLong
+ @as(u32, @intCast(struct_len)), // already checked for error.NameTooLong
.FileRenameInformation,
);
@@ -3049,7 +3049,7 @@ pub fn readlinkZ(file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8
}
const rc = system.readlink(file_path, out_buffer.ptr, out_buffer.len);
switch (errno(rc)) {
- .SUCCESS => return out_buffer[0..@bitCast(usize, rc)],
+ .SUCCESS => return out_buffer[0..@as(usize, @bitCast(rc))],
.ACCES => return error.AccessDenied,
.FAULT => unreachable,
.INVAL => return error.NotLink,
@@ -3115,7 +3115,7 @@ pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) Read
}
const rc = system.readlinkat(dirfd, file_path, out_buffer.ptr, out_buffer.len);
switch (errno(rc)) {
- .SUCCESS => return out_buffer[0..@bitCast(usize, rc)],
+ .SUCCESS => return out_buffer[0..@as(usize, @bitCast(rc))],
.ACCES => return error.AccessDenied,
.FAULT => unreachable,
.INVAL => return error.NotLink,
@@ -3227,7 +3227,7 @@ pub fn isatty(handle: fd_t) bool {
if (builtin.os.tag == .linux) {
while (true) {
var wsz: linux.winsize = undefined;
- const fd = @bitCast(usize, @as(isize, handle));
+ const fd = @as(usize, @bitCast(@as(isize, handle)));
const rc = linux.syscall3(.ioctl, fd, linux.T.IOCGWINSZ, @intFromPtr(&wsz));
switch (linux.getErrno(rc)) {
.SUCCESS => return true,
@@ -3271,14 +3271,14 @@ pub fn isCygwinPty(handle: fd_t) bool {
var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = [_]u8{0} ** (name_bytes_offset + num_name_bytes);
var io_status_block: windows.IO_STATUS_BLOCK = undefined;
- const rc = windows.ntdll.NtQueryInformationFile(handle, &io_status_block, &name_info_bytes, @intCast(u32, name_info_bytes.len), .FileNameInformation);
+ const rc = windows.ntdll.NtQueryInformationFile(handle, &io_status_block, &name_info_bytes, @as(u32, @intCast(name_info_bytes.len)), .FileNameInformation);
switch (rc) {
.SUCCESS => {},
.INVALID_PARAMETER => unreachable,
else => return false,
}
- const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]);
+ const name_info = @as(*const windows.FILE_NAME_INFO, @ptrCast(&name_info_bytes[0]));
const name_bytes = name_info_bytes[name_bytes_offset .. name_bytes_offset + @as(usize, name_info.FileNameLength)];
const name_wide = mem.bytesAsSlice(u16, name_bytes);
// Note: The name we get from NtQueryInformationFile will be prefixed with a '\', e.g. \msys-1888ae32e00d56aa-pty0-to-master
@@ -3325,9 +3325,9 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t
else
0;
const rc = try windows.WSASocketW(
- @bitCast(i32, domain),
- @bitCast(i32, filtered_sock_type),
- @bitCast(i32, protocol),
+ @as(i32, @bitCast(domain)),
+ @as(i32, @bitCast(filtered_sock_type)),
+ @as(i32, @bitCast(protocol)),
null,
0,
flags,
@@ -3353,7 +3353,7 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t
const rc = system.socket(domain, filtered_sock_type, protocol);
switch (errno(rc)) {
.SUCCESS => {
- const fd = @intCast(fd_t, rc);
+ const fd = @as(fd_t, @intCast(rc));
if (!have_sock_flags) {
try setSockFlags(fd, socket_type);
}
@@ -3679,7 +3679,7 @@ pub fn accept(
} else {
switch (errno(rc)) {
.SUCCESS => {
- break @intCast(socket_t, rc);
+ break @as(socket_t, @intCast(rc));
},
.INTR => continue,
.AGAIN => return error.WouldBlock,
@@ -3723,7 +3723,7 @@ pub const EpollCreateError = error{
pub fn epoll_create1(flags: u32) EpollCreateError!i32 {
const rc = system.epoll_create1(flags);
switch (errno(rc)) {
- .SUCCESS => return @intCast(i32, rc),
+ .SUCCESS => return @as(i32, @intCast(rc)),
else => |err| return unexpectedErrno(err),
.INVAL => unreachable,
@@ -3782,9 +3782,9 @@ pub fn epoll_ctl(epfd: i32, op: u32, fd: i32, event: ?*linux.epoll_event) EpollC
pub fn epoll_wait(epfd: i32, events: []linux.epoll_event, timeout: i32) usize {
while (true) {
// TODO get rid of the @intCast
- const rc = system.epoll_wait(epfd, events.ptr, @intCast(u32, events.len), timeout);
+ const rc = system.epoll_wait(epfd, events.ptr, @as(u32, @intCast(events.len)), timeout);
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.INTR => continue,
.BADF => unreachable,
.FAULT => unreachable,
@@ -3803,7 +3803,7 @@ pub const EventFdError = error{
pub fn eventfd(initval: u32, flags: u32) EventFdError!i32 {
const rc = system.eventfd(initval, flags);
switch (errno(rc)) {
- .SUCCESS => return @intCast(i32, rc),
+ .SUCCESS => return @as(i32, @intCast(rc)),
else => |err| return unexpectedErrno(err),
.INVAL => unreachable, // invalid parameters
@@ -3937,7 +3937,7 @@ pub const ConnectError = error{
/// return error.WouldBlock when EAGAIN or EINPROGRESS is received.
pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) ConnectError!void {
if (builtin.os.tag == .windows) {
- const rc = windows.ws2_32.connect(sock, sock_addr, @intCast(i32, len));
+ const rc = windows.ws2_32.connect(sock, sock_addr, @as(i32, @intCast(len)));
if (rc == 0) return;
switch (windows.ws2_32.WSAGetLastError()) {
.WSAEADDRINUSE => return error.AddressInUse,
@@ -3992,10 +3992,10 @@ pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) Conne
pub fn getsockoptError(sockfd: fd_t) ConnectError!void {
var err_code: i32 = undefined;
var size: u32 = @sizeOf(u32);
- const rc = system.getsockopt(sockfd, SOL.SOCKET, SO.ERROR, @ptrCast([*]u8, &err_code), &size);
+ const rc = system.getsockopt(sockfd, SOL.SOCKET, SO.ERROR, @as([*]u8, @ptrCast(&err_code)), &size);
assert(size == 4);
switch (errno(rc)) {
- .SUCCESS => switch (@enumFromInt(E, err_code)) {
+ .SUCCESS => switch (@as(E, @enumFromInt(err_code))) {
.SUCCESS => return,
.ACCES => return error.PermissionDenied,
.PERM => return error.PermissionDenied,
@@ -4035,13 +4035,13 @@ pub const WaitPidResult = struct {
pub fn waitpid(pid: pid_t, flags: u32) WaitPidResult {
const Status = if (builtin.link_libc) c_int else u32;
var status: Status = undefined;
- const coerced_flags = if (builtin.link_libc) @intCast(c_int, flags) else flags;
+ const coerced_flags = if (builtin.link_libc) @as(c_int, @intCast(flags)) else flags;
while (true) {
const rc = system.waitpid(pid, &status, coerced_flags);
switch (errno(rc)) {
.SUCCESS => return .{
- .pid = @intCast(pid_t, rc),
- .status = @bitCast(u32, status),
+ .pid = @as(pid_t, @intCast(rc)),
+ .status = @as(u32, @bitCast(status)),
},
.INTR => continue,
.CHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error.
@@ -4054,13 +4054,13 @@ pub fn waitpid(pid: pid_t, flags: u32) WaitPidResult {
pub fn wait4(pid: pid_t, flags: u32, ru: ?*rusage) WaitPidResult {
const Status = if (builtin.link_libc) c_int else u32;
var status: Status = undefined;
- const coerced_flags = if (builtin.link_libc) @intCast(c_int, flags) else flags;
+ const coerced_flags = if (builtin.link_libc) @as(c_int, @intCast(flags)) else flags;
while (true) {
const rc = system.wait4(pid, &status, coerced_flags, ru);
switch (errno(rc)) {
.SUCCESS => return .{
- .pid = @intCast(pid_t, rc),
- .status = @bitCast(u32, status),
+ .pid = @as(pid_t, @intCast(rc)),
+ .status = @as(u32, @bitCast(status)),
},
.INTR => continue,
.CHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error.
@@ -4182,7 +4182,7 @@ pub const KQueueError = error{
pub fn kqueue() KQueueError!i32 {
const rc = system.kqueue();
switch (errno(rc)) {
- .SUCCESS => return @intCast(i32, rc),
+ .SUCCESS => return @as(i32, @intCast(rc)),
.MFILE => return error.ProcessFdQuotaExceeded,
.NFILE => return error.SystemFdQuotaExceeded,
else => |err| return unexpectedErrno(err),
@@ -4223,7 +4223,7 @@ pub fn kevent(
timeout,
);
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.ACCES => return error.AccessDenied,
.FAULT => unreachable,
.BADF => unreachable, // Always a race condition.
@@ -4247,7 +4247,7 @@ pub const INotifyInitError = error{
pub fn inotify_init1(flags: u32) INotifyInitError!i32 {
const rc = system.inotify_init1(flags);
switch (errno(rc)) {
- .SUCCESS => return @intCast(i32, rc),
+ .SUCCESS => return @as(i32, @intCast(rc)),
.INVAL => unreachable,
.MFILE => return error.ProcessFdQuotaExceeded,
.NFILE => return error.SystemFdQuotaExceeded,
@@ -4276,7 +4276,7 @@ pub fn inotify_add_watch(inotify_fd: i32, pathname: []const u8, mask: u32) INoti
pub fn inotify_add_watchZ(inotify_fd: i32, pathname: [*:0]const u8, mask: u32) INotifyAddWatchError!i32 {
const rc = system.inotify_add_watch(inotify_fd, pathname, mask);
switch (errno(rc)) {
- .SUCCESS => return @intCast(i32, rc),
+ .SUCCESS => return @as(i32, @intCast(rc)),
.ACCES => return error.AccessDenied,
.BADF => unreachable,
.FAULT => unreachable,
@@ -4319,7 +4319,7 @@ pub const MProtectError = error{
pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void {
assert(mem.isAligned(memory.len, mem.page_size));
if (builtin.os.tag == .windows) {
- const win_prot: windows.DWORD = switch (@truncate(u3, protection)) {
+ const win_prot: windows.DWORD = switch (@as(u3, @truncate(protection))) {
0b000 => windows.PAGE_NOACCESS,
0b001 => windows.PAGE_READONLY,
0b010 => unreachable, // +w -r not allowed
@@ -4350,7 +4350,7 @@ pub const ForkError = error{SystemResources} || UnexpectedError;
pub fn fork() ForkError!pid_t {
const rc = system.fork();
switch (errno(rc)) {
- .SUCCESS => return @intCast(pid_t, rc),
+ .SUCCESS => return @as(pid_t, @intCast(rc)),
.AGAIN => return error.SystemResources,
.NOMEM => return error.SystemResources,
else => |err| return unexpectedErrno(err),
@@ -4391,14 +4391,14 @@ pub fn mmap(
) MMapError![]align(mem.page_size) u8 {
const mmap_sym = if (lfs64_abi) system.mmap64 else system.mmap;
- const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+ const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
const rc = mmap_sym(ptr, length, prot, flags, fd, ioffset);
const err = if (builtin.link_libc) blk: {
- if (rc != std.c.MAP.FAILED) return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, rc))[0..length];
- break :blk @enumFromInt(E, system._errno().*);
+ if (rc != std.c.MAP.FAILED) return @as([*]align(mem.page_size) u8, @ptrCast(@alignCast(rc)))[0..length];
+ break :blk @as(E, @enumFromInt(system._errno().*));
} else blk: {
const err = errno(rc);
- if (err == .SUCCESS) return @ptrFromInt([*]align(mem.page_size) u8, rc)[0..length];
+ if (err == .SUCCESS) return @as([*]align(mem.page_size) u8, @ptrFromInt(rc))[0..length];
break :blk err;
};
switch (err) {
@@ -4781,7 +4781,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void {
}
if (builtin.os.tag == .wasi and !builtin.link_libc) {
var new_offset: wasi.filesize_t = undefined;
- switch (wasi.fd_seek(fd, @bitCast(wasi.filedelta_t, offset), .SET, &new_offset)) {
+ switch (wasi.fd_seek(fd, @as(wasi.filedelta_t, @bitCast(offset)), .SET, &new_offset)) {
.SUCCESS => return,
.BADF => unreachable, // always a race condition
.INVAL => return error.Unseekable,
@@ -4795,7 +4795,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void {
const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek;
- const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+ const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
switch (errno(lseek_sym(fd, ioffset, SEEK.SET))) {
.SUCCESS => return,
.BADF => unreachable, // always a race condition
@@ -4811,7 +4811,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void {
pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void {
if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
var result: u64 = undefined;
- switch (errno(system.llseek(fd, @bitCast(u64, offset), &result, SEEK.CUR))) {
+ switch (errno(system.llseek(fd, @as(u64, @bitCast(offset)), &result, SEEK.CUR))) {
.SUCCESS => return,
.BADF => unreachable, // always a race condition
.INVAL => return error.Unseekable,
@@ -4839,7 +4839,7 @@ pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void {
}
const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek;
- const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+ const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
switch (errno(lseek_sym(fd, ioffset, SEEK.CUR))) {
.SUCCESS => return,
.BADF => unreachable, // always a race condition
@@ -4855,7 +4855,7 @@ pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void {
pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void {
if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
var result: u64 = undefined;
- switch (errno(system.llseek(fd, @bitCast(u64, offset), &result, SEEK.END))) {
+ switch (errno(system.llseek(fd, @as(u64, @bitCast(offset)), &result, SEEK.END))) {
.SUCCESS => return,
.BADF => unreachable, // always a race condition
.INVAL => return error.Unseekable,
@@ -4883,7 +4883,7 @@ pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void {
}
const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek;
- const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+ const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
switch (errno(lseek_sym(fd, ioffset, SEEK.END))) {
.SUCCESS => return,
.BADF => unreachable, // always a race condition
@@ -4929,7 +4929,7 @@ pub fn lseek_CUR_get(fd: fd_t) SeekError!u64 {
const rc = lseek_sym(fd, 0, SEEK.CUR);
switch (errno(rc)) {
- .SUCCESS => return @bitCast(u64, rc),
+ .SUCCESS => return @as(u64, @bitCast(rc)),
.BADF => unreachable, // always a race condition
.INVAL => return error.Unseekable,
.OVERFLOW => return error.Unseekable,
@@ -4952,7 +4952,7 @@ pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) FcntlError!usize {
while (true) {
const rc = system.fcntl(fd, cmd, arg);
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.INTR => continue,
.AGAIN, .ACCES => return error.Locked,
.BADF => unreachable,
@@ -5122,7 +5122,7 @@ pub fn realpathZ(pathname: [*:0]const u8, out_buffer: *[MAX_PATH_BYTES]u8) RealP
return getFdPath(fd, out_buffer);
}
- const result_path = std.c.realpath(pathname, out_buffer) orelse switch (@enumFromInt(E, std.c._errno().*)) {
+ const result_path = std.c.realpath(pathname, out_buffer) orelse switch (@as(E, @enumFromInt(std.c._errno().*))) {
.SUCCESS => unreachable,
.INVAL => unreachable,
.BADF => unreachable,
@@ -5269,7 +5269,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
};
var i: usize = 0;
while (i < len) {
- const kf: *align(1) system.kinfo_file = @ptrCast(*align(1) system.kinfo_file, &buf[i]);
+ const kf: *align(1) system.kinfo_file = @as(*align(1) system.kinfo_file, @ptrCast(&buf[i]));
if (kf.fd == fd) {
len = mem.indexOfScalar(u8, &kf.path, 0) orelse MAX_PATH_BYTES;
if (len == 0) return error.NameTooLong;
@@ -5277,7 +5277,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
@memcpy(result, kf.path[0..len]);
return result;
}
- i += @intCast(usize, kf.structsize);
+ i += @as(usize, @intCast(kf.structsize));
}
return error.InvalidHandle;
}
@@ -5357,22 +5357,22 @@ pub fn dl_iterate_phdr(
if (builtin.link_libc) {
switch (system.dl_iterate_phdr(struct {
fn callbackC(info: *dl_phdr_info, size: usize, data: ?*anyopaque) callconv(.C) c_int {
- const context_ptr = @ptrCast(*const Context, @alignCast(@alignOf(*const Context), data));
+ const context_ptr: *const Context = @ptrCast(@alignCast(data));
callback(info, size, context_ptr.*) catch |err| return @intFromError(err);
return 0;
}
- }.callbackC, @ptrFromInt(?*anyopaque, @intFromPtr(&context)))) {
+ }.callbackC, @as(?*anyopaque, @ptrFromInt(@intFromPtr(&context))))) {
0 => return,
- else => |err| return @errSetCast(Error, @errorFromInt(@intCast(u16, err))), // TODO don't hardcode u16
+ else => |err| return @as(Error, @errSetCast(@errorFromInt(@as(u16, @intCast(err))))), // TODO don't hardcode u16
}
}
const elf_base = std.process.getBaseAddress();
- const ehdr = @ptrFromInt(*elf.Ehdr, elf_base);
+ const ehdr = @as(*elf.Ehdr, @ptrFromInt(elf_base));
// Make sure the base address points to an ELF image.
assert(mem.eql(u8, ehdr.e_ident[0..4], elf.MAGIC));
const n_phdr = ehdr.e_phnum;
- const phdrs = (@ptrFromInt([*]elf.Phdr, elf_base + ehdr.e_phoff))[0..n_phdr];
+ const phdrs = (@as([*]elf.Phdr, @ptrFromInt(elf_base + ehdr.e_phoff)))[0..n_phdr];
var it = dl.linkmap_iterator(phdrs) catch unreachable;
@@ -5406,12 +5406,12 @@ pub fn dl_iterate_phdr(
var dlpi_phnum: u16 = undefined;
if (entry.l_addr != 0) {
- const elf_header = @ptrFromInt(*elf.Ehdr, entry.l_addr);
- dlpi_phdr = @ptrFromInt([*]elf.Phdr, entry.l_addr + elf_header.e_phoff);
+ const elf_header = @as(*elf.Ehdr, @ptrFromInt(entry.l_addr));
+ dlpi_phdr = @as([*]elf.Phdr, @ptrFromInt(entry.l_addr + elf_header.e_phoff));
dlpi_phnum = elf_header.e_phnum;
} else {
// This is the running ELF image
- dlpi_phdr = @ptrFromInt([*]elf.Phdr, elf_base + ehdr.e_phoff);
+ dlpi_phdr = @as([*]elf.Phdr, @ptrFromInt(elf_base + ehdr.e_phoff));
dlpi_phnum = ehdr.e_phnum;
}
@@ -5433,11 +5433,11 @@ pub const ClockGetTimeError = error{UnsupportedClock} || UnexpectedError;
pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void {
if (builtin.os.tag == .wasi and !builtin.link_libc) {
var ts: timestamp_t = undefined;
- switch (system.clock_time_get(@bitCast(u32, clk_id), 1, &ts)) {
+ switch (system.clock_time_get(@as(u32, @bitCast(clk_id)), 1, &ts)) {
.SUCCESS => {
tp.* = .{
- .tv_sec = @intCast(i64, ts / std.time.ns_per_s),
- .tv_nsec = @intCast(isize, ts % std.time.ns_per_s),
+ .tv_sec = @as(i64, @intCast(ts / std.time.ns_per_s)),
+ .tv_nsec = @as(isize, @intCast(ts % std.time.ns_per_s)),
};
},
.INVAL => return error.UnsupportedClock,
@@ -5453,8 +5453,8 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void {
const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
const ft_per_s = std.time.ns_per_s / 100;
tp.* = .{
- .tv_sec = @intCast(i64, ft64 / ft_per_s) + std.time.epoch.windows,
- .tv_nsec = @intCast(c_long, ft64 % ft_per_s) * 100,
+ .tv_sec = @as(i64, @intCast(ft64 / ft_per_s)) + std.time.epoch.windows,
+ .tv_nsec = @as(c_long, @intCast(ft64 % ft_per_s)) * 100,
};
return;
} else {
@@ -5474,10 +5474,10 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void {
pub fn clock_getres(clk_id: i32, res: *timespec) ClockGetTimeError!void {
if (builtin.os.tag == .wasi and !builtin.link_libc) {
var ts: timestamp_t = undefined;
- switch (system.clock_res_get(@bitCast(u32, clk_id), &ts)) {
+ switch (system.clock_res_get(@as(u32, @bitCast(clk_id)), &ts)) {
.SUCCESS => res.* = .{
- .tv_sec = @intCast(i64, ts / std.time.ns_per_s),
- .tv_nsec = @intCast(isize, ts % std.time.ns_per_s),
+ .tv_sec = @as(i64, @intCast(ts / std.time.ns_per_s)),
+ .tv_nsec = @as(isize, @intCast(ts % std.time.ns_per_s)),
},
.INVAL => return error.UnsupportedClock,
else => |err| return unexpectedErrno(err),
@@ -5747,7 +5747,7 @@ pub fn res_mkquery(
// TODO determine the circumstances for this and whether or
// not this should be an error.
if (j - i - 1 > 62) unreachable;
- q[i - 1] = @intCast(u8, j - i);
+ q[i - 1] = @as(u8, @intCast(j - i));
}
q[i + 1] = ty;
q[i + 3] = class;
@@ -5756,10 +5756,10 @@ pub fn res_mkquery(
var ts: timespec = undefined;
clock_gettime(CLOCK.REALTIME, &ts) catch {};
const UInt = std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(ts.tv_nsec)));
- const unsec = @bitCast(UInt, ts.tv_nsec);
- const id = @truncate(u32, unsec + unsec / 65536);
- q[0] = @truncate(u8, id / 256);
- q[1] = @truncate(u8, id);
+ const unsec = @as(UInt, @bitCast(ts.tv_nsec));
+ const id = @as(u32, @truncate(unsec + unsec / 65536));
+ q[0] = @as(u8, @truncate(id / 256));
+ q[1] = @as(u8, @truncate(id));
@memcpy(buf[0..n], q[0..n]);
return n;
@@ -5865,11 +5865,11 @@ pub fn sendmsg(
else => |err| return windows.unexpectedWSAError(err),
}
} else {
- return @intCast(usize, rc);
+ return @as(usize, @intCast(rc));
}
} else {
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.ACCES => return error.AccessDenied,
.AGAIN => return error.WouldBlock,
@@ -5965,13 +5965,13 @@ pub fn sendto(
.WSANOTINITIALISED => unreachable, // A successful WSAStartup call must occur before using this function.
else => |err| return windows.unexpectedWSAError(err),
},
- else => |rc| return @intCast(usize, rc),
+ else => |rc| return @as(usize, @intCast(rc)),
}
}
while (true) {
const rc = system.sendto(sockfd, buf.ptr, buf.len, flags, dest_addr, addrlen);
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.ACCES => return error.AccessDenied,
.AGAIN => return error.WouldBlock,
@@ -6125,16 +6125,16 @@ pub fn sendfile(
// Here we match BSD behavior, making a zero count value send as many bytes as possible.
const adjusted_count_tmp = if (in_len == 0) max_count else @min(in_len, @as(size_t, max_count));
// TODO we should not need this cast; improve return type of @min
- const adjusted_count = @intCast(usize, adjusted_count_tmp);
+ const adjusted_count = @as(usize, @intCast(adjusted_count_tmp));
const sendfile_sym = if (lfs64_abi) system.sendfile64 else system.sendfile;
while (true) {
- var offset: off_t = @bitCast(off_t, in_offset);
+ var offset: off_t = @as(off_t, @bitCast(in_offset));
const rc = sendfile_sym(out_fd, in_fd, &offset, adjusted_count);
switch (errno(rc)) {
.SUCCESS => {
- const amt = @bitCast(usize, rc);
+ const amt = @as(usize, @bitCast(rc));
total_written += amt;
if (in_len == 0 and amt == 0) {
// We have detected EOF from `in_fd`.
@@ -6209,9 +6209,9 @@ pub fn sendfile(
while (true) {
var sbytes: off_t = undefined;
- const offset = @bitCast(off_t, in_offset);
+ const offset = @as(off_t, @bitCast(in_offset));
const err = errno(system.sendfile(in_fd, out_fd, offset, adjusted_count, hdtr, &sbytes, flags));
- const amt = @bitCast(usize, sbytes);
+ const amt = @as(usize, @bitCast(sbytes));
switch (err) {
.SUCCESS => return amt,
@@ -6286,13 +6286,13 @@ pub fn sendfile(
const adjusted_count_temporary = @min(in_len, @as(u63, max_count));
// TODO we should not need this int cast; improve the return type of `@min`
- const adjusted_count = @intCast(u63, adjusted_count_temporary);
+ const adjusted_count = @as(u63, @intCast(adjusted_count_temporary));
while (true) {
var sbytes: off_t = adjusted_count;
- const signed_offset = @bitCast(i64, in_offset);
+ const signed_offset = @as(i64, @bitCast(in_offset));
const err = errno(system.sendfile(in_fd, out_fd, signed_offset, &sbytes, hdtr, flags));
- const amt = @bitCast(usize, sbytes);
+ const amt = @as(usize, @bitCast(sbytes));
switch (err) {
.SUCCESS => return amt,
@@ -6342,7 +6342,7 @@ pub fn sendfile(
// Here we match BSD behavior, making a zero count value send as many bytes as possible.
const adjusted_count_tmp = if (in_len == 0) buf.len else @min(buf.len, in_len);
// TODO we should not need this cast; improve return type of @min
- const adjusted_count = @intCast(usize, adjusted_count_tmp);
+ const adjusted_count = @as(usize, @intCast(adjusted_count_tmp));
const amt_read = try pread(in_fd, buf[0..adjusted_count], in_offset);
if (amt_read == 0) {
if (in_len == 0) {
@@ -6413,14 +6413,14 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len
std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok) and
has_copy_file_range_syscall.load(.Monotonic)))
{
- var off_in_copy = @bitCast(i64, off_in);
- var off_out_copy = @bitCast(i64, off_out);
+ var off_in_copy = @as(i64, @bitCast(off_in));
+ var off_out_copy = @as(i64, @bitCast(off_out));
while (true) {
const rc = system.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags);
if (builtin.os.tag == .freebsd) {
switch (system.getErrno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.BADF => return error.FilesOpenedWithWrongFlags,
.FBIG => return error.FileTooBig,
.IO => return error.InputOutput,
@@ -6433,7 +6433,7 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len
}
} else { // assume linux
switch (system.getErrno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.BADF => return error.FilesOpenedWithWrongFlags,
.FBIG => return error.FileTooBig,
.IO => return error.InputOutput,
@@ -6486,11 +6486,11 @@ pub fn poll(fds: []pollfd, timeout: i32) PollError!usize {
else => |err| return windows.unexpectedWSAError(err),
}
} else {
- return @intCast(usize, rc);
+ return @as(usize, @intCast(rc));
}
} else {
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.FAULT => unreachable,
.INTR => continue,
.INVAL => unreachable,
@@ -6520,7 +6520,7 @@ pub fn ppoll(fds: []pollfd, timeout: ?*const timespec, mask: ?*const sigset_t) P
const fds_count = math.cast(nfds_t, fds.len) orelse return error.SystemResources;
const rc = system.ppoll(fds.ptr, fds_count, ts_ptr, mask);
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.FAULT => unreachable,
.INTR => return error.SignalInterrupt,
.INVAL => unreachable,
@@ -6585,11 +6585,11 @@ pub fn recvfrom(
else => |err| return windows.unexpectedWSAError(err),
}
} else {
- return @intCast(usize, rc);
+ return @as(usize, @intCast(rc));
}
} else {
switch (errno(rc)) {
- .SUCCESS => return @intCast(usize, rc),
+ .SUCCESS => return @as(usize, @intCast(rc)),
.BADF => unreachable, // always a race condition
.FAULT => unreachable,
.INVAL => unreachable,
@@ -6681,7 +6681,7 @@ pub const SetSockOptError = error{
/// Set a socket's options.
pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSockOptError!void {
if (builtin.os.tag == .windows) {
- const rc = windows.ws2_32.setsockopt(fd, @intCast(i32, level), @intCast(i32, optname), opt.ptr, @intCast(i32, opt.len));
+ const rc = windows.ws2_32.setsockopt(fd, @as(i32, @intCast(level)), @as(i32, @intCast(optname)), opt.ptr, @as(i32, @intCast(opt.len)));
if (rc == windows.ws2_32.SOCKET_ERROR) {
switch (windows.ws2_32.WSAGetLastError()) {
.WSANOTINITIALISED => unreachable,
@@ -6694,7 +6694,7 @@ pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSo
}
return;
} else {
- switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @intCast(socklen_t, opt.len)))) {
+ switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @as(socklen_t, @intCast(opt.len))))) {
.SUCCESS => {},
.BADF => unreachable, // always a race condition
.NOTSOCK => unreachable, // always a race condition
@@ -6731,7 +6731,7 @@ pub fn memfd_createZ(name: [*:0]const u8, flags: u32) MemFdCreateError!fd_t {
const getErrno = if (use_c) std.c.getErrno else linux.getErrno;
const rc = sys.memfd_create(name, flags);
switch (getErrno(rc)) {
- .SUCCESS => return @intCast(fd_t, rc),
+ .SUCCESS => return @as(fd_t, @intCast(rc)),
.FAULT => unreachable, // name has invalid memory
.INVAL => unreachable, // name/flags are faulty
.NFILE => return error.SystemFdQuotaExceeded,
@@ -6881,7 +6881,7 @@ pub fn ioctl_SIOCGIFINDEX(fd: fd_t, ifr: *ifreq) IoCtl_SIOCGIFINDEX_Error!void {
pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t {
const rc = system.signalfd(fd, mask, flags);
switch (errno(rc)) {
- .SUCCESS => return @intCast(fd_t, rc),
+ .SUCCESS => return @as(fd_t, @intCast(rc)),
.BADF, .INVAL => unreachable,
.NFILE => return error.SystemFdQuotaExceeded,
.NOMEM => return error.SystemResources,
@@ -6989,7 +6989,7 @@ pub fn prctl(option: PR, args: anytype) PrctlError!u31 {
const rc = system.prctl(@intFromEnum(option), buf[0], buf[1], buf[2], buf[3]);
switch (errno(rc)) {
- .SUCCESS => return @intCast(u31, rc),
+ .SUCCESS => return @as(u31, @intCast(rc)),
.ACCES => return error.AccessDenied,
.BADF => return error.InvalidFileDescriptor,
.FAULT => return error.InvalidAddress,
@@ -7170,7 +7170,7 @@ pub fn perf_event_open(
) PerfEventOpenError!fd_t {
const rc = system.perf_event_open(attr, pid, cpu, group_fd, flags);
switch (errno(rc)) {
- .SUCCESS => return @intCast(fd_t, rc),
+ .SUCCESS => return @as(fd_t, @intCast(rc)),
.@"2BIG" => return error.TooBig,
.ACCES => return error.PermissionDenied,
.BADF => unreachable, // group_fd file descriptor is not valid.
@@ -7205,7 +7205,7 @@ pub const TimerFdSetError = TimerFdGetError || error{Canceled};
pub fn timerfd_create(clockid: i32, flags: u32) TimerFdCreateError!fd_t {
var rc = linux.timerfd_create(clockid, flags);
return switch (errno(rc)) {
- .SUCCESS => @intCast(fd_t, rc),
+ .SUCCESS => @as(fd_t, @intCast(rc)),
.INVAL => unreachable,
.MFILE => return error.ProcessFdQuotaExceeded,
.NFILE => return error.SystemFdQuotaExceeded,
@@ -7267,7 +7267,7 @@ pub fn ptrace(request: u32, pid: pid_t, addr: usize, signal: usize) PtraceError!
.macos, .ios, .tvos, .watchos => switch (errno(darwin.ptrace(
math.cast(i32, request) orelse return error.Overflow,
pid,
- @ptrFromInt(?[*]u8, addr),
+ @as(?[*]u8, @ptrFromInt(addr)),
math.cast(i32, signal) orelse return error.Overflow,
))) {
.SUCCESS => {},
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index b7ec29383b..6362e9ece1 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -175,62 +175,62 @@ const require_aligned_register_pair =
// Split a 64-bit value into a {LSB,MSB} pair.
// The LE/BE variants specify the endianness to assume.
fn splitValueLE64(val: i64) [2]u32 {
- const u = @bitCast(u64, val);
+ const u = @as(u64, @bitCast(val));
return [2]u32{
- @truncate(u32, u),
- @truncate(u32, u >> 32),
+ @as(u32, @truncate(u)),
+ @as(u32, @truncate(u >> 32)),
};
}
fn splitValueBE64(val: i64) [2]u32 {
- const u = @bitCast(u64, val);
+ const u = @as(u64, @bitCast(val));
return [2]u32{
- @truncate(u32, u >> 32),
- @truncate(u32, u),
+ @as(u32, @truncate(u >> 32)),
+ @as(u32, @truncate(u)),
};
}
fn splitValue64(val: i64) [2]u32 {
- const u = @bitCast(u64, val);
+ const u = @as(u64, @bitCast(val));
switch (native_endian) {
.Little => return [2]u32{
- @truncate(u32, u),
- @truncate(u32, u >> 32),
+ @as(u32, @truncate(u)),
+ @as(u32, @truncate(u >> 32)),
},
.Big => return [2]u32{
- @truncate(u32, u >> 32),
- @truncate(u32, u),
+ @as(u32, @truncate(u >> 32)),
+ @as(u32, @truncate(u)),
},
}
}
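// Worked example (hypothetical input, for illustration only): with
// val = 0x1122334455667788, splitValueLE64 yields .{ 0x55667788, 0x11223344 },
// splitValueBE64 yields .{ 0x11223344, 0x55667788 }, and splitValue64 returns
// whichever pair native_endian selects.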
/// Get the errno from a syscall return value, or 0 for no error.
pub fn getErrno(r: usize) E {
- const signed_r = @bitCast(isize, r);
+ const signed_r = @as(isize, @bitCast(r));
const int = if (signed_r > -4096 and signed_r < 0) -signed_r else 0;
- return @enumFromInt(E, int);
+ return @as(E, @enumFromInt(int));
}
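// The range check above relies on the Linux convention that failed syscalls
// return the bit pattern of -errno, always in -4095..-1; for example, a raw
// result of @as(usize, @bitCast(@as(isize, -2))) decodes to E.NOENT, while a
// valid file descriptor, byte count, or pointer decodes to .SUCCESS.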
pub fn dup(old: i32) usize {
- return syscall1(.dup, @bitCast(usize, @as(isize, old)));
+ return syscall1(.dup, @as(usize, @bitCast(@as(isize, old))));
}
pub fn dup2(old: i32, new: i32) usize {
if (@hasField(SYS, "dup2")) {
- return syscall2(.dup2, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)));
+ return syscall2(.dup2, @as(usize, @bitCast(@as(isize, old))), @as(usize, @bitCast(@as(isize, new))));
} else {
if (old == new) {
if (std.debug.runtime_safety) {
- const rc = syscall2(.fcntl, @bitCast(usize, @as(isize, old)), F.GETFD);
- if (@bitCast(isize, rc) < 0) return rc;
+ const rc = syscall2(.fcntl, @as(usize, @bitCast(@as(isize, old))), F.GETFD);
+ if (@as(isize, @bitCast(rc)) < 0) return rc;
}
- return @intCast(usize, old);
+ return @as(usize, @intCast(old));
} else {
- return syscall3(.dup3, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)), 0);
+ return syscall3(.dup3, @as(usize, @bitCast(@as(isize, old))), @as(usize, @bitCast(@as(isize, new))), 0);
}
}
}
pub fn dup3(old: i32, new: i32, flags: u32) usize {
- return syscall3(.dup3, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)), flags);
+ return syscall3(.dup3, @as(usize, @bitCast(@as(isize, old))), @as(usize, @bitCast(@as(isize, new))), flags);
}
pub fn chdir(path: [*:0]const u8) usize {
@@ -238,7 +238,7 @@ pub fn chdir(path: [*:0]const u8) usize {
}
pub fn fchdir(fd: fd_t) usize {
- return syscall1(.fchdir, @bitCast(usize, @as(isize, fd)));
+ return syscall1(.fchdir, @as(usize, @bitCast(@as(isize, fd))));
}
pub fn chroot(path: [*:0]const u8) usize {
@@ -273,7 +273,7 @@ pub fn futimens(fd: i32, times: *const [2]timespec) usize {
}
pub fn utimensat(dirfd: i32, path: ?[*:0]const u8, times: *const [2]timespec, flags: u32) usize {
- return syscall4(.utimensat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(times), flags);
+ return syscall4(.utimensat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(times), flags);
}
pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize {
@@ -282,8 +282,8 @@ pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize {
const length_halves = splitValue64(length);
return syscall6(
.fallocate,
- @bitCast(usize, @as(isize, fd)),
- @bitCast(usize, @as(isize, mode)),
+ @as(usize, @bitCast(@as(isize, fd))),
+ @as(usize, @bitCast(@as(isize, mode))),
offset_halves[0],
offset_halves[1],
length_halves[0],
@@ -292,20 +292,20 @@ pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize {
} else {
return syscall4(
.fallocate,
- @bitCast(usize, @as(isize, fd)),
- @bitCast(usize, @as(isize, mode)),
- @bitCast(u64, offset),
- @bitCast(u64, length),
+ @as(usize, @bitCast(@as(isize, fd))),
+ @as(usize, @bitCast(@as(isize, mode))),
+ @as(u64, @bitCast(offset)),
+ @as(u64, @bitCast(length)),
);
}
}
pub fn futex_wait(uaddr: *const i32, futex_op: u32, val: i32, timeout: ?*const timespec) usize {
- return syscall4(.futex, @intFromPtr(uaddr), futex_op, @bitCast(u32, val), @intFromPtr(timeout));
+ return syscall4(.futex, @intFromPtr(uaddr), futex_op, @as(u32, @bitCast(val)), @intFromPtr(timeout));
}
pub fn futex_wake(uaddr: *const i32, futex_op: u32, val: i32) usize {
- return syscall3(.futex, @intFromPtr(uaddr), futex_op, @bitCast(u32, val));
+ return syscall3(.futex, @intFromPtr(uaddr), futex_op, @as(u32, @bitCast(val)));
}
pub fn getcwd(buf: [*]u8, size: usize) usize {
@@ -315,7 +315,7 @@ pub fn getcwd(buf: [*]u8, size: usize) usize {
pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize {
return syscall3(
.getdents,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
@intFromPtr(dirp),
@min(len, maxInt(c_int)),
);
@@ -324,7 +324,7 @@ pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize {
pub fn getdents64(fd: i32, dirp: [*]u8, len: usize) usize {
return syscall3(
.getdents64,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
@intFromPtr(dirp),
@min(len, maxInt(c_int)),
);
@@ -335,35 +335,35 @@ pub fn inotify_init1(flags: u32) usize {
}
pub fn inotify_add_watch(fd: i32, pathname: [*:0]const u8, mask: u32) usize {
- return syscall3(.inotify_add_watch, @bitCast(usize, @as(isize, fd)), @intFromPtr(pathname), mask);
+ return syscall3(.inotify_add_watch, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(pathname), mask);
}
pub fn inotify_rm_watch(fd: i32, wd: i32) usize {
- return syscall2(.inotify_rm_watch, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, wd)));
+ return syscall2(.inotify_rm_watch, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, wd))));
}
pub fn readlink(noalias path: [*:0]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize {
if (@hasField(SYS, "readlink")) {
return syscall3(.readlink, @intFromPtr(path), @intFromPtr(buf_ptr), buf_len);
} else {
- return syscall4(.readlinkat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len);
+ return syscall4(.readlinkat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len);
}
}
pub fn readlinkat(dirfd: i32, noalias path: [*:0]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize {
- return syscall4(.readlinkat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len);
+ return syscall4(.readlinkat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len);
}
pub fn mkdir(path: [*:0]const u8, mode: u32) usize {
if (@hasField(SYS, "mkdir")) {
return syscall2(.mkdir, @intFromPtr(path), mode);
} else {
- return syscall3(.mkdirat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), mode);
+ return syscall3(.mkdirat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), mode);
}
}
pub fn mkdirat(dirfd: i32, path: [*:0]const u8, mode: u32) usize {
- return syscall3(.mkdirat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), mode);
+ return syscall3(.mkdirat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode);
}
pub fn mknod(path: [*:0]const u8, mode: u32, dev: u32) usize {
@@ -375,7 +375,7 @@ pub fn mknod(path: [*:0]const u8, mode: u32, dev: u32) usize {
}
pub fn mknodat(dirfd: i32, path: [*:0]const u8, mode: u32, dev: u32) usize {
- return syscall4(.mknodat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), mode, dev);
+ return syscall4(.mknodat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode, dev);
}
pub fn mount(special: [*:0]const u8, dir: [*:0]const u8, fstype: ?[*:0]const u8, flags: u32, data: usize) usize {
@@ -394,7 +394,7 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of
if (@hasField(SYS, "mmap2")) {
// Make sure the offset is also specified in multiples of page size
if ((offset & (MMAP2_UNIT - 1)) != 0)
- return @bitCast(usize, -@as(isize, @intFromEnum(E.INVAL)));
+ return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.INVAL))));
return syscall6(
.mmap2,
@@ -402,8 +402,8 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of
length,
prot,
flags,
- @bitCast(usize, @as(isize, fd)),
- @truncate(usize, @bitCast(u64, offset) / MMAP2_UNIT),
+ @as(usize, @bitCast(@as(isize, fd))),
+ @as(usize, @truncate(@as(u64, @bitCast(offset)) / MMAP2_UNIT)),
);
} else {
return syscall6(
@@ -412,8 +412,8 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of
length,
prot,
flags,
- @bitCast(usize, @as(isize, fd)),
- @bitCast(u64, offset),
+ @as(usize, @bitCast(@as(isize, fd))),
+ @as(u64, @bitCast(offset)),
);
}
}
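// Worked example (hypothetical values, assuming MMAP2_UNIT == 4096): a byte
// offset of 8192 passes the alignment check above and reaches the kernel as
// @as(usize, @truncate(@as(u64, @bitCast(offset)) / MMAP2_UNIT)) == 2, while an
// unaligned offset such as 4100 returns -EINVAL encoded as usize.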
@@ -429,7 +429,7 @@ pub const MSF = struct {
};
pub fn msync(address: [*]const u8, length: usize, flags: i32) usize {
- return syscall3(.msync, @intFromPtr(address), length, @bitCast(u32, flags));
+ return syscall3(.msync, @intFromPtr(address), length, @as(u32, @bitCast(flags)));
}
pub fn munmap(address: [*]const u8, length: usize) usize {
@@ -438,7 +438,7 @@ pub fn munmap(address: [*]const u8, length: usize) usize {
pub fn poll(fds: [*]pollfd, n: nfds_t, timeout: i32) usize {
if (@hasField(SYS, "poll")) {
- return syscall3(.poll, @intFromPtr(fds), n, @bitCast(u32, timeout));
+ return syscall3(.poll, @intFromPtr(fds), n, @as(u32, @bitCast(timeout)));
} else {
return syscall5(
.ppoll,
@@ -462,69 +462,69 @@ pub fn ppoll(fds: [*]pollfd, n: nfds_t, timeout: ?*timespec, sigmask: ?*const si
}
pub fn read(fd: i32, buf: [*]u8, count: usize) usize {
- return syscall3(.read, @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), count);
+ return syscall3(.read, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count);
}
pub fn preadv(fd: i32, iov: [*]const iovec, count: usize, offset: i64) usize {
- const offset_u = @bitCast(u64, offset);
+ const offset_u = @as(u64, @bitCast(offset));
return syscall5(
.preadv,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
@intFromPtr(iov),
count,
// The kernel expects the offset to be split into halves of the natural word size.
// See the following link for details:
// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=601cc11d054ae4b5e9b5babec3d8e4667a2cb9b5
- @truncate(usize, offset_u),
- if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0,
+ @as(usize, @truncate(offset_u)),
+ if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0,
);
}
pub fn preadv2(fd: i32, iov: [*]const iovec, count: usize, offset: i64, flags: kernel_rwf) usize {
- const offset_u = @bitCast(u64, offset);
+ const offset_u = @as(u64, @bitCast(offset));
return syscall6(
.preadv2,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
@intFromPtr(iov),
count,
// See comments in preadv
- @truncate(usize, offset_u),
- if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0,
+ @as(usize, @truncate(offset_u)),
+ if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0,
flags,
);
}
pub fn readv(fd: i32, iov: [*]const iovec, count: usize) usize {
- return syscall3(.readv, @bitCast(usize, @as(isize, fd)), @intFromPtr(iov), count);
+ return syscall3(.readv, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count);
}
pub fn writev(fd: i32, iov: [*]const iovec_const, count: usize) usize {
- return syscall3(.writev, @bitCast(usize, @as(isize, fd)), @intFromPtr(iov), count);
+ return syscall3(.writev, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count);
}
pub fn pwritev(fd: i32, iov: [*]const iovec_const, count: usize, offset: i64) usize {
- const offset_u = @bitCast(u64, offset);
+ const offset_u = @as(u64, @bitCast(offset));
return syscall5(
.pwritev,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
@intFromPtr(iov),
count,
// See comments in preadv
- @truncate(usize, offset_u),
- if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0,
+ @as(usize, @truncate(offset_u)),
+ if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0,
);
}
pub fn pwritev2(fd: i32, iov: [*]const iovec_const, count: usize, offset: i64, flags: kernel_rwf) usize {
- const offset_u = @bitCast(u64, offset);
+ const offset_u = @as(u64, @bitCast(offset));
return syscall6(
.pwritev2,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
@intFromPtr(iov),
count,
// See comments in preadv
- @truncate(usize, offset_u),
- if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0,
+ @as(usize, @truncate(offset_u)),
+ if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0,
flags,
);
}
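// Worked example of the offset splitting used by preadv/pwritev above
// (hypothetical 32-bit target, so usize_bits == 32): an offset of
// 0x0000000100000002 is passed as the word pair { 0x00000002, 0x00000001 };
// on 64-bit targets the second word is simply 0.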
@@ -533,7 +533,7 @@ pub fn rmdir(path: [*:0]const u8) usize {
if (@hasField(SYS, "rmdir")) {
return syscall1(.rmdir, @intFromPtr(path));
} else {
- return syscall3(.unlinkat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), AT.REMOVEDIR);
+ return syscall3(.unlinkat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), AT.REMOVEDIR);
}
}
@@ -541,12 +541,12 @@ pub fn symlink(existing: [*:0]const u8, new: [*:0]const u8) usize {
if (@hasField(SYS, "symlink")) {
return syscall2(.symlink, @intFromPtr(existing), @intFromPtr(new));
} else {
- return syscall3(.symlinkat, @intFromPtr(existing), @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(new));
+ return syscall3(.symlinkat, @intFromPtr(existing), @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(new));
}
}
pub fn symlinkat(existing: [*:0]const u8, newfd: i32, newpath: [*:0]const u8) usize {
- return syscall3(.symlinkat, @intFromPtr(existing), @bitCast(usize, @as(isize, newfd)), @intFromPtr(newpath));
+ return syscall3(.symlinkat, @intFromPtr(existing), @as(usize, @bitCast(@as(isize, newfd))), @intFromPtr(newpath));
}
pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize {
@@ -555,7 +555,7 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize {
if (require_aligned_register_pair) {
return syscall6(
.pread64,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
@intFromPtr(buf),
count,
0,
@@ -565,7 +565,7 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize {
} else {
return syscall5(
.pread64,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
@intFromPtr(buf),
count,
offset_halves[0],
@@ -580,10 +580,10 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize {
.pread;
return syscall4(
syscall_number,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
@intFromPtr(buf),
count,
- @bitCast(u64, offset),
+ @as(u64, @bitCast(offset)),
);
}
}
@@ -592,12 +592,12 @@ pub fn access(path: [*:0]const u8, mode: u32) usize {
if (@hasField(SYS, "access")) {
return syscall2(.access, @intFromPtr(path), mode);
} else {
- return syscall4(.faccessat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), mode, 0);
+ return syscall4(.faccessat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), mode, 0);
}
}
pub fn faccessat(dirfd: i32, path: [*:0]const u8, mode: u32, flags: u32) usize {
- return syscall4(.faccessat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), mode, flags);
+ return syscall4(.faccessat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode, flags);
}
pub fn pipe(fd: *[2]i32) usize {
@@ -615,7 +615,7 @@ pub fn pipe2(fd: *[2]i32, flags: u32) usize {
}
pub fn write(fd: i32, buf: [*]const u8, count: usize) usize {
- return syscall3(.write, @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), count);
+ return syscall3(.write, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count);
}
pub fn ftruncate(fd: i32, length: i64) usize {
@@ -624,7 +624,7 @@ pub fn ftruncate(fd: i32, length: i64) usize {
if (require_aligned_register_pair) {
return syscall4(
.ftruncate64,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
0,
length_halves[0],
length_halves[1],
@@ -632,7 +632,7 @@ pub fn ftruncate(fd: i32, length: i64) usize {
} else {
return syscall3(
.ftruncate64,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
length_halves[0],
length_halves[1],
);
@@ -640,8 +640,8 @@ pub fn ftruncate(fd: i32, length: i64) usize {
} else {
return syscall2(
.ftruncate,
- @bitCast(usize, @as(isize, fd)),
- @bitCast(usize, length),
+ @as(usize, @bitCast(@as(isize, fd))),
+ @as(usize, @bitCast(length)),
);
}
}
@@ -653,7 +653,7 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize {
if (require_aligned_register_pair) {
return syscall6(
.pwrite64,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
@intFromPtr(buf),
count,
0,
@@ -663,7 +663,7 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize {
} else {
return syscall5(
.pwrite64,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
@intFromPtr(buf),
count,
offset_halves[0],
@@ -678,10 +678,10 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize {
.pwrite;
return syscall4(
syscall_number,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
@intFromPtr(buf),
count,
- @bitCast(u64, offset),
+ @as(u64, @bitCast(offset)),
);
}
}
@@ -690,9 +690,9 @@ pub fn rename(old: [*:0]const u8, new: [*:0]const u8) usize {
if (@hasField(SYS, "rename")) {
return syscall2(.rename, @intFromPtr(old), @intFromPtr(new));
} else if (@hasField(SYS, "renameat")) {
- return syscall4(.renameat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(old), @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(new));
+ return syscall4(.renameat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(old), @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(new));
} else {
- return syscall5(.renameat2, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(old), @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(new), 0);
+ return syscall5(.renameat2, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(old), @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(new), 0);
}
}
@@ -700,17 +700,17 @@ pub fn renameat(oldfd: i32, oldpath: [*]const u8, newfd: i32, newpath: [*]const
if (@hasField(SYS, "renameat")) {
return syscall4(
.renameat,
- @bitCast(usize, @as(isize, oldfd)),
+ @as(usize, @bitCast(@as(isize, oldfd))),
@intFromPtr(oldpath),
- @bitCast(usize, @as(isize, newfd)),
+ @as(usize, @bitCast(@as(isize, newfd))),
@intFromPtr(newpath),
);
} else {
return syscall5(
.renameat2,
- @bitCast(usize, @as(isize, oldfd)),
+ @as(usize, @bitCast(@as(isize, oldfd))),
@intFromPtr(oldpath),
- @bitCast(usize, @as(isize, newfd)),
+ @as(usize, @bitCast(@as(isize, newfd))),
@intFromPtr(newpath),
0,
);
@@ -720,9 +720,9 @@ pub fn renameat(oldfd: i32, oldpath: [*]const u8, newfd: i32, newpath: [*]const
pub fn renameat2(oldfd: i32, oldpath: [*:0]const u8, newfd: i32, newpath: [*:0]const u8, flags: u32) usize {
return syscall5(
.renameat2,
- @bitCast(usize, @as(isize, oldfd)),
+ @as(usize, @bitCast(@as(isize, oldfd))),
@intFromPtr(oldpath),
- @bitCast(usize, @as(isize, newfd)),
+ @as(usize, @bitCast(@as(isize, newfd))),
@intFromPtr(newpath),
flags,
);
@@ -734,7 +734,7 @@ pub fn open(path: [*:0]const u8, flags: u32, perm: mode_t) usize {
} else {
return syscall4(
.openat,
- @bitCast(usize, @as(isize, AT.FDCWD)),
+ @as(usize, @bitCast(@as(isize, AT.FDCWD))),
@intFromPtr(path),
flags,
perm,
@@ -748,7 +748,7 @@ pub fn create(path: [*:0]const u8, perm: mode_t) usize {
pub fn openat(dirfd: i32, path: [*:0]const u8, flags: u32, mode: mode_t) usize {
// dirfd could be negative; for example, AT.FDCWD is -100
- return syscall4(.openat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), flags, mode);
+ return syscall4(.openat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), flags, mode);
}
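// The double cast above sign-extends first: e.g. when dirfd == AT.FDCWD (-100),
// the value widens to isize and bit-casts to 0xffff_ffff_ffff_ff9c on a 64-bit
// target, which is the register value the kernel expects for negative descriptors.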
/// See also `clone` (from the arch-specific include)
@@ -762,11 +762,11 @@ pub fn clone2(flags: u32, child_stack_ptr: usize) usize {
}
pub fn close(fd: i32) usize {
- return syscall1(.close, @bitCast(usize, @as(isize, fd)));
+ return syscall1(.close, @as(usize, @bitCast(@as(isize, fd))));
}
pub fn fchmod(fd: i32, mode: mode_t) usize {
- return syscall2(.fchmod, @bitCast(usize, @as(isize, fd)), mode);
+ return syscall2(.fchmod, @as(usize, @bitCast(@as(isize, fd))), mode);
}
pub fn chmod(path: [*:0]const u8, mode: mode_t) usize {
@@ -775,7 +775,7 @@ pub fn chmod(path: [*:0]const u8, mode: mode_t) usize {
} else {
return syscall4(
.fchmodat,
- @bitCast(usize, @as(isize, AT.FDCWD)),
+ @as(usize, @bitCast(@as(isize, AT.FDCWD))),
@intFromPtr(path),
mode,
0,
@@ -785,14 +785,14 @@ pub fn chmod(path: [*:0]const u8, mode: mode_t) usize {
pub fn fchown(fd: i32, owner: uid_t, group: gid_t) usize {
if (@hasField(SYS, "fchown32")) {
- return syscall3(.fchown32, @bitCast(usize, @as(isize, fd)), owner, group);
+ return syscall3(.fchown32, @as(usize, @bitCast(@as(isize, fd))), owner, group);
} else {
- return syscall3(.fchown, @bitCast(usize, @as(isize, fd)), owner, group);
+ return syscall3(.fchown, @as(usize, @bitCast(@as(isize, fd))), owner, group);
}
}
pub fn fchmodat(fd: i32, path: [*:0]const u8, mode: mode_t, flags: u32) usize {
- return syscall4(.fchmodat, @bitCast(usize, @as(isize, fd)), @intFromPtr(path), mode, flags);
+ return syscall4(.fchmodat, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(path), mode, flags);
}
/// Can only be called on 32-bit systems. For 64-bit, see `lseek`.
@@ -801,9 +801,9 @@ pub fn llseek(fd: i32, offset: u64, result: ?*u64, whence: usize) usize {
// endianness.
return syscall5(
._llseek,
- @bitCast(usize, @as(isize, fd)),
- @truncate(usize, offset >> 32),
- @truncate(usize, offset),
+ @as(usize, @bitCast(@as(isize, fd))),
+ @as(usize, @truncate(offset >> 32)),
+ @as(usize, @truncate(offset)),
@intFromPtr(result),
whence,
);
@@ -811,16 +811,16 @@ pub fn llseek(fd: i32, offset: u64, result: ?*u64, whence: usize) usize {
/// Can only be called on 64-bit systems. For 32-bit, see `llseek`.
pub fn lseek(fd: i32, offset: i64, whence: usize) usize {
- return syscall3(.lseek, @bitCast(usize, @as(isize, fd)), @bitCast(usize, offset), whence);
+ return syscall3(.lseek, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(offset)), whence);
}
pub fn exit(status: i32) noreturn {
- _ = syscall1(.exit, @bitCast(usize, @as(isize, status)));
+ _ = syscall1(.exit, @as(usize, @bitCast(@as(isize, status))));
unreachable;
}
pub fn exit_group(status: i32) noreturn {
- _ = syscall1(.exit_group, @bitCast(usize, @as(isize, status)));
+ _ = syscall1(.exit_group, @as(usize, @bitCast(@as(isize, status))));
unreachable;
}
@@ -886,15 +886,15 @@ pub fn getrandom(buf: [*]u8, count: usize, flags: u32) usize {
}
pub fn kill(pid: pid_t, sig: i32) usize {
- return syscall2(.kill, @bitCast(usize, @as(isize, pid)), @bitCast(usize, @as(isize, sig)));
+ return syscall2(.kill, @as(usize, @bitCast(@as(isize, pid))), @as(usize, @bitCast(@as(isize, sig))));
}
pub fn tkill(tid: pid_t, sig: i32) usize {
- return syscall2(.tkill, @bitCast(usize, @as(isize, tid)), @bitCast(usize, @as(isize, sig)));
+ return syscall2(.tkill, @as(usize, @bitCast(@as(isize, tid))), @as(usize, @bitCast(@as(isize, sig))));
}
pub fn tgkill(tgid: pid_t, tid: pid_t, sig: i32) usize {
- return syscall3(.tgkill, @bitCast(usize, @as(isize, tgid)), @bitCast(usize, @as(isize, tid)), @bitCast(usize, @as(isize, sig)));
+ return syscall3(.tgkill, @as(usize, @bitCast(@as(isize, tgid))), @as(usize, @bitCast(@as(isize, tid))), @as(usize, @bitCast(@as(isize, sig))));
}
pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) usize {
@@ -903,16 +903,16 @@ pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) usize {
.link,
@intFromPtr(oldpath),
@intFromPtr(newpath),
- @bitCast(usize, @as(isize, flags)),
+ @as(usize, @bitCast(@as(isize, flags))),
);
} else {
return syscall5(
.linkat,
- @bitCast(usize, @as(isize, AT.FDCWD)),
+ @as(usize, @bitCast(@as(isize, AT.FDCWD))),
@intFromPtr(oldpath),
- @bitCast(usize, @as(isize, AT.FDCWD)),
+ @as(usize, @bitCast(@as(isize, AT.FDCWD))),
@intFromPtr(newpath),
- @bitCast(usize, @as(isize, flags)),
+ @as(usize, @bitCast(@as(isize, flags))),
);
}
}
@@ -920,11 +920,11 @@ pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) usize {
pub fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: i32) usize {
return syscall5(
.linkat,
- @bitCast(usize, @as(isize, oldfd)),
+ @as(usize, @bitCast(@as(isize, oldfd))),
@intFromPtr(oldpath),
- @bitCast(usize, @as(isize, newfd)),
+ @as(usize, @bitCast(@as(isize, newfd))),
@intFromPtr(newpath),
- @bitCast(usize, @as(isize, flags)),
+ @as(usize, @bitCast(@as(isize, flags))),
);
}
@@ -932,22 +932,22 @@ pub fn unlink(path: [*:0]const u8) usize {
if (@hasField(SYS, "unlink")) {
return syscall1(.unlink, @intFromPtr(path));
} else {
- return syscall3(.unlinkat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), 0);
+ return syscall3(.unlinkat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), 0);
}
}
pub fn unlinkat(dirfd: i32, path: [*:0]const u8, flags: u32) usize {
- return syscall3(.unlinkat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), flags);
+ return syscall3(.unlinkat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), flags);
}
pub fn waitpid(pid: pid_t, status: *u32, flags: u32) usize {
- return syscall4(.wait4, @bitCast(usize, @as(isize, pid)), @intFromPtr(status), flags, 0);
+ return syscall4(.wait4, @as(usize, @bitCast(@as(isize, pid))), @intFromPtr(status), flags, 0);
}
pub fn wait4(pid: pid_t, status: *u32, flags: u32, usage: ?*rusage) usize {
return syscall4(
.wait4,
- @bitCast(usize, @as(isize, pid)),
+ @as(usize, @bitCast(@as(isize, pid))),
@intFromPtr(status),
flags,
@intFromPtr(usage),
@@ -955,18 +955,18 @@ pub fn wait4(pid: pid_t, status: *u32, flags: u32, usage: ?*rusage) usize {
}
pub fn waitid(id_type: P, id: i32, infop: *siginfo_t, flags: u32) usize {
- return syscall5(.waitid, @intFromEnum(id_type), @bitCast(usize, @as(isize, id)), @intFromPtr(infop), flags, 0);
+ return syscall5(.waitid, @intFromEnum(id_type), @as(usize, @bitCast(@as(isize, id))), @intFromPtr(infop), flags, 0);
}
pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) usize {
- return syscall3(.fcntl, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, cmd)), arg);
+ return syscall3(.fcntl, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, cmd))), arg);
}
pub fn flock(fd: fd_t, operation: i32) usize {
- return syscall2(.flock, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, operation)));
+ return syscall2(.flock, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, operation))));
}
-var vdso_clock_gettime = @ptrCast(?*const anyopaque, &init_vdso_clock_gettime);
+var vdso_clock_gettime = @as(?*const anyopaque, @ptrCast(&init_vdso_clock_gettime));
// We must follow the C calling convention when we call into the VDSO
const vdso_clock_gettime_ty = *align(1) const fn (i32, *timespec) callconv(.C) usize;
@@ -975,36 +975,36 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) usize {
if (@hasDecl(VDSO, "CGT_SYM")) {
const ptr = @atomicLoad(?*const anyopaque, &vdso_clock_gettime, .Unordered);
if (ptr) |fn_ptr| {
- const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr);
+ const f = @as(vdso_clock_gettime_ty, @ptrCast(fn_ptr));
const rc = f(clk_id, tp);
switch (rc) {
- 0, @bitCast(usize, -@as(isize, @intFromEnum(E.INVAL))) => return rc,
+ 0, @as(usize, @bitCast(-@as(isize, @intFromEnum(E.INVAL)))) => return rc,
else => {},
}
}
}
- return syscall2(.clock_gettime, @bitCast(usize, @as(isize, clk_id)), @intFromPtr(tp));
+ return syscall2(.clock_gettime, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp));
}
fn init_vdso_clock_gettime(clk: i32, ts: *timespec) callconv(.C) usize {
- const ptr = @ptrFromInt(?*const anyopaque, vdso.lookup(VDSO.CGT_VER, VDSO.CGT_SYM));
+ const ptr = @as(?*const anyopaque, @ptrFromInt(vdso.lookup(VDSO.CGT_VER, VDSO.CGT_SYM)));
// Note that we may not have a VDSO at all; update the stub address anyway
// so that clock_gettime will fall back on the good old (and slow) syscall
@atomicStore(?*const anyopaque, &vdso_clock_gettime, ptr, .Monotonic);
// Call into the VDSO if available
if (ptr) |fn_ptr| {
- const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr);
+ const f = @as(vdso_clock_gettime_ty, @ptrCast(fn_ptr));
return f(clk, ts);
}
- return @bitCast(usize, -@as(isize, @intFromEnum(E.NOSYS)));
+ return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.NOSYS))));
}
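// In other words: the first clock_gettime call lands in init_vdso_clock_gettime,
// which resolves the real VDSO symbol, atomically publishes it (or null) in
// vdso_clock_gettime, and forwards the call; subsequent calls then either jump
// straight into the VDSO or fall back to the plain syscall path.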
pub fn clock_getres(clk_id: i32, tp: *timespec) usize {
- return syscall2(.clock_getres, @bitCast(usize, @as(isize, clk_id)), @intFromPtr(tp));
+ return syscall2(.clock_getres, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp));
}
pub fn clock_settime(clk_id: i32, tp: *const timespec) usize {
- return syscall2(.clock_settime, @bitCast(usize, @as(isize, clk_id)), @intFromPtr(tp));
+ return syscall2(.clock_settime, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp));
}
pub fn gettimeofday(tv: *timeval, tz: *timezone) usize {
@@ -1053,33 +1053,33 @@ pub fn setregid(rgid: gid_t, egid: gid_t) usize {
pub fn getuid() uid_t {
if (@hasField(SYS, "getuid32")) {
- return @intCast(uid_t, syscall0(.getuid32));
+ return @as(uid_t, @intCast(syscall0(.getuid32)));
} else {
- return @intCast(uid_t, syscall0(.getuid));
+ return @as(uid_t, @intCast(syscall0(.getuid)));
}
}
pub fn getgid() gid_t {
if (@hasField(SYS, "getgid32")) {
- return @intCast(gid_t, syscall0(.getgid32));
+ return @as(gid_t, @intCast(syscall0(.getgid32)));
} else {
- return @intCast(gid_t, syscall0(.getgid));
+ return @as(gid_t, @intCast(syscall0(.getgid)));
}
}
pub fn geteuid() uid_t {
if (@hasField(SYS, "geteuid32")) {
- return @intCast(uid_t, syscall0(.geteuid32));
+ return @as(uid_t, @intCast(syscall0(.geteuid32)));
} else {
- return @intCast(uid_t, syscall0(.geteuid));
+ return @as(uid_t, @intCast(syscall0(.geteuid)));
}
}
pub fn getegid() gid_t {
if (@hasField(SYS, "getegid32")) {
- return @intCast(gid_t, syscall0(.getegid32));
+ return @as(gid_t, @intCast(syscall0(.getegid32)));
} else {
- return @intCast(gid_t, syscall0(.getegid));
+ return @as(gid_t, @intCast(syscall0(.getegid)));
}
}
@@ -1154,11 +1154,11 @@ pub fn setgroups(size: usize, list: [*]const gid_t) usize {
}
pub fn getpid() pid_t {
- return @bitCast(pid_t, @truncate(u32, syscall0(.getpid)));
+ return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.getpid)))));
}
pub fn gettid() pid_t {
- return @bitCast(pid_t, @truncate(u32, syscall0(.gettid)));
+ return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.gettid)))));
}
pub fn sigprocmask(flags: u32, noalias set: ?*const sigset_t, noalias oldset: ?*sigset_t) usize {
@@ -1182,9 +1182,9 @@ pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigact
.handler = new.handler.handler,
.flags = new.flags | SA.RESTORER,
.mask = undefined,
- .restorer = @ptrCast(k_sigaction_funcs.restorer, restorer_fn),
+ .restorer = @as(k_sigaction_funcs.restorer, @ptrCast(restorer_fn)),
};
- @memcpy(@ptrCast([*]u8, &ksa.mask)[0..mask_size], @ptrCast([*]const u8, &new.mask));
+ @memcpy(@as([*]u8, @ptrCast(&ksa.mask))[0..mask_size], @as([*]const u8, @ptrCast(&new.mask)));
}
const ksa_arg = if (act != null) @intFromPtr(&ksa) else 0;
@@ -1199,8 +1199,8 @@ pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigact
if (oact) |old| {
old.handler.handler = oldksa.handler;
- old.flags = @truncate(c_uint, oldksa.flags);
- @memcpy(@ptrCast([*]u8, &old.mask)[0..mask_size], @ptrCast([*]const u8, &oldksa.mask));
+ old.flags = @as(c_uint, @truncate(oldksa.flags));
+ @memcpy(@as([*]u8, @ptrCast(&old.mask))[0..mask_size], @as([*]const u8, @ptrCast(&oldksa.mask)));
}
return 0;
@@ -1211,28 +1211,28 @@ const usize_bits = @typeInfo(usize).Int.bits;
pub fn sigaddset(set: *sigset_t, sig: u6) void {
const s = sig - 1;
// shift in musl: s&8*sizeof *set->__bits-1
- const shift = @intCast(u5, s & (usize_bits - 1));
- const val = @intCast(u32, 1) << shift;
- (set.*)[@intCast(usize, s) / usize_bits] |= val;
+ const shift = @as(u5, @intCast(s & (usize_bits - 1)));
+ const val = @as(u32, @intCast(1)) << shift;
+ (set.*)[@as(usize, @intCast(s)) / usize_bits] |= val;
}
pub fn sigismember(set: *const sigset_t, sig: u6) bool {
const s = sig - 1;
- return ((set.*)[@intCast(usize, s) / usize_bits] & (@intCast(usize, 1) << (s & (usize_bits - 1)))) != 0;
+ return ((set.*)[@as(usize, @intCast(s)) / usize_bits] & (@as(usize, @intCast(1)) << (s & (usize_bits - 1)))) != 0;
}
pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
if (native_arch == .x86) {
- return socketcall(SC.getsockname, &[3]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len) });
+ return socketcall(SC.getsockname, &[3]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len) });
}
- return syscall3(.getsockname, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len));
+ return syscall3(.getsockname, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len));
}
pub fn getpeername(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
if (native_arch == .x86) {
- return socketcall(SC.getpeername, &[3]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len) });
+ return socketcall(SC.getpeername, &[3]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len) });
}
- return syscall3(.getpeername, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len));
+ return syscall3(.getpeername, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len));
}
pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
@@ -1244,20 +1244,20 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: [*]const u8, optlen: socklen_t) usize {
if (native_arch == .x86) {
- return socketcall(SC.setsockopt, &[5]usize{ @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intCast(usize, optlen) });
+ return socketcall(SC.setsockopt, &[5]usize{ @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @as(usize, @intCast(optlen)) });
}
- return syscall5(.setsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intCast(usize, optlen));
+ return syscall5(.setsockopt, @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @as(usize, @intCast(optlen)));
}
pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: [*]u8, noalias optlen: *socklen_t) usize {
if (native_arch == .x86) {
- return socketcall(SC.getsockopt, &[5]usize{ @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intFromPtr(optlen) });
+ return socketcall(SC.getsockopt, &[5]usize{ @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @intFromPtr(optlen) });
}
- return syscall5(.getsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intFromPtr(optlen));
+ return syscall5(.getsockopt, @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @intFromPtr(optlen));
}
pub fn sendmsg(fd: i32, msg: *const msghdr_const, flags: u32) usize {
- const fd_usize = @bitCast(usize, @as(isize, fd));
+ const fd_usize = @as(usize, @bitCast(@as(isize, fd)));
const msg_usize = @intFromPtr(msg);
if (native_arch == .x86) {
return socketcall(SC.sendmsg, &[3]usize{ fd_usize, msg_usize, flags });
@@ -1275,13 +1275,13 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
var next_unsent: usize = 0;
for (msgvec[0..kvlen], 0..) |*msg, i| {
var size: i32 = 0;
- const msg_iovlen = @intCast(usize, msg.msg_hdr.msg_iovlen); // kernel side this is treated as unsigned
+ const msg_iovlen = @as(usize, @intCast(msg.msg_hdr.msg_iovlen)); // kernel side this is treated as unsigned
for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov| {
- if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(size, @intCast(i32, iov.iov_len))[1] != 0) {
+ if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(size, @as(i32, @intCast(iov.iov_len)))[1] != 0) {
// batch-send all messages up to the current message
if (next_unsent < i) {
const batch_size = i - next_unsent;
- const r = syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @intFromPtr(&msgvec[next_unsent]), batch_size, flags);
+ const r = syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(&msgvec[next_unsent]), batch_size, flags);
if (getErrno(r) != 0) return next_unsent;
if (r < batch_size) return next_unsent + r;
}
@@ -1289,7 +1289,7 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
const r = sendmsg(fd, &msg.msg_hdr, flags);
if (getErrno(r) != 0) return r;
// Linux limits the total bytes sent by sendmsg to INT_MAX, so this cast is safe.
- msg.msg_len = @intCast(u32, r);
+ msg.msg_len = @as(u32, @intCast(r));
next_unsent = i + 1;
break;
}
@@ -1297,17 +1297,17 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
}
if (next_unsent < kvlen or next_unsent == 0) { // want to make sure at least one syscall occurs (e.g. to trigger MSG.EOR)
const batch_size = kvlen - next_unsent;
- const r = syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @intFromPtr(&msgvec[next_unsent]), batch_size, flags);
+ const r = syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(&msgvec[next_unsent]), batch_size, flags);
if (getErrno(r) != 0) return r;
return next_unsent + r;
}
return kvlen;
}
- return syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @intFromPtr(msgvec), vlen, flags);
+ return syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(msgvec), vlen, flags);
}
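// Summary of the batching above: when usize is wider than the kernel's 32-bit
// byte counters, any message whose combined iov_len would overflow an i32 is
// sent on its own via sendmsg, and the surrounding messages are flushed in
// sendmmsg batches, so every reported msg_len stays within INT_MAX.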
pub fn connect(fd: i32, addr: *const anyopaque, len: socklen_t) usize {
- const fd_usize = @bitCast(usize, @as(isize, fd));
+ const fd_usize = @as(usize, @bitCast(@as(isize, fd)));
const addr_usize = @intFromPtr(addr);
if (native_arch == .x86) {
return socketcall(SC.connect, &[3]usize{ fd_usize, addr_usize, len });
@@ -1317,7 +1317,7 @@ pub fn connect(fd: i32, addr: *const anyopaque, len: socklen_t) usize {
}
pub fn recvmsg(fd: i32, msg: *msghdr, flags: u32) usize {
- const fd_usize = @bitCast(usize, @as(isize, fd));
+ const fd_usize = @as(usize, @bitCast(@as(isize, fd)));
const msg_usize = @intFromPtr(msg);
if (native_arch == .x86) {
return socketcall(SC.recvmsg, &[3]usize{ fd_usize, msg_usize, flags });
@@ -1334,7 +1334,7 @@ pub fn recvfrom(
noalias addr: ?*sockaddr,
noalias alen: ?*socklen_t,
) usize {
- const fd_usize = @bitCast(usize, @as(isize, fd));
+ const fd_usize = @as(usize, @bitCast(@as(isize, fd)));
const buf_usize = @intFromPtr(buf);
const addr_usize = @intFromPtr(addr);
const alen_usize = @intFromPtr(alen);
@@ -1347,46 +1347,46 @@ pub fn recvfrom(
pub fn shutdown(fd: i32, how: i32) usize {
if (native_arch == .x86) {
- return socketcall(SC.shutdown, &[2]usize{ @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, how)) });
+ return socketcall(SC.shutdown, &[2]usize{ @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, how))) });
}
- return syscall2(.shutdown, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, how)));
+ return syscall2(.shutdown, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, how))));
}
pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
if (native_arch == .x86) {
- return socketcall(SC.bind, &[3]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intCast(usize, len) });
+ return socketcall(SC.bind, &[3]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @as(usize, @intCast(len)) });
}
- return syscall3(.bind, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intCast(usize, len));
+ return syscall3(.bind, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @as(usize, @intCast(len)));
}
pub fn listen(fd: i32, backlog: u32) usize {
if (native_arch == .x86) {
- return socketcall(SC.listen, &[2]usize{ @bitCast(usize, @as(isize, fd)), backlog });
+ return socketcall(SC.listen, &[2]usize{ @as(usize, @bitCast(@as(isize, fd))), backlog });
}
- return syscall2(.listen, @bitCast(usize, @as(isize, fd)), backlog);
+ return syscall2(.listen, @as(usize, @bitCast(@as(isize, fd))), backlog);
}
pub fn sendto(fd: i32, buf: [*]const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize {
if (native_arch == .x86) {
- return socketcall(SC.sendto, &[6]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), len, flags, @intFromPtr(addr), @intCast(usize, alen) });
+ return socketcall(SC.sendto, &[6]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), len, flags, @intFromPtr(addr), @as(usize, @intCast(alen)) });
}
- return syscall6(.sendto, @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), len, flags, @intFromPtr(addr), @intCast(usize, alen));
+ return syscall6(.sendto, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), len, flags, @intFromPtr(addr), @as(usize, @intCast(alen)));
}
pub fn sendfile(outfd: i32, infd: i32, offset: ?*i64, count: usize) usize {
if (@hasField(SYS, "sendfile64")) {
return syscall4(
.sendfile64,
- @bitCast(usize, @as(isize, outfd)),
- @bitCast(usize, @as(isize, infd)),
+ @as(usize, @bitCast(@as(isize, outfd))),
+ @as(usize, @bitCast(@as(isize, infd))),
@intFromPtr(offset),
count,
);
} else {
return syscall4(
.sendfile,
- @bitCast(usize, @as(isize, outfd)),
- @bitCast(usize, @as(isize, infd)),
+ @as(usize, @bitCast(@as(isize, outfd))),
+ @as(usize, @bitCast(@as(isize, infd))),
@intFromPtr(offset),
count,
);
@@ -1395,9 +1395,9 @@ pub fn sendfile(outfd: i32, infd: i32, offset: ?*i64, count: usize) usize {
pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: *[2]i32) usize {
if (native_arch == .x86) {
- return socketcall(SC.socketpair, &[4]usize{ @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @intFromPtr(fd) });
+ return socketcall(SC.socketpair, &[4]usize{ @as(usize, @intCast(domain)), @as(usize, @intCast(socket_type)), @as(usize, @intCast(protocol)), @intFromPtr(fd) });
}
- return syscall4(.socketpair, @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @intFromPtr(fd));
+ return syscall4(.socketpair, @as(usize, @intCast(domain)), @as(usize, @intCast(socket_type)), @as(usize, @intCast(protocol)), @intFromPtr(fd));
}
pub fn accept(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t) usize {
@@ -1409,16 +1409,16 @@ pub fn accept(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t) usize
pub fn accept4(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t, flags: u32) usize {
if (native_arch == .x86) {
- return socketcall(SC.accept4, &[4]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len), flags });
+ return socketcall(SC.accept4, &[4]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len), flags });
}
- return syscall4(.accept4, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len), flags);
+ return syscall4(.accept4, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len), flags);
}
pub fn fstat(fd: i32, stat_buf: *Stat) usize {
if (@hasField(SYS, "fstat64")) {
- return syscall2(.fstat64, @bitCast(usize, @as(isize, fd)), @intFromPtr(stat_buf));
+ return syscall2(.fstat64, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(stat_buf));
} else {
- return syscall2(.fstat, @bitCast(usize, @as(isize, fd)), @intFromPtr(stat_buf));
+ return syscall2(.fstat, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(stat_buf));
}
}
@@ -1440,9 +1440,9 @@ pub fn lstat(pathname: [*:0]const u8, statbuf: *Stat) usize {
pub fn fstatat(dirfd: i32, path: [*:0]const u8, stat_buf: *Stat, flags: u32) usize {
if (@hasField(SYS, "fstatat64")) {
- return syscall4(.fstatat64, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(stat_buf), flags);
+ return syscall4(.fstatat64, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(stat_buf), flags);
} else {
- return syscall4(.fstatat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(stat_buf), flags);
+ return syscall4(.fstatat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(stat_buf), flags);
}
}
@@ -1450,14 +1450,14 @@ pub fn statx(dirfd: i32, path: [*]const u8, flags: u32, mask: u32, statx_buf: *S
if (@hasField(SYS, "statx")) {
return syscall5(
.statx,
- @bitCast(usize, @as(isize, dirfd)),
+ @as(usize, @bitCast(@as(isize, dirfd))),
@intFromPtr(path),
flags,
mask,
@intFromPtr(statx_buf),
);
}
- return @bitCast(usize, -@as(isize, @intFromEnum(E.NOSYS)));
+ return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.NOSYS))));
}
pub fn listxattr(path: [*:0]const u8, list: [*]u8, size: usize) usize {
@@ -1513,9 +1513,9 @@ pub fn sched_yield() usize {
}
pub fn sched_getaffinity(pid: pid_t, size: usize, set: *cpu_set_t) usize {
- const rc = syscall3(.sched_getaffinity, @bitCast(usize, @as(isize, pid)), size, @intFromPtr(set));
- if (@bitCast(isize, rc) < 0) return rc;
- if (rc < size) @memset(@ptrCast([*]u8, set)[rc..size], 0);
+ const rc = syscall3(.sched_getaffinity, @as(usize, @bitCast(@as(isize, pid))), size, @intFromPtr(set));
+ if (@as(isize, @bitCast(rc)) < 0) return rc;
+ if (rc < size) @memset(@as([*]u8, @ptrCast(set))[rc..size], 0);
return 0;
}
@@ -1526,18 +1526,18 @@ pub fn getcpu(cpu: *u32, node: *u32) usize {
pub fn sched_getcpu() usize {
var cpu: u32 = undefined;
const rc = syscall3(.getcpu, @intFromPtr(&cpu), 0, 0);
- if (@bitCast(isize, rc) < 0) return rc;
- return @intCast(usize, cpu);
+ if (@as(isize, @bitCast(rc)) < 0) return rc;
+ return @as(usize, @intCast(cpu));
}
/// libc has no wrapper for this syscall
pub fn mbind(addr: ?*anyopaque, len: u32, mode: i32, nodemask: *const u32, maxnode: u32, flags: u32) usize {
- return syscall6(.mbind, @intFromPtr(addr), len, @bitCast(usize, @as(isize, mode)), @intFromPtr(nodemask), maxnode, flags);
+ return syscall6(.mbind, @intFromPtr(addr), len, @as(usize, @bitCast(@as(isize, mode))), @intFromPtr(nodemask), maxnode, flags);
}
pub fn sched_setaffinity(pid: pid_t, size: usize, set: *const cpu_set_t) usize {
- const rc = syscall3(.sched_setaffinity, @bitCast(usize, @as(isize, pid)), size, @intFromPtr(set));
- if (@bitCast(isize, rc) < 0) return rc;
+ const rc = syscall3(.sched_setaffinity, @as(usize, @bitCast(@as(isize, pid))), size, @intFromPtr(set));
+ if (@as(isize, @bitCast(rc)) < 0) return rc;
return 0;
}
@@ -1550,7 +1550,7 @@ pub fn epoll_create1(flags: usize) usize {
}
pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: ?*epoll_event) usize {
- return syscall4(.epoll_ctl, @bitCast(usize, @as(isize, epoll_fd)), @intCast(usize, op), @bitCast(usize, @as(isize, fd)), @intFromPtr(ev));
+ return syscall4(.epoll_ctl, @as(usize, @bitCast(@as(isize, epoll_fd))), @as(usize, @intCast(op)), @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(ev));
}
pub fn epoll_wait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout: i32) usize {
@@ -1560,10 +1560,10 @@ pub fn epoll_wait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout
pub fn epoll_pwait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout: i32, sigmask: ?*const sigset_t) usize {
return syscall6(
.epoll_pwait,
- @bitCast(usize, @as(isize, epoll_fd)),
+ @as(usize, @bitCast(@as(isize, epoll_fd))),
@intFromPtr(events),
- @intCast(usize, maxevents),
- @bitCast(usize, @as(isize, timeout)),
+ @as(usize, @intCast(maxevents)),
+ @as(usize, @bitCast(@as(isize, timeout))),
@intFromPtr(sigmask),
@sizeOf(sigset_t),
);
@@ -1574,7 +1574,7 @@ pub fn eventfd(count: u32, flags: u32) usize {
}
pub fn timerfd_create(clockid: i32, flags: u32) usize {
- return syscall2(.timerfd_create, @bitCast(usize, @as(isize, clockid)), flags);
+ return syscall2(.timerfd_create, @as(usize, @bitCast(@as(isize, clockid))), flags);
}
pub const itimerspec = extern struct {
@@ -1583,11 +1583,11 @@ pub const itimerspec = extern struct {
};
pub fn timerfd_gettime(fd: i32, curr_value: *itimerspec) usize {
- return syscall2(.timerfd_gettime, @bitCast(usize, @as(isize, fd)), @intFromPtr(curr_value));
+ return syscall2(.timerfd_gettime, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(curr_value));
}
pub fn timerfd_settime(fd: i32, flags: u32, new_value: *const itimerspec, old_value: ?*itimerspec) usize {
- return syscall4(.timerfd_settime, @bitCast(usize, @as(isize, fd)), flags, @intFromPtr(new_value), @intFromPtr(old_value));
+ return syscall4(.timerfd_settime, @as(usize, @bitCast(@as(isize, fd))), flags, @intFromPtr(new_value), @intFromPtr(old_value));
}
pub const sigevent = extern struct {
@@ -1609,8 +1609,8 @@ pub const timer_t = ?*anyopaque;
pub fn timer_create(clockid: i32, sevp: *sigevent, timerid: *timer_t) usize {
var t: timer_t = undefined;
- const rc = syscall3(.timer_create, @bitCast(usize, @as(isize, clockid)), @intFromPtr(sevp), @intFromPtr(&t));
- if (@bitCast(isize, rc) < 0) return rc;
+ const rc = syscall3(.timer_create, @as(usize, @bitCast(@as(isize, clockid))), @intFromPtr(sevp), @intFromPtr(&t));
+ if (@as(isize, @bitCast(rc)) < 0) return rc;
timerid.* = t;
return rc;
}
@@ -1624,7 +1624,7 @@ pub fn timer_gettime(timerid: timer_t, curr_value: *itimerspec) usize {
}
pub fn timer_settime(timerid: timer_t, flags: i32, new_value: *const itimerspec, old_value: ?*itimerspec) usize {
- return syscall4(.timer_settime, @intFromPtr(timerid), @bitCast(usize, @as(isize, flags)), @intFromPtr(new_value), @intFromPtr(old_value));
+ return syscall4(.timer_settime, @intFromPtr(timerid), @as(usize, @bitCast(@as(isize, flags))), @intFromPtr(new_value), @intFromPtr(old_value));
}
// Flags for the 'setitimer' system call
@@ -1635,11 +1635,11 @@ pub const ITIMER = enum(i32) {
};
pub fn getitimer(which: i32, curr_value: *itimerspec) usize {
- return syscall2(.getitimer, @bitCast(usize, @as(isize, which)), @intFromPtr(curr_value));
+ return syscall2(.getitimer, @as(usize, @bitCast(@as(isize, which))), @intFromPtr(curr_value));
}
pub fn setitimer(which: i32, new_value: *const itimerspec, old_value: ?*itimerspec) usize {
- return syscall3(.setitimer, @bitCast(usize, @as(isize, which)), @intFromPtr(new_value), @intFromPtr(old_value));
+ return syscall3(.setitimer, @as(usize, @bitCast(@as(isize, which))), @intFromPtr(new_value), @intFromPtr(old_value));
}
pub fn unshare(flags: usize) usize {
@@ -1667,11 +1667,11 @@ pub fn io_uring_setup(entries: u32, p: *io_uring_params) usize {
}
pub fn io_uring_enter(fd: i32, to_submit: u32, min_complete: u32, flags: u32, sig: ?*sigset_t) usize {
- return syscall6(.io_uring_enter, @bitCast(usize, @as(isize, fd)), to_submit, min_complete, flags, @intFromPtr(sig), NSIG / 8);
+ return syscall6(.io_uring_enter, @as(usize, @bitCast(@as(isize, fd))), to_submit, min_complete, flags, @intFromPtr(sig), NSIG / 8);
}
pub fn io_uring_register(fd: i32, opcode: IORING_REGISTER, arg: ?*const anyopaque, nr_args: u32) usize {
- return syscall4(.io_uring_register, @bitCast(usize, @as(isize, fd)), @intFromEnum(opcode), @intFromPtr(arg), nr_args);
+ return syscall4(.io_uring_register, @as(usize, @bitCast(@as(isize, fd))), @intFromEnum(opcode), @intFromPtr(arg), nr_args);
}
pub fn memfd_create(name: [*:0]const u8, flags: u32) usize {
@@ -1679,43 +1679,43 @@ pub fn memfd_create(name: [*:0]const u8, flags: u32) usize {
}
pub fn getrusage(who: i32, usage: *rusage) usize {
- return syscall2(.getrusage, @bitCast(usize, @as(isize, who)), @intFromPtr(usage));
+ return syscall2(.getrusage, @as(usize, @bitCast(@as(isize, who))), @intFromPtr(usage));
}
pub fn tcgetattr(fd: fd_t, termios_p: *termios) usize {
- return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.CGETS, @intFromPtr(termios_p));
+ return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.CGETS, @intFromPtr(termios_p));
}
pub fn tcsetattr(fd: fd_t, optional_action: TCSA, termios_p: *const termios) usize {
- return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.CSETS + @intFromEnum(optional_action), @intFromPtr(termios_p));
+ return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.CSETS + @intFromEnum(optional_action), @intFromPtr(termios_p));
}
pub fn tcgetpgrp(fd: fd_t, pgrp: *pid_t) usize {
- return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.IOCGPGRP, @intFromPtr(pgrp));
+ return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.IOCGPGRP, @intFromPtr(pgrp));
}
pub fn tcsetpgrp(fd: fd_t, pgrp: *const pid_t) usize {
- return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.IOCSPGRP, @intFromPtr(pgrp));
+ return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.IOCSPGRP, @intFromPtr(pgrp));
}
pub fn tcdrain(fd: fd_t) usize {
- return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.CSBRK, 1);
+ return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.CSBRK, 1);
}
pub fn ioctl(fd: fd_t, request: u32, arg: usize) usize {
- return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), request, arg);
+ return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), request, arg);
}
pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) usize {
- return syscall4(.signalfd4, @bitCast(usize, @as(isize, fd)), @intFromPtr(mask), NSIG / 8, flags);
+ return syscall4(.signalfd4, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(mask), NSIG / 8, flags);
}
pub fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: u32) usize {
return syscall6(
.copy_file_range,
- @bitCast(usize, @as(isize, fd_in)),
+ @as(usize, @bitCast(@as(isize, fd_in))),
@intFromPtr(off_in),
- @bitCast(usize, @as(isize, fd_out)),
+ @as(usize, @bitCast(@as(isize, fd_out))),
@intFromPtr(off_out),
len,
flags,
@@ -1731,19 +1731,19 @@ pub fn sync() void {
}
pub fn syncfs(fd: fd_t) usize {
- return syscall1(.syncfs, @bitCast(usize, @as(isize, fd)));
+ return syscall1(.syncfs, @as(usize, @bitCast(@as(isize, fd))));
}
pub fn fsync(fd: fd_t) usize {
- return syscall1(.fsync, @bitCast(usize, @as(isize, fd)));
+ return syscall1(.fsync, @as(usize, @bitCast(@as(isize, fd))));
}
pub fn fdatasync(fd: fd_t) usize {
- return syscall1(.fdatasync, @bitCast(usize, @as(isize, fd)));
+ return syscall1(.fdatasync, @as(usize, @bitCast(@as(isize, fd))));
}
pub fn prctl(option: i32, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
- return syscall5(.prctl, @bitCast(usize, @as(isize, option)), arg2, arg3, arg4, arg5);
+ return syscall5(.prctl, @as(usize, @bitCast(@as(isize, option))), arg2, arg3, arg4, arg5);
}
pub fn getrlimit(resource: rlimit_resource, rlim: *rlimit) usize {
@@ -1759,8 +1759,8 @@ pub fn setrlimit(resource: rlimit_resource, rlim: *const rlimit) usize {
pub fn prlimit(pid: pid_t, resource: rlimit_resource, new_limit: ?*const rlimit, old_limit: ?*rlimit) usize {
return syscall4(
.prlimit64,
- @bitCast(usize, @as(isize, pid)),
- @bitCast(usize, @as(isize, @intFromEnum(resource))),
+ @as(usize, @bitCast(@as(isize, pid))),
+ @as(usize, @bitCast(@as(isize, @intFromEnum(resource)))),
@intFromPtr(new_limit),
@intFromPtr(old_limit),
);
@@ -1775,14 +1775,14 @@ pub fn madvise(address: [*]u8, len: usize, advice: u32) usize {
}
pub fn pidfd_open(pid: pid_t, flags: u32) usize {
- return syscall2(.pidfd_open, @bitCast(usize, @as(isize, pid)), flags);
+ return syscall2(.pidfd_open, @as(usize, @bitCast(@as(isize, pid))), flags);
}
pub fn pidfd_getfd(pidfd: fd_t, targetfd: fd_t, flags: u32) usize {
return syscall3(
.pidfd_getfd,
- @bitCast(usize, @as(isize, pidfd)),
- @bitCast(usize, @as(isize, targetfd)),
+ @as(usize, @bitCast(@as(isize, pidfd))),
+ @as(usize, @bitCast(@as(isize, targetfd))),
flags,
);
}
@@ -1790,8 +1790,8 @@ pub fn pidfd_getfd(pidfd: fd_t, targetfd: fd_t, flags: u32) usize {
pub fn pidfd_send_signal(pidfd: fd_t, sig: i32, info: ?*siginfo_t, flags: u32) usize {
return syscall4(
.pidfd_send_signal,
- @bitCast(usize, @as(isize, pidfd)),
- @bitCast(usize, @as(isize, sig)),
+ @as(usize, @bitCast(@as(isize, pidfd))),
+ @as(usize, @bitCast(@as(isize, sig))),
@intFromPtr(info),
flags,
);
@@ -1800,7 +1800,7 @@ pub fn pidfd_send_signal(pidfd: fd_t, sig: i32, info: ?*siginfo_t, flags: u32) u
pub fn process_vm_readv(pid: pid_t, local: []iovec, remote: []const iovec_const, flags: usize) usize {
return syscall6(
.process_vm_readv,
- @bitCast(usize, @as(isize, pid)),
+ @as(usize, @bitCast(@as(isize, pid))),
@intFromPtr(local.ptr),
local.len,
@intFromPtr(remote.ptr),
@@ -1812,7 +1812,7 @@ pub fn process_vm_readv(pid: pid_t, local: []iovec, remote: []const iovec_const,
pub fn process_vm_writev(pid: pid_t, local: []const iovec_const, remote: []const iovec_const, flags: usize) usize {
return syscall6(
.process_vm_writev,
- @bitCast(usize, @as(isize, pid)),
+ @as(usize, @bitCast(@as(isize, pid))),
@intFromPtr(local.ptr),
local.len,
@intFromPtr(remote.ptr),
@@ -1830,7 +1830,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize {
return syscall7(
.fadvise64,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
0,
offset_halves[0],
offset_halves[1],
@@ -1846,7 +1846,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize {
return syscall6(
.fadvise64_64,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
advice,
offset_halves[0],
offset_halves[1],
@@ -1862,7 +1862,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize {
return syscall6(
.fadvise64_64,
- @bitCast(usize, @as(isize, fd)),
+ @as(usize, @bitCast(@as(isize, fd))),
offset_halves[0],
offset_halves[1],
length_halves[0],
@@ -1872,9 +1872,9 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize {
} else {
return syscall4(
.fadvise64,
- @bitCast(usize, @as(isize, fd)),
- @bitCast(usize, offset),
- @bitCast(usize, len),
+ @as(usize, @bitCast(@as(isize, fd))),
+ @as(usize, @bitCast(offset)),
+ @as(usize, @bitCast(len)),
advice,
);
}
@@ -1890,9 +1890,9 @@ pub fn perf_event_open(
return syscall5(
.perf_event_open,
@intFromPtr(attr),
- @bitCast(usize, @as(isize, pid)),
- @bitCast(usize, @as(isize, cpu)),
- @bitCast(usize, @as(isize, group_fd)),
+ @as(usize, @bitCast(@as(isize, pid))),
+ @as(usize, @bitCast(@as(isize, cpu))),
+ @as(usize, @bitCast(@as(isize, group_fd))),
flags,
);
}
@@ -1911,7 +1911,7 @@ pub fn ptrace(
return syscall5(
.ptrace,
req,
- @bitCast(usize, @as(isize, pid)),
+ @as(usize, @bitCast(@as(isize, pid))),
addr,
data,
addr2,
@@ -2057,7 +2057,7 @@ pub const W = struct {
pub const NOWAIT = 0x1000000;
pub fn EXITSTATUS(s: u32) u8 {
- return @intCast(u8, (s & 0xff00) >> 8);
+ return @as(u8, @intCast((s & 0xff00) >> 8));
}
pub fn TERMSIG(s: u32) u32 {
return s & 0x7f;
@@ -2069,7 +2069,7 @@ pub const W = struct {
return TERMSIG(s) == 0;
}
pub fn IFSTOPPED(s: u32) bool {
- return @truncate(u16, ((s & 0xffff) *% 0x10001) >> 8) > 0x7f00;
+ return @as(u16, @truncate(((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00;
}
pub fn IFSIGNALED(s: u32) bool {
return (s & 0xffff) -% 1 < 0xff;
@@ -2154,9 +2154,9 @@ pub const SIG = if (is_mips) struct {
pub const SYS = 31;
pub const UNUSED = SIG.SYS;
- pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
- pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
- pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
+ pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+ pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+ pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
} else if (is_sparc) struct {
pub const BLOCK = 1;
pub const UNBLOCK = 2;
@@ -2198,9 +2198,9 @@ pub const SIG = if (is_mips) struct {
pub const PWR = LOST;
pub const IO = SIG.POLL;
- pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
- pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
- pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
+ pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+ pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+ pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
} else struct {
pub const BLOCK = 0;
pub const UNBLOCK = 1;
@@ -2241,9 +2241,9 @@ pub const SIG = if (is_mips) struct {
pub const SYS = 31;
pub const UNUSED = SIG.SYS;
- pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
- pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
- pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
+ pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+ pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+ pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
};
pub const kernel_rwf = u32;
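The SIG.ERR/DFL/IGN constants above show the @ptrFromInt side of the migration: the destination pointer type moves out of the builtin and into a surrounding @as. A minimal standalone sketch of the same spelling, using a hypothetical HandlerFn type rather than the real Sigaction.handler_fn (align(1) is a sketch-only detail so the unaligned sentinel addresses stay legal):

const std = @import("std");
const maxInt = std.math.maxInt;

// Hypothetical handler pointer type for the sketch.
const HandlerFn = *align(1) const fn (i32) callconv(.C) void;

const DFL_sketch = @as(?HandlerFn, @ptrFromInt(0)); // old: @ptrFromInt(?HandlerFn, 0)
const IGN_sketch = @as(?HandlerFn, @ptrFromInt(1));
const ERR_sketch = @as(?HandlerFn, @ptrFromInt(maxInt(usize)));

test "sentinel addresses survive the new spelling" {
    try std.testing.expectEqual(@as(usize, 1), @intFromPtr(IGN_sketch.?));
    try std.testing.expectEqual(@as(usize, maxInt(usize)), @intFromPtr(ERR_sketch.?));
    try std.testing.expect(DFL_sketch == null); // address 0 is the optional pointer's null
}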
@@ -3541,7 +3541,7 @@ pub const CAP = struct {
}
pub fn TO_MASK(cap: u8) u32 {
- return @as(u32, 1) << @intCast(u5, cap & 31);
+ return @as(u32, 1) << @as(u5, @intCast(cap & 31));
}
pub fn TO_INDEX(cap: u8) u8 {
@@ -3598,7 +3598,7 @@ pub const cpu_count_t = std.meta.Int(.unsigned, std.math.log2(CPU_SETSIZE * 8));
fn cpu_mask(s: usize) cpu_count_t {
var x = s & (CPU_SETSIZE * 8);
- return @intCast(cpu_count_t, 1) << @intCast(u4, x);
+ return @as(cpu_count_t, @intCast(1)) << @as(u4, @intCast(x));
}
pub fn CPU_COUNT(set: cpu_set_t) cpu_count_t {
@@ -3999,7 +3999,7 @@ pub const io_uring_cqe = extern struct {
pub fn err(self: io_uring_cqe) E {
if (self.res > -4096 and self.res < 0) {
- return @enumFromInt(E, -self.res);
+ return @as(E, @enumFromInt(-self.res));
}
return .SUCCESS;
}
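Every hunk above in this file follows the same two conversions: a signed argument (fd, pid, clockid, ...) is widened to isize and reinterpreted as usize on its way into a raw syscall, and a raw return value is reinterpreted as isize to test for -errno. With the single-argument builtins, the destination type is supplied by the surrounding @as instead of being the builtin's first argument. A standalone sketch using hypothetical helpers fdToArg and isError (not part of std):

const std = @import("std");

/// Hypothetical helper: widen a signed descriptor or pid to isize, then
/// reinterpret it as the usize register value the syscall wrappers expect.
fn fdToArg(fd: i32) usize {
    // old: @bitCast(usize, @as(isize, fd))
    return @as(usize, @bitCast(@as(isize, fd)));
}

/// Hypothetical helper mirroring `if (@as(isize, @bitCast(rc)) < 0) return rc;`:
/// a raw return value encodes an error when it is negative viewed as an isize.
fn isError(rc: usize) bool {
    return @as(isize, @bitCast(rc)) < 0;
}

test "signed arguments are sign-extended into usize" {
    try std.testing.expectEqual(@as(usize, 3), fdToArg(3));
    try std.testing.expectEqual(@as(usize, std.math.maxInt(usize)), fdToArg(-1));
    try std.testing.expect(isError(fdToArg(-1)));
    try std.testing.expect(!isError(0));
}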
diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig
index 87b92587f9..751e5dc95a 100644
--- a/lib/std/os/linux/bpf.zig
+++ b/lib/std/os/linux/bpf.zig
@@ -643,7 +643,7 @@ pub const Insn = packed struct {
.dst = @intFromEnum(dst),
.src = @intFromEnum(src),
.off = 0,
- .imm = @intCast(i32, @truncate(u32, imm)),
+ .imm = @as(i32, @intCast(@as(u32, @truncate(imm)))),
};
}
@@ -653,7 +653,7 @@ pub const Insn = packed struct {
.dst = 0,
.src = 0,
.off = 0,
- .imm = @intCast(i32, @truncate(u32, imm >> 32)),
+ .imm = @as(i32, @intCast(@as(u32, @truncate(imm >> 32)))),
};
}
@@ -666,11 +666,11 @@ pub const Insn = packed struct {
}
pub fn ld_map_fd1(dst: Reg, map_fd: fd_t) Insn {
- return ld_imm_impl1(dst, @enumFromInt(Reg, PSEUDO_MAP_FD), @intCast(u64, map_fd));
+ return ld_imm_impl1(dst, @as(Reg, @enumFromInt(PSEUDO_MAP_FD)), @as(u64, @intCast(map_fd)));
}
pub fn ld_map_fd2(map_fd: fd_t) Insn {
- return ld_imm_impl2(@intCast(u64, map_fd));
+ return ld_imm_impl2(@as(u64, @intCast(map_fd)));
}
pub fn st(comptime size: Size, dst: Reg, off: i16, imm: i32) Insn {
@@ -786,17 +786,17 @@ test "opcodes" {
// TODO: byteswap instructions
try expect_opcode(0xd4, Insn.le(.half_word, .r1));
- try expectEqual(@intCast(i32, 16), Insn.le(.half_word, .r1).imm);
+ try expectEqual(@as(i32, @intCast(16)), Insn.le(.half_word, .r1).imm);
try expect_opcode(0xd4, Insn.le(.word, .r1));
- try expectEqual(@intCast(i32, 32), Insn.le(.word, .r1).imm);
+ try expectEqual(@as(i32, @intCast(32)), Insn.le(.word, .r1).imm);
try expect_opcode(0xd4, Insn.le(.double_word, .r1));
- try expectEqual(@intCast(i32, 64), Insn.le(.double_word, .r1).imm);
+ try expectEqual(@as(i32, @intCast(64)), Insn.le(.double_word, .r1).imm);
try expect_opcode(0xdc, Insn.be(.half_word, .r1));
- try expectEqual(@intCast(i32, 16), Insn.be(.half_word, .r1).imm);
+ try expectEqual(@as(i32, @intCast(16)), Insn.be(.half_word, .r1).imm);
try expect_opcode(0xdc, Insn.be(.word, .r1));
- try expectEqual(@intCast(i32, 32), Insn.be(.word, .r1).imm);
+ try expectEqual(@as(i32, @intCast(32)), Insn.be(.word, .r1).imm);
try expect_opcode(0xdc, Insn.be(.double_word, .r1));
- try expectEqual(@intCast(i32, 64), Insn.be(.double_word, .r1).imm);
+ try expectEqual(@as(i32, @intCast(64)), Insn.be(.double_word, .r1).imm);
// memory instructions
try expect_opcode(0x18, Insn.ld_dw1(.r1, 0));
@@ -804,7 +804,7 @@ test "opcodes" {
// loading a map fd
try expect_opcode(0x18, Insn.ld_map_fd1(.r1, 0));
- try expectEqual(@intCast(u4, PSEUDO_MAP_FD), Insn.ld_map_fd1(.r1, 0).src);
+ try expectEqual(@as(u4, @intCast(PSEUDO_MAP_FD)), Insn.ld_map_fd1(.r1, 0).src);
try expect_opcode(0x00, Insn.ld_map_fd2(0));
try expect_opcode(0x38, Insn.ld_abs(.double_word, .r1, .r2, 0));
@@ -1518,7 +1518,7 @@ pub fn map_create(map_type: MapType, key_size: u32, value_size: u32, max_entries
const rc = linux.bpf(.map_create, &attr, @sizeOf(MapCreateAttr));
switch (errno(rc)) {
- .SUCCESS => return @intCast(fd_t, rc),
+ .SUCCESS => return @as(fd_t, @intCast(rc)),
.INVAL => return error.MapTypeOrAttrInvalid,
.NOMEM => return error.SystemResources,
.PERM => return error.AccessDenied,
@@ -1668,20 +1668,20 @@ pub fn prog_load(
attr.prog_load.prog_type = @intFromEnum(prog_type);
attr.prog_load.insns = @intFromPtr(insns.ptr);
- attr.prog_load.insn_cnt = @intCast(u32, insns.len);
+ attr.prog_load.insn_cnt = @as(u32, @intCast(insns.len));
attr.prog_load.license = @intFromPtr(license.ptr);
attr.prog_load.kern_version = kern_version;
attr.prog_load.prog_flags = flags;
if (log) |l| {
attr.prog_load.log_buf = @intFromPtr(l.buf.ptr);
- attr.prog_load.log_size = @intCast(u32, l.buf.len);
+ attr.prog_load.log_size = @as(u32, @intCast(l.buf.len));
attr.prog_load.log_level = l.level;
}
const rc = linux.bpf(.prog_load, &attr, @sizeOf(ProgLoadAttr));
return switch (errno(rc)) {
- .SUCCESS => @intCast(fd_t, rc),
+ .SUCCESS => @as(fd_t, @intCast(rc)),
.ACCES => error.UnsafeProgram,
.FAULT => unreachable,
.INVAL => error.InvalidProgram,
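The Insn hunks above nest the new casts: each single-argument builtin takes its destination type from the @as immediately around it, so @intCast(i32, @truncate(u32, imm)) becomes @as(i32, @intCast(@as(u32, @truncate(imm)))). A standalone sketch of the load-immediate split, using a hypothetical splitImm helper (not part of std); as in the code above, @intCast asserts that each 32-bit half fits in i32:

const std = @import("std");

/// Hypothetical helper: split a 64-bit immediate into the two 32-bit halves
/// carried by a load-immediate instruction pair.
fn splitImm(imm: u64) struct { lo: i32, hi: i32 } {
    return .{
        // @truncate keeps the low 32 bits; @intCast then asserts they fit in i32.
        .lo = @as(i32, @intCast(@as(u32, @truncate(imm)))),
        .hi = @as(i32, @intCast(@as(u32, @truncate(imm >> 32)))),
    };
}

test "splitImm yields the low and high words" {
    const halves = splitImm(0x1122334455667788);
    try std.testing.expectEqual(@as(i32, 0x55667788), halves.lo);
    try std.testing.expectEqual(@as(i32, 0x11223344), halves.hi);
}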
diff --git a/lib/std/os/linux/bpf/helpers.zig b/lib/std/os/linux/bpf/helpers.zig
index b26e7eda29..027220088e 100644
--- a/lib/std/os/linux/bpf/helpers.zig
+++ b/lib/std/os/linux/bpf/helpers.zig
@@ -11,147 +11,147 @@ const SkFullSock = @compileError("TODO missing os bits: SkFullSock");
//
// Note, these function signatures were created from documentation found in
// '/usr/include/linux/bpf.h'
-pub const map_lookup_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, 1);
-pub const map_update_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, 2);
-pub const map_delete_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, 3);
-pub const probe_read = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 4);
-pub const ktime_get_ns = @ptrFromInt(*const fn () u64, 5);
-pub const trace_printk = @ptrFromInt(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, 6);
-pub const get_prandom_u32 = @ptrFromInt(*const fn () u32, 7);
-pub const get_smp_processor_id = @ptrFromInt(*const fn () u32, 8);
-pub const skb_store_bytes = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, 9);
-pub const l3_csum_replace = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, 10);
-pub const l4_csum_replace = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, 11);
-pub const tail_call = @ptrFromInt(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, 12);
-pub const clone_redirect = @ptrFromInt(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, 13);
-pub const get_current_pid_tgid = @ptrFromInt(*const fn () u64, 14);
-pub const get_current_uid_gid = @ptrFromInt(*const fn () u64, 15);
-pub const get_current_comm = @ptrFromInt(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, 16);
-pub const get_cgroup_classid = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 17);
+pub const map_lookup_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, @ptrFromInt(1));
+pub const map_update_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(2));
+pub const map_delete_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, @ptrFromInt(3));
+pub const probe_read = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(4));
+pub const ktime_get_ns = @as(*const fn () u64, @ptrFromInt(5));
+pub const trace_printk = @as(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, @ptrFromInt(6));
+pub const get_prandom_u32 = @as(*const fn () u32, @ptrFromInt(7));
+pub const get_smp_processor_id = @as(*const fn () u32, @ptrFromInt(8));
+pub const skb_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, @ptrFromInt(9));
+pub const l3_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, @ptrFromInt(10));
+pub const l4_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, @ptrFromInt(11));
+pub const tail_call = @as(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(12));
+pub const clone_redirect = @as(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, @ptrFromInt(13));
+pub const get_current_pid_tgid = @as(*const fn () u64, @ptrFromInt(14));
+pub const get_current_uid_gid = @as(*const fn () u64, @ptrFromInt(15));
+pub const get_current_comm = @as(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, @ptrFromInt(16));
+pub const get_cgroup_classid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(17));
// Note vlan_proto is big endian
-pub const skb_vlan_push = @ptrFromInt(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, 18);
-pub const skb_vlan_pop = @ptrFromInt(*const fn (skb: *kern.SkBuff) c_long, 19);
-pub const skb_get_tunnel_key = @ptrFromInt(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, 20);
-pub const skb_set_tunnel_key = @ptrFromInt(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, 21);
-pub const perf_event_read = @ptrFromInt(*const fn (map: *const kern.MapDef, flags: u64) u64, 22);
-pub const redirect = @ptrFromInt(*const fn (ifindex: u32, flags: u64) c_long, 23);
-pub const get_route_realm = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 24);
-pub const perf_event_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 25);
-pub const skb_load_bytes = @ptrFromInt(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, 26);
-pub const get_stackid = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, 27);
+pub const skb_vlan_push = @as(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, @ptrFromInt(18));
+pub const skb_vlan_pop = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(19));
+pub const skb_get_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(20));
+pub const skb_set_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(21));
+pub const perf_event_read = @as(*const fn (map: *const kern.MapDef, flags: u64) u64, @ptrFromInt(22));
+pub const redirect = @as(*const fn (ifindex: u32, flags: u64) c_long, @ptrFromInt(23));
+pub const get_route_realm = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(24));
+pub const perf_event_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(25));
+pub const skb_load_bytes = @as(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, @ptrFromInt(26));
+pub const get_stackid = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, @ptrFromInt(27));
// from and to point to __be32
-pub const csum_diff = @ptrFromInt(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, 28);
-pub const skb_get_tunnel_opt = @ptrFromInt(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, 29);
-pub const skb_set_tunnel_opt = @ptrFromInt(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, 30);
+pub const csum_diff = @as(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, @ptrFromInt(28));
+pub const skb_get_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(29));
+pub const skb_set_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(30));
// proto is __be16
-pub const skb_change_proto = @ptrFromInt(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, 31);
-pub const skb_change_type = @ptrFromInt(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, 32);
-pub const skb_under_cgroup = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, 33);
-pub const get_hash_recalc = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 34);
-pub const get_current_task = @ptrFromInt(*const fn () u64, 35);
-pub const probe_write_user = @ptrFromInt(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, 36);
-pub const current_task_under_cgroup = @ptrFromInt(*const fn (map: *const kern.MapDef, index: u32) c_long, 37);
-pub const skb_change_tail = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, 38);
-pub const skb_pull_data = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32) c_long, 39);
-pub const csum_update = @ptrFromInt(*const fn (skb: *kern.SkBuff, csum: u32) i64, 40);
-pub const set_hash_invalid = @ptrFromInt(*const fn (skb: *kern.SkBuff) void, 41);
-pub const get_numa_node_id = @ptrFromInt(*const fn () c_long, 42);
-pub const skb_change_head = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, 43);
-pub const xdp_adjust_head = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 44);
-pub const probe_read_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 45);
-pub const get_socket_cookie = @ptrFromInt(*const fn (ctx: ?*anyopaque) u64, 46);
-pub const get_socket_uid = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 47);
-pub const set_hash = @ptrFromInt(*const fn (skb: *kern.SkBuff, hash: u32) c_long, 48);
-pub const setsockopt = @ptrFromInt(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, 49);
-pub const skb_adjust_room = @ptrFromInt(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, 50);
-pub const redirect_map = @ptrFromInt(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, 51);
-pub const sk_redirect_map = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, 52);
-pub const sock_map_update = @ptrFromInt(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 53);
-pub const xdp_adjust_meta = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 54);
-pub const perf_event_read_value = @ptrFromInt(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, 55);
-pub const perf_prog_read_value = @ptrFromInt(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, 56);
-pub const getsockopt = @ptrFromInt(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, 57);
-pub const override_return = @ptrFromInt(*const fn (regs: *PtRegs, rc: u64) c_long, 58);
-pub const sock_ops_cb_flags_set = @ptrFromInt(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, 59);
-pub const msg_redirect_map = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, 60);
-pub const msg_apply_bytes = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, 61);
-pub const msg_cork_bytes = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, 62);
-pub const msg_pull_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, 63);
-pub const bind = @ptrFromInt(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, 64);
-pub const xdp_adjust_tail = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 65);
-pub const skb_get_xfrm_state = @ptrFromInt(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, 66);
-pub const get_stack = @ptrFromInt(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, 67);
-pub const skb_load_bytes_relative = @ptrFromInt(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, 68);
-pub const fib_lookup = @ptrFromInt(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, 69);
-pub const sock_hash_update = @ptrFromInt(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 70);
-pub const msg_redirect_hash = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 71);
-pub const sk_redirect_hash = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 72);
-pub const lwt_push_encap = @ptrFromInt(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, 73);
-pub const lwt_seg6_store_bytes = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, 74);
-pub const lwt_seg6_adjust_srh = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, 75);
-pub const lwt_seg6_action = @ptrFromInt(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, 76);
-pub const rc_repeat = @ptrFromInt(*const fn (ctx: ?*anyopaque) c_long, 77);
-pub const rc_keydown = @ptrFromInt(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, 78);
-pub const skb_cgroup_id = @ptrFromInt(*const fn (skb: *kern.SkBuff) u64, 79);
-pub const get_current_cgroup_id = @ptrFromInt(*const fn () u64, 80);
-pub const get_local_storage = @ptrFromInt(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, 81);
-pub const sk_select_reuseport = @ptrFromInt(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 82);
-pub const skb_ancestor_cgroup_id = @ptrFromInt(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, 83);
-pub const sk_lookup_tcp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 84);
-pub const sk_lookup_udp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 85);
-pub const sk_release = @ptrFromInt(*const fn (sock: *kern.Sock) c_long, 86);
-pub const map_push_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, 87);
-pub const map_pop_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, 88);
-pub const map_peek_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, 89);
-pub const msg_push_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, 90);
-pub const msg_pop_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, 91);
-pub const rc_pointer_rel = @ptrFromInt(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, 92);
-pub const spin_lock = @ptrFromInt(*const fn (lock: *kern.SpinLock) c_long, 93);
-pub const spin_unlock = @ptrFromInt(*const fn (lock: *kern.SpinLock) c_long, 94);
-pub const sk_fullsock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*SkFullSock, 95);
-pub const tcp_sock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*kern.TcpSock, 96);
-pub const skb_ecn_set_ce = @ptrFromInt(*const fn (skb: *kern.SkBuff) c_long, 97);
-pub const get_listener_sock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*kern.Sock, 98);
-pub const skc_lookup_tcp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 99);
-pub const tcp_check_syncookie = @ptrFromInt(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, 100);
-pub const sysctl_get_name = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, 101);
-pub const sysctl_get_current_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, 102);
-pub const sysctl_get_new_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, 103);
-pub const sysctl_set_new_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, 104);
-pub const strtol = @ptrFromInt(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, 105);
-pub const strtoul = @ptrFromInt(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, 106);
-pub const sk_storage_get = @ptrFromInt(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, 107);
-pub const sk_storage_delete = @ptrFromInt(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, 108);
-pub const send_signal = @ptrFromInt(*const fn (sig: u32) c_long, 109);
-pub const tcp_gen_syncookie = @ptrFromInt(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, 110);
-pub const skb_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 111);
-pub const probe_read_user = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 112);
-pub const probe_read_kernel = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 113);
-pub const probe_read_user_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 114);
-pub const probe_read_kernel_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 115);
-pub const tcp_send_ack = @ptrFromInt(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, 116);
-pub const send_signal_thread = @ptrFromInt(*const fn (sig: u32) c_long, 117);
-pub const jiffies64 = @ptrFromInt(*const fn () u64, 118);
-pub const read_branch_records = @ptrFromInt(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, 119);
-pub const get_ns_current_pid_tgid = @ptrFromInt(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, 120);
-pub const xdp_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 121);
-pub const get_netns_cookie = @ptrFromInt(*const fn (ctx: ?*anyopaque) u64, 122);
-pub const get_current_ancestor_cgroup_id = @ptrFromInt(*const fn (ancestor_level: c_int) u64, 123);
-pub const sk_assign = @ptrFromInt(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, 124);
-pub const ktime_get_boot_ns = @ptrFromInt(*const fn () u64, 125);
-pub const seq_printf = @ptrFromInt(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, 126);
-pub const seq_write = @ptrFromInt(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, 127);
-pub const sk_cgroup_id = @ptrFromInt(*const fn (sk: *kern.BpfSock) u64, 128);
-pub const sk_ancestor_cgroup_id = @ptrFromInt(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, 129);
-pub const ringbuf_output = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, 130);
-pub const ringbuf_reserve = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, 131);
-pub const ringbuf_submit = @ptrFromInt(*const fn (data: ?*anyopaque, flags: u64) void, 132);
-pub const ringbuf_discard = @ptrFromInt(*const fn (data: ?*anyopaque, flags: u64) void, 133);
-pub const ringbuf_query = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, 134);
-pub const csum_level = @ptrFromInt(*const fn (skb: *kern.SkBuff, level: u64) c_long, 135);
-pub const skc_to_tcp6_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, 136);
-pub const skc_to_tcp_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, 137);
-pub const skc_to_tcp_timewait_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, 138);
-pub const skc_to_tcp_request_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, 139);
-pub const skc_to_udp6_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, 140);
-pub const get_task_stack = @ptrFromInt(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, 141);
+pub const skb_change_proto = @as(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, @ptrFromInt(31));
+pub const skb_change_type = @as(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, @ptrFromInt(32));
+pub const skb_under_cgroup = @as(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, @ptrFromInt(33));
+pub const get_hash_recalc = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(34));
+pub const get_current_task = @as(*const fn () u64, @ptrFromInt(35));
+pub const probe_write_user = @as(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, @ptrFromInt(36));
+pub const current_task_under_cgroup = @as(*const fn (map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(37));
+pub const skb_change_tail = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(38));
+pub const skb_pull_data = @as(*const fn (skb: *kern.SkBuff, len: u32) c_long, @ptrFromInt(39));
+pub const csum_update = @as(*const fn (skb: *kern.SkBuff, csum: u32) i64, @ptrFromInt(40));
+pub const set_hash_invalid = @as(*const fn (skb: *kern.SkBuff) void, @ptrFromInt(41));
+pub const get_numa_node_id = @as(*const fn () c_long, @ptrFromInt(42));
+pub const skb_change_head = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(43));
+pub const xdp_adjust_head = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(44));
+pub const probe_read_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(45));
+pub const get_socket_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(46));
+pub const get_socket_uid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(47));
+pub const set_hash = @as(*const fn (skb: *kern.SkBuff, hash: u32) c_long, @ptrFromInt(48));
+pub const setsockopt = @as(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(49));
+pub const skb_adjust_room = @as(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, @ptrFromInt(50));
+pub const redirect_map = @as(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(51));
+pub const sk_redirect_map = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(52));
+pub const sock_map_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(53));
+pub const xdp_adjust_meta = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(54));
+pub const perf_event_read_value = @as(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(55));
+pub const perf_prog_read_value = @as(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(56));
+pub const getsockopt = @as(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(57));
+pub const override_return = @as(*const fn (regs: *PtRegs, rc: u64) c_long, @ptrFromInt(58));
+pub const sock_ops_cb_flags_set = @as(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, @ptrFromInt(59));
+pub const msg_redirect_map = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(60));
+pub const msg_apply_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(61));
+pub const msg_cork_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(62));
+pub const msg_pull_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, @ptrFromInt(63));
+pub const bind = @as(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, @ptrFromInt(64));
+pub const xdp_adjust_tail = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(65));
+pub const skb_get_xfrm_state = @as(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, @ptrFromInt(66));
+pub const get_stack = @as(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(67));
+pub const skb_load_bytes_relative = @as(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, @ptrFromInt(68));
+pub const fib_lookup = @as(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, @ptrFromInt(69));
+pub const sock_hash_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(70));
+pub const msg_redirect_hash = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(71));
+pub const sk_redirect_hash = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(72));
+pub const lwt_push_encap = @as(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, @ptrFromInt(73));
+pub const lwt_seg6_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, @ptrFromInt(74));
+pub const lwt_seg6_adjust_srh = @as(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, @ptrFromInt(75));
+pub const lwt_seg6_action = @as(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, @ptrFromInt(76));
+pub const rc_repeat = @as(*const fn (ctx: ?*anyopaque) c_long, @ptrFromInt(77));
+pub const rc_keydown = @as(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, @ptrFromInt(78));
+pub const skb_cgroup_id = @as(*const fn (skb: *kern.SkBuff) u64, @ptrFromInt(79));
+pub const get_current_cgroup_id = @as(*const fn () u64, @ptrFromInt(80));
+pub const get_local_storage = @as(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(81));
+pub const sk_select_reuseport = @as(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(82));
+pub const skb_ancestor_cgroup_id = @as(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, @ptrFromInt(83));
+pub const sk_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(84));
+pub const sk_lookup_udp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(85));
+pub const sk_release = @as(*const fn (sock: *kern.Sock) c_long, @ptrFromInt(86));
+pub const map_push_elem = @as(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(87));
+pub const map_pop_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(88));
+pub const map_peek_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(89));
+pub const msg_push_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(90));
+pub const msg_pop_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(91));
+pub const rc_pointer_rel = @as(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, @ptrFromInt(92));
+pub const spin_lock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(93));
+pub const spin_unlock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(94));
+pub const sk_fullsock = @as(*const fn (sk: *kern.Sock) ?*SkFullSock, @ptrFromInt(95));
+pub const tcp_sock = @as(*const fn (sk: *kern.Sock) ?*kern.TcpSock, @ptrFromInt(96));
+pub const skb_ecn_set_ce = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(97));
+pub const get_listener_sock = @as(*const fn (sk: *kern.Sock) ?*kern.Sock, @ptrFromInt(98));
+pub const skc_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(99));
+pub const tcp_check_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, @ptrFromInt(100));
+pub const sysctl_get_name = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, @ptrFromInt(101));
+pub const sysctl_get_current_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(102));
+pub const sysctl_get_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(103));
+pub const sysctl_set_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, @ptrFromInt(104));
+pub const strtol = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, @ptrFromInt(105));
+pub const strtoul = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, @ptrFromInt(106));
+pub const sk_storage_get = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(107));
+pub const sk_storage_delete = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, @ptrFromInt(108));
+pub const send_signal = @as(*const fn (sig: u32) c_long, @ptrFromInt(109));
+pub const tcp_gen_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, @ptrFromInt(110));
+pub const skb_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(111));
+pub const probe_read_user = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(112));
+pub const probe_read_kernel = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(113));
+pub const probe_read_user_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(114));
+pub const probe_read_kernel_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(115));
+pub const tcp_send_ack = @as(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, @ptrFromInt(116));
+pub const send_signal_thread = @as(*const fn (sig: u32) c_long, @ptrFromInt(117));
+pub const jiffies64 = @as(*const fn () u64, @ptrFromInt(118));
+pub const read_branch_records = @as(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(119));
+pub const get_ns_current_pid_tgid = @as(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, @ptrFromInt(120));
+pub const xdp_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(121));
+pub const get_netns_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(122));
+pub const get_current_ancestor_cgroup_id = @as(*const fn (ancestor_level: c_int) u64, @ptrFromInt(123));
+pub const sk_assign = @as(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, @ptrFromInt(124));
+pub const ktime_get_boot_ns = @as(*const fn () u64, @ptrFromInt(125));
+pub const seq_printf = @as(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, @ptrFromInt(126));
+pub const seq_write = @as(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, @ptrFromInt(127));
+pub const sk_cgroup_id = @as(*const fn (sk: *kern.BpfSock) u64, @ptrFromInt(128));
+pub const sk_ancestor_cgroup_id = @as(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, @ptrFromInt(129));
+pub const ringbuf_output = @as(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, @ptrFromInt(130));
+pub const ringbuf_reserve = @as(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, @ptrFromInt(131));
+pub const ringbuf_submit = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(132));
+pub const ringbuf_discard = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(133));
+pub const ringbuf_query = @as(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, @ptrFromInt(134));
+pub const csum_level = @as(*const fn (skb: *kern.SkBuff, level: u64) c_long, @ptrFromInt(135));
+pub const skc_to_tcp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, @ptrFromInt(136));
+pub const skc_to_tcp_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, @ptrFromInt(137));
+pub const skc_to_tcp_timewait_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, @ptrFromInt(138));
+pub const skc_to_tcp_request_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, @ptrFromInt(139));
+pub const skc_to_udp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, @ptrFromInt(140));
+pub const get_task_stack = @as(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(141));
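All of the helper declarations above change the same way: the helper ID stays the @ptrFromInt argument, and the function pointer type moves into @as. A standalone sketch with a hypothetical TraceFn type standing in for one of the signatures above (align(1) is a sketch-only detail so the small integer address is accepted on any test target):

const std = @import("std");

// Hypothetical stand-in for one of the kernel helper signatures listed above.
const TraceFn = *align(1) const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long;

// New spelling: the pointer type comes from @as; @ptrFromInt takes only the helper ID.
const trace_printk_sketch = @as(TraceFn, @ptrFromInt(6)); // old: @ptrFromInt(TraceFn, 6)

test "the helper pointer carries its ID as the address" {
    try std.testing.expectEqual(@as(usize, 6), @intFromPtr(trace_printk_sketch));
}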
diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig
index 875138cf4f..df8cd20773 100644
--- a/lib/std/os/linux/io_uring.zig
+++ b/lib/std/os/linux/io_uring.zig
@@ -60,7 +60,7 @@ pub const IO_Uring = struct {
.NOSYS => return error.SystemOutdated,
else => |errno| return os.unexpectedErrno(errno),
}
- const fd = @intCast(os.fd_t, res);
+ const fd = @as(os.fd_t, @intCast(res));
assert(fd >= 0);
errdefer os.close(fd);
@@ -198,7 +198,7 @@ pub const IO_Uring = struct {
.INTR => return error.SignalInterrupt,
else => |errno| return os.unexpectedErrno(errno),
}
- return @intCast(u32, res);
+ return @as(u32, @intCast(res));
}
/// Sync internal state with kernel ring state on the SQ side.
@@ -937,8 +937,8 @@ pub const IO_Uring = struct {
const res = linux.io_uring_register(
self.fd,
.REGISTER_FILES,
- @ptrCast(*const anyopaque, fds.ptr),
- @intCast(u32, fds.len),
+ @as(*const anyopaque, @ptrCast(fds.ptr)),
+ @as(u32, @intCast(fds.len)),
);
try handle_registration_result(res);
}
@@ -968,8 +968,8 @@ pub const IO_Uring = struct {
const res = linux.io_uring_register(
self.fd,
.REGISTER_FILES_UPDATE,
- @ptrCast(*const anyopaque, &update),
- @intCast(u32, fds.len),
+ @as(*const anyopaque, @ptrCast(&update)),
+ @as(u32, @intCast(fds.len)),
);
try handle_registration_result(res);
}
@@ -982,7 +982,7 @@ pub const IO_Uring = struct {
const res = linux.io_uring_register(
self.fd,
.REGISTER_EVENTFD,
- @ptrCast(*const anyopaque, &fd),
+ @as(*const anyopaque, @ptrCast(&fd)),
1,
);
try handle_registration_result(res);
@@ -997,7 +997,7 @@ pub const IO_Uring = struct {
const res = linux.io_uring_register(
self.fd,
.REGISTER_EVENTFD_ASYNC,
- @ptrCast(*const anyopaque, &fd),
+ @as(*const anyopaque, @ptrCast(&fd)),
1,
);
try handle_registration_result(res);
@@ -1022,7 +1022,7 @@ pub const IO_Uring = struct {
self.fd,
.REGISTER_BUFFERS,
buffers.ptr,
- @intCast(u32, buffers.len),
+ @as(u32, @intCast(buffers.len)),
);
try handle_registration_result(res);
}
@@ -1122,20 +1122,17 @@ pub const SubmissionQueue = struct {
errdefer os.munmap(mmap_sqes);
assert(mmap_sqes.len == size_sqes);
- const array = @ptrCast([*]u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.array]));
- const sqes = @ptrCast([*]linux.io_uring_sqe, @alignCast(@alignOf(linux.io_uring_sqe), &mmap_sqes[0]));
+ const array: [*]u32 = @ptrCast(@alignCast(&mmap[p.sq_off.array]));
+ const sqes: [*]linux.io_uring_sqe = @ptrCast(@alignCast(&mmap_sqes[0]));
// We expect the kernel copies p.sq_entries to the u32 pointed to by p.sq_off.ring_entries,
// see https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L7843-L7844.
- assert(
- p.sq_entries ==
- @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_entries])).*,
- );
+ assert(p.sq_entries == @as(*u32, @ptrCast(@alignCast(&mmap[p.sq_off.ring_entries]))).*);
return SubmissionQueue{
- .head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.head])),
- .tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.tail])),
- .mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_mask])).*,
- .flags = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.flags])),
- .dropped = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.dropped])),
+ .head = @ptrCast(@alignCast(&mmap[p.sq_off.head])),
+ .tail = @ptrCast(@alignCast(&mmap[p.sq_off.tail])),
+ .mask = @as(*u32, @ptrCast(@alignCast(&mmap[p.sq_off.ring_mask]))).*,
+ .flags = @ptrCast(@alignCast(&mmap[p.sq_off.flags])),
+ .dropped = @ptrCast(@alignCast(&mmap[p.sq_off.dropped])),
.array = array[0..p.sq_entries],
.sqes = sqes[0..p.sq_entries],
.mmap = mmap,
@@ -1160,17 +1157,13 @@ pub const CompletionQueue = struct {
assert(fd >= 0);
assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
const mmap = sq.mmap;
- const cqes = @ptrCast(
- [*]linux.io_uring_cqe,
- @alignCast(@alignOf(linux.io_uring_cqe), &mmap[p.cq_off.cqes]),
- );
- assert(p.cq_entries ==
- @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_entries])).*);
+ const cqes: [*]linux.io_uring_cqe = @ptrCast(@alignCast(&mmap[p.cq_off.cqes]));
+ assert(p.cq_entries == @as(*u32, @ptrCast(@alignCast(&mmap[p.cq_off.ring_entries]))).*);
return CompletionQueue{
- .head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.head])),
- .tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.tail])),
- .mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_mask])).*,
- .overflow = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.overflow])),
+ .head = @ptrCast(@alignCast(&mmap[p.cq_off.head])),
+ .tail = @ptrCast(@alignCast(&mmap[p.cq_off.tail])),
+ .mask = @as(*u32, @ptrCast(@alignCast(&mmap[p.cq_off.ring_mask]))).*,
+ .overflow = @ptrCast(@alignCast(&mmap[p.cq_off.overflow])),
.cqes = cqes[0..p.cq_entries],
};
}
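The SubmissionQueue and CompletionQueue initializers above also drop the @alignOf argument: with inferred destination types, @ptrCast(@alignCast(...)) needs no type arguments at all when the field or variable declaration already pins the target type down. A standalone sketch with a hypothetical RingPtrs struct and a plain byte buffer in place of the mmapped ring memory:

const std = @import("std");

// Hypothetical stand-in for the head/tail pointers of a ring.
const RingPtrs = struct {
    head: *u32,
    tail: *u32,
};

test "field types drive @ptrCast/@alignCast" {
    // Stand-in for the mmapped ring bytes, aligned well enough for u32 access.
    var backing: [16]u8 align(@alignOf(u32)) = [_]u8{0} ** 16;

    const ptrs = RingPtrs{
        // old: @ptrCast(*u32, @alignCast(@alignOf(u32), &backing[0]))
        .head = @ptrCast(@alignCast(&backing[0])),
        .tail = @ptrCast(@alignCast(&backing[4])),
    };

    ptrs.head.* = 123;
    ptrs.tail.* = 456;
    try std.testing.expectEqual(@as(u32, 123), ptrs.head.*);
    try std.testing.expectEqual(@as(u32, 456), ptrs.tail.*);
}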
@@ -1233,7 +1226,7 @@ pub fn io_uring_prep_rw(
.fd = fd,
.off = offset,
.addr = addr,
- .len = @intCast(u32, len),
+ .len = @as(u32, @intCast(len)),
.rw_flags = 0,
.user_data = 0,
.buf_index = 0,
@@ -1319,7 +1312,7 @@ pub fn io_uring_prep_epoll_ctl(
op: u32,
ev: ?*linux.epoll_event,
) void {
- io_uring_prep_rw(.EPOLL_CTL, sqe, epfd, @intFromPtr(ev), op, @intCast(u64, fd));
+ io_uring_prep_rw(.EPOLL_CTL, sqe, epfd, @intFromPtr(ev), op, @as(u64, @intCast(fd)));
}
pub fn io_uring_prep_recv(sqe: *linux.io_uring_sqe, fd: os.fd_t, buffer: []u8, flags: u32) void {
@@ -1459,7 +1452,7 @@ pub fn io_uring_prep_fallocate(
.fd = fd,
.off = offset,
.addr = len,
- .len = @intCast(u32, mode),
+ .len = @as(u32, @intCast(mode)),
.rw_flags = 0,
.user_data = 0,
.buf_index = 0,
@@ -1514,7 +1507,7 @@ pub fn io_uring_prep_renameat(
0,
@intFromPtr(new_path),
);
- sqe.len = @bitCast(u32, new_dir_fd);
+ sqe.len = @as(u32, @bitCast(new_dir_fd));
sqe.rw_flags = flags;
}
@@ -1569,7 +1562,7 @@ pub fn io_uring_prep_linkat(
0,
@intFromPtr(new_path),
);
- sqe.len = @bitCast(u32, new_dir_fd);
+ sqe.len = @as(u32, @bitCast(new_dir_fd));
sqe.rw_flags = flags;
}
@@ -1582,8 +1575,8 @@ pub fn io_uring_prep_provide_buffers(
buffer_id: usize,
) void {
const ptr = @intFromPtr(buffers);
- io_uring_prep_rw(.PROVIDE_BUFFERS, sqe, @intCast(i32, num), ptr, buffer_len, buffer_id);
- sqe.buf_index = @intCast(u16, group_id);
+ io_uring_prep_rw(.PROVIDE_BUFFERS, sqe, @as(i32, @intCast(num)), ptr, buffer_len, buffer_id);
+ sqe.buf_index = @as(u16, @intCast(group_id));
}
pub fn io_uring_prep_remove_buffers(
@@ -1591,8 +1584,8 @@ pub fn io_uring_prep_remove_buffers(
num: usize,
group_id: usize,
) void {
- io_uring_prep_rw(.REMOVE_BUFFERS, sqe, @intCast(i32, num), 0, 0, 0);
- sqe.buf_index = @intCast(u16, group_id);
+ io_uring_prep_rw(.REMOVE_BUFFERS, sqe, @as(i32, @intCast(num)), 0, 0, 0);
+ sqe.buf_index = @as(u16, @intCast(group_id));
}
test "structs/offsets/entries" {
@@ -1886,12 +1879,12 @@ test "write_fixed/read_fixed" {
try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x45454545,
- .res = @intCast(i32, buffers[0].iov_len),
+ .res = @as(i32, @intCast(buffers[0].iov_len)),
.flags = 0,
}, cqe_write);
try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x12121212,
- .res = @intCast(i32, buffers[1].iov_len),
+ .res = @as(i32, @intCast(buffers[1].iov_len)),
.flags = 0,
}, cqe_read);
@@ -2145,7 +2138,7 @@ test "timeout (after a relative time)" {
}, cqe);
// Tests should not depend on timings: skip test if outside margin.
- if (!std.math.approxEqAbs(f64, ms, @floatFromInt(f64, stopped - started), margin)) return error.SkipZigTest;
+ if (!std.math.approxEqAbs(f64, ms, @as(f64, @floatFromInt(stopped - started)), margin)) return error.SkipZigTest;
}
test "timeout (after a number of completions)" {
@@ -2637,7 +2630,7 @@ test "renameat" {
);
try testing.expectEqual(linux.IORING_OP.RENAMEAT, sqe.opcode);
try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
- try testing.expectEqual(@as(i32, tmp.dir.fd), @bitCast(i32, sqe.len));
+ try testing.expectEqual(@as(i32, tmp.dir.fd), @as(i32, @bitCast(sqe.len)));
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -2850,7 +2843,7 @@ test "linkat" {
);
try testing.expectEqual(linux.IORING_OP.LINKAT, sqe.opcode);
try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
- try testing.expectEqual(@as(i32, tmp.dir.fd), @bitCast(i32, sqe.len));
+ try testing.expectEqual(@as(i32, tmp.dir.fd), @as(i32, @bitCast(sqe.len)));
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -2898,7 +2891,7 @@ test "provide_buffers: read" {
// Provide 4 buffers
{
- const sqe = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id);
+ const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode);
try testing.expectEqual(@as(i32, buffers.len), sqe.fd);
try testing.expectEqual(@as(u32, buffers[0].len), sqe.len);
@@ -2939,7 +2932,7 @@ test "provide_buffers: read" {
try testing.expectEqual(@as(i32, buffer_len), cqe.res);
try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data);
- try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]);
+ try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
}
// This read should fail
@@ -2971,7 +2964,7 @@ test "provide_buffers: read" {
const reprovided_buffer_id = 2;
{
- _ = try ring.provide_buffers(0xabababab, @ptrCast([*]u8, &buffers[reprovided_buffer_id]), buffer_len, 1, group_id, reprovided_buffer_id);
+ _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -3003,7 +2996,7 @@ test "provide_buffers: read" {
try testing.expectEqual(used_buffer_id, reprovided_buffer_id);
try testing.expectEqual(@as(i32, buffer_len), cqe.res);
try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
- try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]);
+ try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
}
}
@@ -3030,7 +3023,7 @@ test "remove_buffers" {
// Provide 4 buffers
{
- _ = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id);
+ _ = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -3076,7 +3069,7 @@ test "remove_buffers" {
try testing.expect(used_buffer_id >= 0 and used_buffer_id < 4);
try testing.expectEqual(@as(i32, buffer_len), cqe.res);
try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
- try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]);
+ try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
}
// Final read should _not_ work
@@ -3119,7 +3112,7 @@ test "provide_buffers: accept/connect/send/recv" {
// Provide 4 buffers
{
- const sqe = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id);
+ const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode);
try testing.expectEqual(@as(i32, buffers.len), sqe.fd);
try testing.expectEqual(@as(u32, buffer_len), sqe.len);
@@ -3181,7 +3174,7 @@ test "provide_buffers: accept/connect/send/recv" {
try testing.expectEqual(@as(i32, buffer_len), cqe.res);
try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data);
- const buffer = buffers[used_buffer_id][0..@intCast(usize, cqe.res)];
+ const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))];
try testing.expectEqualSlices(u8, &([_]u8{'z'} ** buffer_len), buffer);
}
@@ -3213,7 +3206,7 @@ test "provide_buffers: accept/connect/send/recv" {
const reprovided_buffer_id = 2;
{
- _ = try ring.provide_buffers(0xabababab, @ptrCast([*]u8, &buffers[reprovided_buffer_id]), buffer_len, 1, group_id, reprovided_buffer_id);
+ _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -3259,7 +3252,7 @@ test "provide_buffers: accept/connect/send/recv" {
try testing.expectEqual(used_buffer_id, reprovided_buffer_id);
try testing.expectEqual(@as(i32, buffer_len), cqe.res);
try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
- const buffer = buffers[used_buffer_id][0..@intCast(usize, cqe.res)];
+ const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))];
try testing.expectEqualSlices(u8, &([_]u8{'w'} ** buffer_len), buffer);
}
}
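
The io_uring hunks above follow the mechanical pattern of this change: the cast builtins lose their destination-type argument, and the destination is either supplied through @as or inferred from a result type that is already present. A minimal sketch of both forms, using a made-up helper (sqeLen is not part of std.os.linux):

fn sqeLen(len: usize) u32 {
    // previously: @intCast(u32, len)
    return @intCast(len); // the u32 return type supplies the destination
}

// Where no result type is otherwise available, the destination is passed via @as,
// exactly as in the hunks above:
//     .len = @as(u32, @intCast(len)),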
diff --git a/lib/std/os/linux/ioctl.zig b/lib/std/os/linux/ioctl.zig
index 96ec96c306..7f5d36b72d 100644
--- a/lib/std/os/linux/ioctl.zig
+++ b/lib/std/os/linux/ioctl.zig
@@ -32,7 +32,7 @@ fn io_impl(dir: Direction, io_type: u8, nr: u8, comptime T: type) u32 {
.io_type = io_type,
.nr = nr,
};
- return @bitCast(u32, request);
+ return @as(u32, @bitCast(request));
}
pub fn IO(io_type: u8, nr: u8) u32 {
diff --git a/lib/std/os/linux/start_pie.zig b/lib/std/os/linux/start_pie.zig
index c9b1cb1e92..cf557f9d66 100644
--- a/lib/std/os/linux/start_pie.zig
+++ b/lib/std/os/linux/start_pie.zig
@@ -103,17 +103,17 @@ pub fn relocate(phdrs: []elf.Phdr) void {
// Apply the relocations.
if (rel_addr != 0) {
- const rel = std.mem.bytesAsSlice(elf.Rel, @ptrFromInt([*]u8, rel_addr)[0..rel_size]);
+ const rel = std.mem.bytesAsSlice(elf.Rel, @as([*]u8, @ptrFromInt(rel_addr))[0..rel_size]);
for (rel) |r| {
if (r.r_type() != R_RELATIVE) continue;
- @ptrFromInt(*usize, base_addr + r.r_offset).* += base_addr;
+ @as(*usize, @ptrFromInt(base_addr + r.r_offset)).* += base_addr;
}
}
if (rela_addr != 0) {
- const rela = std.mem.bytesAsSlice(elf.Rela, @ptrFromInt([*]u8, rela_addr)[0..rela_size]);
+ const rela = std.mem.bytesAsSlice(elf.Rela, @as([*]u8, @ptrFromInt(rela_addr))[0..rela_size]);
for (rela) |r| {
if (r.r_type() != R_RELATIVE) continue;
- @ptrFromInt(*usize, base_addr + r.r_offset).* += base_addr + @bitCast(usize, r.r_addend);
+ @as(*usize, @ptrFromInt(base_addr + r.r_offset)).* += base_addr + @as(usize, @bitCast(r.r_addend));
}
}
}
diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig
index e1ad36b2e5..170bde6334 100644
--- a/lib/std/os/linux/test.zig
+++ b/lib/std/os/linux/test.zig
@@ -50,7 +50,7 @@ test "timer" {
.it_value = time_interval,
};
- err = linux.getErrno(linux.timerfd_settime(@intCast(i32, timer_fd), 0, &new_time, null));
+ err = linux.getErrno(linux.timerfd_settime(@as(i32, @intCast(timer_fd)), 0, &new_time, null));
try expect(err == .SUCCESS);
var event = linux.epoll_event{
@@ -58,13 +58,13 @@ test "timer" {
.data = linux.epoll_data{ .ptr = 0 },
};
- err = linux.getErrno(linux.epoll_ctl(@intCast(i32, epoll_fd), linux.EPOLL.CTL_ADD, @intCast(i32, timer_fd), &event));
+ err = linux.getErrno(linux.epoll_ctl(@as(i32, @intCast(epoll_fd)), linux.EPOLL.CTL_ADD, @as(i32, @intCast(timer_fd)), &event));
try expect(err == .SUCCESS);
const events_one: linux.epoll_event = undefined;
var events = [_]linux.epoll_event{events_one} ** 8;
- err = linux.getErrno(linux.epoll_wait(@intCast(i32, epoll_fd), &events, 8, -1));
+ err = linux.getErrno(linux.epoll_wait(@as(i32, @intCast(epoll_fd)), &events, 8, -1));
try expect(err == .SUCCESS);
}
@@ -91,11 +91,11 @@ test "statx" {
}
try expect(stat_buf.mode == statx_buf.mode);
- try expect(@bitCast(u32, stat_buf.uid) == statx_buf.uid);
- try expect(@bitCast(u32, stat_buf.gid) == statx_buf.gid);
- try expect(@bitCast(u64, @as(i64, stat_buf.size)) == statx_buf.size);
- try expect(@bitCast(u64, @as(i64, stat_buf.blksize)) == statx_buf.blksize);
- try expect(@bitCast(u64, @as(i64, stat_buf.blocks)) == statx_buf.blocks);
+ try expect(@as(u32, @bitCast(stat_buf.uid)) == statx_buf.uid);
+ try expect(@as(u32, @bitCast(stat_buf.gid)) == statx_buf.gid);
+ try expect(@as(u64, @bitCast(@as(i64, stat_buf.size))) == statx_buf.size);
+ try expect(@as(u64, @bitCast(@as(i64, stat_buf.blksize))) == statx_buf.blksize);
+ try expect(@as(u64, @bitCast(@as(i64, stat_buf.blocks))) == statx_buf.blocks);
}
test "user and group ids" {
diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig
index b60a2ed388..94fa0d1a09 100644
--- a/lib/std/os/linux/tls.zig
+++ b/lib/std/os/linux/tls.zig
@@ -205,7 +205,7 @@ fn initTLS(phdrs: []elf.Phdr) void {
// the data stored in the PT_TLS segment is p_filesz and may be less
// than the former
tls_align_factor = phdr.p_align;
- tls_data = @ptrFromInt([*]u8, img_base + phdr.p_vaddr)[0..phdr.p_filesz];
+ tls_data = @as([*]u8, @ptrFromInt(img_base + phdr.p_vaddr))[0..phdr.p_filesz];
tls_data_alloc_size = phdr.p_memsz;
} else {
tls_align_factor = @alignOf(usize);
@@ -263,12 +263,12 @@ fn initTLS(phdrs: []elf.Phdr) void {
.dtv_offset = dtv_offset,
.data_offset = data_offset,
.data_size = tls_data_alloc_size,
- .gdt_entry_number = @bitCast(usize, @as(isize, -1)),
+ .gdt_entry_number = @as(usize, @bitCast(@as(isize, -1))),
};
}
inline fn alignPtrCast(comptime T: type, ptr: [*]u8) *T {
- return @ptrCast(*T, @alignCast(@alignOf(T), ptr));
+ return @ptrCast(@alignCast(ptr));
}
/// Initializes all the fields of the static TLS area and returns the computed
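
The alignPtrCast helper above shows the inference at its simplest: the *T return type supplies both the destination pointer type and its alignment, so @ptrCast(@alignCast(ptr)) needs no extra arguments. A typed declaration or return type works the same way; a small self-contained sketch (Header and headerOf are hypothetical, mirroring the cqes and REPARSE_DATA_BUFFER hunks):

const Header = extern struct { len: u32 };

fn headerOf(bytes: []const u8) *const Header {
    // previously: @ptrCast(*const Header, @alignCast(@alignOf(Header), bytes.ptr))
    return @ptrCast(@alignCast(bytes.ptr)); // return type carries both pointee type and alignment
}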
diff --git a/lib/std/os/linux/vdso.zig b/lib/std/os/linux/vdso.zig
index c7dc7ae599..50e7ce1dfd 100644
--- a/lib/std/os/linux/vdso.zig
+++ b/lib/std/os/linux/vdso.zig
@@ -8,7 +8,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
const vdso_addr = std.os.system.getauxval(std.elf.AT_SYSINFO_EHDR);
if (vdso_addr == 0) return 0;
- const eh = @ptrFromInt(*elf.Ehdr, vdso_addr);
+ const eh = @as(*elf.Ehdr, @ptrFromInt(vdso_addr));
var ph_addr: usize = vdso_addr + eh.e_phoff;
var maybe_dynv: ?[*]usize = null;
@@ -19,14 +19,14 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
i += 1;
ph_addr += eh.e_phentsize;
}) {
- const this_ph = @ptrFromInt(*elf.Phdr, ph_addr);
+ const this_ph = @as(*elf.Phdr, @ptrFromInt(ph_addr));
switch (this_ph.p_type) {
// On WSL1 as well as older kernels, the VDSO ELF image is pre-linked in the upper half
// of the memory space (e.g. p_vaddr = 0xffffffffff700000 on WSL1).
// Wrapping operations are used on this line as well as subsequent calculations relative to base
// (lines 47, 78) to ensure no overflow check is tripped.
elf.PT_LOAD => base = vdso_addr +% this_ph.p_offset -% this_ph.p_vaddr,
- elf.PT_DYNAMIC => maybe_dynv = @ptrFromInt([*]usize, vdso_addr + this_ph.p_offset),
+ elf.PT_DYNAMIC => maybe_dynv = @as([*]usize, @ptrFromInt(vdso_addr + this_ph.p_offset)),
else => {},
}
}
@@ -45,11 +45,11 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
while (dynv[i] != 0) : (i += 2) {
const p = base +% dynv[i + 1];
switch (dynv[i]) {
- elf.DT_STRTAB => maybe_strings = @ptrFromInt([*]u8, p),
- elf.DT_SYMTAB => maybe_syms = @ptrFromInt([*]elf.Sym, p),
- elf.DT_HASH => maybe_hashtab = @ptrFromInt([*]linux.Elf_Symndx, p),
- elf.DT_VERSYM => maybe_versym = @ptrFromInt([*]u16, p),
- elf.DT_VERDEF => maybe_verdef = @ptrFromInt(*elf.Verdef, p),
+ elf.DT_STRTAB => maybe_strings = @as([*]u8, @ptrFromInt(p)),
+ elf.DT_SYMTAB => maybe_syms = @as([*]elf.Sym, @ptrFromInt(p)),
+ elf.DT_HASH => maybe_hashtab = @as([*]linux.Elf_Symndx, @ptrFromInt(p)),
+ elf.DT_VERSYM => maybe_versym = @as([*]u16, @ptrFromInt(p)),
+ elf.DT_VERDEF => maybe_verdef = @as(*elf.Verdef, @ptrFromInt(p)),
else => {},
}
}
@@ -65,10 +65,10 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
var i: usize = 0;
while (i < hashtab[1]) : (i += 1) {
- if (0 == (@as(u32, 1) << @intCast(u5, syms[i].st_info & 0xf) & OK_TYPES)) continue;
- if (0 == (@as(u32, 1) << @intCast(u5, syms[i].st_info >> 4) & OK_BINDS)) continue;
+ if (0 == (@as(u32, 1) << @as(u5, @intCast(syms[i].st_info & 0xf)) & OK_TYPES)) continue;
+ if (0 == (@as(u32, 1) << @as(u5, @intCast(syms[i].st_info >> 4)) & OK_BINDS)) continue;
if (0 == syms[i].st_shndx) continue;
- const sym_name = @ptrCast([*:0]u8, strings + syms[i].st_name);
+ const sym_name = @as([*:0]u8, @ptrCast(strings + syms[i].st_name));
if (!mem.eql(u8, name, mem.sliceTo(sym_name, 0))) continue;
if (maybe_versym) |versym| {
if (!checkver(maybe_verdef.?, versym[i], vername, strings))
@@ -82,15 +82,15 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*]u8) bool {
var def = def_arg;
- const vsym = @bitCast(u32, vsym_arg) & 0x7fff;
+ const vsym = @as(u32, @bitCast(vsym_arg)) & 0x7fff;
while (true) {
if (0 == (def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym)
break;
if (def.vd_next == 0)
return false;
- def = @ptrFromInt(*elf.Verdef, @intFromPtr(def) + def.vd_next);
+ def = @as(*elf.Verdef, @ptrFromInt(@intFromPtr(def) + def.vd_next));
}
- const aux = @ptrFromInt(*elf.Verdaux, @intFromPtr(def) + def.vd_aux);
- const vda_name = @ptrCast([*:0]u8, strings + aux.vda_name);
+ const aux = @as(*elf.Verdaux, @ptrFromInt(@intFromPtr(def) + def.vd_aux));
+ const vda_name = @as([*:0]u8, @ptrCast(strings + aux.vda_name));
return mem.eql(u8, vername, mem.sliceTo(vda_name, 0));
}
diff --git a/lib/std/os/plan9.zig b/lib/std/os/plan9.zig
index b628bc2afc..3e1137c7ce 100644
--- a/lib/std/os/plan9.zig
+++ b/lib/std/os/plan9.zig
@@ -8,9 +8,9 @@ pub const syscall_bits = switch (builtin.cpu.arch) {
pub const E = @import("plan9/errno.zig").E;
/// Get the errno from a syscall return value, or 0 for no error.
pub fn getErrno(r: usize) E {
- const signed_r = @bitCast(isize, r);
+ const signed_r = @as(isize, @bitCast(r));
const int = if (signed_r > -4096 and signed_r < 0) -signed_r else 0;
- return @enumFromInt(E, int);
+ return @as(E, @enumFromInt(int));
}
pub const SIG = struct {
/// hangup
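
getErrno above keeps the explicit @as wrapper that this migration applies throughout; where a result type is already present, such as a function's return type, the wrapper can be dropped. An illustrative sketch with a made-up non-exhaustive errno-style enum (Errno and errnoFromInt are not from this diff):

const Errno = enum(u16) { SUCCESS = 0, INVAL = 22, _ };

fn errnoFromInt(int: u16) Errno {
    // previously: @enumFromInt(Errno, int)
    return @enumFromInt(int); // the Errno return type is the inferred destination
}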
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index 888b2f5c1c..d5451f64ac 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -488,7 +488,7 @@ fn iter_fn(info: *dl_phdr_info, size: usize, counter: *usize) IterFnError!void {
const reloc_addr = info.dlpi_addr + phdr.p_vaddr;
// Find the ELF header
- const elf_header = @ptrFromInt(*elf.Ehdr, reloc_addr - phdr.p_offset);
+ const elf_header = @as(*elf.Ehdr, @ptrFromInt(reloc_addr - phdr.p_offset));
// Validate the magic
if (!mem.eql(u8, elf_header.e_ident[0..4], elf.MAGIC)) return error.BadElfMagic;
// Consistency check
@@ -751,7 +751,7 @@ test "getrlimit and setrlimit" {
}
inline for (std.meta.fields(os.rlimit_resource)) |field| {
- const resource = @enumFromInt(os.rlimit_resource, field.value);
+ const resource = @as(os.rlimit_resource, @enumFromInt(field.value));
const limit = try os.getrlimit(resource);
// On 32 bit MIPS musl includes a fix which changes limits greater than -1UL/2 to RLIM_INFINITY.
diff --git a/lib/std/os/uefi.zig b/lib/std/os/uefi.zig
index f51caaa86f..7c6eb08a93 100644
--- a/lib/std/os/uefi.zig
+++ b/lib/std/os/uefi.zig
@@ -143,7 +143,7 @@ pub const FileHandle = *opaque {};
test "GUID formatting" {
var bytes = [_]u8{ 137, 60, 203, 50, 128, 128, 124, 66, 186, 19, 80, 73, 135, 59, 194, 135 };
- var guid = @bitCast(Guid, bytes);
+ var guid = @as(Guid, @bitCast(bytes));
var str = try std.fmt.allocPrint(std.testing.allocator, "{}", .{guid});
defer std.testing.allocator.free(str);
diff --git a/lib/std/os/uefi/pool_allocator.zig b/lib/std/os/uefi/pool_allocator.zig
index c24d9416f1..3f64a2f3f6 100644
--- a/lib/std/os/uefi/pool_allocator.zig
+++ b/lib/std/os/uefi/pool_allocator.zig
@@ -9,7 +9,7 @@ const Allocator = mem.Allocator;
const UefiPoolAllocator = struct {
fn getHeader(ptr: [*]u8) *[*]align(8) u8 {
- return @ptrFromInt(*[*]align(8) u8, @intFromPtr(ptr) - @sizeOf(usize));
+ return @as(*[*]align(8) u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize)));
}
fn alloc(
@@ -22,7 +22,7 @@ const UefiPoolAllocator = struct {
assert(len > 0);
- const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
+ const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
const metadata_len = mem.alignForward(usize, @sizeOf(usize), ptr_align);
@@ -135,5 +135,5 @@ fn uefi_free(
) void {
_ = log2_old_ptr_align;
_ = ret_addr;
- _ = uefi.system_table.boot_services.?.freePool(@alignCast(8, buf.ptr));
+ _ = uefi.system_table.boot_services.?.freePool(@alignCast(buf.ptr));
}
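
The uefi_free hunk above also drops the explicit alignment argument from @alignCast: the required alignment now comes from the result type, here the pointer type that freePool expects. A minimal sketch of the same shape (takesAligned and pass are hypothetical):

fn takesAligned(p: [*]align(8) u8) void {
    _ = p;
}

fn pass(buf: []u8) void {
    // previously: takesAligned(@alignCast(8, buf.ptr))
    takesAligned(@alignCast(buf.ptr)); // the parameter's align(8) is the inferred target
}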
diff --git a/lib/std/os/uefi/protocols/device_path_protocol.zig b/lib/std/os/uefi/protocols/device_path_protocol.zig
index c64084e6ed..a083959521 100644
--- a/lib/std/os/uefi/protocols/device_path_protocol.zig
+++ b/lib/std/os/uefi/protocols/device_path_protocol.zig
@@ -23,10 +23,10 @@ pub const DevicePathProtocol = extern struct {
/// Returns the next DevicePathProtocol node in the sequence, if any.
pub fn next(self: *DevicePathProtocol) ?*DevicePathProtocol {
- if (self.type == .End and @enumFromInt(EndDevicePath.Subtype, self.subtype) == .EndEntire)
+ if (self.type == .End and @as(EndDevicePath.Subtype, @enumFromInt(self.subtype)) == .EndEntire)
return null;
- return @ptrCast(*DevicePathProtocol, @ptrCast([*]u8, self) + self.length);
+ return @as(*DevicePathProtocol, @ptrCast(@as([*]u8, @ptrCast(self)) + self.length));
}
/// Calculates the total length of the device path structure in bytes, including the end of device path node.
@@ -48,30 +48,30 @@ pub const DevicePathProtocol = extern struct {
// DevicePathProtocol for the extra node before the end
var buf = try allocator.alloc(u8, path_size + 2 * (path.len + 1) + @sizeOf(DevicePathProtocol));
- @memcpy(buf[0..path_size.len], @ptrCast([*]const u8, self)[0..path_size]);
+ @memcpy(buf[0..path_size.len], @as([*]const u8, @ptrCast(self))[0..path_size]);
// Pointer to the copy of the end node of the current chain, which is - 4 from the buffer
// as the end node itself is 4 bytes (type: u8 + subtype: u8 + length: u16).
- var new = @ptrCast(*MediaDevicePath.FilePathDevicePath, buf.ptr + path_size - 4);
+ var new = @as(*MediaDevicePath.FilePathDevicePath, @ptrCast(buf.ptr + path_size - 4));
new.type = .Media;
new.subtype = .FilePath;
- new.length = @sizeOf(MediaDevicePath.FilePathDevicePath) + 2 * (@intCast(u16, path.len) + 1);
+ new.length = @sizeOf(MediaDevicePath.FilePathDevicePath) + 2 * (@as(u16, @intCast(path.len)) + 1);
// The same as new.getPath(), but not const as we're filling it in.
- var ptr = @ptrCast([*:0]align(1) u16, @ptrCast([*]u8, new) + @sizeOf(MediaDevicePath.FilePathDevicePath));
+ var ptr = @as([*:0]align(1) u16, @ptrCast(@as([*]u8, @ptrCast(new)) + @sizeOf(MediaDevicePath.FilePathDevicePath)));
for (path, 0..) |s, i|
ptr[i] = s;
ptr[path.len] = 0;
- var end = @ptrCast(*EndDevicePath.EndEntireDevicePath, @ptrCast(*DevicePathProtocol, new).next().?);
+ var end = @as(*EndDevicePath.EndEntireDevicePath, @ptrCast(@as(*DevicePathProtocol, @ptrCast(new)).next().?));
end.type = .End;
end.subtype = .EndEntire;
end.length = @sizeOf(EndDevicePath.EndEntireDevicePath);
- return @ptrCast(*DevicePathProtocol, buf.ptr);
+ return @as(*DevicePathProtocol, @ptrCast(buf.ptr));
}
pub fn getDevicePath(self: *const DevicePathProtocol) ?DevicePath {
@@ -103,7 +103,7 @@ pub const DevicePathProtocol = extern struct {
if (self.subtype == tag_val) {
// e.g. expr = .{ .Pci = @ptrCast(...) }
- return @unionInit(TUnion, subtype.name, @ptrCast(subtype.type, self));
+ return @unionInit(TUnion, subtype.name, @as(subtype.type, @ptrCast(self)));
}
}
@@ -332,7 +332,7 @@ pub const AcpiDevicePath = union(Subtype) {
pub fn adrs(self: *const AdrDevicePath) []align(1) const u32 {
// self.length is a minimum of 8 with one adr which is size 4.
var entries = (self.length - 4) / @sizeOf(u32);
- return @ptrCast([*]align(1) const u32, &self.adr)[0..entries];
+ return @as([*]align(1) const u32, @ptrCast(&self.adr))[0..entries];
}
};
@@ -550,7 +550,7 @@ pub const MessagingDevicePath = union(Subtype) {
pub fn serial_number(self: *const UsbWwidDevicePath) []align(1) const u16 {
var serial_len = (self.length - @sizeOf(UsbWwidDevicePath)) / @sizeOf(u16);
- return @ptrCast([*]align(1) const u16, @ptrCast([*]const u8, self) + @sizeOf(UsbWwidDevicePath))[0..serial_len];
+ return @as([*]align(1) const u16, @ptrCast(@as([*]const u8, @ptrCast(self)) + @sizeOf(UsbWwidDevicePath)))[0..serial_len];
}
};
@@ -943,7 +943,7 @@ pub const MediaDevicePath = union(Subtype) {
length: u16 align(1),
pub fn getPath(self: *const FilePathDevicePath) [*:0]align(1) const u16 {
- return @ptrCast([*:0]align(1) const u16, @ptrCast([*]const u8, self) + @sizeOf(FilePathDevicePath));
+ return @as([*:0]align(1) const u16, @ptrCast(@as([*]const u8, @ptrCast(self)) + @sizeOf(FilePathDevicePath)));
}
};
@@ -1068,7 +1068,7 @@ pub const BiosBootSpecificationDevicePath = union(Subtype) {
status_flag: u16 align(1),
pub fn getDescription(self: *const BBS101DevicePath) [*:0]const u8 {
- return @ptrCast([*:0]const u8, self) + @sizeOf(BBS101DevicePath);
+ return @as([*:0]const u8, @ptrCast(self)) + @sizeOf(BBS101DevicePath);
}
};
diff --git a/lib/std/os/uefi/protocols/file_protocol.zig b/lib/std/os/uefi/protocols/file_protocol.zig
index 729d4020b4..53ec5f81e3 100644
--- a/lib/std/os/uefi/protocols/file_protocol.zig
+++ b/lib/std/os/uefi/protocols/file_protocol.zig
@@ -152,7 +152,7 @@ pub const FileInfo = extern struct {
attribute: u64,
pub fn getFileName(self: *const FileInfo) [*:0]const u16 {
- return @ptrCast([*:0]const u16, @ptrCast([*]const u8, self) + @sizeOf(FileInfo));
+ return @as([*:0]const u16, @ptrCast(@as([*]const u8, @ptrCast(self)) + @sizeOf(FileInfo)));
}
pub const efi_file_read_only: u64 = 0x0000000000000001;
@@ -182,7 +182,7 @@ pub const FileSystemInfo = extern struct {
_volume_label: u16,
pub fn getVolumeLabel(self: *const FileSystemInfo) [*:0]const u16 {
- return @ptrCast([*:0]const u16, &self._volume_label);
+ return @as([*:0]const u16, @ptrCast(&self._volume_label));
}
pub const guid align(8) = Guid{
diff --git a/lib/std/os/uefi/protocols/hii.zig b/lib/std/os/uefi/protocols/hii.zig
index 437fa29739..c7199d2950 100644
--- a/lib/std/os/uefi/protocols/hii.zig
+++ b/lib/std/os/uefi/protocols/hii.zig
@@ -39,7 +39,7 @@ pub const HIISimplifiedFontPackage = extern struct {
number_of_wide_glyphs: u16,
pub fn getNarrowGlyphs(self: *HIISimplifiedFontPackage) []NarrowGlyph {
- return @ptrCast([*]NarrowGlyph, @ptrCast([*]u8, self) + @sizeOf(HIISimplifiedFontPackage))[0..self.number_of_narrow_glyphs];
+ return @as([*]NarrowGlyph, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(HIISimplifiedFontPackage)))[0..self.number_of_narrow_glyphs];
}
};
diff --git a/lib/std/os/uefi/protocols/managed_network_protocol.zig b/lib/std/os/uefi/protocols/managed_network_protocol.zig
index aff9febd17..5ea63f5a65 100644
--- a/lib/std/os/uefi/protocols/managed_network_protocol.zig
+++ b/lib/std/os/uefi/protocols/managed_network_protocol.zig
@@ -118,7 +118,7 @@ pub const ManagedNetworkTransmitData = extern struct {
fragment_count: u16,
pub fn getFragments(self: *ManagedNetworkTransmitData) []ManagedNetworkFragmentData {
- return @ptrCast([*]ManagedNetworkFragmentData, @ptrCast([*]u8, self) + @sizeOf(ManagedNetworkTransmitData))[0..self.fragment_count];
+ return @as([*]ManagedNetworkFragmentData, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(ManagedNetworkTransmitData)))[0..self.fragment_count];
}
};
diff --git a/lib/std/os/uefi/protocols/udp6_protocol.zig b/lib/std/os/uefi/protocols/udp6_protocol.zig
index 96a1d4c318..f772d38d52 100644
--- a/lib/std/os/uefi/protocols/udp6_protocol.zig
+++ b/lib/std/os/uefi/protocols/udp6_protocol.zig
@@ -87,7 +87,7 @@ pub const Udp6ReceiveData = extern struct {
fragment_count: u32,
pub fn getFragments(self: *Udp6ReceiveData) []Udp6FragmentData {
- return @ptrCast([*]Udp6FragmentData, @ptrCast([*]u8, self) + @sizeOf(Udp6ReceiveData))[0..self.fragment_count];
+ return @as([*]Udp6FragmentData, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(Udp6ReceiveData)))[0..self.fragment_count];
}
};
@@ -97,7 +97,7 @@ pub const Udp6TransmitData = extern struct {
fragment_count: u32,
pub fn getFragments(self: *Udp6TransmitData) []Udp6FragmentData {
- return @ptrCast([*]Udp6FragmentData, @ptrCast([*]u8, self) + @sizeOf(Udp6TransmitData))[0..self.fragment_count];
+ return @as([*]Udp6FragmentData, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(Udp6TransmitData)))[0..self.fragment_count];
}
};
diff --git a/lib/std/os/uefi/tables/boot_services.zig b/lib/std/os/uefi/tables/boot_services.zig
index bfd3865e95..7fc32decb9 100644
--- a/lib/std/os/uefi/tables/boot_services.zig
+++ b/lib/std/os/uefi/tables/boot_services.zig
@@ -165,7 +165,7 @@ pub const BootServices = extern struct {
try self.openProtocol(
handle,
&protocol.guid,
- @ptrCast(*?*anyopaque, &ptr),
+ @as(*?*anyopaque, @ptrCast(&ptr)),
// Invoking handle (loaded image)
uefi.handle,
// Control handle (null as not a driver)
diff --git a/lib/std/os/wasi.zig b/lib/std/os/wasi.zig
index 711352e2fe..951d8ee26d 100644
--- a/lib/std/os/wasi.zig
+++ b/lib/std/os/wasi.zig
@@ -103,13 +103,13 @@ pub const timespec = extern struct {
const tv_sec: timestamp_t = tm / 1_000_000_000;
const tv_nsec = tm - tv_sec * 1_000_000_000;
return timespec{
- .tv_sec = @intCast(time_t, tv_sec),
- .tv_nsec = @intCast(isize, tv_nsec),
+ .tv_sec = @as(time_t, @intCast(tv_sec)),
+ .tv_nsec = @as(isize, @intCast(tv_nsec)),
};
}
pub fn toTimestamp(ts: timespec) timestamp_t {
- const tm = @intCast(timestamp_t, ts.tv_sec * 1_000_000_000) + @intCast(timestamp_t, ts.tv_nsec);
+ const tm = @as(timestamp_t, @intCast(ts.tv_sec * 1_000_000_000)) + @as(timestamp_t, @intCast(ts.tv_nsec));
return tm;
}
};
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index 421815c04d..e12e8ac4d3 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -30,7 +30,7 @@ pub const gdi32 = @import("windows/gdi32.zig");
pub const winmm = @import("windows/winmm.zig");
pub const crypt32 = @import("windows/crypt32.zig");
-pub const self_process_handle = @ptrFromInt(HANDLE, maxInt(usize));
+pub const self_process_handle = @as(HANDLE, @ptrFromInt(maxInt(usize)));
const Self = @This();
@@ -198,9 +198,9 @@ pub fn DeviceIoControl(
var io: IO_STATUS_BLOCK = undefined;
const in_ptr = if (in) |i| i.ptr else null;
- const in_len = if (in) |i| @intCast(ULONG, i.len) else 0;
+ const in_len = if (in) |i| @as(ULONG, @intCast(i.len)) else 0;
const out_ptr = if (out) |o| o.ptr else null;
- const out_len = if (out) |o| @intCast(ULONG, o.len) else 0;
+ const out_len = if (out) |o| @as(ULONG, @intCast(o.len)) else 0;
const rc = blk: {
if (is_fsctl) {
@@ -307,7 +307,7 @@ pub fn WaitForSingleObjectEx(handle: HANDLE, milliseconds: DWORD, alertable: boo
pub fn WaitForMultipleObjectsEx(handles: []const HANDLE, waitAll: bool, milliseconds: DWORD, alertable: bool) !u32 {
assert(handles.len < MAXIMUM_WAIT_OBJECTS);
- const nCount: DWORD = @intCast(DWORD, handles.len);
+ const nCount: DWORD = @as(DWORD, @intCast(handles.len));
switch (kernel32.WaitForMultipleObjectsEx(
nCount,
handles.ptr,
@@ -419,7 +419,7 @@ pub fn GetQueuedCompletionStatusEx(
const success = kernel32.GetQueuedCompletionStatusEx(
completion_port,
completion_port_entries.ptr,
- @intCast(ULONG, completion_port_entries.len),
+ @as(ULONG, @intCast(completion_port_entries.len)),
&num_entries_removed,
timeout_ms orelse INFINITE,
@intFromBool(alertable),
@@ -469,8 +469,8 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo
.InternalHigh = 0,
.DUMMYUNIONNAME = .{
.DUMMYSTRUCTNAME = .{
- .Offset = @truncate(u32, off),
- .OffsetHigh = @truncate(u32, off >> 32),
+ .Offset = @as(u32, @truncate(off)),
+ .OffsetHigh = @as(u32, @truncate(off >> 32)),
},
},
.hEvent = null,
@@ -480,7 +480,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo
loop.beginOneEvent();
suspend {
// TODO handle buffer bigger than DWORD can hold
- _ = kernel32.ReadFile(in_hFile, buffer.ptr, @intCast(DWORD, buffer.len), null, &resume_node.base.overlapped);
+ _ = kernel32.ReadFile(in_hFile, buffer.ptr, @as(DWORD, @intCast(buffer.len)), null, &resume_node.base.overlapped);
}
var bytes_transferred: DWORD = undefined;
if (kernel32.GetOverlappedResult(in_hFile, &resume_node.base.overlapped, &bytes_transferred, FALSE) == 0) {
@@ -496,7 +496,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo
if (offset == null) {
// TODO make setting the file position non-blocking
const new_off = off + bytes_transferred;
- try SetFilePointerEx_CURRENT(in_hFile, @bitCast(i64, new_off));
+ try SetFilePointerEx_CURRENT(in_hFile, @as(i64, @bitCast(new_off)));
}
return @as(usize, bytes_transferred);
} else {
@@ -510,8 +510,8 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo
.InternalHigh = 0,
.DUMMYUNIONNAME = .{
.DUMMYSTRUCTNAME = .{
- .Offset = @truncate(u32, off),
- .OffsetHigh = @truncate(u32, off >> 32),
+ .Offset = @as(u32, @truncate(off)),
+ .OffsetHigh = @as(u32, @truncate(off >> 32)),
},
},
.hEvent = null,
@@ -563,8 +563,8 @@ pub fn WriteFile(
.InternalHigh = 0,
.DUMMYUNIONNAME = .{
.DUMMYSTRUCTNAME = .{
- .Offset = @truncate(u32, off),
- .OffsetHigh = @truncate(u32, off >> 32),
+ .Offset = @as(u32, @truncate(off)),
+ .OffsetHigh = @as(u32, @truncate(off >> 32)),
},
},
.hEvent = null,
@@ -591,7 +591,7 @@ pub fn WriteFile(
if (offset == null) {
// TODO make setting the file position non-blocking
const new_off = off + bytes_transferred;
- try SetFilePointerEx_CURRENT(handle, @bitCast(i64, new_off));
+ try SetFilePointerEx_CURRENT(handle, @as(i64, @bitCast(new_off)));
}
return bytes_transferred;
} else {
@@ -603,8 +603,8 @@ pub fn WriteFile(
.InternalHigh = 0,
.DUMMYUNIONNAME = .{
.DUMMYSTRUCTNAME = .{
- .Offset = @truncate(u32, off),
- .OffsetHigh = @truncate(u32, off >> 32),
+ .Offset = @as(u32, @truncate(off)),
+ .OffsetHigh = @as(u32, @truncate(off >> 32)),
},
},
.hEvent = null,
@@ -745,19 +745,19 @@ pub fn CreateSymbolicLink(
const header_len = @sizeOf(ULONG) + @sizeOf(USHORT) * 2;
const symlink_data = SYMLINK_DATA{
.ReparseTag = IO_REPARSE_TAG_SYMLINK,
- .ReparseDataLength = @intCast(u16, buf_len - header_len),
+ .ReparseDataLength = @as(u16, @intCast(buf_len - header_len)),
.Reserved = 0,
- .SubstituteNameOffset = @intCast(u16, target_path.len * 2),
- .SubstituteNameLength = @intCast(u16, target_path.len * 2),
+ .SubstituteNameOffset = @as(u16, @intCast(target_path.len * 2)),
+ .SubstituteNameLength = @as(u16, @intCast(target_path.len * 2)),
.PrintNameOffset = 0,
- .PrintNameLength = @intCast(u16, target_path.len * 2),
+ .PrintNameLength = @as(u16, @intCast(target_path.len * 2)),
.Flags = if (dir) |_| SYMLINK_FLAG_RELATIVE else 0,
};
@memcpy(buffer[0..@sizeOf(SYMLINK_DATA)], std.mem.asBytes(&symlink_data));
- @memcpy(buffer[@sizeOf(SYMLINK_DATA)..][0 .. target_path.len * 2], @ptrCast([*]const u8, target_path));
+ @memcpy(buffer[@sizeOf(SYMLINK_DATA)..][0 .. target_path.len * 2], @as([*]const u8, @ptrCast(target_path)));
const paths_start = @sizeOf(SYMLINK_DATA) + target_path.len * 2;
- @memcpy(buffer[paths_start..][0 .. target_path.len * 2], @ptrCast([*]const u8, target_path));
+ @memcpy(buffer[paths_start..][0 .. target_path.len * 2], @as([*]const u8, @ptrCast(target_path)));
_ = try DeviceIoControl(symlink_handle, FSCTL_SET_REPARSE_POINT, buffer[0..buf_len], null);
}
@@ -827,10 +827,10 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin
else => |e| return e,
};
- const reparse_struct = @ptrCast(*const REPARSE_DATA_BUFFER, @alignCast(@alignOf(REPARSE_DATA_BUFFER), &reparse_buf[0]));
+ const reparse_struct: *const REPARSE_DATA_BUFFER = @ptrCast(@alignCast(&reparse_buf[0]));
switch (reparse_struct.ReparseTag) {
IO_REPARSE_TAG_SYMLINK => {
- const buf = @ptrCast(*const SYMBOLIC_LINK_REPARSE_BUFFER, @alignCast(@alignOf(SYMBOLIC_LINK_REPARSE_BUFFER), &reparse_struct.DataBuffer[0]));
+ const buf: *const SYMBOLIC_LINK_REPARSE_BUFFER = @ptrCast(@alignCast(&reparse_struct.DataBuffer[0]));
const offset = buf.SubstituteNameOffset >> 1;
const len = buf.SubstituteNameLength >> 1;
const path_buf = @as([*]const u16, &buf.PathBuffer);
@@ -838,7 +838,7 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin
return parseReadlinkPath(path_buf[offset..][0..len], is_relative, out_buffer);
},
IO_REPARSE_TAG_MOUNT_POINT => {
- const buf = @ptrCast(*const MOUNT_POINT_REPARSE_BUFFER, @alignCast(@alignOf(MOUNT_POINT_REPARSE_BUFFER), &reparse_struct.DataBuffer[0]));
+ const buf: *const MOUNT_POINT_REPARSE_BUFFER = @ptrCast(@alignCast(&reparse_struct.DataBuffer[0]));
const offset = buf.SubstituteNameOffset >> 1;
const len = buf.SubstituteNameLength >> 1;
const path_buf = @as([*]const u16, &buf.PathBuffer);
@@ -884,7 +884,7 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil
else
FILE_NON_DIRECTORY_FILE | FILE_OPEN_REPARSE_POINT; // would we ever want to delete the target instead?
- const path_len_bytes = @intCast(u16, sub_path_w.len * 2);
+ const path_len_bytes = @as(u16, @intCast(sub_path_w.len * 2));
var nt_name = UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
@@ -1020,7 +1020,7 @@ pub fn SetFilePointerEx_BEGIN(handle: HANDLE, offset: u64) SetFilePointerError!v
// "The starting point is zero or the beginning of the file. If [FILE_BEGIN]
// is specified, then the liDistanceToMove parameter is interpreted as an unsigned value."
// https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-setfilepointerex
- const ipos = @bitCast(LARGE_INTEGER, offset);
+ const ipos = @as(LARGE_INTEGER, @bitCast(offset));
if (kernel32.SetFilePointerEx(handle, ipos, null, FILE_BEGIN) == 0) {
switch (kernel32.GetLastError()) {
.INVALID_PARAMETER => unreachable,
@@ -1064,7 +1064,7 @@ pub fn SetFilePointerEx_CURRENT_get(handle: HANDLE) SetFilePointerError!u64 {
}
// Based on the docs for FILE_BEGIN, it seems that the returned signed integer
// should be interpreted as an unsigned integer.
- return @bitCast(u64, result);
+ return @as(u64, @bitCast(result));
}
pub fn QueryObjectName(
@@ -1073,7 +1073,7 @@ pub fn QueryObjectName(
) ![]u16 {
const out_buffer_aligned = mem.alignInSlice(out_buffer, @alignOf(OBJECT_NAME_INFORMATION)) orelse return error.NameTooLong;
- const info = @ptrCast(*OBJECT_NAME_INFORMATION, out_buffer_aligned);
+ const info = @as(*OBJECT_NAME_INFORMATION, @ptrCast(out_buffer_aligned));
//buffer size is specified in bytes
const out_buffer_len = std.math.cast(ULONG, out_buffer_aligned.len * 2) orelse std.math.maxInt(ULONG);
//last argument would return the length required for full_buffer, not exposed here
@@ -1197,26 +1197,26 @@ pub fn GetFinalPathNameByHandle(
};
defer CloseHandle(mgmt_handle);
- var input_struct = @ptrCast(*MOUNTMGR_MOUNT_POINT, &input_buf[0]);
+ var input_struct = @as(*MOUNTMGR_MOUNT_POINT, @ptrCast(&input_buf[0]));
input_struct.DeviceNameOffset = @sizeOf(MOUNTMGR_MOUNT_POINT);
- input_struct.DeviceNameLength = @intCast(USHORT, volume_name_u16.len * 2);
- @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..][0 .. volume_name_u16.len * 2], @ptrCast([*]const u8, volume_name_u16.ptr));
+ input_struct.DeviceNameLength = @as(USHORT, @intCast(volume_name_u16.len * 2));
+ @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..][0 .. volume_name_u16.len * 2], @as([*]const u8, @ptrCast(volume_name_u16.ptr)));
DeviceIoControl(mgmt_handle, IOCTL_MOUNTMGR_QUERY_POINTS, &input_buf, &output_buf) catch |err| switch (err) {
error.AccessDenied => unreachable,
else => |e| return e,
};
- const mount_points_struct = @ptrCast(*const MOUNTMGR_MOUNT_POINTS, &output_buf[0]);
+ const mount_points_struct = @as(*const MOUNTMGR_MOUNT_POINTS, @ptrCast(&output_buf[0]));
- const mount_points = @ptrCast(
+ const mount_points = @as(
[*]const MOUNTMGR_MOUNT_POINT,
- &mount_points_struct.MountPoints[0],
+ @ptrCast(&mount_points_struct.MountPoints[0]),
)[0..mount_points_struct.NumberOfMountPoints];
for (mount_points) |mount_point| {
- const symlink = @ptrCast(
+ const symlink = @as(
[*]const u16,
- @alignCast(@alignOf(u16), &output_buf[mount_point.SymbolicLinkNameOffset]),
+ @ptrCast(@alignCast(&output_buf[mount_point.SymbolicLinkNameOffset])),
)[0 .. mount_point.SymbolicLinkNameLength / 2];
// Look for `\DosDevices\` prefix. We don't really care if there are more than one symlinks
@@ -1282,7 +1282,7 @@ pub fn GetFileSizeEx(hFile: HANDLE) GetFileSizeError!u64 {
else => |err| return unexpectedError(err),
}
}
- return @bitCast(u64, file_size);
+ return @as(u64, @bitCast(file_size));
}
pub const GetFileAttributesError = error{
@@ -1313,7 +1313,7 @@ pub fn WSAStartup(majorVersion: u8, minorVersion: u8) !ws2_32.WSADATA {
var wsadata: ws2_32.WSADATA = undefined;
return switch (ws2_32.WSAStartup((@as(WORD, minorVersion) << 8) | majorVersion, &wsadata)) {
0 => wsadata,
- else => |err_int| switch (@enumFromInt(ws2_32.WinsockError, @intCast(u16, err_int))) {
+ else => |err_int| switch (@as(ws2_32.WinsockError, @enumFromInt(@as(u16, @intCast(err_int))))) {
.WSASYSNOTREADY => return error.SystemNotAvailable,
.WSAVERNOTSUPPORTED => return error.VersionNotSupported,
.WSAEINPROGRESS => return error.BlockingOperationInProgress,
@@ -1408,7 +1408,7 @@ pub fn WSASocketW(
}
pub fn bind(s: ws2_32.SOCKET, name: *const ws2_32.sockaddr, namelen: ws2_32.socklen_t) i32 {
- return ws2_32.bind(s, name, @intCast(i32, namelen));
+ return ws2_32.bind(s, name, @as(i32, @intCast(namelen)));
}
pub fn listen(s: ws2_32.SOCKET, backlog: u31) i32 {
@@ -1427,15 +1427,15 @@ pub fn closesocket(s: ws2_32.SOCKET) !void {
pub fn accept(s: ws2_32.SOCKET, name: ?*ws2_32.sockaddr, namelen: ?*ws2_32.socklen_t) ws2_32.SOCKET {
assert((name == null) == (namelen == null));
- return ws2_32.accept(s, name, @ptrCast(?*i32, namelen));
+ return ws2_32.accept(s, name, @as(?*i32, @ptrCast(namelen)));
}
pub fn getsockname(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.socklen_t) i32 {
- return ws2_32.getsockname(s, name, @ptrCast(*i32, namelen));
+ return ws2_32.getsockname(s, name, @as(*i32, @ptrCast(namelen)));
}
pub fn getpeername(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.socklen_t) i32 {
- return ws2_32.getpeername(s, name, @ptrCast(*i32, namelen));
+ return ws2_32.getpeername(s, name, @as(*i32, @ptrCast(namelen)));
}
pub fn sendmsg(
@@ -1447,28 +1447,28 @@ pub fn sendmsg(
if (ws2_32.WSASendMsg(s, msg, flags, &bytes_send, null, null) == ws2_32.SOCKET_ERROR) {
return ws2_32.SOCKET_ERROR;
} else {
- return @as(i32, @intCast(u31, bytes_send));
+ return @as(i32, @as(u31, @intCast(bytes_send)));
}
}
pub fn sendto(s: ws2_32.SOCKET, buf: [*]const u8, len: usize, flags: u32, to: ?*const ws2_32.sockaddr, to_len: ws2_32.socklen_t) i32 {
- var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = @constCast(buf) };
+ var buffer = ws2_32.WSABUF{ .len = @as(u31, @truncate(len)), .buf = @constCast(buf) };
var bytes_send: DWORD = undefined;
- if (ws2_32.WSASendTo(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_send, flags, to, @intCast(i32, to_len), null, null) == ws2_32.SOCKET_ERROR) {
+ if (ws2_32.WSASendTo(s, @as([*]ws2_32.WSABUF, @ptrCast(&buffer)), 1, &bytes_send, flags, to, @as(i32, @intCast(to_len)), null, null) == ws2_32.SOCKET_ERROR) {
return ws2_32.SOCKET_ERROR;
} else {
- return @as(i32, @intCast(u31, bytes_send));
+ return @as(i32, @as(u31, @intCast(bytes_send)));
}
}
pub fn recvfrom(s: ws2_32.SOCKET, buf: [*]u8, len: usize, flags: u32, from: ?*ws2_32.sockaddr, from_len: ?*ws2_32.socklen_t) i32 {
- var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = buf };
+ var buffer = ws2_32.WSABUF{ .len = @as(u31, @truncate(len)), .buf = buf };
var bytes_received: DWORD = undefined;
var flags_inout = flags;
- if (ws2_32.WSARecvFrom(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_received, &flags_inout, from, @ptrCast(?*i32, from_len), null, null) == ws2_32.SOCKET_ERROR) {
+ if (ws2_32.WSARecvFrom(s, @as([*]ws2_32.WSABUF, @ptrCast(&buffer)), 1, &bytes_received, &flags_inout, from, @as(?*i32, @ptrCast(from_len)), null, null) == ws2_32.SOCKET_ERROR) {
return ws2_32.SOCKET_ERROR;
} else {
- return @as(i32, @intCast(u31, bytes_received));
+ return @as(i32, @as(u31, @intCast(bytes_received)));
}
}
@@ -1489,9 +1489,9 @@ pub fn WSAIoctl(
s,
dwIoControlCode,
if (inBuffer) |i| i.ptr else null,
- if (inBuffer) |i| @intCast(DWORD, i.len) else 0,
+ if (inBuffer) |i| @as(DWORD, @intCast(i.len)) else 0,
outBuffer.ptr,
- @intCast(DWORD, outBuffer.len),
+ @as(DWORD, @intCast(outBuffer.len)),
&bytes,
overlapped,
completionRoutine,
@@ -1741,7 +1741,7 @@ pub fn QueryPerformanceFrequency() u64 {
var result: LARGE_INTEGER = undefined;
assert(kernel32.QueryPerformanceFrequency(&result) != 0);
// The kernel treats this integer as unsigned.
- return @bitCast(u64, result);
+ return @as(u64, @bitCast(result));
}
pub fn QueryPerformanceCounter() u64 {
@@ -1750,7 +1750,7 @@ pub fn QueryPerformanceCounter() u64 {
var result: LARGE_INTEGER = undefined;
assert(kernel32.QueryPerformanceCounter(&result) != 0);
// The kernel treats this integer as unsigned.
- return @bitCast(u64, result);
+ return @as(u64, @bitCast(result));
}
pub fn InitOnceExecuteOnce(InitOnce: *INIT_ONCE, InitFn: INIT_ONCE_FN, Parameter: ?*anyopaque, Context: ?*anyopaque) void {
@@ -1852,7 +1852,7 @@ pub fn teb() *TEB {
return switch (native_arch) {
.x86 => blk: {
if (builtin.zig_backend == .stage2_c) {
- break :blk @ptrCast(*TEB, @alignCast(@alignOf(TEB), zig_x86_windows_teb()));
+ break :blk @ptrCast(@alignCast(zig_x86_windows_teb()));
} else {
break :blk asm volatile (
\\ movl %%fs:0x18, %[ptr]
@@ -1862,7 +1862,7 @@ pub fn teb() *TEB {
},
.x86_64 => blk: {
if (builtin.zig_backend == .stage2_c) {
- break :blk @ptrCast(*TEB, @alignCast(@alignOf(TEB), zig_x86_64_windows_teb()));
+ break :blk @ptrCast(@alignCast(zig_x86_64_windows_teb()));
} else {
break :blk asm volatile (
\\ movq %%gs:0x30, %[ptr]
@@ -1894,7 +1894,7 @@ pub fn fromSysTime(hns: i64) i128 {
pub fn toSysTime(ns: i128) i64 {
const hns = @divFloor(ns, 100);
- return @intCast(i64, hns) - std.time.epoch.windows * (std.time.ns_per_s / 100);
+ return @as(i64, @intCast(hns)) - std.time.epoch.windows * (std.time.ns_per_s / 100);
}
pub fn fileTimeToNanoSeconds(ft: FILETIME) i128 {
@@ -1904,22 +1904,22 @@ pub fn fileTimeToNanoSeconds(ft: FILETIME) i128 {
/// Converts a number of nanoseconds since the POSIX epoch to a Windows FILETIME.
pub fn nanoSecondsToFileTime(ns: i128) FILETIME {
- const adjusted = @bitCast(u64, toSysTime(ns));
+ const adjusted = @as(u64, @bitCast(toSysTime(ns)));
return FILETIME{
- .dwHighDateTime = @truncate(u32, adjusted >> 32),
- .dwLowDateTime = @truncate(u32, adjusted),
+ .dwHighDateTime = @as(u32, @truncate(adjusted >> 32)),
+ .dwLowDateTime = @as(u32, @truncate(adjusted)),
};
}
/// Compares two WTF16 strings using RtlEqualUnicodeString
pub fn eqlIgnoreCaseWTF16(a: []const u16, b: []const u16) bool {
- const a_bytes = @intCast(u16, a.len * 2);
+ const a_bytes = @as(u16, @intCast(a.len * 2));
const a_string = UNICODE_STRING{
.Length = a_bytes,
.MaximumLength = a_bytes,
.Buffer = @constCast(a.ptr),
};
- const b_bytes = @intCast(u16, b.len * 2);
+ const b_bytes = @as(u16, @intCast(b.len * 2));
const b_string = UNICODE_STRING{
.Length = b_bytes,
.MaximumLength = b_bytes,
@@ -2117,7 +2117,7 @@ pub fn wToPrefixedFileW(path: [:0]const u16) !PathSpace {
.unc_absolute => nt_prefix.len + 2,
else => nt_prefix.len,
};
- const buf_len = @intCast(u32, path_space.data.len - path_buf_offset);
+ const buf_len = @as(u32, @intCast(path_space.data.len - path_buf_offset));
const path_byte_len = ntdll.RtlGetFullPathName_U(
path.ptr,
buf_len * 2,
@@ -2263,7 +2263,7 @@ test getUnprefixedPathType {
}
fn getFullPathNameW(path: [*:0]const u16, out: []u16) !usize {
- const result = kernel32.GetFullPathNameW(path, @intCast(u32, out.len), out.ptr, null);
+ const result = kernel32.GetFullPathNameW(path, @as(u32, @intCast(out.len)), out.ptr, null);
if (result == 0) {
switch (kernel32.GetLastError()) {
else => |err| return unexpectedError(err),
@@ -2284,9 +2284,9 @@ pub fn loadWinsockExtensionFunction(comptime T: type, sock: ws2_32.SOCKET, guid:
const rc = ws2_32.WSAIoctl(
sock,
ws2_32.SIO_GET_EXTENSION_FUNCTION_POINTER,
- @ptrCast(*const anyopaque, &guid),
+ @as(*const anyopaque, @ptrCast(&guid)),
@sizeOf(GUID),
- @ptrFromInt(?*anyopaque, @intFromPtr(&function)),
+ @as(?*anyopaque, @ptrFromInt(@intFromPtr(&function))),
@sizeOf(T),
&num_bytes,
null,
@@ -2332,7 +2332,7 @@ pub fn unexpectedError(err: Win32Error) std.os.UnexpectedError {
}
pub fn unexpectedWSAError(err: ws2_32.WinsockError) std.os.UnexpectedError {
- return unexpectedError(@enumFromInt(Win32Error, @intFromEnum(err)));
+ return unexpectedError(@as(Win32Error, @enumFromInt(@intFromEnum(err))));
}
/// Call this when you made a windows NtDll call
@@ -2530,7 +2530,7 @@ pub fn CTL_CODE(deviceType: u16, function: u12, method: TransferType, access: u2
@intFromEnum(method);
}
-pub const INVALID_HANDLE_VALUE = @ptrFromInt(HANDLE, maxInt(usize));
+pub const INVALID_HANDLE_VALUE = @as(HANDLE, @ptrFromInt(maxInt(usize)));
pub const INVALID_FILE_ATTRIBUTES = @as(DWORD, maxInt(DWORD));
@@ -3119,7 +3119,7 @@ pub const GUID = extern struct {
bytes[i] = (try std.fmt.charToDigit(s[hex_offset], 16)) << 4 |
try std.fmt.charToDigit(s[hex_offset + 1], 16);
}
- return @bitCast(GUID, bytes);
+ return @as(GUID, @bitCast(bytes));
}
};
@@ -3150,16 +3150,16 @@ pub const KF_FLAG_SIMPLE_IDLIST = 256;
pub const KF_FLAG_ALIAS_ONLY = -2147483648;
pub const S_OK = 0;
-pub const E_NOTIMPL = @bitCast(c_long, @as(c_ulong, 0x80004001));
-pub const E_NOINTERFACE = @bitCast(c_long, @as(c_ulong, 0x80004002));
-pub const E_POINTER = @bitCast(c_long, @as(c_ulong, 0x80004003));
-pub const E_ABORT = @bitCast(c_long, @as(c_ulong, 0x80004004));
-pub const E_FAIL = @bitCast(c_long, @as(c_ulong, 0x80004005));
-pub const E_UNEXPECTED = @bitCast(c_long, @as(c_ulong, 0x8000FFFF));
-pub const E_ACCESSDENIED = @bitCast(c_long, @as(c_ulong, 0x80070005));
-pub const E_HANDLE = @bitCast(c_long, @as(c_ulong, 0x80070006));
-pub const E_OUTOFMEMORY = @bitCast(c_long, @as(c_ulong, 0x8007000E));
-pub const E_INVALIDARG = @bitCast(c_long, @as(c_ulong, 0x80070057));
+pub const E_NOTIMPL = @as(c_long, @bitCast(@as(c_ulong, 0x80004001)));
+pub const E_NOINTERFACE = @as(c_long, @bitCast(@as(c_ulong, 0x80004002)));
+pub const E_POINTER = @as(c_long, @bitCast(@as(c_ulong, 0x80004003)));
+pub const E_ABORT = @as(c_long, @bitCast(@as(c_ulong, 0x80004004)));
+pub const E_FAIL = @as(c_long, @bitCast(@as(c_ulong, 0x80004005)));
+pub const E_UNEXPECTED = @as(c_long, @bitCast(@as(c_ulong, 0x8000FFFF)));
+pub const E_ACCESSDENIED = @as(c_long, @bitCast(@as(c_ulong, 0x80070005)));
+pub const E_HANDLE = @as(c_long, @bitCast(@as(c_ulong, 0x80070006)));
+pub const E_OUTOFMEMORY = @as(c_long, @bitCast(@as(c_ulong, 0x8007000E)));
+pub const E_INVALIDARG = @as(c_long, @bitCast(@as(c_ulong, 0x80070057)));
pub const FILE_FLAG_BACKUP_SEMANTICS = 0x02000000;
pub const FILE_FLAG_DELETE_ON_CLOSE = 0x04000000;
@@ -3221,7 +3221,7 @@ pub const LSTATUS = LONG;
pub const HKEY = *opaque {};
-pub const HKEY_LOCAL_MACHINE: HKEY = @ptrFromInt(HKEY, 0x80000002);
+pub const HKEY_LOCAL_MACHINE: HKEY = @as(HKEY, @ptrFromInt(0x80000002));
/// Combines the STANDARD_RIGHTS_REQUIRED, KEY_QUERY_VALUE, KEY_SET_VALUE, KEY_CREATE_SUB_KEY,
/// KEY_ENUMERATE_SUB_KEYS, KEY_NOTIFY, and KEY_CREATE_LINK access rights.
@@ -4685,7 +4685,7 @@ pub const KUSER_SHARED_DATA = extern struct {
/// Read-only user-mode address for the shared data.
/// https://www.geoffchappell.com/studies/windows/km/ntoskrnl/inc/api/ntexapi_x/kuser_shared_data/index.htm
/// https://msrc-blog.microsoft.com/2022/04/05/randomizing-the-kuser_shared_data-structure-on-windows/
-pub const SharedUserData: *const KUSER_SHARED_DATA = @ptrFromInt(*const KUSER_SHARED_DATA, 0x7FFE0000);
+pub const SharedUserData: *const KUSER_SHARED_DATA = @as(*const KUSER_SHARED_DATA, @ptrFromInt(0x7FFE0000));
pub fn IsProcessorFeaturePresent(feature: PF) bool {
if (@intFromEnum(feature) >= PROCESSOR_FEATURE_MAX) return false;
@@ -4886,7 +4886,7 @@ pub fn WriteProcessMemory(handle: HANDLE, addr: ?LPVOID, buffer: []const u8) Wri
switch (ntdll.NtWriteVirtualMemory(
handle,
addr,
- @ptrCast(*const anyopaque, buffer.ptr),
+ @as(*const anyopaque, @ptrCast(buffer.ptr)),
buffer.len,
&nwritten,
)) {
@@ -4919,6 +4919,6 @@ pub fn ProcessBaseAddress(handle: HANDLE) ProcessBaseAddressError!HMODULE {
var peb_buf: [@sizeOf(PEB)]u8 align(@alignOf(PEB)) = undefined;
const peb_out = try ReadProcessMemory(handle, info.PebBaseAddress, &peb_buf);
- const ppeb = @ptrCast(*const PEB, @alignCast(@alignOf(PEB), peb_out.ptr));
+ const ppeb: *const PEB = @ptrCast(@alignCast(peb_out.ptr));
return ppeb.ImageBaseAddress;
}
diff --git a/lib/std/os/windows/user32.zig b/lib/std/os/windows/user32.zig
index 0d6fc2c670..8c492cee32 100644
--- a/lib/std/os/windows/user32.zig
+++ b/lib/std/os/windows/user32.zig
@@ -1275,7 +1275,7 @@ pub const WS_EX_LAYERED = 0x00080000;
pub const WS_EX_OVERLAPPEDWINDOW = WS_EX_WINDOWEDGE | WS_EX_CLIENTEDGE;
pub const WS_EX_PALETTEWINDOW = WS_EX_WINDOWEDGE | WS_EX_TOOLWINDOW | WS_EX_TOPMOST;
-pub const CW_USEDEFAULT = @bitCast(i32, @as(u32, 0x80000000));
+pub const CW_USEDEFAULT = @as(i32, @bitCast(@as(u32, 0x80000000)));
pub extern "user32" fn CreateWindowExA(dwExStyle: DWORD, lpClassName: [*:0]const u8, lpWindowName: [*:0]const u8, dwStyle: DWORD, X: i32, Y: i32, nWidth: i32, nHeight: i32, hWindParent: ?HWND, hMenu: ?HMENU, hInstance: HINSTANCE, lpParam: ?LPVOID) callconv(WINAPI) ?HWND;
pub fn createWindowExA(dwExStyle: u32, lpClassName: [*:0]const u8, lpWindowName: [*:0]const u8, dwStyle: u32, X: i32, Y: i32, nWidth: i32, nHeight: i32, hWindParent: ?HWND, hMenu: ?HMENU, hInstance: HINSTANCE, lpParam: ?*anyopaque) !HWND {
diff --git a/lib/std/os/windows/ws2_32.zig b/lib/std/os/windows/ws2_32.zig
index 821b903a34..240c8c849d 100644
--- a/lib/std/os/windows/ws2_32.zig
+++ b/lib/std/os/windows/ws2_32.zig
@@ -21,7 +21,7 @@ const LPARAM = windows.LPARAM;
const FARPROC = windows.FARPROC;
pub const SOCKET = *opaque {};
-pub const INVALID_SOCKET = @ptrFromInt(SOCKET, ~@as(usize, 0));
+pub const INVALID_SOCKET = @as(SOCKET, @ptrFromInt(~@as(usize, 0)));
pub const GROUP = u32;
pub const ADDRESS_FAMILY = u16;
diff --git a/lib/std/packed_int_array.zig b/lib/std/packed_int_array.zig
index 10d8af0575..cff9eb8cf1 100644
--- a/lib/std/packed_int_array.zig
+++ b/lib/std/packed_int_array.zig
@@ -73,25 +73,25 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
const tail_keep_bits = container_bits - (int_bits + head_keep_bits);
//read bytes as container
- const value_ptr = @ptrCast(*align(1) const Container, &bytes[start_byte]);
+ const value_ptr = @as(*align(1) const Container, @ptrCast(&bytes[start_byte]));
var value = value_ptr.*;
if (endian != native_endian) value = @byteSwap(value);
switch (endian) {
.Big => {
- value <<= @intCast(Shift, head_keep_bits);
- value >>= @intCast(Shift, head_keep_bits);
- value >>= @intCast(Shift, tail_keep_bits);
+ value <<= @as(Shift, @intCast(head_keep_bits));
+ value >>= @as(Shift, @intCast(head_keep_bits));
+ value >>= @as(Shift, @intCast(tail_keep_bits));
},
.Little => {
- value <<= @intCast(Shift, tail_keep_bits);
- value >>= @intCast(Shift, tail_keep_bits);
- value >>= @intCast(Shift, head_keep_bits);
+ value <<= @as(Shift, @intCast(tail_keep_bits));
+ value >>= @as(Shift, @intCast(tail_keep_bits));
+ value >>= @as(Shift, @intCast(head_keep_bits));
},
}
- return @bitCast(Int, @truncate(UnInt, value));
+ return @as(Int, @bitCast(@as(UnInt, @truncate(value))));
}
/// Sets the integer at `index` to `val` within the packed data beginning
@@ -115,21 +115,21 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
const head_keep_bits = bit_index - (start_byte * 8);
const tail_keep_bits = container_bits - (int_bits + head_keep_bits);
const keep_shift = switch (endian) {
- .Big => @intCast(Shift, tail_keep_bits),
- .Little => @intCast(Shift, head_keep_bits),
+ .Big => @as(Shift, @intCast(tail_keep_bits)),
+ .Little => @as(Shift, @intCast(head_keep_bits)),
};
//position the bits where they need to be in the container
- const value = @intCast(Container, @bitCast(UnInt, int)) << keep_shift;
+ const value = @as(Container, @intCast(@as(UnInt, @bitCast(int)))) << keep_shift;
//read existing bytes
- const target_ptr = @ptrCast(*align(1) Container, &bytes[start_byte]);
+ const target_ptr = @as(*align(1) Container, @ptrCast(&bytes[start_byte]));
var target = target_ptr.*;
if (endian != native_endian) target = @byteSwap(target);
//zero the bits we want to replace in the existing bytes
- const inv_mask = @intCast(Container, std.math.maxInt(UnInt)) << keep_shift;
+ const inv_mask = @as(Container, @intCast(std.math.maxInt(UnInt))) << keep_shift;
const mask = ~inv_mask;
target &= mask;
@@ -156,7 +156,7 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
if (length == 0) return PackedIntSliceEndian(Int, endian).init(new_bytes[0..0], 0);
var new_slice = PackedIntSliceEndian(Int, endian).init(new_bytes, length);
- new_slice.bit_offset = @intCast(u3, (bit_index - (start_byte * 8)));
+ new_slice.bit_offset = @as(u3, @intCast((bit_index - (start_byte * 8))));
return new_slice;
}
@@ -398,7 +398,7 @@ test "PackedIntArray init" {
const PackedArray = PackedIntArray(u3, 8);
var packed_array = PackedArray.init([_]u3{ 0, 1, 2, 3, 4, 5, 6, 7 });
var i = @as(usize, 0);
- while (i < packed_array.len) : (i += 1) try testing.expectEqual(@intCast(u3, i), packed_array.get(i));
+ while (i < packed_array.len) : (i += 1) try testing.expectEqual(@as(u3, @intCast(i)), packed_array.get(i));
}
test "PackedIntArray initAllTo" {
@@ -469,7 +469,7 @@ test "PackedIntSlice of PackedInt(Array/Slice)" {
var i = @as(usize, 0);
while (i < packed_array.len) : (i += 1) {
- packed_array.set(i, @intCast(Int, i % limit));
+ packed_array.set(i, @as(Int, @intCast(i % limit)));
}
//slice of array
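
The PackedIntIo hunks above nest several conversions, with each inner builtin taking its destination from the enclosing @as, e.g. @as(Int, @bitCast(@as(UnInt, @truncate(value)))). Split into typed locals, the same shape reads as below (i12/u12 are stand-ins for Int/UnInt, not the generic parameters themselves):

fn lowBitsAsSigned(value: u64) i12 {
    const un: u12 = @truncate(value); // destination u12 from the annotation
    return @bitCast(un); // destination i12 from the return type
}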
diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig
index 25a6786ec6..4d71ce2103 100644
--- a/lib/std/pdb.zig
+++ b/lib/std/pdb.zig
@@ -573,7 +573,7 @@ pub const Pdb = struct {
if (this_record_len % 4 != 0) {
const round_to_next_4 = (this_record_len | 0x3) + 1;
const march_forward_bytes = round_to_next_4 - this_record_len;
- try stream.seekBy(@intCast(isize, march_forward_bytes));
+ try stream.seekBy(@as(isize, @intCast(march_forward_bytes)));
this_record_len += march_forward_bytes;
}
@@ -689,14 +689,14 @@ pub const Pdb = struct {
var symbol_i: usize = 0;
while (symbol_i != module.symbols.len) {
- const prefix = @ptrCast(*align(1) RecordPrefix, &module.symbols[symbol_i]);
+ const prefix = @as(*align(1) RecordPrefix, @ptrCast(&module.symbols[symbol_i]));
if (prefix.RecordLen < 2)
return null;
switch (prefix.RecordKind) {
.S_LPROC32, .S_GPROC32 => {
- const proc_sym = @ptrCast(*align(1) ProcSym, &module.symbols[symbol_i + @sizeOf(RecordPrefix)]);
+ const proc_sym = @as(*align(1) ProcSym, @ptrCast(&module.symbols[symbol_i + @sizeOf(RecordPrefix)]));
if (address >= proc_sym.CodeOffset and address < proc_sym.CodeOffset + proc_sym.CodeSize) {
- return mem.sliceTo(@ptrCast([*:0]u8, &proc_sym.Name[0]), 0);
+ return mem.sliceTo(@as([*:0]u8, @ptrCast(&proc_sym.Name[0])), 0);
}
},
else => {},
@@ -715,7 +715,7 @@ pub const Pdb = struct {
var skip_len: usize = undefined;
const checksum_offset = module.checksum_offset orelse return error.MissingDebugInfo;
while (sect_offset != subsect_info.len) : (sect_offset += skip_len) {
- const subsect_hdr = @ptrCast(*align(1) DebugSubsectionHeader, &subsect_info[sect_offset]);
+ const subsect_hdr = @as(*align(1) DebugSubsectionHeader, @ptrCast(&subsect_info[sect_offset]));
skip_len = subsect_hdr.Length;
sect_offset += @sizeOf(DebugSubsectionHeader);
@@ -723,7 +723,7 @@ pub const Pdb = struct {
.Lines => {
var line_index = sect_offset;
- const line_hdr = @ptrCast(*align(1) LineFragmentHeader, &subsect_info[line_index]);
+ const line_hdr = @as(*align(1) LineFragmentHeader, @ptrCast(&subsect_info[line_index]));
if (line_hdr.RelocSegment == 0)
return error.MissingDebugInfo;
line_index += @sizeOf(LineFragmentHeader);
@@ -737,7 +737,7 @@ pub const Pdb = struct {
const subsection_end_index = sect_offset + subsect_hdr.Length;
while (line_index < subsection_end_index) {
- const block_hdr = @ptrCast(*align(1) LineBlockFragmentHeader, &subsect_info[line_index]);
+ const block_hdr = @as(*align(1) LineBlockFragmentHeader, @ptrCast(&subsect_info[line_index]));
line_index += @sizeOf(LineBlockFragmentHeader);
const start_line_index = line_index;
@@ -749,7 +749,7 @@ pub const Pdb = struct {
// This is done with a simple linear search.
var line_i: u32 = 0;
while (line_i < block_hdr.NumLines) : (line_i += 1) {
- const line_num_entry = @ptrCast(*align(1) LineNumberEntry, &subsect_info[line_index]);
+ const line_num_entry = @as(*align(1) LineNumberEntry, @ptrCast(&subsect_info[line_index]));
line_index += @sizeOf(LineNumberEntry);
const vaddr_start = frag_vaddr_start + line_num_entry.Offset;
@@ -761,7 +761,7 @@ pub const Pdb = struct {
// line_i == 0 would mean that no matching LineNumberEntry was found.
if (line_i > 0) {
const subsect_index = checksum_offset + block_hdr.NameIndex;
- const chksum_hdr = @ptrCast(*align(1) FileChecksumEntryHeader, &module.subsect_info[subsect_index]);
+ const chksum_hdr = @as(*align(1) FileChecksumEntryHeader, @ptrCast(&module.subsect_info[subsect_index]));
const strtab_offset = @sizeOf(PDBStringTableHeader) + chksum_hdr.FileNameOffset;
try self.string_table.?.seekTo(strtab_offset);
const source_file_name = try self.string_table.?.reader().readUntilDelimiterAlloc(self.allocator, 0, 1024);
@@ -771,13 +771,13 @@ pub const Pdb = struct {
const column = if (has_column) blk: {
const start_col_index = start_line_index + @sizeOf(LineNumberEntry) * block_hdr.NumLines;
const col_index = start_col_index + @sizeOf(ColumnNumberEntry) * line_entry_idx;
- const col_num_entry = @ptrCast(*align(1) ColumnNumberEntry, &subsect_info[col_index]);
+ const col_num_entry = @as(*align(1) ColumnNumberEntry, @ptrCast(&subsect_info[col_index]));
break :blk col_num_entry.StartColumn;
} else 0;
const found_line_index = start_line_index + line_entry_idx * @sizeOf(LineNumberEntry);
- const line_num_entry = @ptrCast(*align(1) LineNumberEntry, &subsect_info[found_line_index]);
- const flags = @ptrCast(*LineNumberEntry.Flags, &line_num_entry.Flags);
+ const line_num_entry = @as(*align(1) LineNumberEntry, @ptrCast(&subsect_info[found_line_index]));
+ const flags = @as(*LineNumberEntry.Flags, @ptrCast(&line_num_entry.Flags));
return debug.LineInfo{
.file_name = source_file_name,
@@ -836,7 +836,7 @@ pub const Pdb = struct {
var sect_offset: usize = 0;
var skip_len: usize = undefined;
while (sect_offset != mod.subsect_info.len) : (sect_offset += skip_len) {
- const subsect_hdr = @ptrCast(*align(1) DebugSubsectionHeader, &mod.subsect_info[sect_offset]);
+ const subsect_hdr = @as(*align(1) DebugSubsectionHeader, @ptrCast(&mod.subsect_info[sect_offset]));
skip_len = subsect_hdr.Length;
sect_offset += @sizeOf(DebugSubsectionHeader);
@@ -1038,7 +1038,7 @@ const MsfStream = struct {
}
fn read(self: *MsfStream, buffer: []u8) !usize {
- var block_id = @intCast(usize, self.pos / self.block_size);
+ var block_id = @as(usize, @intCast(self.pos / self.block_size));
if (block_id >= self.blocks.len) return 0; // End of Stream
var block = self.blocks[block_id];
var offset = self.pos % self.block_size;
@@ -1069,7 +1069,7 @@ const MsfStream = struct {
}
pub fn seekBy(self: *MsfStream, len: i64) !void {
- self.pos = @intCast(u64, @intCast(i64, self.pos) + len);
+ self.pos = @as(u64, @intCast(@as(i64, @intCast(self.pos)) + len));
if (self.pos >= self.blocks.len * self.block_size)
return error.EOF;
}
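The pdb.zig hunks above are all the same mechanical rewrite: the old two-argument @ptrCast(T, p) becomes @ptrCast(p) with the destination type taken from the result location, or gets wrapped in @as(T, ...) when there is no result location to infer from. A minimal sketch of both spellings, assuming an illustrative Prefix record rather than the real pdb.zig types:

const std = @import("std");

// Illustrative record layout, not the real pdb.zig RecordPrefix.
const Prefix = extern struct {
    len: u16,
    kind: u16,
};

fn readPrefix(bytes: []const u8) u16 {
    // The annotated result type supplies the destination pointer type,
    // so @ptrCast takes only the operand.
    const p: *align(1) const Prefix = @ptrCast(&bytes[0]);
    // Equivalent spelling used throughout the diff when no result
    // location exists: wrap the cast in @as.
    const q = @as(*align(1) const Prefix, @ptrCast(&bytes[0]));
    return p.len + q.kind;
}

test "unaligned record cast" {
    const buf = [_]u8{ 1, 1, 1, 1 };
    // 0x0101 + 0x0101, independent of endianness.
    try std.testing.expectEqual(@as(u16, 0x0202), readPrefix(&buf));
}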
diff --git a/lib/std/process.zig b/lib/std/process.zig
index 05066fa436..28d4bfcb25 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -68,7 +68,7 @@ pub const EnvMap = struct {
pub const EnvNameHashContext = struct {
fn upcase(c: u21) u21 {
if (c <= std.math.maxInt(u16))
- return std.os.windows.ntdll.RtlUpcaseUnicodeChar(@intCast(u16, c));
+ return std.os.windows.ntdll.RtlUpcaseUnicodeChar(@as(u16, @intCast(c)));
return c;
}
@@ -80,9 +80,9 @@ pub const EnvMap = struct {
while (it.nextCodepoint()) |cp| {
const cp_upper = upcase(cp);
h.update(&[_]u8{
- @intCast(u8, (cp_upper >> 16) & 0xff),
- @intCast(u8, (cp_upper >> 8) & 0xff),
- @intCast(u8, (cp_upper >> 0) & 0xff),
+ @as(u8, @intCast((cp_upper >> 16) & 0xff)),
+ @as(u8, @intCast((cp_upper >> 8) & 0xff)),
+ @as(u8, @intCast((cp_upper >> 0) & 0xff)),
});
}
return h.final();
@@ -872,8 +872,8 @@ pub fn argsFree(allocator: Allocator, args_alloc: []const [:0]u8) void {
for (args_alloc) |arg| {
total_bytes += @sizeOf([]u8) + arg.len + 1;
}
- const unaligned_allocated_buf = @ptrCast([*]const u8, args_alloc.ptr)[0..total_bytes];
- const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf);
+ const unaligned_allocated_buf = @as([*]const u8, @ptrCast(args_alloc.ptr))[0..total_bytes];
+ const aligned_allocated_buf: []align(@alignOf([]u8)) const u8 = @alignCast(unaligned_allocated_buf);
return allocator.free(aligned_allocated_buf);
}
@@ -1143,7 +1143,7 @@ pub fn execve(
} else if (builtin.output_mode == .Exe) {
// Then we have Zig start code and this works.
// TODO type-safety for null-termination of `os.environ`.
- break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr);
+ break :m @as([*:null]const ?[*:0]const u8, @ptrCast(os.environ.ptr));
} else {
// TODO come up with a solution for this.
@compileError("missing std lib enhancement: std.process.execv implementation has no way to collect the environment variables to forward to the child process");
@@ -1175,7 +1175,7 @@ pub fn totalSystemMemory() TotalSystemMemoryError!usize {
error.NameTooLong, error.UnknownName => unreachable,
else => return error.UnknownTotalSystemMemory,
};
- return @intCast(usize, physmem);
+ return @as(usize, @intCast(physmem));
},
.openbsd => {
const mib: [2]c_int = [_]c_int{
@@ -1192,7 +1192,7 @@ pub fn totalSystemMemory() TotalSystemMemoryError!usize {
else => return error.UnknownTotalSystemMemory,
};
assert(physmem >= 0);
- return @bitCast(usize, physmem);
+ return @as(usize, @bitCast(physmem));
},
.windows => {
var sbi: std.os.windows.SYSTEM_BASIC_INFORMATION = undefined;
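@alignCast loses its alignment argument in the same way, which is why argsFree above now annotates the slice type instead of passing @alignOf([]u8): the alignment is read from the result type and only the runtime safety check remains. A small sketch under that assumption, using a plain u32 buffer rather than the argv storage:

const std = @import("std");

fn sumWords(bytes: []const u8) u32 {
    // Previously @alignCast(@alignOf(u32), bytes); the alignment now
    // comes from the annotated result type.
    const aligned: []align(@alignOf(u32)) const u8 = @alignCast(bytes);
    var total: u32 = 0;
    for (std.mem.bytesAsSlice(u32, aligned)) |word| total +%= word;
    return total;
}

test "alignCast via result type" {
    const storage align(@alignOf(u32)) = [_]u8{ 1, 1, 1, 1, 2, 2, 2, 2 };
    try std.testing.expectEqual(@as(u32, 0x03030303), sumWords(&storage));
}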
diff --git a/lib/std/rand.zig b/lib/std/rand.zig
index f07562c911..84dc9d2daf 100644
--- a/lib/std/rand.zig
+++ b/lib/std/rand.zig
@@ -41,8 +41,7 @@ pub const Random = struct {
assert(@typeInfo(@typeInfo(Ptr).Pointer.child) == .Struct); // Must point to a struct
const gen = struct {
fn fill(ptr: *anyopaque, buf: []u8) void {
- const alignment = @typeInfo(Ptr).Pointer.alignment;
- const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
+ const self: Ptr = @ptrCast(@alignCast(ptr));
fillFn(self, buf);
}
};
@@ -97,7 +96,7 @@ pub const Random = struct {
r.uintLessThan(Index, values.len);
const MinInt = MinArrayIndex(Index);
- return values[@intCast(MinInt, index)];
+ return values[@as(MinInt, @intCast(index))];
}
/// Returns a random int `i` such that `minInt(T) <= i <= maxInt(T)`.
@@ -114,8 +113,8 @@ pub const Random = struct {
// TODO: endian portability is pointless if the underlying prng isn't endian portable.
// TODO: document the endian portability of this library.
const byte_aligned_result = mem.readIntSliceLittle(ByteAlignedT, &rand_bytes);
- const unsigned_result = @truncate(UnsignedT, byte_aligned_result);
- return @bitCast(T, unsigned_result);
+ const unsigned_result = @as(UnsignedT, @truncate(byte_aligned_result));
+ return @as(T, @bitCast(unsigned_result));
}
/// Constant-time implementation of `uintLessThan`.
@@ -126,9 +125,9 @@ pub const Random = struct {
comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
assert(0 < less_than);
if (bits <= 32) {
- return @intCast(T, limitRangeBiased(u32, r.int(u32), less_than));
+ return @as(T, @intCast(limitRangeBiased(u32, r.int(u32), less_than)));
} else {
- return @intCast(T, limitRangeBiased(u64, r.int(u64), less_than));
+ return @as(T, @intCast(limitRangeBiased(u64, r.int(u64), less_than)));
}
}
@@ -156,7 +155,7 @@ pub const Random = struct {
// "Lemire's (with an extra tweak from me)"
var x: Small = r.int(Small);
var m: Large = @as(Large, x) * @as(Large, less_than);
- var l: Small = @truncate(Small, m);
+ var l: Small = @as(Small, @truncate(m));
if (l < less_than) {
var t: Small = -%less_than;
@@ -169,10 +168,10 @@ pub const Random = struct {
while (l < t) {
x = r.int(Small);
m = @as(Large, x) * @as(Large, less_than);
- l = @truncate(Small, m);
+ l = @as(Small, @truncate(m));
}
}
- return @intCast(T, m >> small_bits);
+ return @as(T, @intCast(m >> small_bits));
}
/// Constant-time implementation of `uintAtMost`.
@@ -206,10 +205,10 @@ pub const Random = struct {
if (info.signedness == .signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(.unsigned, info.bits);
- const lo = @bitCast(UnsignedT, at_least);
- const hi = @bitCast(UnsignedT, less_than);
+ const lo = @as(UnsignedT, @bitCast(at_least));
+ const hi = @as(UnsignedT, @bitCast(less_than));
const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo);
- return @bitCast(T, result);
+ return @as(T, @bitCast(result));
} else {
// The signed implementation would work fine, but we can use stricter arithmetic operators here.
return at_least + r.uintLessThanBiased(T, less_than - at_least);
@@ -225,10 +224,10 @@ pub const Random = struct {
if (info.signedness == .signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(.unsigned, info.bits);
- const lo = @bitCast(UnsignedT, at_least);
- const hi = @bitCast(UnsignedT, less_than);
+ const lo = @as(UnsignedT, @bitCast(at_least));
+ const hi = @as(UnsignedT, @bitCast(less_than));
const result = lo +% r.uintLessThan(UnsignedT, hi -% lo);
- return @bitCast(T, result);
+ return @as(T, @bitCast(result));
} else {
// The signed implementation would work fine, but we can use stricter arithmetic operators here.
return at_least + r.uintLessThan(T, less_than - at_least);
@@ -243,10 +242,10 @@ pub const Random = struct {
if (info.signedness == .signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(.unsigned, info.bits);
- const lo = @bitCast(UnsignedT, at_least);
- const hi = @bitCast(UnsignedT, at_most);
+ const lo = @as(UnsignedT, @bitCast(at_least));
+ const hi = @as(UnsignedT, @bitCast(at_most));
const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo);
- return @bitCast(T, result);
+ return @as(T, @bitCast(result));
} else {
// The signed implementation would work fine, but we can use stricter arithmetic operators here.
return at_least + r.uintAtMostBiased(T, at_most - at_least);
@@ -262,10 +261,10 @@ pub const Random = struct {
if (info.signedness == .signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(.unsigned, info.bits);
- const lo = @bitCast(UnsignedT, at_least);
- const hi = @bitCast(UnsignedT, at_most);
+ const lo = @as(UnsignedT, @bitCast(at_least));
+ const hi = @as(UnsignedT, @bitCast(at_most));
const result = lo +% r.uintAtMost(UnsignedT, hi -% lo);
- return @bitCast(T, result);
+ return @as(T, @bitCast(result));
} else {
// The signed implementation would work fine, but we can use stricter arithmetic operators here.
return at_least + r.uintAtMost(T, at_most - at_least);
@@ -294,9 +293,9 @@ pub const Random = struct {
rand_lz += @clz(r.int(u32) | 0x7FF);
}
}
- const mantissa = @truncate(u23, rand);
+ const mantissa = @as(u23, @truncate(rand));
const exponent = @as(u32, 126 - rand_lz) << 23;
- return @bitCast(f32, exponent | mantissa);
+ return @as(f32, @bitCast(exponent | mantissa));
},
f64 => {
// Use 52 random bits for the mantissa, and the rest for the exponent.
@@ -321,7 +320,7 @@ pub const Random = struct {
}
const mantissa = rand & 0xFFFFFFFFFFFFF;
const exponent = (1022 - rand_lz) << 52;
- return @bitCast(f64, exponent | mantissa);
+ return @as(f64, @bitCast(exponent | mantissa));
},
else => @compileError("unknown floating point type"),
}
@@ -333,7 +332,7 @@ pub const Random = struct {
pub fn floatNorm(r: Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.NormDist);
switch (T) {
- f32 => return @floatCast(f32, value),
+ f32 => return @as(f32, @floatCast(value)),
f64 => return value,
else => @compileError("unknown floating point type"),
}
@@ -345,7 +344,7 @@ pub const Random = struct {
pub fn floatExp(r: Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.ExpDist);
switch (T) {
- f32 => return @floatCast(f32, value),
+ f32 => return @as(f32, @floatCast(value)),
f64 => return value,
else => @compileError("unknown floating point type"),
}
@@ -379,10 +378,10 @@ pub const Random = struct {
}
// `i <= j < max <= maxInt(MinInt)`
- const max = @intCast(MinInt, buf.len);
+ const max = @as(MinInt, @intCast(buf.len));
var i: MinInt = 0;
while (i < max - 1) : (i += 1) {
- const j = @intCast(MinInt, r.intRangeLessThan(Index, i, max));
+ const j = @as(MinInt, @intCast(r.intRangeLessThan(Index, i, max)));
mem.swap(T, &buf[i], &buf[j]);
}
}
@@ -445,7 +444,7 @@ pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T {
// http://www.pcg-random.org/posts/bounded-rands.html
// "Integer Multiplication (Biased)"
var m: T2 = @as(T2, random_int) * @as(T2, less_than);
- return @intCast(T, m >> bits);
+ return @as(T, @intCast(m >> bits));
}
// Generator to extend 64-bit seed values into longer sequences.
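The rand.zig hunks are the bulk of the migration: @intCast, @truncate and @bitCast all drop their type parameter and pick it up from the result type, falling back to an @as(T, ...) wrapper when the cast sits inside a larger expression with no result location. A hedged sketch of the three spellings, not taken from the diff itself:

const std = @import("std");

fn low32(x: u64) u32 {
    // The function's return type drives @truncate.
    return @truncate(x);
}

fn asUnsigned(x: i32) u32 {
    // Same bit pattern; the destination comes from the annotation.
    const u: u32 = @bitCast(x);
    return u;
}

fn indexFrom(x: u64) usize {
    // No result location inside the addition, so wrap in @as, exactly
    // as the converted std code does.
    return @as(usize, @intCast(x % 16)) + 1;
}

test "inferred-destination integer casts" {
    try std.testing.expectEqual(@as(u32, 0xDDCC_BBAA), low32(0x1122_3344_DDCC_BBAA));
    try std.testing.expectEqual(@as(u32, 0xFFFF_FFFF), asUnsigned(-1));
    try std.testing.expectEqual(@as(usize, 6), indexFrom(21));
}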
diff --git a/lib/std/rand/Isaac64.zig b/lib/std/rand/Isaac64.zig
index 8c6205e1cd..785c551dfd 100644
--- a/lib/std/rand/Isaac64.zig
+++ b/lib/std/rand/Isaac64.zig
@@ -38,10 +38,10 @@ fn step(self: *Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2:
const x = self.m[base + m1];
self.a = mix +% self.m[base + m2];
- const y = self.a +% self.b +% self.m[@intCast(usize, (x >> 3) % self.m.len)];
+ const y = self.a +% self.b +% self.m[@as(usize, @intCast((x >> 3) % self.m.len))];
self.m[base + m1] = y;
- self.b = x +% self.m[@intCast(usize, (y >> 11) % self.m.len)];
+ self.b = x +% self.m[@as(usize, @intCast((y >> 11) % self.m.len))];
self.r[self.r.len - 1 - base - m1] = self.b;
}
@@ -159,7 +159,7 @@ pub fn fill(self: *Isaac64, buf: []u8) void {
var n = self.next();
comptime var j: usize = 0;
inline while (j < 8) : (j += 1) {
- buf[i + j] = @truncate(u8, n);
+ buf[i + j] = @as(u8, @truncate(n));
n >>= 8;
}
}
@@ -168,7 +168,7 @@ pub fn fill(self: *Isaac64, buf: []u8) void {
if (i != buf.len) {
var n = self.next();
while (i < buf.len) : (i += 1) {
- buf[i] = @truncate(u8, n);
+ buf[i] = @as(u8, @truncate(n));
n >>= 8;
}
}
diff --git a/lib/std/rand/Pcg.zig b/lib/std/rand/Pcg.zig
index 951713cc40..ceeadeab5c 100644
--- a/lib/std/rand/Pcg.zig
+++ b/lib/std/rand/Pcg.zig
@@ -29,10 +29,10 @@ fn next(self: *Pcg) u32 {
const l = self.s;
self.s = l *% default_multiplier +% (self.i | 1);
- const xor_s = @truncate(u32, ((l >> 18) ^ l) >> 27);
- const rot = @intCast(u32, l >> 59);
+ const xor_s = @as(u32, @truncate(((l >> 18) ^ l) >> 27));
+ const rot = @as(u32, @intCast(l >> 59));
- return (xor_s >> @intCast(u5, rot)) | (xor_s << @intCast(u5, (0 -% rot) & 31));
+ return (xor_s >> @as(u5, @intCast(rot))) | (xor_s << @as(u5, @intCast((0 -% rot) & 31)));
}
fn seed(self: *Pcg, init_s: u64) void {
@@ -58,7 +58,7 @@ pub fn fill(self: *Pcg, buf: []u8) void {
var n = self.next();
comptime var j: usize = 0;
inline while (j < 4) : (j += 1) {
- buf[i + j] = @truncate(u8, n);
+ buf[i + j] = @as(u8, @truncate(n));
n >>= 8;
}
}
@@ -67,7 +67,7 @@ pub fn fill(self: *Pcg, buf: []u8) void {
if (i != buf.len) {
var n = self.next();
while (i < buf.len) : (i += 1) {
- buf[i] = @truncate(u8, n);
+ buf[i] = @as(u8, @truncate(n));
n >>= 8;
}
}
diff --git a/lib/std/rand/RomuTrio.zig b/lib/std/rand/RomuTrio.zig
index ff7b4deac1..4ce2b7af01 100644
--- a/lib/std/rand/RomuTrio.zig
+++ b/lib/std/rand/RomuTrio.zig
@@ -34,7 +34,7 @@ fn next(self: *RomuTrio) u64 {
}
pub fn seedWithBuf(self: *RomuTrio, buf: [24]u8) void {
- const seed_buf = @bitCast([3]u64, buf);
+ const seed_buf = @as([3]u64, @bitCast(buf));
self.x_state = seed_buf[0];
self.y_state = seed_buf[1];
self.z_state = seed_buf[2];
@@ -58,7 +58,7 @@ pub fn fill(self: *RomuTrio, buf: []u8) void {
var n = self.next();
comptime var j: usize = 0;
inline while (j < 8) : (j += 1) {
- buf[i + j] = @truncate(u8, n);
+ buf[i + j] = @as(u8, @truncate(n));
n >>= 8;
}
}
@@ -67,7 +67,7 @@ pub fn fill(self: *RomuTrio, buf: []u8) void {
if (i != buf.len) {
var n = self.next();
while (i < buf.len) : (i += 1) {
- buf[i] = @truncate(u8, n);
+ buf[i] = @as(u8, @truncate(n));
n >>= 8;
}
}
@@ -122,7 +122,7 @@ test "RomuTrio fill" {
}
test "RomuTrio buf seeding test" {
- const buf0 = @bitCast([24]u8, [3]u64{ 16294208416658607535, 13964609475759908645, 4703697494102998476 });
+ const buf0 = @as([24]u8, @bitCast([3]u64{ 16294208416658607535, 13964609475759908645, 4703697494102998476 }));
const resulting_state = .{ .x = 16294208416658607535, .y = 13964609475759908645, .z = 4703697494102998476 };
var r = RomuTrio.init(0);
r.seedWithBuf(buf0);
diff --git a/lib/std/rand/Sfc64.zig b/lib/std/rand/Sfc64.zig
index a5e6920df7..af439b115b 100644
--- a/lib/std/rand/Sfc64.zig
+++ b/lib/std/rand/Sfc64.zig
@@ -56,7 +56,7 @@ pub fn fill(self: *Sfc64, buf: []u8) void {
var n = self.next();
comptime var j: usize = 0;
inline while (j < 8) : (j += 1) {
- buf[i + j] = @truncate(u8, n);
+ buf[i + j] = @as(u8, @truncate(n));
n >>= 8;
}
}
@@ -65,7 +65,7 @@ pub fn fill(self: *Sfc64, buf: []u8) void {
if (i != buf.len) {
var n = self.next();
while (i < buf.len) : (i += 1) {
- buf[i] = @truncate(u8, n);
+ buf[i] = @as(u8, @truncate(n));
n >>= 8;
}
}
diff --git a/lib/std/rand/Xoroshiro128.zig b/lib/std/rand/Xoroshiro128.zig
index 6ddd2eb89e..56c4980e6d 100644
--- a/lib/std/rand/Xoroshiro128.zig
+++ b/lib/std/rand/Xoroshiro128.zig
@@ -45,7 +45,7 @@ pub fn jump(self: *Xoroshiro128) void {
inline for (table) |entry| {
var b: usize = 0;
while (b < 64) : (b += 1) {
- if ((entry & (@as(u64, 1) << @intCast(u6, b))) != 0) {
+ if ((entry & (@as(u64, 1) << @as(u6, @intCast(b)))) != 0) {
s0 ^= self.s[0];
s1 ^= self.s[1];
}
@@ -74,7 +74,7 @@ pub fn fill(self: *Xoroshiro128, buf: []u8) void {
var n = self.next();
comptime var j: usize = 0;
inline while (j < 8) : (j += 1) {
- buf[i + j] = @truncate(u8, n);
+ buf[i + j] = @as(u8, @truncate(n));
n >>= 8;
}
}
@@ -83,7 +83,7 @@ pub fn fill(self: *Xoroshiro128, buf: []u8) void {
if (i != buf.len) {
var n = self.next();
while (i < buf.len) : (i += 1) {
- buf[i] = @truncate(u8, n);
+ buf[i] = @as(u8, @truncate(n));
n >>= 8;
}
}
diff --git a/lib/std/rand/Xoshiro256.zig b/lib/std/rand/Xoshiro256.zig
index 35af701ea1..c72d9ee1a2 100644
--- a/lib/std/rand/Xoshiro256.zig
+++ b/lib/std/rand/Xoshiro256.zig
@@ -46,13 +46,13 @@ pub fn jump(self: *Xoshiro256) void {
var table: u256 = 0x39abdc4529b1661ca9582618e03fc9aad5a61266f0c9392c180ec6d33cfd0aba;
while (table != 0) : (table >>= 1) {
- if (@truncate(u1, table) != 0) {
- s ^= @bitCast(u256, self.s);
+ if (@as(u1, @truncate(table)) != 0) {
+ s ^= @as(u256, @bitCast(self.s));
}
_ = self.next();
}
- self.s = @bitCast([4]u64, s);
+ self.s = @as([4]u64, @bitCast(s));
}
pub fn seed(self: *Xoshiro256, init_s: u64) void {
@@ -74,7 +74,7 @@ pub fn fill(self: *Xoshiro256, buf: []u8) void {
var n = self.next();
comptime var j: usize = 0;
inline while (j < 8) : (j += 1) {
- buf[i + j] = @truncate(u8, n);
+ buf[i + j] = @as(u8, @truncate(n));
n >>= 8;
}
}
@@ -83,7 +83,7 @@ pub fn fill(self: *Xoshiro256, buf: []u8) void {
if (i != buf.len) {
var n = self.next();
while (i < buf.len) : (i += 1) {
- buf[i] = @truncate(u8, n);
+ buf[i] = @as(u8, @truncate(n));
n >>= 8;
}
}
diff --git a/lib/std/rand/benchmark.zig b/lib/std/rand/benchmark.zig
index ea3de9c70d..530556517c 100644
--- a/lib/std/rand/benchmark.zig
+++ b/lib/std/rand/benchmark.zig
@@ -91,8 +91,8 @@ pub fn benchmark(comptime H: anytype, bytes: usize, comptime block_size: usize)
}
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s));
std.debug.assert(rng.random().int(u64) != 0);
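The benchmark hunk shows the float builtins getting the same treatment: @floatFromInt and @intFromFloat take only the operand, with the destination inferred from the result type or supplied through @as. A small throughput-style sketch with made-up numbers, not the real benchmark harness:

const std = @import("std");

fn throughput(bytes: usize, elapsed_ns: u64) u64 {
    // Mirrors the converted benchmark code: @as supplies f64 where the
    // division has no result location of its own.
    const elapsed_s = @as(f64, @floatFromInt(elapsed_ns)) / std.time.ns_per_s;
    const rate = @as(f64, @floatFromInt(bytes)) / elapsed_s;
    // The return type supplies u64 for @intFromFloat.
    return @intFromFloat(rate);
}

test "float conversion builtins" {
    // 1 MiB in half a second is 2 MiB/s.
    try std.testing.expectEqual(@as(u64, 2 * 1024 * 1024), throughput(1024 * 1024, std.time.ns_per_s / 2));
}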
diff --git a/lib/std/rand/test.zig b/lib/std/rand/test.zig
index 6cc6891c5a..551e47f8ff 100644
--- a/lib/std/rand/test.zig
+++ b/lib/std/rand/test.zig
@@ -332,13 +332,13 @@ test "Random float chi-square goodness of fit" {
while (i < num_numbers) : (i += 1) {
const rand_f32 = random.float(f32);
const rand_f64 = random.float(f64);
- var f32_put = try f32_hist.getOrPut(@intFromFloat(u32, rand_f32 * @floatFromInt(f32, num_buckets)));
+ var f32_put = try f32_hist.getOrPut(@as(u32, @intFromFloat(rand_f32 * @as(f32, @floatFromInt(num_buckets)))));
if (f32_put.found_existing) {
f32_put.value_ptr.* += 1;
} else {
f32_put.value_ptr.* = 1;
}
- var f64_put = try f64_hist.getOrPut(@intFromFloat(u32, rand_f64 * @floatFromInt(f64, num_buckets)));
+ var f64_put = try f64_hist.getOrPut(@as(u32, @intFromFloat(rand_f64 * @as(f64, @floatFromInt(num_buckets)))));
if (f64_put.found_existing) {
f64_put.value_ptr.* += 1;
} else {
@@ -352,8 +352,8 @@ test "Random float chi-square goodness of fit" {
{
var j: u32 = 0;
while (j < num_buckets) : (j += 1) {
- const count = @floatFromInt(f64, (if (f32_hist.get(j)) |v| v else 0));
- const expected = @floatFromInt(f64, num_numbers) / @floatFromInt(f64, num_buckets);
+ const count = @as(f64, @floatFromInt((if (f32_hist.get(j)) |v| v else 0)));
+ const expected = @as(f64, @floatFromInt(num_numbers)) / @as(f64, @floatFromInt(num_buckets));
const delta = count - expected;
const variance = (delta * delta) / expected;
f32_total_variance += variance;
@@ -363,8 +363,8 @@ test "Random float chi-square goodness of fit" {
{
var j: u64 = 0;
while (j < num_buckets) : (j += 1) {
- const count = @floatFromInt(f64, (if (f64_hist.get(j)) |v| v else 0));
- const expected = @floatFromInt(f64, num_numbers) / @floatFromInt(f64, num_buckets);
+ const count = @as(f64, @floatFromInt((if (f64_hist.get(j)) |v| v else 0)));
+ const expected = @as(f64, @floatFromInt(num_numbers)) / @as(f64, @floatFromInt(num_buckets));
const delta = count - expected;
const variance = (delta * delta) / expected;
f64_total_variance += variance;
@@ -421,13 +421,13 @@ fn testRange(r: Random, start: i8, end: i8) !void {
try testRangeBias(r, start, end, false);
}
fn testRangeBias(r: Random, start: i8, end: i8, biased: bool) !void {
- const count = @intCast(usize, @as(i32, end) - @as(i32, start));
+ const count = @as(usize, @intCast(@as(i32, end) - @as(i32, start)));
var values_buffer = [_]bool{false} ** 0x100;
const values = values_buffer[0..count];
var i: usize = 0;
while (i < count) {
const value: i32 = if (biased) r.intRangeLessThanBiased(i8, start, end) else r.intRangeLessThan(i8, start, end);
- const index = @intCast(usize, value - start);
+ const index = @as(usize, @intCast(value - start));
if (!values[index]) {
i += 1;
values[index] = true;
diff --git a/lib/std/rand/ziggurat.zig b/lib/std/rand/ziggurat.zig
index afe00a1348..09d695b88d 100644
--- a/lib/std/rand/ziggurat.zig
+++ b/lib/std/rand/ziggurat.zig
@@ -18,17 +18,17 @@ pub fn next_f64(random: Random, comptime tables: ZigTable) f64 {
// We manually construct a float from parts as we can avoid an extra random lookup here by
// using the unused exponent for the lookup table entry.
const bits = random.int(u64);
- const i = @as(usize, @truncate(u8, bits));
+ const i = @as(usize, @as(u8, @truncate(bits)));
const u = blk: {
if (tables.is_symmetric) {
// Generate a value in the range [2, 4) and scale into [-1, 1)
const repr = ((0x3ff + 1) << 52) | (bits >> 12);
- break :blk @bitCast(f64, repr) - 3.0;
+ break :blk @as(f64, @bitCast(repr)) - 3.0;
} else {
// Generate a value in the range [1, 2) and scale into (0, 1)
const repr = (0x3ff << 52) | (bits >> 12);
- break :blk @bitCast(f64, repr) - (1.0 - math.floatEps(f64) / 2.0);
+ break :blk @as(f64, @bitCast(repr)) - (1.0 - math.floatEps(f64) / 2.0);
}
};
diff --git a/lib/std/segmented_list.zig b/lib/std/segmented_list.zig
index 172fe4e7c3..1c9cffa766 100644
--- a/lib/std/segmented_list.zig
+++ b/lib/std/segmented_list.zig
@@ -107,7 +107,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
pub fn deinit(self: *Self, allocator: Allocator) void {
- self.freeShelves(allocator, @intCast(ShelfIndex, self.dynamic_segments.len), 0);
+ self.freeShelves(allocator, @as(ShelfIndex, @intCast(self.dynamic_segments.len)), 0);
allocator.free(self.dynamic_segments);
self.* = undefined;
}
@@ -171,7 +171,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
/// TODO update this and related methods to match the conventions set by ArrayList
pub fn setCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
if (prealloc_item_count != 0) {
- if (new_capacity <= @as(usize, 1) << (prealloc_exp + @intCast(ShelfIndex, self.dynamic_segments.len))) {
+ if (new_capacity <= @as(usize, 1) << (prealloc_exp + @as(ShelfIndex, @intCast(self.dynamic_segments.len)))) {
return self.shrinkCapacity(allocator, new_capacity);
}
}
@@ -181,7 +181,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
/// Only grows capacity, or retains current capacity.
pub fn growCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
const new_cap_shelf_count = shelfCount(new_capacity);
- const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len);
+ const old_shelf_count = @as(ShelfIndex, @intCast(self.dynamic_segments.len));
if (new_cap_shelf_count <= old_shelf_count) return;
const new_dynamic_segments = try allocator.alloc([*]T, new_cap_shelf_count);
@@ -206,7 +206,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
/// It may fail to reduce the capacity in which case the capacity will remain unchanged.
pub fn shrinkCapacity(self: *Self, allocator: Allocator, new_capacity: usize) void {
if (new_capacity <= prealloc_item_count) {
- const len = @intCast(ShelfIndex, self.dynamic_segments.len);
+ const len = @as(ShelfIndex, @intCast(self.dynamic_segments.len));
self.freeShelves(allocator, len, 0);
allocator.free(self.dynamic_segments);
self.dynamic_segments = &[_][*]T{};
@@ -214,7 +214,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
const new_cap_shelf_count = shelfCount(new_capacity);
- const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len);
+ const old_shelf_count = @as(ShelfIndex, @intCast(self.dynamic_segments.len));
assert(new_cap_shelf_count <= old_shelf_count);
if (new_cap_shelf_count == old_shelf_count) return;
@@ -424,7 +424,7 @@ fn testSegmentedList(comptime prealloc: usize) !void {
{
var i: usize = 0;
while (i < 100) : (i += 1) {
- try list.append(testing.allocator, @intCast(i32, i + 1));
+ try list.append(testing.allocator, @as(i32, @intCast(i + 1)));
try testing.expect(list.len == i + 1);
}
}
@@ -432,7 +432,7 @@ fn testSegmentedList(comptime prealloc: usize) !void {
{
var i: usize = 0;
while (i < 100) : (i += 1) {
- try testing.expect(list.at(i).* == @intCast(i32, i + 1));
+ try testing.expect(list.at(i).* == @as(i32, @intCast(i + 1)));
}
}
@@ -492,7 +492,7 @@ fn testSegmentedList(comptime prealloc: usize) !void {
var i: i32 = 0;
while (i < 100) : (i += 1) {
try list.append(testing.allocator, i + 1);
- control[@intCast(usize, i)] = i + 1;
+ control[@as(usize, @intCast(i))] = i + 1;
}
@memset(dest[0..], 0);
diff --git a/lib/std/simd.zig b/lib/std/simd.zig
index 78d24a80bf..b3a50168ff 100644
--- a/lib/std/simd.zig
+++ b/lib/std/simd.zig
@@ -93,8 +93,8 @@ pub inline fn iota(comptime T: type, comptime len: usize) @Vector(len, T) {
var out: [len]T = undefined;
for (&out, 0..) |*element, i| {
element.* = switch (@typeInfo(T)) {
- .Int => @intCast(T, i),
- .Float => @floatFromInt(T, i),
+ .Int => @as(T, @intCast(i)),
+ .Float => @as(T, @floatFromInt(i)),
else => @compileError("Can't use type " ++ @typeName(T) ++ " in iota."),
};
}
@@ -107,7 +107,7 @@ pub inline fn iota(comptime T: type, comptime len: usize) @Vector(len, T) {
pub fn repeat(comptime len: usize, vec: anytype) @Vector(len, std.meta.Child(@TypeOf(vec))) {
const Child = std.meta.Child(@TypeOf(vec));
- return @shuffle(Child, vec, undefined, iota(i32, len) % @splat(len, @intCast(i32, vectorLength(@TypeOf(vec)))));
+ return @shuffle(Child, vec, undefined, iota(i32, len) % @splat(len, @as(i32, @intCast(vectorLength(@TypeOf(vec))))));
}
/// Returns a vector containing all elements of the first vector at the lower indices followed by all elements of the second vector
@@ -139,8 +139,8 @@ pub fn interlace(vecs: anytype) @Vector(vectorLength(@TypeOf(vecs[0])) * vecs.le
const a_vec_count = (1 + vecs_arr.len) >> 1;
const b_vec_count = vecs_arr.len >> 1;
- const a = interlace(@ptrCast(*const [a_vec_count]VecType, vecs_arr[0..a_vec_count]).*);
- const b = interlace(@ptrCast(*const [b_vec_count]VecType, vecs_arr[a_vec_count..]).*);
+ const a = interlace(@as(*const [a_vec_count]VecType, @ptrCast(vecs_arr[0..a_vec_count])).*);
+ const b = interlace(@as(*const [b_vec_count]VecType, @ptrCast(vecs_arr[a_vec_count..])).*);
const a_len = vectorLength(@TypeOf(a));
const b_len = vectorLength(@TypeOf(b));
@@ -148,10 +148,10 @@ pub fn interlace(vecs: anytype) @Vector(vectorLength(@TypeOf(vecs[0])) * vecs.le
const indices = comptime blk: {
const count_up = iota(i32, len);
- const cycle = @divFloor(count_up, @splat(len, @intCast(i32, vecs_arr.len)));
+ const cycle = @divFloor(count_up, @splat(len, @as(i32, @intCast(vecs_arr.len))));
const select_mask = repeat(len, join(@splat(a_vec_count, true), @splat(b_vec_count, false)));
- const a_indices = count_up - cycle * @splat(len, @intCast(i32, b_vec_count));
- const b_indices = shiftElementsRight(count_up - cycle * @splat(len, @intCast(i32, a_vec_count)), a_vec_count, 0);
+ const a_indices = count_up - cycle * @splat(len, @as(i32, @intCast(b_vec_count)));
+ const b_indices = shiftElementsRight(count_up - cycle * @splat(len, @as(i32, @intCast(a_vec_count))), a_vec_count, 0);
break :blk @select(i32, select_mask, a_indices, ~b_indices);
};
@@ -174,7 +174,7 @@ pub fn deinterlace(
comptime var i: usize = 0; // for-loops don't work for this, apparently.
inline while (i < out.len) : (i += 1) {
- const indices = comptime iota(i32, vec_len) * @splat(vec_len, @intCast(i32, vec_count)) + @splat(vec_len, @intCast(i32, i));
+ const indices = comptime iota(i32, vec_len) * @splat(vec_len, @as(i32, @intCast(vec_count))) + @splat(vec_len, @as(i32, @intCast(i)));
out[i] = @shuffle(Child, interlaced, undefined, indices);
}
@@ -189,9 +189,9 @@ pub fn extract(
const Child = std.meta.Child(@TypeOf(vec));
const len = vectorLength(@TypeOf(vec));
- std.debug.assert(@intCast(comptime_int, first) + @intCast(comptime_int, count) <= len);
+ std.debug.assert(@as(comptime_int, @intCast(first)) + @as(comptime_int, @intCast(count)) <= len);
- return @shuffle(Child, vec, undefined, iota(i32, count) + @splat(count, @intCast(i32, first)));
+ return @shuffle(Child, vec, undefined, iota(i32, count) + @splat(count, @as(i32, @intCast(first))));
}
test "vector patterns" {
@@ -263,7 +263,7 @@ pub fn reverseOrder(vec: anytype) @TypeOf(vec) {
const Child = std.meta.Child(@TypeOf(vec));
const len = vectorLength(@TypeOf(vec));
- return @shuffle(Child, vec, undefined, @splat(len, @intCast(i32, len) - 1) - iota(i32, len));
+ return @shuffle(Child, vec, undefined, @splat(len, @as(i32, @intCast(len)) - 1) - iota(i32, len));
}
test "vector shifting" {
diff --git a/lib/std/sort/pdq.zig b/lib/std/sort/pdq.zig
index 23678a79c6..795dd29fc5 100644
--- a/lib/std/sort/pdq.zig
+++ b/lib/std/sort/pdq.zig
@@ -251,7 +251,7 @@ fn breakPatterns(a: usize, b: usize, context: anytype) void {
const len = b - a;
if (len < 8) return;
- var rand = @intCast(u64, len);
+ var rand = @as(u64, @intCast(len));
const modulus = math.ceilPowerOfTwoAssert(u64, len);
var i = a + (len / 4) * 2 - 1;
@@ -261,7 +261,7 @@ fn breakPatterns(a: usize, b: usize, context: anytype) void {
rand ^= rand >> 7;
rand ^= rand << 17;
- var other = @intCast(usize, rand & (modulus - 1));
+ var other = @as(usize, @intCast(rand & (modulus - 1)));
if (other >= len) other -= len;
context.swap(i, a + other);
}
diff --git a/lib/std/start.zig b/lib/std/start.zig
index 9c83bd881c..d81eb4f9e9 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -190,7 +190,7 @@ fn exit2(code: usize) noreturn {
else => @compileError("TODO"),
},
.windows => {
- ExitProcess(@truncate(u32, code));
+ ExitProcess(@as(u32, @truncate(code)));
},
else => @compileError("TODO"),
}
@@ -387,23 +387,23 @@ fn wWinMainCRTStartup() callconv(std.os.windows.WINAPI) noreturn {
std.debug.maybeEnableSegfaultHandler();
const result: std.os.windows.INT = initEventLoopAndCallWinMain();
- std.os.windows.kernel32.ExitProcess(@bitCast(std.os.windows.UINT, result));
+ std.os.windows.kernel32.ExitProcess(@as(std.os.windows.UINT, @bitCast(result)));
}
fn posixCallMainAndExit() callconv(.C) noreturn {
@setAlignStack(16);
const argc = argc_argv_ptr[0];
- const argv = @ptrCast([*][*:0]u8, argc_argv_ptr + 1);
+ const argv = @as([*][*:0]u8, @ptrCast(argc_argv_ptr + 1));
- const envp_optional = @ptrCast([*:null]?[*:0]u8, @alignCast(@alignOf(usize), argv + argc + 1));
+ const envp_optional: [*:null]?[*:0]u8 = @ptrCast(@alignCast(argv + argc + 1));
var envp_count: usize = 0;
while (envp_optional[envp_count]) |_| : (envp_count += 1) {}
- const envp = @ptrCast([*][*:0]u8, envp_optional)[0..envp_count];
+ const envp = @as([*][*:0]u8, @ptrCast(envp_optional))[0..envp_count];
if (native_os == .linux) {
// Find the beginning of the auxiliary vector
- const auxv = @ptrCast([*]elf.Auxv, @alignCast(@alignOf(usize), envp.ptr + envp_count + 1));
+ const auxv: [*]elf.Auxv = @ptrCast(@alignCast(envp.ptr + envp_count + 1));
std.os.linux.elf_aux_maybe = auxv;
var at_hwcap: usize = 0;
@@ -419,7 +419,7 @@ fn posixCallMainAndExit() callconv(.C) noreturn {
else => continue,
}
}
- break :init @ptrFromInt([*]elf.Phdr, at_phdr)[0..at_phnum];
+ break :init @as([*]elf.Phdr, @ptrFromInt(at_phdr))[0..at_phnum];
};
// Apply the initial relocations as early as possible in the startup
@@ -495,20 +495,20 @@ fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 {
fn main(c_argc: c_int, c_argv: [*][*:0]c_char, c_envp: [*:null]?[*:0]c_char) callconv(.C) c_int {
var env_count: usize = 0;
while (c_envp[env_count] != null) : (env_count += 1) {}
- const envp = @ptrCast([*][*:0]u8, c_envp)[0..env_count];
+ const envp = @as([*][*:0]u8, @ptrCast(c_envp))[0..env_count];
if (builtin.os.tag == .linux) {
const at_phdr = std.c.getauxval(elf.AT_PHDR);
const at_phnum = std.c.getauxval(elf.AT_PHNUM);
- const phdrs = (@ptrFromInt([*]elf.Phdr, at_phdr))[0..at_phnum];
+ const phdrs = (@as([*]elf.Phdr, @ptrFromInt(at_phdr)))[0..at_phnum];
expandStackSize(phdrs);
}
- return @call(.always_inline, callMainWithArgs, .{ @intCast(usize, c_argc), @ptrCast([*][*:0]u8, c_argv), envp });
+ return @call(.always_inline, callMainWithArgs, .{ @as(usize, @intCast(c_argc)), @as([*][*:0]u8, @ptrCast(c_argv)), envp });
}
fn mainWithoutEnv(c_argc: c_int, c_argv: [*][*:0]c_char) callconv(.C) c_int {
- std.os.argv = @ptrCast([*][*:0]u8, c_argv)[0..@intCast(usize, c_argc)];
+ std.os.argv = @as([*][*:0]u8, @ptrCast(c_argv))[0..@as(usize, @intCast(c_argc))];
return @call(.always_inline, callMain, .{});
}
@@ -629,7 +629,7 @@ pub fn callMain() u8 {
pub fn call_wWinMain() std.os.windows.INT {
const MAIN_HINSTANCE = @typeInfo(@TypeOf(root.wWinMain)).Fn.params[0].type.?;
- const hInstance = @ptrCast(MAIN_HINSTANCE, std.os.windows.kernel32.GetModuleHandleW(null).?);
+ const hInstance = @as(MAIN_HINSTANCE, @ptrCast(std.os.windows.kernel32.GetModuleHandleW(null).?));
const lpCmdLine = std.os.windows.kernel32.GetCommandLineW();
// There's no (documented) way to get the nCmdShow parameter, so we're
diff --git a/lib/std/start_windows_tls.zig b/lib/std/start_windows_tls.zig
index a1cd8387dc..48880b4811 100644
--- a/lib/std/start_windows_tls.zig
+++ b/lib/std/start_windows_tls.zig
@@ -42,7 +42,7 @@ export const _tls_used linksection(".rdata$T") = IMAGE_TLS_DIRECTORY{
.StartAddressOfRawData = &_tls_start,
.EndAddressOfRawData = &_tls_end,
.AddressOfIndex = &_tls_index,
- .AddressOfCallBacks = @ptrCast(*anyopaque, &__xl_a),
+ .AddressOfCallBacks = @as(*anyopaque, @ptrCast(&__xl_a)),
.SizeOfZeroFill = 0,
.Characteristics = 0,
};
diff --git a/lib/std/tar.zig b/lib/std/tar.zig
index 688d093587..bc9a22fb7c 100644
--- a/lib/std/tar.zig
+++ b/lib/std/tar.zig
@@ -70,8 +70,8 @@ pub const Header = struct {
}
pub fn fileType(header: Header) FileType {
- const result = @enumFromInt(FileType, header.bytes[156]);
- return if (result == @enumFromInt(FileType, 0)) .normal else result;
+ const result = @as(FileType, @enumFromInt(header.bytes[156]));
+ return if (result == @as(FileType, @enumFromInt(0))) .normal else result;
}
fn str(header: Header, start: usize, end: usize) []const u8 {
@@ -117,7 +117,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi
start += 512;
const file_size = try header.fileSize();
const rounded_file_size = std.mem.alignForward(u64, file_size, 512);
- const pad_len = @intCast(usize, rounded_file_size - file_size);
+ const pad_len = @as(usize, @intCast(rounded_file_size - file_size));
const unstripped_file_name = try header.fullFileName(&file_name_buffer);
switch (header.fileType()) {
.directory => {
@@ -146,14 +146,14 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi
}
// Ask for the rounded up file size + 512 for the next header.
// TODO: https://github.com/ziglang/zig/issues/14039
- const ask = @intCast(usize, @min(
+ const ask = @as(usize, @intCast(@min(
buffer.len - end,
rounded_file_size + 512 - file_off -| (end - start),
- ));
+ )));
end += try reader.readAtLeast(buffer[end..], ask);
if (end - start < ask) return error.UnexpectedEndOfStream;
// TODO: https://github.com/ziglang/zig/issues/14039
- const slice = buffer[start..@intCast(usize, @min(file_size - file_off + start, end))];
+ const slice = buffer[start..@as(usize, @intCast(@min(file_size - file_off + start, end)))];
try file.writeAll(slice);
file_off += slice.len;
start += slice.len;
@@ -167,7 +167,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi
},
.global_extended_header, .extended_header => {
if (start + rounded_file_size > end) return error.TarHeadersTooBig;
- start = @intCast(usize, start + rounded_file_size);
+ start = @as(usize, @intCast(start + rounded_file_size));
},
.hard_link => return error.TarUnsupportedFileType,
.symbolic_link => return error.TarUnsupportedFileType,
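@enumFromInt follows the same rule, which is why tar.zig's fileType now spells the comparison as @as(FileType, @enumFromInt(0)): a bare == operand has no result type to infer from. A sketch with a stand-in non-exhaustive enum, not the real tar FileType:

const std = @import("std");

const Kind = enum(u8) { normal = 0, directory = 5, other = 0xff, _ };

fn kindFromByte(byte: u8) Kind {
    // The annotated constant supplies the enum type here...
    const k: Kind = @enumFromInt(byte);
    // ...but the comparison operand needs the @as wrapper, as in the diff.
    return if (k == @as(Kind, @enumFromInt(0))) .normal else k;
}

test "enumFromInt with inferred destination" {
    try std.testing.expectEqual(Kind.directory, kindFromByte(5));
    try std.testing.expectEqual(Kind.normal, kindFromByte(0));
}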
diff --git a/lib/std/target.zig b/lib/std/target.zig
index ec61292360..2a96e84001 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -711,14 +711,14 @@ pub const Target = struct {
pub fn isEnabled(set: Set, arch_feature_index: Index) bool {
const usize_index = arch_feature_index / @bitSizeOf(usize);
- const bit_index = @intCast(ShiftInt, arch_feature_index % @bitSizeOf(usize));
+ const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize)));
return (set.ints[usize_index] & (@as(usize, 1) << bit_index)) != 0;
}
/// Adds the specified feature but not its dependencies.
pub fn addFeature(set: *Set, arch_feature_index: Index) void {
const usize_index = arch_feature_index / @bitSizeOf(usize);
- const bit_index = @intCast(ShiftInt, arch_feature_index % @bitSizeOf(usize));
+ const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize)));
set.ints[usize_index] |= @as(usize, 1) << bit_index;
}
@@ -730,7 +730,7 @@ pub const Target = struct {
/// Removes the specified feature but not its dependents.
pub fn removeFeature(set: *Set, arch_feature_index: Index) void {
const usize_index = arch_feature_index / @bitSizeOf(usize);
- const bit_index = @intCast(ShiftInt, arch_feature_index % @bitSizeOf(usize));
+ const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize)));
set.ints[usize_index] &= ~(@as(usize, 1) << bit_index);
}
@@ -745,7 +745,7 @@ pub const Target = struct {
var old = set.ints;
while (true) {
for (all_features_list, 0..) |feature, index_usize| {
- const index = @intCast(Index, index_usize);
+ const index = @as(Index, @intCast(index_usize));
if (set.isEnabled(index)) {
set.addFeatureSet(feature.dependencies);
}
@@ -757,7 +757,7 @@ pub const Target = struct {
}
pub fn asBytes(set: *const Set) *const [byte_count]u8 {
- return @ptrCast(*const [byte_count]u8, &set.ints);
+ return @as(*const [byte_count]u8, @ptrCast(&set.ints));
}
pub fn eql(set: Set, other_set: Set) bool {
@@ -1526,7 +1526,7 @@ pub const Target = struct {
pub fn set(self: *DynamicLinker, dl_or_null: ?[]const u8) void {
if (dl_or_null) |dl| {
@memcpy(self.buffer[0..dl.len], dl);
- self.max_byte = @intCast(u8, dl.len - 1);
+ self.max_byte = @as(u8, @intCast(dl.len - 1));
} else {
self.max_byte = null;
}
@@ -1537,12 +1537,12 @@ pub const Target = struct {
var result: DynamicLinker = .{};
const S = struct {
fn print(r: *DynamicLinker, comptime fmt: []const u8, args: anytype) DynamicLinker {
- r.max_byte = @intCast(u8, (std.fmt.bufPrint(&r.buffer, fmt, args) catch unreachable).len - 1);
+ r.max_byte = @as(u8, @intCast((std.fmt.bufPrint(&r.buffer, fmt, args) catch unreachable).len - 1));
return r.*;
}
fn copy(r: *DynamicLinker, s: []const u8) DynamicLinker {
@memcpy(r.buffer[0..s.len], s);
- r.max_byte = @intCast(u8, s.len - 1);
+ r.max_byte = @as(u8, @intCast(s.len - 1));
return r.*;
}
};
@@ -1970,7 +1970,7 @@ pub const Target = struct {
16 => 2,
32 => 4,
64 => 8,
- 80 => @intCast(u16, mem.alignForward(usize, 10, c_type_alignment(t, .longdouble))),
+ 80 => @as(u16, @intCast(mem.alignForward(usize, 10, c_type_alignment(t, .longdouble)))),
128 => 16,
else => unreachable,
},
diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index 2cdb78cd1d..313af987ab 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -63,7 +63,7 @@ pub const FailingAllocator = struct {
log2_ptr_align: u8,
return_address: usize,
) ?[*]u8 {
- const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx));
+ const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
if (self.index == self.fail_index) {
if (!self.has_induced_failure) {
@memset(&self.stack_addresses, 0);
@@ -91,7 +91,7 @@ pub const FailingAllocator = struct {
new_len: usize,
ra: usize,
) bool {
- const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx));
+ const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
if (!self.internal_allocator.rawResize(old_mem, log2_old_align, new_len, ra))
return false;
if (new_len < old_mem.len) {
@@ -108,7 +108,7 @@ pub const FailingAllocator = struct {
log2_old_align: u8,
ra: usize,
) void {
- const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx));
+ const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
self.internal_allocator.rawFree(old_mem, log2_old_align, ra);
self.deallocations += 1;
self.freed_bytes += old_mem.len;
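The failing_allocator hunks show the most common composite form: recovering a typed pointer from a *anyopaque context with @ptrCast(@alignCast(ctx)), where one annotated result type now feeds both builtins and replaces the nested two-argument calls. A minimal vtable-style sketch; the Counter type is illustrative:

const std = @import("std");

const Counter = struct {
    hits: usize = 0,

    fn callback(ctx: *anyopaque) void {
        // One result type drives both casts, replacing
        // @ptrCast(*Counter, @alignCast(@alignOf(Counter), ctx)).
        const self: *Counter = @ptrCast(@alignCast(ctx));
        self.hits += 1;
    }
};

test "recovering a typed pointer from *anyopaque" {
    var c = Counter{};
    Counter.callback(&c);
    Counter.callback(&c);
    try std.testing.expectEqual(@as(usize, 2), c.hits);
}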
diff --git a/lib/std/time.zig b/lib/std/time.zig
index 3eb342fa85..a60a0ef959 100644
--- a/lib/std/time.zig
+++ b/lib/std/time.zig
@@ -70,7 +70,7 @@ pub fn timestamp() i64 {
/// before the epoch.
/// See `std.os.clock_gettime` for a POSIX timestamp.
pub fn milliTimestamp() i64 {
- return @intCast(i64, @divFloor(nanoTimestamp(), ns_per_ms));
+ return @as(i64, @intCast(@divFloor(nanoTimestamp(), ns_per_ms)));
}
/// Get a calendar timestamp, in microseconds, relative to UTC 1970-01-01.
@@ -79,7 +79,7 @@ pub fn milliTimestamp() i64 {
/// before the epoch.
/// See `std.os.clock_gettime` for a POSIX timestamp.
pub fn microTimestamp() i64 {
- return @intCast(i64, @divFloor(nanoTimestamp(), ns_per_us));
+ return @as(i64, @intCast(@divFloor(nanoTimestamp(), ns_per_us)));
}
/// Get a calendar timestamp, in nanoseconds, relative to UTC 1970-01-01.
@@ -96,7 +96,7 @@ pub fn nanoTimestamp() i128 {
var ft: os.windows.FILETIME = undefined;
os.windows.kernel32.GetSystemTimeAsFileTime(&ft);
const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
- return @as(i128, @bitCast(i64, ft64) + epoch_adj) * 100;
+ return @as(i128, @as(i64, @bitCast(ft64)) + epoch_adj) * 100;
}
if (builtin.os.tag == .wasi and !builtin.link_libc) {
@@ -239,9 +239,9 @@ pub const Instant = struct {
}
// Convert to ns using fixed point.
- const scale = @as(u64, std.time.ns_per_s << 32) / @intCast(u32, qpf);
+ const scale = @as(u64, std.time.ns_per_s << 32) / @as(u32, @intCast(qpf));
const result = (@as(u96, qpc) * scale) >> 32;
- return @truncate(u64, result);
+ return @as(u64, @truncate(result));
}
// WASI timestamps are directly in nanoseconds
@@ -250,9 +250,9 @@ pub const Instant = struct {
}
// Convert timespec diff to ns
- const seconds = @intCast(u64, self.timestamp.tv_sec - earlier.timestamp.tv_sec);
- const elapsed = (seconds * ns_per_s) + @intCast(u32, self.timestamp.tv_nsec);
- return elapsed - @intCast(u32, earlier.timestamp.tv_nsec);
+ const seconds = @as(u64, @intCast(self.timestamp.tv_sec - earlier.timestamp.tv_sec));
+ const elapsed = (seconds * ns_per_s) + @as(u32, @intCast(self.timestamp.tv_nsec));
+ return elapsed - @as(u32, @intCast(earlier.timestamp.tv_nsec));
}
};
diff --git a/lib/std/time/epoch.zig b/lib/std/time/epoch.zig
index 279acc4298..f467721a49 100644
--- a/lib/std/time/epoch.zig
+++ b/lib/std/time/epoch.zig
@@ -122,9 +122,9 @@ pub const YearAndDay = struct {
if (days_left < days_in_month)
break;
days_left -= days_in_month;
- month = @enumFromInt(Month, @intFromEnum(month) + 1);
+ month = @as(Month, @enumFromInt(@intFromEnum(month) + 1));
}
- return .{ .month = month, .day_index = @intCast(u5, days_left) };
+ return .{ .month = month, .day_index = @as(u5, @intCast(days_left)) };
}
};
@@ -146,7 +146,7 @@ pub const EpochDay = struct {
year_day -= year_size;
year += 1;
}
- return .{ .year = year, .day = @intCast(u9, year_day) };
+ return .{ .year = year, .day = @as(u9, @intCast(year_day)) };
}
};
@@ -156,11 +156,11 @@ pub const DaySeconds = struct {
/// the number of hours past the start of the day (0 to 23)
pub fn getHoursIntoDay(self: DaySeconds) u5 {
- return @intCast(u5, @divTrunc(self.secs, 3600));
+ return @as(u5, @intCast(@divTrunc(self.secs, 3600)));
}
/// the number of minutes past the hour (0 to 59)
pub fn getMinutesIntoHour(self: DaySeconds) u6 {
- return @intCast(u6, @divTrunc(@mod(self.secs, 3600), 60));
+ return @as(u6, @intCast(@divTrunc(@mod(self.secs, 3600), 60)));
}
/// the number of seconds past the start of the minute (0 to 59)
pub fn getSecondsIntoMinute(self: DaySeconds) u6 {
@@ -175,7 +175,7 @@ pub const EpochSeconds = struct {
/// Returns the number of days since the epoch as an EpochDay.
/// Use EpochDay to get information about the day of this time.
pub fn getEpochDay(self: EpochSeconds) EpochDay {
- return EpochDay{ .day = @intCast(u47, @divTrunc(self.secs, secs_per_day)) };
+ return EpochDay{ .day = @as(u47, @intCast(@divTrunc(self.secs, secs_per_day))) };
}
/// Returns the number of seconds into the day as DaySeconds.
diff --git a/lib/std/tz.zig b/lib/std/tz.zig
index 0cb9cefa50..16288bd4ce 100644
--- a/lib/std/tz.zig
+++ b/lib/std/tz.zig
@@ -155,8 +155,8 @@ pub const Tz = struct {
if (corr > std.math.maxInt(i16)) return error.Malformed; // Unreasonably large correction
leapseconds[i] = .{
- .occurrence = @intCast(i48, occur),
- .correction = @intCast(i16, corr),
+ .occurrence = @as(i48, @intCast(occur)),
+ .correction = @as(i16, @intCast(corr)),
};
}
diff --git a/lib/std/unicode.zig b/lib/std/unicode.zig
index 1987d10b0d..12cb74bd92 100644
--- a/lib/std/unicode.zig
+++ b/lib/std/unicode.zig
@@ -45,22 +45,22 @@ pub fn utf8Encode(c: u21, out: []u8) !u3 {
// - Increasing the initial shift by 6 each time
// - Each time after the first shorten the shifted
// value to a max of 0b111111 (63)
- 1 => out[0] = @intCast(u8, c), // Can just do 0 + codepoint for initial range
+ 1 => out[0] = @as(u8, @intCast(c)), // Can just do 0 + codepoint for initial range
2 => {
- out[0] = @intCast(u8, 0b11000000 | (c >> 6));
- out[1] = @intCast(u8, 0b10000000 | (c & 0b111111));
+ out[0] = @as(u8, @intCast(0b11000000 | (c >> 6)));
+ out[1] = @as(u8, @intCast(0b10000000 | (c & 0b111111)));
},
3 => {
if (0xd800 <= c and c <= 0xdfff) return error.Utf8CannotEncodeSurrogateHalf;
- out[0] = @intCast(u8, 0b11100000 | (c >> 12));
- out[1] = @intCast(u8, 0b10000000 | ((c >> 6) & 0b111111));
- out[2] = @intCast(u8, 0b10000000 | (c & 0b111111));
+ out[0] = @as(u8, @intCast(0b11100000 | (c >> 12)));
+ out[1] = @as(u8, @intCast(0b10000000 | ((c >> 6) & 0b111111)));
+ out[2] = @as(u8, @intCast(0b10000000 | (c & 0b111111)));
},
4 => {
- out[0] = @intCast(u8, 0b11110000 | (c >> 18));
- out[1] = @intCast(u8, 0b10000000 | ((c >> 12) & 0b111111));
- out[2] = @intCast(u8, 0b10000000 | ((c >> 6) & 0b111111));
- out[3] = @intCast(u8, 0b10000000 | (c & 0b111111));
+ out[0] = @as(u8, @intCast(0b11110000 | (c >> 18)));
+ out[1] = @as(u8, @intCast(0b10000000 | ((c >> 12) & 0b111111)));
+ out[2] = @as(u8, @intCast(0b10000000 | ((c >> 6) & 0b111111)));
+ out[3] = @as(u8, @intCast(0b10000000 | (c & 0b111111)));
},
else => unreachable,
}
@@ -695,11 +695,11 @@ pub fn utf8ToUtf16LeWithNull(allocator: mem.Allocator, utf8: []const u8) ![:0]u1
var it = view.iterator();
while (it.nextCodepoint()) |codepoint| {
if (codepoint < 0x10000) {
- const short = @intCast(u16, codepoint);
+ const short = @as(u16, @intCast(codepoint));
try result.append(mem.nativeToLittle(u16, short));
} else {
- const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800;
- const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00;
+ const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800;
+ const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00;
var out: [2]u16 = undefined;
out[0] = mem.nativeToLittle(u16, high);
out[1] = mem.nativeToLittle(u16, low);
@@ -720,12 +720,12 @@ pub fn utf8ToUtf16Le(utf16le: []u16, utf8: []const u8) !usize {
const next_src_i = src_i + n;
const codepoint = utf8Decode(utf8[src_i..next_src_i]) catch return error.InvalidUtf8;
if (codepoint < 0x10000) {
- const short = @intCast(u16, codepoint);
+ const short = @as(u16, @intCast(codepoint));
utf16le[dest_i] = mem.nativeToLittle(u16, short);
dest_i += 1;
} else {
- const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800;
- const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00;
+ const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800;
+ const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00;
utf16le[dest_i] = mem.nativeToLittle(u16, high);
utf16le[dest_i + 1] = mem.nativeToLittle(u16, low);
dest_i += 2;
diff --git a/lib/std/unicode/throughput_test.zig b/lib/std/unicode/throughput_test.zig
index b828b4e43f..084406dc78 100644
--- a/lib/std/unicode/throughput_test.zig
+++ b/lib/std/unicode/throughput_test.zig
@@ -32,8 +32,8 @@ fn benchmarkCodepointCount(buf: []const u8) !ResultCount {
}
const end = timer.read();
- const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
- const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s);
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+ const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s));
return ResultCount{ .count = r, .throughput = throughput };
}
diff --git a/lib/std/valgrind.zig b/lib/std/valgrind.zig
index ae4fde0da1..61312e2338 100644
--- a/lib/std/valgrind.zig
+++ b/lib/std/valgrind.zig
@@ -94,7 +94,7 @@ pub fn IsTool(base: [2]u8, code: usize) bool {
}
fn doClientRequestExpr(default: usize, request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
- return doClientRequest(default, @intCast(usize, @intFromEnum(request)), a1, a2, a3, a4, a5);
+ return doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5);
}
fn doClientRequestStmt(request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void {
diff --git a/lib/std/valgrind/callgrind.zig b/lib/std/valgrind/callgrind.zig
index f3d8c7ae3c..3ba74fb525 100644
--- a/lib/std/valgrind/callgrind.zig
+++ b/lib/std/valgrind/callgrind.zig
@@ -11,7 +11,7 @@ pub const CallgrindClientRequest = enum(usize) {
};
fn doCallgrindClientRequestExpr(default: usize, request: CallgrindClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
- return valgrind.doClientRequest(default, @intCast(usize, @intFromEnum(request)), a1, a2, a3, a4, a5);
+ return valgrind.doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5);
}
fn doCallgrindClientRequestStmt(request: CallgrindClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void {
diff --git a/lib/std/valgrind/memcheck.zig b/lib/std/valgrind/memcheck.zig
index dd6c79cd90..7f5e973c43 100644
--- a/lib/std/valgrind/memcheck.zig
+++ b/lib/std/valgrind/memcheck.zig
@@ -21,7 +21,7 @@ pub const MemCheckClientRequest = enum(usize) {
};
fn doMemCheckClientRequestExpr(default: usize, request: MemCheckClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
- return valgrind.doClientRequest(default, @intCast(usize, @intFromEnum(request)), a1, a2, a3, a4, a5);
+ return valgrind.doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5);
}
fn doMemCheckClientRequestStmt(request: MemCheckClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void {
@@ -31,24 +31,24 @@ fn doMemCheckClientRequestStmt(request: MemCheckClientRequest, a1: usize, a2: us
/// Mark memory at qzz.ptr as unaddressable for qzz.len bytes.
/// This returns -1 when run on Valgrind and 0 otherwise.
pub fn makeMemNoAccess(qzz: []u8) i1 {
- return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
- .MakeMemNoAccess, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0));
+ return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return
+ .MakeMemNoAccess, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)));
}
/// Similarly, mark memory at qzz.ptr as addressable but undefined
/// for qzz.len bytes.
/// This returns -1 when run on Valgrind and 0 otherwise.
pub fn makeMemUndefined(qzz: []u8) i1 {
- return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
- .MakeMemUndefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0));
+ return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return
+ .MakeMemUndefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)));
}
/// Similarly, mark memory at qzz.ptr as addressable and defined
/// for qzz.len bytes.
pub fn makeMemDefined(qzz: []u8) i1 {
// This returns -1 when run on Valgrind and 0 otherwise.
- return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
- .MakeMemDefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0));
+ return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return
+ .MakeMemDefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)));
}
/// Similar to makeMemDefined except that addressability is
@@ -56,8 +56,8 @@ pub fn makeMemDefined(qzz: []u8) i1 {
/// but those which are not addressable are left unchanged.
/// This returns -1 when run on Valgrind and 0 otherwise.
pub fn makeMemDefinedIfAddressable(qzz: []u8) i1 {
- return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
- .MakeMemDefinedIfAddressable, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0));
+ return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return
+ .MakeMemDefinedIfAddressable, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)));
}
/// Create a block-description handle. The description is an ascii
@@ -195,7 +195,7 @@ test "countLeakBlocks" {
/// impossible to segfault your system by using this call.
pub fn getVbits(zza: []u8, zzvbits: []u8) u2 {
std.debug.assert(zzvbits.len >= zza.len / 8);
- return @intCast(u2, doMemCheckClientRequestExpr(0, .GetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0));
+ return @as(u2, @intCast(doMemCheckClientRequestExpr(0, .GetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0)));
}
/// Set the validity data for addresses zza, copying it
@@ -208,7 +208,7 @@ pub fn getVbits(zza: []u8, zzvbits: []u8) u2 {
/// impossible to segfault your system by using this call.
pub fn setVbits(zzvbits: []u8, zza: []u8) u2 {
std.debug.assert(zzvbits.len >= zza.len / 8);
- return @intCast(u2, doMemCheckClientRequestExpr(0, .SetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0));
+ return @as(u2, @intCast(doMemCheckClientRequestExpr(0, .SetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0)));
}
/// Disable and re-enable reporting of addressing errors in the
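
The memcheck.zig hunks above are all instances of the same mechanical rewrite: the destination type moves out of the cast builtin and into the surrounding result location. A minimal standalone sketch of the two equivalent spellings (illustrative only, not part of the patch):

    test "cast rewrite pattern (illustrative)" {
        const n: usize = 200;
        // Before this change the destination type was the first argument: @intCast(u8, n).
        // Now it comes from the result location, either an explicit @as wrapper...
        const a = @as(u8, @intCast(n));
        // ...or any other known result type, such as a typed const:
        const b: u8 = @intCast(n);
        try @import("std").testing.expectEqual(a, b);
    }
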
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index fe6d2ec120..63b620f674 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -36,7 +36,7 @@ pub fn hashSrc(src: []const u8) SrcHash {
}
pub fn srcHashEql(a: SrcHash, b: SrcHash) bool {
- return @bitCast(u128, a) == @bitCast(u128, b);
+ return @as(u128, @bitCast(a)) == @as(u128, @bitCast(b));
}
pub fn hashName(parent_hash: SrcHash, sep: []const u8, name: []const u8) SrcHash {
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 86e4e48820..a82982e262 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -62,7 +62,7 @@ pub fn parse(gpa: Allocator, source: [:0]const u8, mode: Mode) Allocator.Error!A
const token = tokenizer.next();
try tokens.append(gpa, .{
.tag = token.tag,
- .start = @intCast(u32, token.loc.start),
+ .start = @as(u32, @intCast(token.loc.start)),
});
if (token.tag == .eof) break;
}
@@ -123,7 +123,7 @@ pub fn renderToArrayList(tree: Ast, buffer: *std.ArrayList(u8)) RenderError!void
/// should point after the token in the error message.
pub fn errorOffset(tree: Ast, parse_error: Error) u32 {
return if (parse_error.token_is_prev)
- @intCast(u32, tree.tokenSlice(parse_error.token).len)
+ @as(u32, @intCast(tree.tokenSlice(parse_error.token).len))
else
0;
}
@@ -772,7 +772,7 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex {
var n = node;
var end_offset: TokenIndex = 0;
while (true) switch (tags[n]) {
- .root => return @intCast(TokenIndex, tree.tokens.len - 1),
+ .root => return @as(TokenIndex, @intCast(tree.tokens.len - 1)),
.@"usingnamespace",
.bool_not,
@@ -1288,7 +1288,7 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex {
n = extra.else_expr;
},
.@"for" => {
- const extra = @bitCast(Node.For, datas[n].rhs);
+ const extra = @as(Node.For, @bitCast(datas[n].rhs));
n = tree.extra_data[datas[n].lhs + extra.inputs + @intFromBool(extra.has_else)];
},
.@"suspend" => {
@@ -1955,7 +1955,7 @@ pub fn forSimple(tree: Ast, node: Node.Index) full.For {
pub fn forFull(tree: Ast, node: Node.Index) full.For {
const data = tree.nodes.items(.data)[node];
- const extra = @bitCast(Node.For, data.rhs);
+ const extra = @as(Node.For, @bitCast(data.rhs));
const inputs = tree.extra_data[data.lhs..][0..extra.inputs];
const then_expr = tree.extra_data[data.lhs + extra.inputs];
const else_expr = if (extra.has_else) tree.extra_data[data.lhs + extra.inputs + 1] else 0;
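
The Ast.zig changes keep the `@as(Node.For, @bitCast(...))` spelling produced by the automatic upgrade, but the same inference also works through a type annotation. A hedged sketch with a stand-in packed struct (Node.For is assumed here to be u32-backed, as in Ast.zig):

    const std = @import("std");

    const For = packed struct(u32) { inputs: u31, has_else: bool }; // stand-in for Ast.Node.For

    test "unpacking a u32 into a packed struct (illustrative)" {
        const rhs: u32 = 0x8000_0002;
        const extra: For = @bitCast(rhs); // destination type taken from the declaration
        try std.testing.expectEqual(@as(u31, 2), extra.inputs);
        try std.testing.expect(extra.has_else);
    }
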
diff --git a/lib/std/zig/CrossTarget.zig b/lib/std/zig/CrossTarget.zig
index 13219888b2..d42b02d931 100644
--- a/lib/std/zig/CrossTarget.zig
+++ b/lib/std/zig/CrossTarget.zig
@@ -317,7 +317,7 @@ pub fn parse(args: ParseOptions) !CrossTarget {
}
const feature_name = cpu_features[start..index];
for (all_features, 0..) |feature, feat_index_usize| {
- const feat_index = @intCast(Target.Cpu.Feature.Set.Index, feat_index_usize);
+ const feat_index = @as(Target.Cpu.Feature.Set.Index, @intCast(feat_index_usize));
if (mem.eql(u8, feature_name, feature.name)) {
set.addFeature(feat_index);
break;
diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig
index fe3d97517f..201c06d4d7 100644
--- a/lib/std/zig/ErrorBundle.zig
+++ b/lib/std/zig/ErrorBundle.zig
@@ -94,7 +94,7 @@ pub fn getErrorMessageList(eb: ErrorBundle) ErrorMessageList {
pub fn getMessages(eb: ErrorBundle) []const MessageIndex {
const list = eb.getErrorMessageList();
- return @ptrCast([]const MessageIndex, eb.extra[list.start..][0..list.len]);
+ return @as([]const MessageIndex, @ptrCast(eb.extra[list.start..][0..list.len]));
}
pub fn getErrorMessage(eb: ErrorBundle, index: MessageIndex) ErrorMessage {
@@ -109,7 +109,7 @@ pub fn getSourceLocation(eb: ErrorBundle, index: SourceLocationIndex) SourceLoca
pub fn getNotes(eb: ErrorBundle, index: MessageIndex) []const MessageIndex {
const notes_len = eb.getErrorMessage(index).notes_len;
const start = @intFromEnum(index) + @typeInfo(ErrorMessage).Struct.fields.len;
- return @ptrCast([]const MessageIndex, eb.extra[start..][0..notes_len]);
+ return @as([]const MessageIndex, @ptrCast(eb.extra[start..][0..notes_len]));
}
pub fn getCompileLogOutput(eb: ErrorBundle) [:0]const u8 {
@@ -125,8 +125,8 @@ fn extraData(eb: ErrorBundle, comptime T: type, index: usize) struct { data: T,
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => eb.extra[i],
- MessageIndex => @enumFromInt(MessageIndex, eb.extra[i]),
- SourceLocationIndex => @enumFromInt(SourceLocationIndex, eb.extra[i]),
+ MessageIndex => @as(MessageIndex, @enumFromInt(eb.extra[i])),
+ SourceLocationIndex => @as(SourceLocationIndex, @enumFromInt(eb.extra[i])),
else => @compileError("bad field type"),
};
i += 1;
@@ -202,7 +202,7 @@ fn renderErrorMessageToWriter(
try counting_stderr.writeAll(": ");
// This is the length of the part before the error message:
// e.g. "file.zig:4:5: error: "
- const prefix_len = @intCast(usize, counting_stderr.context.bytes_written);
+ const prefix_len = @as(usize, @intCast(counting_stderr.context.bytes_written));
try ttyconf.setColor(stderr, .reset);
try ttyconf.setColor(stderr, .bold);
if (err_msg.count == 1) {
@@ -357,7 +357,7 @@ pub const Wip = struct {
}
const compile_log_str_index = if (compile_log_text.len == 0) 0 else str: {
- const str = @intCast(u32, wip.string_bytes.items.len);
+ const str = @as(u32, @intCast(wip.string_bytes.items.len));
try wip.string_bytes.ensureUnusedCapacity(gpa, compile_log_text.len + 1);
wip.string_bytes.appendSliceAssumeCapacity(compile_log_text);
wip.string_bytes.appendAssumeCapacity(0);
@@ -365,11 +365,11 @@ pub const Wip = struct {
};
wip.setExtra(0, ErrorMessageList{
- .len = @intCast(u32, wip.root_list.items.len),
- .start = @intCast(u32, wip.extra.items.len),
+ .len = @as(u32, @intCast(wip.root_list.items.len)),
+ .start = @as(u32, @intCast(wip.extra.items.len)),
.compile_log_text = compile_log_str_index,
});
- try wip.extra.appendSlice(gpa, @ptrCast([]const u32, wip.root_list.items));
+ try wip.extra.appendSlice(gpa, @as([]const u32, @ptrCast(wip.root_list.items)));
wip.root_list.clearAndFree(gpa);
return .{
.string_bytes = try wip.string_bytes.toOwnedSlice(gpa),
@@ -386,7 +386,7 @@ pub const Wip = struct {
pub fn addString(wip: *Wip, s: []const u8) !u32 {
const gpa = wip.gpa;
- const index = @intCast(u32, wip.string_bytes.items.len);
+ const index = @as(u32, @intCast(wip.string_bytes.items.len));
try wip.string_bytes.ensureUnusedCapacity(gpa, s.len + 1);
wip.string_bytes.appendSliceAssumeCapacity(s);
wip.string_bytes.appendAssumeCapacity(0);
@@ -395,7 +395,7 @@ pub const Wip = struct {
pub fn printString(wip: *Wip, comptime fmt: []const u8, args: anytype) !u32 {
const gpa = wip.gpa;
- const index = @intCast(u32, wip.string_bytes.items.len);
+ const index = @as(u32, @intCast(wip.string_bytes.items.len));
try wip.string_bytes.writer(gpa).print(fmt, args);
try wip.string_bytes.append(gpa, 0);
return index;
@@ -407,15 +407,15 @@ pub const Wip = struct {
}
pub fn addErrorMessage(wip: *Wip, em: ErrorMessage) !MessageIndex {
- return @enumFromInt(MessageIndex, try addExtra(wip, em));
+ return @as(MessageIndex, @enumFromInt(try addExtra(wip, em)));
}
pub fn addErrorMessageAssumeCapacity(wip: *Wip, em: ErrorMessage) MessageIndex {
- return @enumFromInt(MessageIndex, addExtraAssumeCapacity(wip, em));
+ return @as(MessageIndex, @enumFromInt(addExtraAssumeCapacity(wip, em)));
}
pub fn addSourceLocation(wip: *Wip, sl: SourceLocation) !SourceLocationIndex {
- return @enumFromInt(SourceLocationIndex, try addExtra(wip, sl));
+ return @as(SourceLocationIndex, @enumFromInt(try addExtra(wip, sl)));
}
pub fn addReferenceTrace(wip: *Wip, rt: ReferenceTrace) !void {
@@ -431,7 +431,7 @@ pub const Wip = struct {
const other_list = other.getMessages();
// The ensureUnusedCapacity call above guarantees this.
- const notes_start = wip.reserveNotes(@intCast(u32, other_list.len)) catch unreachable;
+ const notes_start = wip.reserveNotes(@as(u32, @intCast(other_list.len))) catch unreachable;
for (notes_start.., other_list) |note, message| {
wip.extra.items[note] = @intFromEnum(wip.addOtherMessage(other, message) catch unreachable);
}
@@ -441,7 +441,7 @@ pub const Wip = struct {
try wip.extra.ensureUnusedCapacity(wip.gpa, notes_len +
notes_len * @typeInfo(ErrorBundle.ErrorMessage).Struct.fields.len);
wip.extra.items.len += notes_len;
- return @intCast(u32, wip.extra.items.len - notes_len);
+ return @as(u32, @intCast(wip.extra.items.len - notes_len));
}
fn addOtherMessage(wip: *Wip, other: ErrorBundle, msg_index: MessageIndex) !MessageIndex {
@@ -493,7 +493,7 @@ pub const Wip = struct {
fn addExtraAssumeCapacity(wip: *Wip, extra: anytype) u32 {
const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
- const result = @intCast(u32, wip.extra.items.len);
+ const result = @as(u32, @intCast(wip.extra.items.len));
wip.extra.items.len += fields.len;
setExtra(wip, result, extra);
return result;
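
In ErrorBundle.zig the enum conversions follow the same scheme: once a result type is available, `@enumFromInt` no longer needs the type argument. A minimal sketch with a hypothetical index enum standing in for MessageIndex:

    const Index = enum(u32) { _ }; // hypothetical stand-in for MessageIndex

    fn toIndex(raw: u32) Index {
        // Previously @enumFromInt(Index, raw); the type now comes from the return type.
        return @enumFromInt(raw);
    }

    test "enumFromInt with an inferred result type (illustrative)" {
        try @import("std").testing.expectEqual(@as(u32, 7), @intFromEnum(toIndex(7)));
    }
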
diff --git a/lib/std/zig/Parse.zig b/lib/std/zig/Parse.zig
index f3eec86acc..14019571b1 100644
--- a/lib/std/zig/Parse.zig
+++ b/lib/std/zig/Parse.zig
@@ -36,20 +36,20 @@ const Members = struct {
fn listToSpan(p: *Parse, list: []const Node.Index) !Node.SubRange {
try p.extra_data.appendSlice(p.gpa, list);
return Node.SubRange{
- .start = @intCast(Node.Index, p.extra_data.items.len - list.len),
- .end = @intCast(Node.Index, p.extra_data.items.len),
+ .start = @as(Node.Index, @intCast(p.extra_data.items.len - list.len)),
+ .end = @as(Node.Index, @intCast(p.extra_data.items.len)),
};
}
fn addNode(p: *Parse, elem: Ast.Node) Allocator.Error!Node.Index {
- const result = @intCast(Node.Index, p.nodes.len);
+ const result = @as(Node.Index, @intCast(p.nodes.len));
try p.nodes.append(p.gpa, elem);
return result;
}
fn setNode(p: *Parse, i: usize, elem: Ast.Node) Node.Index {
p.nodes.set(i, elem);
- return @intCast(Node.Index, i);
+ return @as(Node.Index, @intCast(i));
}
fn reserveNode(p: *Parse, tag: Ast.Node.Tag) !usize {
@@ -72,7 +72,7 @@ fn unreserveNode(p: *Parse, node_index: usize) void {
fn addExtra(p: *Parse, extra: anytype) Allocator.Error!Node.Index {
const fields = std.meta.fields(@TypeOf(extra));
try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len);
- const result = @intCast(u32, p.extra_data.items.len);
+ const result = @as(u32, @intCast(p.extra_data.items.len));
inline for (fields) |field| {
comptime assert(field.type == Node.Index);
p.extra_data.appendAssumeCapacity(@field(extra, field.name));
@@ -1202,10 +1202,10 @@ fn parseForStatement(p: *Parse) !Node.Index {
.main_token = for_token,
.data = .{
.lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start,
- .rhs = @bitCast(u32, Node.For{
- .inputs = @intCast(u31, inputs),
+ .rhs = @as(u32, @bitCast(Node.For{
+ .inputs = @as(u31, @intCast(inputs)),
.has_else = has_else,
- }),
+ })),
},
});
}
@@ -1486,7 +1486,7 @@ fn parseExprPrecedence(p: *Parse, min_prec: i32) Error!Node.Index {
while (true) {
const tok_tag = p.token_tags[p.tok_i];
- const info = operTable[@intCast(usize, @intFromEnum(tok_tag))];
+ const info = operTable[@as(usize, @intCast(@intFromEnum(tok_tag)))];
if (info.prec < min_prec) {
break;
}
@@ -2087,10 +2087,10 @@ fn parseForExpr(p: *Parse) !Node.Index {
.main_token = for_token,
.data = .{
.lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start,
- .rhs = @bitCast(u32, Node.For{
- .inputs = @intCast(u31, inputs),
+ .rhs = @as(u32, @bitCast(Node.For{
+ .inputs = @as(u31, @intCast(inputs)),
.has_else = has_else,
- }),
+ })),
},
});
}
@@ -2862,10 +2862,10 @@ fn parseForTypeExpr(p: *Parse) !Node.Index {
.main_token = for_token,
.data = .{
.lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start,
- .rhs = @bitCast(u32, Node.For{
- .inputs = @intCast(u31, inputs),
+ .rhs = @as(u32, @bitCast(Node.For{
+ .inputs = @as(u31, @intCast(inputs)),
.has_else = has_else,
- }),
+ })),
},
});
}
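
Parse.zig packs the for-loop metadata in the opposite direction, struct to integer, with the nested casts now reading inside-out. A small sketch under the same packed-struct assumption as above:

    test "packing for-loop metadata into rhs (illustrative)" {
        const For = packed struct(u32) { inputs: u31, has_else: bool };
        const inputs: usize = 3;
        const rhs = @as(u32, @bitCast(For{
            .inputs = @as(u31, @intCast(inputs)),
            .has_else = true,
        }));
        try @import("std").testing.expectEqual(@as(u32, 0x8000_0003), rhs);
    }
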
diff --git a/lib/std/zig/Server.zig b/lib/std/zig/Server.zig
index f4f979f012..468219f8f0 100644
--- a/lib/std/zig/Server.zig
+++ b/lib/std/zig/Server.zig
@@ -132,7 +132,7 @@ pub fn receiveMessage(s: *Server) !InMessage.Header {
pub fn receiveBody_u32(s: *Server) !u32 {
const fifo = &s.receive_fifo;
const buf = fifo.readableSlice(0);
- const result = @ptrCast(*align(1) const u32, buf[0..4]).*;
+ const result = @as(*align(1) const u32, @ptrCast(buf[0..4])).*;
fifo.discard(4);
return bswap(result);
}
@@ -140,7 +140,7 @@ pub fn receiveBody_u32(s: *Server) !u32 {
pub fn serveStringMessage(s: *Server, tag: OutMessage.Tag, msg: []const u8) !void {
return s.serveMessage(.{
.tag = tag,
- .bytes_len = @intCast(u32, msg.len),
+ .bytes_len = @as(u32, @intCast(msg.len)),
}, &.{msg});
}
@@ -152,7 +152,7 @@ pub fn serveMessage(
var iovecs: [10]std.os.iovec_const = undefined;
const header_le = bswap(header);
iovecs[0] = .{
- .iov_base = @ptrCast([*]const u8, &header_le),
+ .iov_base = @as([*]const u8, @ptrCast(&header_le)),
.iov_len = @sizeOf(OutMessage.Header),
};
for (bufs, iovecs[1 .. bufs.len + 1]) |buf, *iovec| {
@@ -171,7 +171,7 @@ pub fn serveEmitBinPath(
) !void {
try s.serveMessage(.{
.tag = .emit_bin_path,
- .bytes_len = @intCast(u32, fs_path.len + @sizeOf(OutMessage.EmitBinPath)),
+ .bytes_len = @as(u32, @intCast(fs_path.len + @sizeOf(OutMessage.EmitBinPath))),
}, &.{
std.mem.asBytes(&header),
fs_path,
@@ -185,7 +185,7 @@ pub fn serveTestResults(
const msg_le = bswap(msg);
try s.serveMessage(.{
.tag = .test_results,
- .bytes_len = @intCast(u32, @sizeOf(OutMessage.TestResults)),
+ .bytes_len = @as(u32, @intCast(@sizeOf(OutMessage.TestResults))),
}, &.{
std.mem.asBytes(&msg_le),
});
@@ -193,14 +193,14 @@ pub fn serveTestResults(
pub fn serveErrorBundle(s: *Server, error_bundle: std.zig.ErrorBundle) !void {
const eb_hdr: OutMessage.ErrorBundle = .{
- .extra_len = @intCast(u32, error_bundle.extra.len),
- .string_bytes_len = @intCast(u32, error_bundle.string_bytes.len),
+ .extra_len = @as(u32, @intCast(error_bundle.extra.len)),
+ .string_bytes_len = @as(u32, @intCast(error_bundle.string_bytes.len)),
};
const bytes_len = @sizeOf(OutMessage.ErrorBundle) +
4 * error_bundle.extra.len + error_bundle.string_bytes.len;
try s.serveMessage(.{
.tag = .error_bundle,
- .bytes_len = @intCast(u32, bytes_len),
+ .bytes_len = @as(u32, @intCast(bytes_len)),
}, &.{
std.mem.asBytes(&eb_hdr),
// TODO: implement @ptrCast between slices changing the length
@@ -218,8 +218,8 @@ pub const TestMetadata = struct {
pub fn serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void {
const header: OutMessage.TestMetadata = .{
- .tests_len = bswap(@intCast(u32, test_metadata.names.len)),
- .string_bytes_len = bswap(@intCast(u32, test_metadata.string_bytes.len)),
+ .tests_len = bswap(@as(u32, @intCast(test_metadata.names.len))),
+ .string_bytes_len = bswap(@as(u32, @intCast(test_metadata.string_bytes.len))),
};
const bytes_len = @sizeOf(OutMessage.TestMetadata) +
3 * 4 * test_metadata.names.len + test_metadata.string_bytes.len;
@@ -237,7 +237,7 @@ pub fn serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void {
return s.serveMessage(.{
.tag = .test_metadata,
- .bytes_len = @intCast(u32, bytes_len),
+ .bytes_len = @as(u32, @intCast(bytes_len)),
}, &.{
std.mem.asBytes(&header),
// TODO: implement @ptrCast between slices changing the length
@@ -253,7 +253,7 @@ fn bswap(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
switch (@typeInfo(T)) {
- .Enum => return @enumFromInt(T, @byteSwap(@intFromEnum(x))),
+ .Enum => return @as(T, @enumFromInt(@byteSwap(@intFromEnum(x)))),
.Int => return @byteSwap(x),
.Struct => |info| switch (info.layout) {
.Extern => {
@@ -265,7 +265,7 @@ fn bswap(x: anytype) @TypeOf(x) {
},
.Packed => {
const I = info.backing_integer.?;
- return @bitCast(T, @byteSwap(@bitCast(I, x)));
+ return @as(T, @bitCast(@byteSwap(@as(I, @bitCast(x)))));
},
.Auto => @compileError("auto layout struct"),
},
@@ -286,7 +286,7 @@ fn bswap_and_workaround_u32(bytes_ptr: *const [4]u8) u32 {
/// workaround for https://github.com/ziglang/zig/issues/14904
fn bswap_and_workaround_tag(bytes_ptr: *const [4]u8) InMessage.Tag {
const int = std.mem.readIntLittle(u32, bytes_ptr);
- return @enumFromInt(InMessage.Tag, int);
+ return @as(InMessage.Tag, @enumFromInt(int));
}
const OutMessage = std.zig.Server.Message;
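
The generic `bswap` in Server.zig round-trips packed structs through their backing integer. A hedged sketch of that packed-struct branch, using a made-up wire header type:

    test "byte-swapping a packed struct via its backing integer (illustrative)" {
        const Header = packed struct(u32) { tag: u8, len: u24 }; // hypothetical wire header
        const h = Header{ .tag = 0x11, .len = 0x223344 };
        const I = u32; // plays the role of info.backing_integer.?
        const swapped = @as(Header, @bitCast(@byteSwap(@as(I, @bitCast(h)))));
        try @import("std").testing.expectEqual(@as(u8, 0x22), swapped.tag);
        try @import("std").testing.expectEqual(@as(u24, 0x114433), swapped.len);
    }
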
diff --git a/lib/std/zig/c_builtins.zig b/lib/std/zig/c_builtins.zig
index de9ac95600..7f0414c96f 100644
--- a/lib/std/zig/c_builtins.zig
+++ b/lib/std/zig/c_builtins.zig
@@ -20,19 +20,19 @@ pub inline fn __builtin_signbitf(val: f32) c_int {
pub inline fn __builtin_popcount(val: c_uint) c_int {
// popcount of a c_uint will never exceed the capacity of a c_int
@setRuntimeSafety(false);
- return @bitCast(c_int, @as(c_uint, @popCount(val)));
+ return @as(c_int, @bitCast(@as(c_uint, @popCount(val))));
}
pub inline fn __builtin_ctz(val: c_uint) c_int {
// Returns the number of trailing 0-bits in val, starting at the least significant bit position.
// In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint
@setRuntimeSafety(false);
- return @bitCast(c_int, @as(c_uint, @ctz(val)));
+ return @as(c_int, @bitCast(@as(c_uint, @ctz(val))));
}
pub inline fn __builtin_clz(val: c_uint) c_int {
// Returns the number of leading 0-bits in x, starting at the most significant bit position.
// In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint
@setRuntimeSafety(false);
- return @bitCast(c_int, @as(c_uint, @clz(val)));
+ return @as(c_int, @bitCast(@as(c_uint, @clz(val))));
}
pub inline fn __builtin_sqrt(val: f64) f64 {
@@ -135,7 +135,7 @@ pub inline fn __builtin_object_size(ptr: ?*const anyopaque, ty: c_int) usize {
// If it is not possible to determine which objects ptr points to at compile time,
// __builtin_object_size should return (size_t) -1 for type 0 or 1 and (size_t) 0
// for type 2 or 3.
- if (ty == 0 or ty == 1) return @bitCast(usize, -@as(isize, 1));
+ if (ty == 0 or ty == 1) return @as(usize, @bitCast(-@as(isize, 1)));
if (ty == 2 or ty == 3) return 0;
unreachable;
}
@@ -151,8 +151,8 @@ pub inline fn __builtin___memset_chk(
}
pub inline fn __builtin_memset(dst: ?*anyopaque, val: c_int, len: usize) ?*anyopaque {
- const dst_cast = @ptrCast([*c]u8, dst);
- @memset(dst_cast[0..len], @bitCast(u8, @truncate(i8, val)));
+ const dst_cast = @as([*c]u8, @ptrCast(dst));
+ @memset(dst_cast[0..len], @as(u8, @bitCast(@as(i8, @truncate(val)))));
return dst;
}
@@ -172,8 +172,8 @@ pub inline fn __builtin_memcpy(
len: usize,
) ?*anyopaque {
if (len > 0) @memcpy(
- @ptrCast([*]u8, dst.?)[0..len],
- @ptrCast([*]const u8, src.?),
+ @as([*]u8, @ptrCast(dst.?))[0..len],
+ @as([*]const u8, @ptrCast(src.?)),
);
return dst;
}
@@ -202,8 +202,8 @@ pub inline fn __builtin_expect(expr: c_long, c: c_long) c_long {
/// If tagp is empty, the function returns a NaN whose significand is zero.
pub inline fn __builtin_nanf(tagp: []const u8) f32 {
const parsed = std.fmt.parseUnsigned(c_ulong, tagp, 0) catch 0;
- const bits = @truncate(u23, parsed); // single-precision float trailing significand is 23 bits
- return @bitCast(f32, @as(u32, bits) | std.math.qnan_u32);
+ const bits = @as(u23, @truncate(parsed)); // single-precision float trailing significand is 23 bits
+ return @as(f32, @bitCast(@as(u32, bits) | std.math.qnan_u32));
}
pub inline fn __builtin_huge_valf() f32 {
diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig
index dafef5e63b..2e7bb61df6 100644
--- a/lib/std/zig/c_translation.zig
+++ b/lib/std/zig/c_translation.zig
@@ -42,9 +42,9 @@ pub fn cast(comptime DestType: type, target: anytype) DestType {
},
.Float => {
switch (@typeInfo(SourceType)) {
- .Int => return @floatFromInt(DestType, target),
- .Float => return @floatCast(DestType, target),
- .Bool => return @floatFromInt(DestType, @intFromBool(target)),
+ .Int => return @as(DestType, @floatFromInt(target)),
+ .Float => return @as(DestType, @floatCast(target)),
+ .Bool => return @as(DestType, @floatFromInt(@intFromBool(target))),
else => {},
}
},
@@ -65,36 +65,25 @@ fn castInt(comptime DestType: type, target: anytype) DestType {
const source = @typeInfo(@TypeOf(target)).Int;
if (dest.bits < source.bits)
- return @bitCast(DestType, @truncate(std.meta.Int(source.signedness, dest.bits), target))
+ return @as(DestType, @bitCast(@as(std.meta.Int(source.signedness, dest.bits), @truncate(target))))
else
- return @bitCast(DestType, @as(std.meta.Int(source.signedness, dest.bits), target));
+ return @as(DestType, @bitCast(@as(std.meta.Int(source.signedness, dest.bits), target)));
}
fn castPtr(comptime DestType: type, target: anytype) DestType {
- const dest = ptrInfo(DestType);
- const source = ptrInfo(@TypeOf(target));
-
- if (source.is_const and !dest.is_const)
- return @constCast(target)
- else if (source.is_volatile and !dest.is_volatile)
- return @volatileCast(target)
- else if (@typeInfo(dest.child) == .Opaque)
- // dest.alignment would error out
- return @ptrCast(DestType, target)
- else
- return @ptrCast(DestType, @alignCast(dest.alignment, target));
+ return @constCast(@volatileCast(@alignCast(@ptrCast(target))));
}
fn castToPtr(comptime DestType: type, comptime SourceType: type, target: anytype) DestType {
switch (@typeInfo(SourceType)) {
.Int => {
- return @ptrFromInt(DestType, castInt(usize, target));
+ return @as(DestType, @ptrFromInt(castInt(usize, target)));
},
.ComptimeInt => {
if (target < 0)
- return @ptrFromInt(DestType, @bitCast(usize, @intCast(isize, target)))
+ return @as(DestType, @ptrFromInt(@as(usize, @bitCast(@as(isize, @intCast(target))))))
else
- return @ptrFromInt(DestType, @intCast(usize, target));
+ return @as(DestType, @ptrFromInt(@as(usize, @intCast(target))));
},
.Pointer => {
return castPtr(DestType, target);
@@ -120,34 +109,34 @@ fn ptrInfo(comptime PtrType: type) std.builtin.Type.Pointer {
test "cast" {
var i = @as(i64, 10);
- try testing.expect(cast(*u8, 16) == @ptrFromInt(*u8, 16));
+ try testing.expect(cast(*u8, 16) == @as(*u8, @ptrFromInt(16)));
try testing.expect(cast(*u64, &i).* == @as(u64, 10));
try testing.expect(cast(*i64, @as(?*align(1) i64, &i)) == &i);
- try testing.expect(cast(?*u8, 2) == @ptrFromInt(*u8, 2));
+ try testing.expect(cast(?*u8, 2) == @as(*u8, @ptrFromInt(2)));
try testing.expect(cast(?*i64, @as(*align(1) i64, &i)) == &i);
try testing.expect(cast(?*i64, @as(?*align(1) i64, &i)) == &i);
- try testing.expectEqual(@as(u32, 4), cast(u32, @ptrFromInt(*u32, 4)));
- try testing.expectEqual(@as(u32, 4), cast(u32, @ptrFromInt(?*u32, 4)));
+ try testing.expectEqual(@as(u32, 4), cast(u32, @as(*u32, @ptrFromInt(4))));
+ try testing.expectEqual(@as(u32, 4), cast(u32, @as(?*u32, @ptrFromInt(4))));
try testing.expectEqual(@as(u32, 10), cast(u32, @as(u64, 10)));
- try testing.expectEqual(@bitCast(i32, @as(u32, 0x8000_0000)), cast(i32, @as(u32, 0x8000_0000)));
+ try testing.expectEqual(@as(i32, @bitCast(@as(u32, 0x8000_0000))), cast(i32, @as(u32, 0x8000_0000)));
- try testing.expectEqual(@ptrFromInt(*u8, 2), cast(*u8, @ptrFromInt(*const u8, 2)));
- try testing.expectEqual(@ptrFromInt(*u8, 2), cast(*u8, @ptrFromInt(*volatile u8, 2)));
+ try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*const u8, @ptrFromInt(2))));
+ try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*volatile u8, @ptrFromInt(2))));
- try testing.expectEqual(@ptrFromInt(?*anyopaque, 2), cast(?*anyopaque, @ptrFromInt(*u8, 2)));
+ try testing.expectEqual(@as(?*anyopaque, @ptrFromInt(2)), cast(?*anyopaque, @as(*u8, @ptrFromInt(2))));
var foo: c_int = -1;
- try testing.expect(cast(*anyopaque, -1) == @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1))));
- try testing.expect(cast(*anyopaque, foo) == @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1))));
- try testing.expect(cast(?*anyopaque, -1) == @ptrFromInt(?*anyopaque, @bitCast(usize, @as(isize, -1))));
- try testing.expect(cast(?*anyopaque, foo) == @ptrFromInt(?*anyopaque, @bitCast(usize, @as(isize, -1))));
+ try testing.expect(cast(*anyopaque, -1) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
+ try testing.expect(cast(*anyopaque, foo) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
+ try testing.expect(cast(?*anyopaque, -1) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
+ try testing.expect(cast(?*anyopaque, foo) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
const FnPtr = ?*align(1) const fn (*anyopaque) void;
- try testing.expect(cast(FnPtr, 0) == @ptrFromInt(FnPtr, @as(usize, 0)));
- try testing.expect(cast(FnPtr, foo) == @ptrFromInt(FnPtr, @bitCast(usize, @as(isize, -1))));
+ try testing.expect(cast(FnPtr, 0) == @as(FnPtr, @ptrFromInt(@as(usize, 0))));
+ try testing.expect(cast(FnPtr, foo) == @as(FnPtr, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
}
/// Given a value returns its size as C's sizeof operator would.
@@ -192,7 +181,7 @@ pub fn sizeof(target: anytype) usize {
const array_info = @typeInfo(ptr.child).Array;
if ((array_info.child == u8 or array_info.child == u16) and
array_info.sentinel != null and
- @ptrCast(*align(1) const array_info.child, array_info.sentinel.?).* == 0)
+ @as(*align(1) const array_info.child, @ptrCast(array_info.sentinel.?)).* == 0)
{
// length of the string plus one for the null terminator.
return (array_info.len + 1) * @sizeOf(array_info.child);
@@ -325,10 +314,10 @@ test "promoteIntLiteral" {
pub fn shuffleVectorIndex(comptime this_index: c_int, comptime source_vector_len: usize) i32 {
if (this_index <= 0) return 0;
- const positive_index = @intCast(usize, this_index);
- if (positive_index < source_vector_len) return @intCast(i32, this_index);
+ const positive_index = @as(usize, @intCast(this_index));
+ if (positive_index < source_vector_len) return @as(i32, @intCast(this_index));
const b_index = positive_index - source_vector_len;
- return ~@intCast(i32, b_index);
+ return ~@as(i32, @intCast(b_index));
}
test "shuffleVectorIndex" {
diff --git a/lib/std/zig/number_literal.zig b/lib/std/zig/number_literal.zig
index 66596b3b15..aba588a3ea 100644
--- a/lib/std/zig/number_literal.zig
+++ b/lib/std/zig/number_literal.zig
@@ -141,7 +141,7 @@ pub fn parseNumberLiteral(bytes: []const u8) Result {
'a'...'z' => c - 'a' + 10,
else => return .{ .failure = .{ .invalid_character = i } },
};
- if (digit >= base) return .{ .failure = .{ .invalid_digit = .{ .i = i, .base = @enumFromInt(Base, base) } } };
+ if (digit >= base) return .{ .failure = .{ .invalid_digit = .{ .i = i, .base = @as(Base, @enumFromInt(base)) } } };
if (exponent and digit >= 10) return .{ .failure = .{ .invalid_digit_exponent = i } };
underscore = false;
special = 0;
@@ -159,7 +159,7 @@ pub fn parseNumberLiteral(bytes: []const u8) Result {
if (underscore) return .{ .failure = .{ .trailing_underscore = bytes.len - 1 } };
if (special != 0) return .{ .failure = .{ .trailing_special = bytes.len - 1 } };
- if (float) return .{ .float = @enumFromInt(FloatBase, base) };
- if (overflow) return .{ .big_int = @enumFromInt(Base, base) };
+ if (float) return .{ .float = @as(FloatBase, @enumFromInt(base)) };
+ if (overflow) return .{ .big_int = @as(Base, @enumFromInt(base)) };
return .{ .int = x };
}
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index e41e9157e6..ca3e99b164 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -166,10 +166,10 @@ test "zig fmt: respect line breaks after var declarations" {
\\ lookup_tables[1][p[6]] ^
\\ lookup_tables[2][p[5]] ^
\\ lookup_tables[3][p[4]] ^
- \\ lookup_tables[4][@truncate(u8, self.crc >> 24)] ^
- \\ lookup_tables[5][@truncate(u8, self.crc >> 16)] ^
- \\ lookup_tables[6][@truncate(u8, self.crc >> 8)] ^
- \\ lookup_tables[7][@truncate(u8, self.crc >> 0)];
+ \\ lookup_tables[4][@as(u8, self.crc >> 24)] ^
+ \\ lookup_tables[5][@as(u8, self.crc >> 16)] ^
+ \\ lookup_tables[6][@as(u8, self.crc >> 8)] ^
+ \\ lookup_tables[7][@as(u8, self.crc >> 0)];
\\
);
}
@@ -1108,7 +1108,7 @@ test "zig fmt: async function" {
\\ handleRequestFn: fn (*Server, *const std.net.Address, File) callconv(.Async) void,
\\};
\\test "hi" {
- \\ var ptr = @ptrCast(fn (i32) callconv(.Async) void, other);
+ \\ var ptr: fn (i32) callconv(.Async) void = @ptrCast(other);
\\}
\\
);
@@ -1825,10 +1825,10 @@ test "zig fmt: respect line breaks after infix operators" {
\\ lookup_tables[1][p[6]] ^
\\ lookup_tables[2][p[5]] ^
\\ lookup_tables[3][p[4]] ^
- \\ lookup_tables[4][@truncate(u8, self.crc >> 24)] ^
- \\ lookup_tables[5][@truncate(u8, self.crc >> 16)] ^
- \\ lookup_tables[6][@truncate(u8, self.crc >> 8)] ^
- \\ lookup_tables[7][@truncate(u8, self.crc >> 0)];
+ \\ lookup_tables[4][@as(u8, self.crc >> 24)] ^
+ \\ lookup_tables[5][@as(u8, self.crc >> 16)] ^
+ \\ lookup_tables[6][@as(u8, self.crc >> 8)] ^
+ \\ lookup_tables[7][@as(u8, self.crc >> 0)];
\\}
\\
);
@@ -4814,7 +4814,7 @@ test "zig fmt: use of comments and multiline string literals may force the param
\\ \\ unknown-length pointers and C pointers cannot be hashed deeply.
\\ \\ Consider providing your own hash function.
\\ );
- \\ return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
+ \\ return @intCast(doMemCheckClientRequestExpr(0, // default return
\\ .MakeMemUndefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0));
\\}
\\
diff --git a/lib/std/zig/perf_test.zig b/lib/std/zig/perf_test.zig
index df60978510..a53dee7fa8 100644
--- a/lib/std/zig/perf_test.zig
+++ b/lib/std/zig/perf_test.zig
@@ -18,9 +18,9 @@ pub fn main() !void {
}
const end = timer.read();
memory_used /= iterations;
- const elapsed_s = @floatFromInt(f64, end - start) / std.time.ns_per_s;
- const bytes_per_sec_float = @floatFromInt(f64, source.len * iterations) / elapsed_s;
- const bytes_per_sec = @intFromFloat(u64, @floor(bytes_per_sec_float));
+ const elapsed_s = @as(f64, @floatFromInt(end - start)) / std.time.ns_per_s;
+ const bytes_per_sec_float = @as(f64, @floatFromInt(source.len * iterations)) / elapsed_s;
+ const bytes_per_sec = @as(u64, @intFromFloat(@floor(bytes_per_sec_float)));
var stdout_file = std.io.getStdOut();
const stdout = stdout_file.writer();
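
perf_test.zig shows the float conversions in the new spelling; the numeric result is unchanged. A small self-contained sketch of that arithmetic:

    test "elapsed-time arithmetic with the new cast spelling (illustrative)" {
        const elapsed_ns: u64 = 2_500_000_000;
        const elapsed_s = @as(f64, @floatFromInt(elapsed_ns)) / 1_000_000_000.0;
        const whole_seconds = @as(u64, @intFromFloat(@floor(elapsed_s)));
        try @import("std").testing.expectEqual(@as(u64, 2), whole_seconds);
    }
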
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index 0c93230d46..2cf7bc9716 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -1390,14 +1390,51 @@ fn renderBuiltinCall(
) Error!void {
const token_tags = tree.tokens.items(.tag);
- // TODO remove before release of 0.11.0
+ // TODO remove before release of 0.12.0
const slice = tree.tokenSlice(builtin_token);
+ const rewrite_two_param_cast = params.len == 2 and for ([_][]const u8{
+ "@bitCast",
+ "@errSetCast",
+ "@floatCast",
+ "@intCast",
+ "@ptrCast",
+ "@intFromFloat",
+ "@floatToInt",
+ "@enumFromInt",
+ "@intToEnum",
+ "@floatFromInt",
+ "@intToFloat",
+ "@ptrFromInt",
+ "@intToPtr",
+ "@truncate",
+ }) |name| {
+ if (mem.eql(u8, slice, name)) break true;
+ } else false;
+
+ if (rewrite_two_param_cast) {
+ const after_last_param_token = tree.lastToken(params[1]) + 1;
+ if (token_tags[after_last_param_token] != .comma) {
+ // Render all on one line, no trailing comma.
+ try ais.writer().writeAll("@as");
+ try renderToken(ais, tree, builtin_token + 1, .none); // (
+ try renderExpression(gpa, ais, tree, params[0], .comma_space);
+ } else {
+ // Render one param per line.
+ try ais.writer().writeAll("@as");
+ ais.pushIndent();
+ try renderToken(ais, tree, builtin_token + 1, .newline); // (
+ try renderExpression(gpa, ais, tree, params[0], .comma);
+ }
+ }
+ // Corresponding logic continues below the builtin name rewrite.
+
+ // TODO remove before release of 0.11.0
if (mem.eql(u8, slice, "@maximum")) {
try ais.writer().writeAll("@max");
} else if (mem.eql(u8, slice, "@minimum")) {
try ais.writer().writeAll("@min");
}
- //
+ // TODO remove before release of 0.12.0
else if (mem.eql(u8, slice, "@boolToInt")) {
try ais.writer().writeAll("@intFromBool");
} else if (mem.eql(u8, slice, "@enumToInt")) {
@@ -1420,6 +1457,23 @@ fn renderBuiltinCall(
try renderToken(ais, tree, builtin_token, .none); // @name
}
+ if (rewrite_two_param_cast) {
+ // Matches the corresponding logic above the builtin name rewrite.
+ const after_last_param_token = tree.lastToken(params[1]) + 1;
+ try ais.writer().writeAll("(");
+ try renderExpression(gpa, ais, tree, params[1], .none);
+ try ais.writer().writeAll(")");
+ if (token_tags[after_last_param_token] != .comma) {
+ // Render all on one line, no trailing comma.
+ return renderToken(ais, tree, after_last_param_token, space); // )
+ } else {
+ // Render one param per line.
+ ais.popIndent();
+ try renderToken(ais, tree, after_last_param_token, .newline); // ,
+ return renderToken(ais, tree, after_last_param_token + 1, space); // )
+ }
+ }
+
if (params.len == 0) {
try renderToken(ais, tree, builtin_token + 1, .none); // (
return renderToken(ais, tree, builtin_token + 2, space); // )
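
The `rewrite_two_param_cast` detection added above uses a `for` loop as a boolean expression over the list of affected builtin names. The same construct in isolation, as a sketch:

    const std = @import("std");

    test "for-else as a membership test (illustrative)" {
        const slice = "@intCast";
        const rewrite = for ([_][]const u8{ "@bitCast", "@intCast", "@ptrCast", "@truncate" }) |name| {
            if (std.mem.eql(u8, slice, name)) break true;
        } else false;
        try std.testing.expect(rewrite);
    }
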
@@ -2665,7 +2719,7 @@ fn renderIdentifier(ais: *Ais, tree: Ast, token_index: Ast.TokenIndex, space: Sp
while (contents_i < contents.len and buf_i < longest_keyword_or_primitive_len) {
if (contents[contents_i] == '\\') {
const res = std.zig.string_literal.parseEscapeSequence(contents, &contents_i).success;
- buf[buf_i] = @intCast(u8, res);
+ buf[buf_i] = @as(u8, @intCast(res));
buf_i += 1;
} else {
buf[buf_i] = contents[contents_i];
@@ -2719,7 +2773,7 @@ fn renderIdentifierContents(writer: anytype, bytes: []const u8) !void {
switch (res) {
.success => |codepoint| {
if (codepoint <= 0x7f) {
- const buf = [1]u8{@intCast(u8, codepoint)};
+ const buf = [1]u8{@as(u8, @intCast(codepoint))};
try std.fmt.format(writer, "{}", .{std.zig.fmtEscapes(&buf)});
} else {
try writer.writeAll(escape_sequence);
diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig
index 4859c379a0..53b1ab7ca8 100644
--- a/lib/std/zig/string_literal.zig
+++ b/lib/std/zig/string_literal.zig
@@ -142,7 +142,7 @@ pub fn parseEscapeSequence(slice: []const u8, offset: *usize) ParsedCharLiteral
return .{ .failure = .{ .expected_rbrace = i } };
}
offset.* = i;
- return .{ .success = @intCast(u21, value) };
+ return .{ .success = @as(u21, @intCast(value)) };
},
else => return .{ .failure = .{ .invalid_escape_character = offset.* - 1 } },
}
@@ -253,7 +253,7 @@ pub fn parseWrite(writer: anytype, bytes: []const u8) error{OutOfMemory}!Result
};
try writer.writeAll(buf[0..len]);
} else {
- try writer.writeByte(@intCast(u8, codepoint));
+ try writer.writeByte(@as(u8, @intCast(codepoint)));
}
},
.failure => |err| return Result{ .failure = err },
diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig
index 29ef752d7a..f69f1e1b1e 100644
--- a/lib/std/zig/system/NativeTargetInfo.zig
+++ b/lib/std/zig/system/NativeTargetInfo.zig
@@ -479,8 +479,8 @@ fn glibcVerFromRPath(rpath: []const u8) !std.SemanticVersion {
fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 align(@alignOf(elf.Elf64_Ehdr)) = undefined;
_ = try preadMin(file, &hdr_buf, 0, hdr_buf.len);
- const hdr32 = @ptrCast(*elf.Elf32_Ehdr, &hdr_buf);
- const hdr64 = @ptrCast(*elf.Elf64_Ehdr, &hdr_buf);
+ const hdr32 = @as(*elf.Elf32_Ehdr, @ptrCast(&hdr_buf));
+ const hdr64 = @as(*elf.Elf64_Ehdr, @ptrCast(&hdr_buf));
if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) {
elf.ELFDATA2LSB => .Little,
@@ -503,8 +503,8 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
if (sh_buf.len < shentsize) return error.InvalidElfFile;
_ = try preadMin(file, &sh_buf, str_section_off, shentsize);
- const shstr32 = @ptrCast(*elf.Elf32_Shdr, @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf));
- const shstr64 = @ptrCast(*elf.Elf64_Shdr, @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf));
+ const shstr32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf));
+ const shstr64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf));
const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset);
const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size);
var strtab_buf: [4096:0]u8 = undefined;
@@ -529,14 +529,8 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
shoff += shentsize;
sh_buf_i += shentsize;
}) {
- const sh32 = @ptrCast(
- *elf.Elf32_Shdr,
- @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf[sh_buf_i]),
- );
- const sh64 = @ptrCast(
- *elf.Elf64_Shdr,
- @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf[sh_buf_i]),
- );
+ const sh32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i]));
+ const sh64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i]));
const sh_name_off = elfInt(is_64, need_bswap, sh32.sh_name, sh64.sh_name);
const sh_name = mem.sliceTo(shstrtab[sh_name_off..], 0);
if (mem.eql(u8, sh_name, ".dynstr")) {
@@ -558,7 +552,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
var buf: [80000]u8 = undefined;
if (buf.len < dynstr.size) return error.InvalidGnuLibCVersion;
- const dynstr_size = @intCast(usize, dynstr.size);
+ const dynstr_size = @as(usize, @intCast(dynstr.size));
const dynstr_bytes = buf[0..dynstr_size];
_ = try preadMin(file, dynstr_bytes, dynstr.offset, dynstr_bytes.len);
var it = mem.splitScalar(u8, dynstr_bytes, 0);
@@ -621,8 +615,8 @@ pub fn abiAndDynamicLinkerFromFile(
) AbiAndDynamicLinkerFromFileError!NativeTargetInfo {
var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 align(@alignOf(elf.Elf64_Ehdr)) = undefined;
_ = try preadMin(file, &hdr_buf, 0, hdr_buf.len);
- const hdr32 = @ptrCast(*elf.Elf32_Ehdr, &hdr_buf);
- const hdr64 = @ptrCast(*elf.Elf64_Ehdr, &hdr_buf);
+ const hdr32 = @as(*elf.Elf32_Ehdr, @ptrCast(&hdr_buf));
+ const hdr64 = @as(*elf.Elf64_Ehdr, @ptrCast(&hdr_buf));
if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) {
elf.ELFDATA2LSB => .Little,
@@ -668,21 +662,21 @@ pub fn abiAndDynamicLinkerFromFile(
phoff += phentsize;
ph_buf_i += phentsize;
}) {
- const ph32 = @ptrCast(*elf.Elf32_Phdr, @alignCast(@alignOf(elf.Elf32_Phdr), &ph_buf[ph_buf_i]));
- const ph64 = @ptrCast(*elf.Elf64_Phdr, @alignCast(@alignOf(elf.Elf64_Phdr), &ph_buf[ph_buf_i]));
+ const ph32: *elf.Elf32_Phdr = @ptrCast(@alignCast(&ph_buf[ph_buf_i]));
+ const ph64: *elf.Elf64_Phdr = @ptrCast(@alignCast(&ph_buf[ph_buf_i]));
const p_type = elfInt(is_64, need_bswap, ph32.p_type, ph64.p_type);
switch (p_type) {
elf.PT_INTERP => if (look_for_ld) {
const p_offset = elfInt(is_64, need_bswap, ph32.p_offset, ph64.p_offset);
const p_filesz = elfInt(is_64, need_bswap, ph32.p_filesz, ph64.p_filesz);
if (p_filesz > result.dynamic_linker.buffer.len) return error.NameTooLong;
- const filesz = @intCast(usize, p_filesz);
+ const filesz = @as(usize, @intCast(p_filesz));
_ = try preadMin(file, result.dynamic_linker.buffer[0..filesz], p_offset, filesz);
// PT_INTERP includes a null byte in filesz.
const len = filesz - 1;
// dynamic_linker.max_byte is "max", not "len".
// We know it will fit in u8 because we check against dynamic_linker.buffer.len above.
- result.dynamic_linker.max_byte = @intCast(u8, len - 1);
+ result.dynamic_linker.max_byte = @as(u8, @intCast(len - 1));
// Use it to determine ABI.
const full_ld_path = result.dynamic_linker.buffer[0..len];
@@ -720,14 +714,8 @@ pub fn abiAndDynamicLinkerFromFile(
dyn_off += dyn_size;
dyn_buf_i += dyn_size;
}) {
- const dyn32 = @ptrCast(
- *elf.Elf32_Dyn,
- @alignCast(@alignOf(elf.Elf32_Dyn), &dyn_buf[dyn_buf_i]),
- );
- const dyn64 = @ptrCast(
- *elf.Elf64_Dyn,
- @alignCast(@alignOf(elf.Elf64_Dyn), &dyn_buf[dyn_buf_i]),
- );
+ const dyn32: *elf.Elf32_Dyn = @ptrCast(@alignCast(&dyn_buf[dyn_buf_i]));
+ const dyn64: *elf.Elf64_Dyn = @ptrCast(@alignCast(&dyn_buf[dyn_buf_i]));
const tag = elfInt(is_64, need_bswap, dyn32.d_tag, dyn64.d_tag);
const val = elfInt(is_64, need_bswap, dyn32.d_val, dyn64.d_val);
if (tag == elf.DT_RUNPATH) {
@@ -755,8 +743,8 @@ pub fn abiAndDynamicLinkerFromFile(
if (sh_buf.len < shentsize) return error.InvalidElfFile;
_ = try preadMin(file, &sh_buf, str_section_off, shentsize);
- const shstr32 = @ptrCast(*elf.Elf32_Shdr, @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf));
- const shstr64 = @ptrCast(*elf.Elf64_Shdr, @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf));
+ const shstr32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf));
+ const shstr64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf));
const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset);
const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size);
var strtab_buf: [4096:0]u8 = undefined;
@@ -782,14 +770,8 @@ pub fn abiAndDynamicLinkerFromFile(
shoff += shentsize;
sh_buf_i += shentsize;
}) {
- const sh32 = @ptrCast(
- *elf.Elf32_Shdr,
- @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf[sh_buf_i]),
- );
- const sh64 = @ptrCast(
- *elf.Elf64_Shdr,
- @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf[sh_buf_i]),
- );
+ const sh32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i]));
+ const sh64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i]));
const sh_name_off = elfInt(is_64, need_bswap, sh32.sh_name, sh64.sh_name);
const sh_name = mem.sliceTo(shstrtab[sh_name_off..], 0);
if (mem.eql(u8, sh_name, ".dynstr")) {
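
NativeTargetInfo.zig now states each ELF header pointer type once, on the left-hand side, and lets `@ptrCast`/`@alignCast` infer it. A sketch of the pattern with a stand-in header type (not the real elf.Elf64_Ehdr):

    test "ptrCast/alignCast driven by a typed const (illustrative)" {
        const Header = extern struct { a: u32, b: u32 }; // stand-in for an ELF header struct
        var buf: [@sizeOf(Header)]u8 align(@alignOf(Header)) = undefined;
        const hdr: *Header = @ptrCast(@alignCast(&buf)); // type and alignment from the annotation
        hdr.* = .{ .a = 1, .b = 2 };
        try @import("std").testing.expectEqual(@as(u32, 2), hdr.b);
    }
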
diff --git a/lib/std/zig/system/arm.zig b/lib/std/zig/system/arm.zig
index da05c8c90d..7d466fc984 100644
--- a/lib/std/zig/system/arm.zig
+++ b/lib/std/zig/system/arm.zig
@@ -141,7 +141,7 @@ pub const aarch64 = struct {
}
inline fn bitField(input: u64, offset: u6) u4 {
- return @truncate(u4, input >> offset);
+ return @as(u4, @truncate(input >> offset));
}
/// Input array should consist of readouts from 12 system registers such that:
@@ -176,23 +176,23 @@ pub const aarch64 = struct {
/// Takes readout of MIDR_EL1 register as input.
fn detectNativeCoreInfo(midr: u64) CoreInfo {
var info = CoreInfo{
- .implementer = @truncate(u8, midr >> 24),
- .part = @truncate(u12, midr >> 4),
+ .implementer = @as(u8, @truncate(midr >> 24)),
+ .part = @as(u12, @truncate(midr >> 4)),
};
blk: {
if (info.implementer == 0x41) {
// ARM Ltd.
- const special_bits = @truncate(u4, info.part >> 8);
+ const special_bits = @as(u4, @truncate(info.part >> 8));
if (special_bits == 0x0 or special_bits == 0x7) {
// TODO Variant and arch encoded differently.
break :blk;
}
}
- info.variant |= @intCast(u8, @truncate(u4, midr >> 20)) << 4;
- info.variant |= @truncate(u4, midr);
- info.architecture = @truncate(u4, midr >> 16);
+ info.variant |= @as(u8, @intCast(@as(u4, @truncate(midr >> 20)))) << 4;
+ info.variant |= @as(u4, @truncate(midr));
+ info.architecture = @as(u4, @truncate(midr >> 16));
}
return info;
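
arm.zig's `bitField` helper now gets its 4-bit result width from the return type alone. The same helper as a standalone sketch:

    fn bitField(input: u64, offset: u6) u4 {
        return @truncate(input >> offset); // result width (u4) inferred from the return type
    }

    test "bitField extraction (illustrative)" {
        try @import("std").testing.expectEqual(@as(u4, 0xA), bitField(0xA0, 4));
    }
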
diff --git a/lib/std/zig/system/windows.zig b/lib/std/zig/system/windows.zig
index c5c6f052ec..9c5b614c39 100644
--- a/lib/std/zig/system/windows.zig
+++ b/lib/std/zig/system/windows.zig
@@ -26,8 +26,8 @@ pub fn detectRuntimeVersion() WindowsVersion {
// `---` `` ``--> Sub-version (Starting from Windows 10 onwards)
// \ `--> Service pack (Always zero in the constants defined)
// `--> OS version (Major & minor)
- const os_ver: u16 = @intCast(u16, version_info.dwMajorVersion & 0xff) << 8 |
- @intCast(u16, version_info.dwMinorVersion & 0xff);
+ const os_ver: u16 = @as(u16, @intCast(version_info.dwMajorVersion & 0xff)) << 8 |
+ @as(u16, @intCast(version_info.dwMinorVersion & 0xff));
const sp_ver: u8 = 0;
const sub_ver: u8 = if (os_ver >= 0x0A00) subver: {
// There's no other way to obtain this info beside
@@ -38,12 +38,12 @@ pub fn detectRuntimeVersion() WindowsVersion {
if (version_info.dwBuildNumber >= build)
last_idx = i;
}
- break :subver @truncate(u8, last_idx);
+ break :subver @as(u8, @truncate(last_idx));
} else 0;
const version: u32 = @as(u32, os_ver) << 16 | @as(u16, sp_ver) << 8 | sub_ver;
- return @enumFromInt(WindowsVersion, version);
+ return @as(WindowsVersion, @enumFromInt(version));
}
// Technically, a registry value can be as long as 1MB. However, MS recommends storing
@@ -100,11 +100,11 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void {
REG.MULTI_SZ,
=> {
comptime assert(@sizeOf(std.os.windows.UNICODE_STRING) % 2 == 0);
- const unicode = @ptrCast(*std.os.windows.UNICODE_STRING, &tmp_bufs[i]);
+ const unicode = @as(*std.os.windows.UNICODE_STRING, @ptrCast(&tmp_bufs[i]));
unicode.* = .{
.Length = 0,
.MaximumLength = max_value_len - @sizeOf(std.os.windows.UNICODE_STRING),
- .Buffer = @ptrCast([*]u16, tmp_bufs[i][@sizeOf(std.os.windows.UNICODE_STRING)..]),
+ .Buffer = @as([*]u16, @ptrCast(tmp_bufs[i][@sizeOf(std.os.windows.UNICODE_STRING)..])),
};
break :blk unicode;
},
@@ -159,7 +159,7 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void {
REG.MULTI_SZ,
=> {
var buf = @field(args, field.name).value_buf;
- const entry = @ptrCast(*align(1) const std.os.windows.UNICODE_STRING, table[i + 1].EntryContext);
+ const entry = @as(*align(1) const std.os.windows.UNICODE_STRING, @ptrCast(table[i + 1].EntryContext));
const len = try std.unicode.utf16leToUtf8(buf, entry.Buffer[0 .. entry.Length / 2]);
buf[len] = 0;
},
@@ -168,7 +168,7 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void {
REG.DWORD_BIG_ENDIAN,
REG.QWORD,
=> {
- const entry = @ptrCast([*]align(1) const u8, table[i + 1].EntryContext);
+ const entry = @as([*]align(1) const u8, @ptrCast(table[i + 1].EntryContext));
switch (@field(args, field.name).value_type) {
REG.DWORD, REG.DWORD_BIG_ENDIAN => {
@memcpy(@field(args, field.name).value_buf[0..4], entry[0..4]);
@@ -254,18 +254,18 @@ pub fn detectNativeCpuAndFeatures() ?Target.Cpu {
// CP 4039 -> ID_AA64MMFR1_EL1
// CP 403A -> ID_AA64MMFR2_EL1
getCpuInfoFromRegistry(i, .{
- .{ .key = "CP 4000", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[0]) },
- .{ .key = "CP 4020", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[1]) },
- .{ .key = "CP 4021", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[2]) },
- .{ .key = "CP 4028", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[3]) },
- .{ .key = "CP 4029", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[4]) },
- .{ .key = "CP 402C", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[5]) },
- .{ .key = "CP 402D", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[6]) },
- .{ .key = "CP 4030", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[7]) },
- .{ .key = "CP 4031", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[8]) },
- .{ .key = "CP 4038", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[9]) },
- .{ .key = "CP 4039", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[10]) },
- .{ .key = "CP 403A", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[11]) },
+ .{ .key = "CP 4000", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[0])) },
+ .{ .key = "CP 4020", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[1])) },
+ .{ .key = "CP 4021", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[2])) },
+ .{ .key = "CP 4028", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[3])) },
+ .{ .key = "CP 4029", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[4])) },
+ .{ .key = "CP 402C", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[5])) },
+ .{ .key = "CP 402D", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[6])) },
+ .{ .key = "CP 4030", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[7])) },
+ .{ .key = "CP 4031", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[8])) },
+ .{ .key = "CP 4038", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[9])) },
+ .{ .key = "CP 4039", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[10])) },
+ .{ .key = "CP 403A", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[11])) },
}) catch break :blk null;
cores[i] = @import("arm.zig").aarch64.detectNativeCpuAndFeatures(current_arch, registers) orelse
diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig
index 0d6a6d4fd8..72f65afb3a 100644
--- a/lib/std/zig/tokenizer.zig
+++ b/lib/std/zig/tokenizer.zig
@@ -1290,7 +1290,7 @@ pub const Tokenizer = struct {
// check utf8-encoded character.
const length = std.unicode.utf8ByteSequenceLength(c0) catch return 1;
if (self.index + length > self.buffer.len) {
- return @intCast(u3, self.buffer.len - self.index);
+ return @as(u3, @intCast(self.buffer.len - self.index));
}
const bytes = self.buffer[self.index .. self.index + length];
switch (length) {