aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndrew Kelley <andrewrk@noreply.codeberg.org>2025-12-27 14:10:46 +0100
committerAndrew Kelley <andrewrk@noreply.codeberg.org>2025-12-27 14:10:46 +0100
commite55e6b5528bb2f01de242fcf32b172e244e98e74 (patch)
tree3a5eb3193d3d192c54ab0c2b7295a7f21861c27e
parentc3f2de5e519926eb0029062fe8e782a6f9df9c05 (diff)
parent60a1ba0a8f3517356fa2941462f002a7f580545b (diff)
downloadzig-e55e6b5528bb2f01de242fcf32b172e244e98e74.tar.gz
zig-e55e6b5528bb2f01de242fcf32b172e244e98e74.zip
Merge pull request 'std: migrate all `fs` APIs to `Io`' (#30232) from std.Io-fs into masterHEADmaster
Reviewed-on: https://codeberg.org/ziglang/zig/pulls/30232
-rw-r--r--CMakeLists.txt3
-rw-r--r--build.zig95
-rw-r--r--doc/langref/bad_default_value.zig2
-rw-r--r--doc/langref/hello.zig12
-rw-r--r--lib/compiler/aro/aro/Compilation.zig36
-rw-r--r--lib/compiler/aro/aro/Diagnostics.zig38
-rw-r--r--lib/compiler/aro/aro/Driver.zig46
-rw-r--r--lib/compiler/aro/aro/Driver/Filesystem.zig40
-rw-r--r--lib/compiler/aro/aro/Parser.zig27
-rw-r--r--lib/compiler/aro/aro/Preprocessor.zig13
-rw-r--r--lib/compiler/aro/aro/Tokenizer.zig5
-rw-r--r--lib/compiler/aro/aro/Toolchain.zig18
-rw-r--r--lib/compiler/aro/aro/Value.zig11
-rw-r--r--lib/compiler/aro/backend/Assembly.zig5
-rw-r--r--lib/compiler/aro/main.zig9
-rw-r--r--lib/compiler/build_runner.zig393
-rw-r--r--lib/compiler/libc.zig20
-rw-r--r--lib/compiler/objcopy.zig46
-rw-r--r--lib/compiler/reduce.zig46
-rw-r--r--lib/compiler/resinator/cli.zig82
-rw-r--r--lib/compiler/resinator/compile.zig54
-rw-r--r--lib/compiler/resinator/errors.zig90
-rw-r--r--lib/compiler/resinator/main.zig188
-rw-r--r--lib/compiler/resinator/utils.zig40
-rw-r--r--lib/compiler/std-docs.zig127
-rw-r--r--lib/compiler/test_runner.zig46
-rw-r--r--lib/compiler/translate-c/main.zig41
-rw-r--r--lib/compiler_rt/emutls.zig2
-rw-r--r--lib/fuzzer.zig84
-rw-r--r--lib/init/src/main.zig26
-rw-r--r--lib/init/src/root.zig19
-rw-r--r--lib/std/Build.zig144
-rw-r--r--lib/std/Build/Cache.zig231
-rw-r--r--lib/std/Build/Cache/Directory.zig14
-rw-r--r--lib/std/Build/Cache/Path.zig46
-rw-r--r--lib/std/Build/Fuzz.zig69
-rw-r--r--lib/std/Build/Step.zig58
-rw-r--r--lib/std/Build/Step/CheckFile.zig5
-rw-r--r--lib/std/Build/Step/CheckObject.zig2
-rw-r--r--lib/std/Build/Step/Compile.zig59
-rw-r--r--lib/std/Build/Step/ConfigHeader.zig13
-rw-r--r--lib/std/Build/Step/InstallArtifact.zig7
-rw-r--r--lib/std/Build/Step/InstallDir.zig11
-rw-r--r--lib/std/Build/Step/ObjCopy.zig5
-rw-r--r--lib/std/Build/Step/Options.zig57
-rw-r--r--lib/std/Build/Step/RemoveDir.zig11
-rw-r--r--lib/std/Build/Step/Run.zig250
-rw-r--r--lib/std/Build/Step/UpdateSourceFiles.zig6
-rw-r--r--lib/std/Build/Step/WriteFile.zig37
-rw-r--r--lib/std/Build/Watch.zig20
-rw-r--r--lib/std/Build/Watch/FsEvents.zig5
-rw-r--r--lib/std/Build/WebServer.zig36
-rw-r--r--lib/std/Io.zig132
-rw-r--r--lib/std/Io/Dir.zig1593
-rw-r--r--lib/std/Io/File.zig853
-rw-r--r--lib/std/Io/File/Atomic.zig (renamed from lib/std/fs/AtomicFile.zig)50
-rw-r--r--lib/std/Io/File/Reader.zig394
-rw-r--r--lib/std/Io/File/Writer.zig274
-rw-r--r--lib/std/Io/IoUring.zig4
-rw-r--r--lib/std/Io/Kqueue.zig20
-rw-r--r--lib/std/Io/Terminal.zig138
-rw-r--r--lib/std/Io/Threaded.zig6173
-rw-r--r--lib/std/Io/Threaded/test.zig12
-rw-r--r--lib/std/Io/Writer.zig29
-rw-r--r--lib/std/Io/net.zig25
-rw-r--r--lib/std/Io/net/HostName.zig2
-rw-r--r--lib/std/Io/net/test.zig13
-rw-r--r--lib/std/Io/test.zig160
-rw-r--r--lib/std/Io/tty.zig131
-rw-r--r--lib/std/Progress.zig288
-rw-r--r--lib/std/Random/benchmark.zig6
-rw-r--r--lib/std/Thread.zig36
-rw-r--r--lib/std/c.zig12
-rw-r--r--lib/std/c/darwin.zig2
-rw-r--r--lib/std/c/freebsd.zig2
-rw-r--r--lib/std/crypto/Certificate/Bundle.zig32
-rw-r--r--lib/std/crypto/Certificate/Bundle/macos.zig5
-rw-r--r--lib/std/crypto/benchmark.zig10
-rw-r--r--lib/std/crypto/codecs/asn1/test.zig6
-rw-r--r--lib/std/crypto/tls.zig1
-rw-r--r--lib/std/debug.zig528
-rw-r--r--lib/std/debug/ElfFile.zig50
-rw-r--r--lib/std/debug/Info.zig30
-rw-r--r--lib/std/debug/MachOFile.zig22
-rw-r--r--lib/std/debug/Pdb.zig2
-rw-r--r--lib/std/debug/SelfInfo/Elf.zig35
-rw-r--r--lib/std/debug/SelfInfo/MachO.zig20
-rw-r--r--lib/std/debug/SelfInfo/Windows.zig30
-rw-r--r--lib/std/debug/simple_panic.zig5
-rw-r--r--lib/std/dynamic_library.zig76
-rw-r--r--lib/std/fs.zig562
-rw-r--r--lib/std/fs/Dir.zig2065
-rw-r--r--lib/std/fs/File.zig1437
-rw-r--r--lib/std/fs/path.zig4
-rw-r--r--lib/std/fs/test.zig2008
-rw-r--r--lib/std/hash/benchmark.zig5
-rw-r--r--lib/std/heap/debug_allocator.zig55
-rw-r--r--lib/std/http.zig2
-rw-r--r--lib/std/http/Client.zig4
-rw-r--r--lib/std/json/dynamic.zig7
-rw-r--r--lib/std/log.zig46
-rw-r--r--lib/std/os.zig136
-rw-r--r--lib/std/os/freebsd.zig50
-rw-r--r--lib/std/os/linux.zig180
-rw-r--r--lib/std/os/linux/IoUring.zig2743
-rw-r--r--lib/std/os/linux/IoUring/test.zig2691
-rw-r--r--lib/std/os/linux/test.zig32
-rw-r--r--lib/std/os/uefi/protocol/file.zig9
-rw-r--r--lib/std/os/windows.zig257
-rw-r--r--lib/std/pdb.zig2
-rw-r--r--lib/std/posix.zig2624
-rw-r--r--lib/std/posix/test.zig638
-rw-r--r--lib/std/process.zig271
-rw-r--r--lib/std/process/Child.zig163
-rw-r--r--lib/std/start.zig16
-rw-r--r--lib/std/std.zig26
-rw-r--r--lib/std/tar.zig157
-rw-r--r--lib/std/tar/test.zig12
-rw-r--r--lib/std/testing.zig120
-rw-r--r--lib/std/unicode/throughput_test.zig6
-rw-r--r--lib/std/zig.zig21
-rw-r--r--lib/std/zig/ErrorBundle.zig78
-rw-r--r--lib/std/zig/LibCDirs.zig19
-rw-r--r--lib/std/zig/LibCInstallation.zig205
-rw-r--r--lib/std/zig/WindowsSdk.zig305
-rw-r--r--lib/std/zig/Zir.zig8
-rw-r--r--lib/std/zig/Zoir.zig15
-rw-r--r--lib/std/zig/llvm/Builder.zig29
-rw-r--r--lib/std/zig/parser_test.zig45
-rw-r--r--lib/std/zig/perf_test.zig3
-rw-r--r--lib/std/zig/system.zig44
-rw-r--r--lib/std/zig/system/NativePaths.zig13
-rw-r--r--lib/std/zig/system/darwin.zig29
-rw-r--r--lib/std/zig/system/darwin/macos.zig9
-rw-r--r--lib/std/zig/system/linux.zig4
-rw-r--r--lib/std/zip.zig28
-rw-r--r--src/Air/print.zig28
-rw-r--r--src/Builtin.zig5
-rw-r--r--src/Compilation.zig436
-rw-r--r--src/InternPool.zig60
-rw-r--r--src/Package/Fetch.zig246
-rw-r--r--src/Package/Fetch/git.zig90
-rw-r--r--src/Sema.zig8
-rw-r--r--src/Zcu.zig37
-rw-r--r--src/Zcu/PerThread.zig87
-rw-r--r--src/codegen/aarch64/Select.zig10
-rw-r--r--src/codegen/llvm.zig38
-rw-r--r--src/crash_report.zig13
-rw-r--r--src/fmt.zig70
-rw-r--r--src/introspect.zig98
-rw-r--r--src/libs/freebsd.zig28
-rw-r--r--src/libs/glibc.zig32
-rw-r--r--src/libs/mingw.zig65
-rw-r--r--src/libs/mingw/def.zig32
-rw-r--r--src/libs/netbsd.zig26
-rw-r--r--src/link.zig224
-rw-r--r--src/link/C.zig17
-rw-r--r--src/link/Coff.zig74
-rw-r--r--src/link/Dwarf.zig107
-rw-r--r--src/link/Elf.zig113
-rw-r--r--src/link/Elf/Archive.zig53
-rw-r--r--src/link/Elf/AtomList.zig12
-rw-r--r--src/link/Elf/Object.zig112
-rw-r--r--src/link/Elf/SharedObject.zig53
-rw-r--r--src/link/Elf/ZigObject.zig28
-rw-r--r--src/link/Elf/file.zig35
-rw-r--r--src/link/Elf/relocatable.zig68
-rw-r--r--src/link/Elf2.zig78
-rw-r--r--src/link/Lld.zig64
-rw-r--r--src/link/MachO.zig235
-rw-r--r--src/link/MachO/Archive.zig7
-rw-r--r--src/link/MachO/CodeSignature.zig41
-rw-r--r--src/link/MachO/DebugSymbols.zig91
-rw-r--r--src/link/MachO/Dylib.zig20
-rw-r--r--src/link/MachO/Object.zig153
-rw-r--r--src/link/MachO/ZigObject.zig21
-rw-r--r--src/link/MachO/fat.zig18
-rw-r--r--src/link/MachO/file.zig3
-rw-r--r--src/link/MachO/hasher.zig48
-rw-r--r--src/link/MachO/relocatable.zig30
-rw-r--r--src/link/MachO/uuid.zig33
-rw-r--r--src/link/MappedFile.zig87
-rw-r--r--src/link/Queue.zig4
-rw-r--r--src/link/SpirV.zig8
-rw-r--r--src/link/Wasm.zig58
-rw-r--r--src/link/Wasm/Flush.zig14
-rw-r--r--src/link/tapi.zig14
-rw-r--r--src/main.zig499
-rw-r--r--src/print_env.zig17
-rw-r--r--src/print_targets.zig19
-rw-r--r--stage1/wasi.c61
-rw-r--r--test/cases/disable_stack_tracing.zig6
-rw-r--r--test/incremental/add_decl20
-rw-r--r--test/incremental/add_decl_namespaced20
-rw-r--r--test/incremental/bad_import6
-rw-r--r--test/incremental/change_embed_file9
-rw-r--r--test/incremental/change_enum_tag_type9
-rw-r--r--test/incremental/change_exports18
-rw-r--r--test/incremental/change_fn_type9
-rw-r--r--test/incremental/change_generic_line_number6
-rw-r--r--test/incremental/change_line_number6
-rw-r--r--test/incremental/change_panic_handler9
-rw-r--r--test/incremental/change_panic_handler_explicit9
-rw-r--r--test/incremental/change_shift_op6
-rw-r--r--test/incremental/change_struct_same_fields9
-rw-r--r--test/incremental/change_zon_file9
-rw-r--r--test/incremental/change_zon_file_no_result_type3
-rw-r--r--test/incremental/compile_log9
-rw-r--r--test/incremental/fix_astgen_failure13
-rw-r--r--test/incremental/function_becomes_inline9
-rw-r--r--test/incremental/hello6
-rw-r--r--test/incremental/make_decl_pub6
-rw-r--r--test/incremental/modify_inline_fn6
-rw-r--r--test/incremental/move_src6
-rw-r--r--test/incremental/no_change_preserves_tag_names6
-rw-r--r--test/incremental/recursive_function_becomes_non_recursive12
-rw-r--r--test/incremental/remove_enum_field6
-rw-r--r--test/incremental/unreferenced_error12
-rw-r--r--test/link/bss/main.zig2
-rw-r--r--test/link/elf.zig4
-rw-r--r--test/link/macho.zig7
-rw-r--r--test/link/wasm/extern/main.zig2
-rw-r--r--test/src/Cases.zig24
-rw-r--r--test/src/convert-stack-trace.zig10
-rw-r--r--test/standalone/child_process/child.zig16
-rw-r--r--test/standalone/child_process/main.zig23
-rw-r--r--test/standalone/cmakedefine/check.zig6
-rw-r--r--test/standalone/coff_dwarf/main.zig2
-rw-r--r--test/standalone/dirname/build.zig10
-rw-r--r--test/standalone/dirname/exists_in.zig8
-rw-r--r--test/standalone/dirname/touch.zig16
-rw-r--r--test/standalone/entry_point/check_differ.zig6
-rw-r--r--test/standalone/env_vars/main.zig2
-rw-r--r--test/standalone/glibc_compat/glibc_runtime_check.zig4
-rw-r--r--test/standalone/install_headers/check_exists.zig10
-rw-r--r--test/standalone/ios/build.zig4
-rw-r--r--test/standalone/libfuzzer/main.zig12
-rw-r--r--test/standalone/posix/cwd.zig79
-rw-r--r--test/standalone/posix/relpaths.zig133
-rw-r--r--test/standalone/run_cwd/check_file_exists.zig4
-rw-r--r--test/standalone/run_output_caching/main.zig7
-rw-r--r--test/standalone/run_output_paths/create_file.zig7
-rw-r--r--test/standalone/self_exe_symlink/build.zig4
-rw-r--r--test/standalone/self_exe_symlink/create-symlink.zig5
-rw-r--r--test/standalone/self_exe_symlink/main.zig21
-rw-r--r--test/standalone/simple/cat/main.zig14
-rw-r--r--test/standalone/simple/guess_number/main.zig33
-rw-r--r--test/standalone/simple/hello_world/hello.zig12
-rw-r--r--test/standalone/windows_argv/build.zig2
-rw-r--r--test/standalone/windows_bat_args/echo-args.zig4
-rw-r--r--test/standalone/windows_bat_args/fuzz.zig75
-rw-r--r--test/standalone/windows_bat_args/test.zig154
-rw-r--r--test/standalone/windows_paths/relative.zig6
-rw-r--r--test/standalone/windows_paths/test.zig47
-rw-r--r--test/standalone/windows_spawn/hello.zig3
-rw-r--r--test/standalone/windows_spawn/main.zig195
-rw-r--r--test/tests.zig114
-rw-r--r--tools/docgen.zig34
-rw-r--r--tools/doctest.zig63
-rw-r--r--tools/dump-cov.zig12
-rw-r--r--tools/fetch_them_macos_headers.zig61
-rw-r--r--tools/gen_macos_headers_c.zig32
-rw-r--r--tools/gen_outline_atomics.zig7
-rw-r--r--tools/gen_spirv_spec.zig39
-rw-r--r--tools/gen_stubs.zig17
-rw-r--r--tools/generate_JSONTestSuite.zig13
-rw-r--r--tools/generate_c_size_and_align_checks.zig5
-rw-r--r--tools/generate_linux_syscalls.zig18
-rw-r--r--tools/incr-check.zig196
-rw-r--r--tools/migrate_langref.zig35
-rw-r--r--tools/process_headers.zig39
-rw-r--r--tools/update-linux-headers.zig39
-rw-r--r--tools/update_clang_options.zig15
-rw-r--r--tools/update_cpu_features.zig42
-rw-r--r--tools/update_crc_catalog.zig39
-rw-r--r--tools/update_freebsd_libc.zig32
-rw-r--r--tools/update_glibc.zig64
-rw-r--r--tools/update_mingw.zig64
-rw-r--r--tools/update_netbsd_libc.zig29
279 files changed, 19474 insertions, 17815 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ce4bb1eef7..3b06f36e61 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -436,9 +436,6 @@ set(ZIG_STAGE2_SOURCES
lib/std/fmt.zig
lib/std/fmt/parse_float.zig
lib/std/fs.zig
- lib/std/fs/AtomicFile.zig
- lib/std/fs/Dir.zig
- lib/std/fs/File.zig
lib/std/fs/get_app_data_dir.zig
lib/std/fs/path.zig
lib/std/hash.zig
diff --git a/build.zig b/build.zig
index b7a8e282ba..338709c7c4 100644
--- a/build.zig
+++ b/build.zig
@@ -1,18 +1,20 @@
const std = @import("std");
const builtin = std.builtin;
-const tests = @import("test/tests.zig");
const BufMap = std.BufMap;
const mem = std.mem;
-const io = std.io;
const fs = std.fs;
const InstallDirectoryOptions = std.Build.InstallDirectoryOptions;
const assert = std.debug.assert;
+const Io = std.Io;
+
+const tests = @import("test/tests.zig");
const DevEnv = @import("src/dev.zig").Env;
-const ValueInterpretMode = enum { direct, by_name };
const zig_version: std.SemanticVersion = .{ .major = 0, .minor = 16, .patch = 0 };
const stack_size = 46 * 1024 * 1024;
+const ValueInterpretMode = enum { direct, by_name };
+
pub fn build(b: *std.Build) !void {
const only_c = b.option(bool, "only-c", "Translate the Zig compiler to C code, with only the C backend enabled") orelse false;
const target = b.standardTargetOptions(.{
@@ -306,8 +308,10 @@ pub fn build(b: *std.Build) !void {
if (enable_llvm) {
const cmake_cfg = if (static_llvm) null else blk: {
+ const io = b.graph.io;
+ const cwd: Io.Dir = .cwd();
if (findConfigH(b, config_h_path_option)) |config_h_path| {
- const file_contents = fs.cwd().readFileAlloc(config_h_path, b.allocator, .limited(max_config_h_bytes)) catch unreachable;
+ const file_contents = cwd.readFileAlloc(io, config_h_path, b.allocator, .limited(max_config_h_bytes)) catch unreachable;
break :blk parseConfigH(b, file_contents);
} else {
std.log.warn("config.h could not be located automatically. Consider providing it explicitly via \"-Dconfig_h\"", .{});
@@ -531,10 +535,6 @@ pub fn build(b: *std.Build) !void {
.aarch64 => 701_413_785,
else => 800_000_000,
},
- .windows => switch (b.graph.host.result.cpu.arch) {
- .x86_64 => 536_414_208,
- else => 600_000_000,
- },
else => 900_000_000,
},
}));
@@ -561,30 +561,7 @@ pub fn build(b: *std.Build) !void {
.skip_llvm = skip_llvm,
.skip_libc = true,
.no_builtin = true,
- .max_rss = switch (b.graph.host.result.os.tag) {
- .freebsd => switch (b.graph.host.result.cpu.arch) {
- .x86_64 => 557_892_403,
- else => 600_000_000,
- },
- .linux => switch (b.graph.host.result.cpu.arch) {
- .aarch64 => 615_302_758,
- .loongarch64 => 598_974_464,
- .powerpc64le => 587_845_632,
- .riscv64 => 382_786_764,
- .s390x => 395_555_635,
- .x86_64 => 871_883_161,
- else => 900_000_000,
- },
- .macos => switch (b.graph.host.result.cpu.arch) {
- .aarch64 => 451_389_030,
- else => 500_000_000,
- },
- .windows => switch (b.graph.host.result.cpu.arch) {
- .x86_64 => 367_747_072,
- else => 400_000_000,
- },
- else => 900_000_000,
- },
+ .max_rss = 900_000_000,
}));
test_modules_step.dependOn(tests.addModuleTests(b, .{
@@ -647,30 +624,7 @@ pub fn build(b: *std.Build) !void {
.use_llvm = use_llvm,
.use_lld = use_llvm,
.zig_lib_dir = b.path("lib"),
- .max_rss = switch (b.graph.host.result.os.tag) {
- .freebsd => switch (b.graph.host.result.cpu.arch) {
- .x86_64 => 2_188_099_584,
- else => 2_200_000_000,
- },
- .linux => switch (b.graph.host.result.cpu.arch) {
- .aarch64 => 1_991_934_771,
- .loongarch64 => 1_844_538_572,
- .powerpc64le => 1_793_035_059,
- .riscv64 => 2_459_003_289,
- .s390x => 1_781_248_409,
- .x86_64 => 977_192_550,
- else => 2_500_000_000,
- },
- .macos => switch (b.graph.host.result.cpu.arch) {
- .aarch64 => 2_062_393_344,
- else => 2_100_000_000,
- },
- .windows => switch (b.graph.host.result.cpu.arch) {
- .x86_64 => 1_953_087_488,
- else => 2_000_000_000,
- },
- else => 2_500_000_000,
- },
+ .max_rss = 2_500_000_000,
});
if (link_libc) {
unit_tests.root_module.link_libc = true;
@@ -762,7 +716,7 @@ pub fn build(b: *std.Build) !void {
}
const test_incremental_step = b.step("test-incremental", "Run the incremental compilation test cases");
- try tests.addIncrementalTests(b, test_incremental_step);
+ try tests.addIncrementalTests(b, test_incremental_step, test_filters);
if (!skip_test_incremental) test_step.dependOn(test_incremental_step);
if (tests.addLibcTests(b, .{
@@ -1153,10 +1107,13 @@ const CMakeConfig = struct {
const max_config_h_bytes = 1 * 1024 * 1024;
fn findConfigH(b: *std.Build, config_h_path_option: ?[]const u8) ?[]const u8 {
+ const io = b.graph.io;
+ const cwd: Io.Dir = .cwd();
+
if (config_h_path_option) |path| {
- var config_h_or_err = fs.cwd().openFile(path, .{});
+ var config_h_or_err = cwd.openFile(io, path, .{});
if (config_h_or_err) |*file| {
- file.close();
+ file.close(io);
return path;
} else |_| {
std.log.err("Could not open provided config.h: \"{s}\"", .{path});
@@ -1166,13 +1123,13 @@ fn findConfigH(b: *std.Build, config_h_path_option: ?[]const u8) ?[]const u8 {
var check_dir = fs.path.dirname(b.graph.zig_exe).?;
while (true) {
- var dir = fs.cwd().openDir(check_dir, .{}) catch unreachable;
- defer dir.close();
+ var dir = cwd.openDir(io, check_dir, .{}) catch unreachable;
+ defer dir.close(io);
// Check if config.h is present in dir
- var config_h_or_err = dir.openFile("config.h", .{});
+ var config_h_or_err = dir.openFile(io, "config.h", .{});
if (config_h_or_err) |*file| {
- file.close();
+ file.close(io);
return fs.path.join(
b.allocator,
&[_][]const u8{ check_dir, "config.h" },
@@ -1183,9 +1140,9 @@ fn findConfigH(b: *std.Build, config_h_path_option: ?[]const u8) ?[]const u8 {
}
// Check if we reached the source root by looking for .git, and bail if so
- var git_dir_or_err = dir.openDir(".git", .{});
+ var git_dir_or_err = dir.openDir(io, ".git", .{});
if (git_dir_or_err) |*git_dir| {
- git_dir.close();
+ git_dir.close(io);
return null;
} else |_| {}
@@ -1581,6 +1538,8 @@ const llvm_libs_xtensa = [_][]const u8{
};
fn generateLangRef(b: *std.Build) std.Build.LazyPath {
+ const io = b.graph.io;
+
const doctest_exe = b.addExecutable(.{
.name = "doctest",
.root_module = b.createModule(.{
@@ -1590,17 +1549,17 @@ fn generateLangRef(b: *std.Build) std.Build.LazyPath {
}),
});
- var dir = b.build_root.handle.openDir("doc/langref", .{ .iterate = true }) catch |err| {
+ var dir = b.build_root.handle.openDir(io, "doc/langref", .{ .iterate = true }) catch |err| {
std.debug.panic("unable to open '{f}doc/langref' directory: {s}", .{
b.build_root, @errorName(err),
});
};
- defer dir.close();
+ defer dir.close(io);
var wf = b.addWriteFiles();
var it = dir.iterateAssumeFirstIteration();
- while (it.next() catch @panic("failed to read dir")) |entry| {
+ while (it.next(io) catch @panic("failed to read dir")) |entry| {
if (std.mem.startsWith(u8, entry.name, ".") or entry.kind != .file)
continue;
diff --git a/doc/langref/bad_default_value.zig b/doc/langref/bad_default_value.zig
index df38209c49..c80374a333 100644
--- a/doc/langref/bad_default_value.zig
+++ b/doc/langref/bad_default_value.zig
@@ -17,7 +17,7 @@ pub fn main() !void {
.maximum = 0.20,
};
const category = threshold.categorize(0.90);
- try std.fs.File.stdout().writeAll(@tagName(category));
+ std.log.info("category: {t}", .{category});
}
const std = @import("std");
diff --git a/doc/langref/hello.zig b/doc/langref/hello.zig
index 27ea1f689a..3fc2fb98d5 100644
--- a/doc/langref/hello.zig
+++ b/doc/langref/hello.zig
@@ -1,7 +1,17 @@
const std = @import("std");
+// See https://github.com/ziglang/zig/issues/24510
+// for the plan to simplify this code.
pub fn main() !void {
- try std.fs.File.stdout().writeAll("Hello, World!\n");
+ var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
+ defer _ = debug_allocator.deinit();
+ const gpa = debug_allocator.allocator();
+
+ var threaded: std.Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
+ try std.Io.File.stdout().writeStreamingAll(io, "Hello, World!\n");
}
// exe=succeed
diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig
index d5f4ebe2d9..aec780af02 100644
--- a/lib/compiler/aro/aro/Compilation.zig
+++ b/lib/compiler/aro/aro/Compilation.zig
@@ -154,7 +154,7 @@ gpa: Allocator,
/// Allocations in this arena live all the way until `Compilation.deinit`.
arena: Allocator,
io: Io,
-cwd: std.fs.Dir,
+cwd: Io.Dir,
diagnostics: *Diagnostics,
sources: std.StringArrayHashMapUnmanaged(Source) = .empty,
@@ -181,7 +181,7 @@ pragma_handlers: std.StringArrayHashMapUnmanaged(*Pragma) = .empty,
/// Used by MS extensions which allow searching for includes relative to the directory of the main source file.
ms_cwd_source_id: ?Source.Id = null,
-pub fn init(gpa: Allocator, arena: Allocator, io: Io, diagnostics: *Diagnostics, cwd: std.fs.Dir) Compilation {
+pub fn init(gpa: Allocator, arena: Allocator, io: Io, diagnostics: *Diagnostics, cwd: Io.Dir) Compilation {
return .{
.gpa = gpa,
.arena = arena,
@@ -193,7 +193,7 @@ pub fn init(gpa: Allocator, arena: Allocator, io: Io, diagnostics: *Diagnostics,
/// Initialize Compilation with default environment,
/// pragma handlers and emulation mode set to target.
-pub fn initDefault(gpa: Allocator, arena: Allocator, io: Io, diagnostics: *Diagnostics, cwd: std.fs.Dir) !Compilation {
+pub fn initDefault(gpa: Allocator, arena: Allocator, io: Io, diagnostics: *Diagnostics, cwd: Io.Dir) !Compilation {
var comp: Compilation = .{
.gpa = gpa,
.arena = arena,
@@ -1639,12 +1639,14 @@ fn addSourceFromPathExtra(comp: *Compilation, path: []const u8, kind: Source.Kin
return error.FileNotFound;
}
- const file = try comp.cwd.openFile(path, .{});
- defer file.close();
+ const io = comp.io;
+
+ const file = try comp.cwd.openFile(io, path, .{});
+ defer file.close(io);
return comp.addSourceFromFile(file, path, kind);
}
-pub fn addSourceFromFile(comp: *Compilation, file: std.fs.File, path: []const u8, kind: Source.Kind) !Source {
+pub fn addSourceFromFile(comp: *Compilation, file: Io.File, path: []const u8, kind: Source.Kind) !Source {
const contents = try comp.getFileContents(file, .unlimited);
errdefer comp.gpa.free(contents);
return comp.addSourceFromOwnedBuffer(path, contents, kind);
@@ -1711,7 +1713,8 @@ pub fn initSearchPath(comp: *Compilation, includes: []const Include, verbose: bo
}
}
fn addToSearchPath(comp: *Compilation, include: Include, verbose: bool) !void {
- comp.cwd.access(include.path, .{}) catch {
+ const io = comp.io;
+ comp.cwd.access(io, include.path, .{}) catch {
if (verbose) {
std.debug.print("ignoring nonexistent directory \"{s}\"\n", .{include.path});
return;
@@ -1971,12 +1974,14 @@ fn getPathContents(comp: *Compilation, path: []const u8, limit: Io.Limit) ![]u8
return error.FileNotFound;
}
- const file = try comp.cwd.openFile(path, .{});
- defer file.close();
+ const io = comp.io;
+
+ const file = try comp.cwd.openFile(io, path, .{});
+ defer file.close(io);
return comp.getFileContents(file, limit);
}
-fn getFileContents(comp: *Compilation, file: std.fs.File, limit: Io.Limit) ![]u8 {
+fn getFileContents(comp: *Compilation, file: Io.File, limit: Io.Limit) ![]u8 {
var file_buf: [4096]u8 = undefined;
var file_reader = file.reader(comp.io, &file_buf);
@@ -2158,8 +2163,9 @@ pub fn locSlice(comp: *const Compilation, loc: Source.Location) []const u8 {
}
pub fn getSourceMTimeUncached(comp: *const Compilation, source_id: Source.Id) ?u64 {
+ const io = comp.io;
const source = comp.getSource(source_id);
- if (comp.cwd.statFile(source.path)) |stat| {
+ if (comp.cwd.statFile(io, source.path, .{})) |stat| {
return std.math.cast(u64, stat.mtime.toSeconds());
} else |_| {
return null;
@@ -2249,7 +2255,7 @@ test "addSourceFromBuffer" {
var arena: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer arena.deinit();
var diagnostics: Diagnostics = .{ .output = .ignore };
- var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd());
+ var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd());
defer comp.deinit();
const source = try comp.addSourceFromBuffer("path", str);
@@ -2263,7 +2269,7 @@ test "addSourceFromBuffer" {
var arena: std.heap.ArenaAllocator = .init(allocator);
defer arena.deinit();
var diagnostics: Diagnostics = .{ .output = .ignore };
- var comp = Compilation.init(allocator, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd());
+ var comp = Compilation.init(allocator, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd());
defer comp.deinit();
_ = try comp.addSourceFromBuffer("path", "spliced\\\nbuffer\n");
@@ -2309,7 +2315,7 @@ test "addSourceFromBuffer - exhaustive check for carriage return elimination" {
var buf: [alphabet.len]u8 = @splat(alphabet[0]);
var diagnostics: Diagnostics = .{ .output = .ignore };
- var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd());
+ var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd());
defer comp.deinit();
var source_count: u32 = 0;
@@ -2337,7 +2343,7 @@ test "ignore BOM at beginning of file" {
const Test = struct {
fn run(arena: Allocator, buf: []const u8) !void {
var diagnostics: Diagnostics = .{ .output = .ignore };
- var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, std.fs.cwd());
+ var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, Io.Dir.cwd());
defer comp.deinit();
const source = try comp.addSourceFromBuffer("file.c", buf);
diff --git a/lib/compiler/aro/aro/Diagnostics.zig b/lib/compiler/aro/aro/Diagnostics.zig
index 2d58a217ea..993b5d93af 100644
--- a/lib/compiler/aro/aro/Diagnostics.zig
+++ b/lib/compiler/aro/aro/Diagnostics.zig
@@ -24,20 +24,21 @@ pub const Message = struct {
@"fatal error",
};
- pub fn write(msg: Message, w: *std.Io.Writer, config: std.Io.tty.Config, details: bool) std.Io.tty.Config.SetColorError!void {
- try config.setColor(w, .bold);
+ pub fn write(msg: Message, t: std.Io.Terminal, details: bool) std.Io.Terminal.SetColorError!void {
+ const w = t.writer;
+ try t.setColor(.bold);
if (msg.location) |loc| {
try w.print("{s}:{d}:{d}: ", .{ loc.path, loc.line_no, loc.col });
}
switch (msg.effective_kind) {
- .@"fatal error", .@"error" => try config.setColor(w, .bright_red),
- .note => try config.setColor(w, .bright_cyan),
- .warning => try config.setColor(w, .bright_magenta),
+ .@"fatal error", .@"error" => try t.setColor(.bright_red),
+ .note => try t.setColor(.bright_cyan),
+ .warning => try t.setColor(.bright_magenta),
.off => unreachable,
}
try w.print("{s}: ", .{@tagName(msg.effective_kind)});
- try config.setColor(w, .white);
+ try t.setColor(.white);
try w.writeAll(msg.text);
if (msg.opt) |some| {
if (msg.effective_kind == .@"error" and msg.kind != .@"error") {
@@ -55,17 +56,17 @@ pub const Message = struct {
if (!details or msg.location == null) {
try w.writeAll("\n");
- try config.setColor(w, .reset);
+ try t.setColor(.reset);
} else {
const loc = msg.location.?;
const trailer = if (loc.end_with_splice) "\\ " else "";
- try config.setColor(w, .reset);
+ try t.setColor(.reset);
try w.print("\n{s}{s}\n", .{ loc.line, trailer });
try w.splatByteAll(' ', loc.width);
- try config.setColor(w, .bold);
- try config.setColor(w, .bright_green);
+ try t.setColor(.bold);
+ try t.setColor(.bright_green);
try w.writeAll("^\n");
- try config.setColor(w, .reset);
+ try t.setColor(.reset);
}
try w.flush();
}
@@ -290,10 +291,7 @@ pub const State = struct {
const Diagnostics = @This();
output: union(enum) {
- to_writer: struct {
- writer: *std.Io.Writer,
- color: std.Io.tty.Config,
- },
+ to_writer: std.Io.Terminal,
to_list: struct {
messages: std.ArrayList(Message) = .empty,
arena: std.heap.ArenaAllocator,
@@ -543,11 +541,11 @@ fn addMessage(d: *Diagnostics, msg: Message) Compilation.Error!void {
switch (d.output) {
.ignore => {},
- .to_writer => |writer| {
- var config = writer.color;
- if (d.color == false) config = .no_color;
- if (d.color == true and config == .no_color) config = .escape_codes;
- msg.write(writer.writer, config, d.details) catch {
+ .to_writer => |t| {
+ var new_mode = t.mode;
+ if (d.color == false) new_mode = .no_color;
+ if (d.color == true and new_mode == .no_color) new_mode = .escape_codes;
+ msg.write(.{ .writer = t.writer, .mode = new_mode }, d.details) catch {
return error.FatalError;
};
},
diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig
index 6a399ece39..340a35bdde 100644
--- a/lib/compiler/aro/aro/Driver.zig
+++ b/lib/compiler/aro/aro/Driver.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
const Allocator = mem.Allocator;
const process = std.process;
@@ -133,8 +134,9 @@ strip: bool = false,
unwindlib: ?[]const u8 = null,
pub fn deinit(d: *Driver) void {
+ const io = d.comp.io;
for (d.link_objects.items[d.link_objects.items.len - d.temp_file_count ..]) |obj| {
- std.fs.deleteFileAbsolute(obj) catch {};
+ Io.Dir.deleteFileAbsolute(io, obj) catch {};
d.comp.gpa.free(obj);
}
d.inputs.deinit(d.comp.gpa);
@@ -1061,7 +1063,7 @@ pub fn printDiagnosticsStats(d: *Driver) void {
}
}
-pub fn detectConfig(d: *Driver, file: std.fs.File) std.Io.tty.Config {
+pub fn detectConfig(d: *Driver, file: Io.File) std.Io.tty.Config {
if (d.diagnostics.color == false) return .no_color;
const force_color = d.diagnostics.color == true;
@@ -1109,7 +1111,7 @@ pub fn main(d: *Driver, tc: *Toolchain, args: []const []const u8, comptime fast_
defer macro_buf.deinit(d.comp.gpa);
var stdout_buf: [256]u8 = undefined;
- var stdout = std.fs.File.stdout().writer(&stdout_buf);
+ var stdout = Io.File.stdout().writer(&stdout_buf);
if (parseArgs(d, &stdout.interface, &macro_buf, args) catch |er| switch (er) {
error.WriteFailed => return d.fatal("failed to write to stdout: {s}", .{errorDescription(er)}),
error.OutOfMemory => return error.OutOfMemory,
@@ -1286,6 +1288,8 @@ fn processSource(
d.comp.generated_buf.items.len = 0;
const prev_total = d.diagnostics.errors;
+ const io = d.comp.io;
+
var pp = try Preprocessor.initDefault(d.comp);
defer pp.deinit();
@@ -1324,13 +1328,13 @@ fn processSource(
const dep_file_name = try d.getDepFileName(source, writer_buf[0..std.fs.max_name_bytes]);
const file = if (dep_file_name) |path|
- d.comp.cwd.createFile(path, .{}) catch |er|
+ d.comp.cwd.createFile(io, path, .{}) catch |er|
return d.fatal("unable to create dependency file '{s}': {s}", .{ path, errorDescription(er) })
else
- std.fs.File.stdout();
- defer if (dep_file_name != null) file.close();
+ Io.File.stdout();
+ defer if (dep_file_name != null) file.close(io);
- var file_writer = file.writer(&writer_buf);
+ var file_writer = file.writer(io, &writer_buf);
dep_file.write(&file_writer.interface) catch
return d.fatal("unable to write dependency file: {s}", .{errorDescription(file_writer.err.?)});
}
@@ -1349,13 +1353,13 @@ fn processSource(
}
const file = if (d.output_name) |some|
- d.comp.cwd.createFile(some, .{}) catch |er|
+ d.comp.cwd.createFile(io, some, .{}) catch |er|
return d.fatal("unable to create output file '{s}': {s}", .{ some, errorDescription(er) })
else
- std.fs.File.stdout();
- defer if (d.output_name != null) file.close();
+ Io.File.stdout();
+ defer if (d.output_name != null) file.close(io);
- var file_writer = file.writer(&writer_buf);
+ var file_writer = file.writer(io, &writer_buf);
pp.prettyPrintTokens(&file_writer.interface, dump_mode) catch
return d.fatal("unable to write result: {s}", .{errorDescription(file_writer.err.?)});
@@ -1367,7 +1371,7 @@ fn processSource(
defer tree.deinit();
if (d.verbose_ast) {
- var stdout = std.fs.File.stdout().writer(&writer_buf);
+ var stdout = Io.File.stdout().writer(&writer_buf);
tree.dump(d.detectConfig(stdout.file), &stdout.interface) catch {};
}
@@ -1402,9 +1406,9 @@ fn processSource(
defer assembly.deinit(gpa);
if (d.only_preprocess_and_compile) {
- const out_file = d.comp.cwd.createFile(out_file_name, .{}) catch |er|
+ const out_file = d.comp.cwd.createFile(io, out_file_name, .{}) catch |er|
return d.fatal("unable to create output file '{s}': {s}", .{ out_file_name, errorDescription(er) });
- defer out_file.close();
+ defer out_file.close(io);
assembly.writeToFile(out_file) catch |er|
return d.fatal("unable to write to output file '{s}': {s}", .{ out_file_name, errorDescription(er) });
@@ -1416,9 +1420,9 @@ fn processSource(
// then assemble to out_file_name
var assembly_name_buf: [std.fs.max_name_bytes]u8 = undefined;
const assembly_out_file_name = try d.getRandomFilename(&assembly_name_buf, ".s");
- const out_file = d.comp.cwd.createFile(assembly_out_file_name, .{}) catch |er|
+ const out_file = d.comp.cwd.createFile(io, assembly_out_file_name, .{}) catch |er|
return d.fatal("unable to create output file '{s}': {s}", .{ assembly_out_file_name, errorDescription(er) });
- defer out_file.close();
+ defer out_file.close(io);
assembly.writeToFile(out_file) catch |er|
return d.fatal("unable to write to output file '{s}': {s}", .{ assembly_out_file_name, errorDescription(er) });
try d.invokeAssembler(tc, assembly_out_file_name, out_file_name);
@@ -1431,7 +1435,7 @@ fn processSource(
defer ir.deinit(gpa);
if (d.verbose_ir) {
- var stdout = std.fs.File.stdout().writer(&writer_buf);
+ var stdout = Io.File.stdout().writer(&writer_buf);
ir.dump(gpa, d.detectConfig(stdout.file), &stdout.interface) catch {};
}
@@ -1452,11 +1456,11 @@ fn processSource(
};
defer obj.deinit();
- const out_file = d.comp.cwd.createFile(out_file_name, .{}) catch |er|
+ const out_file = d.comp.cwd.createFile(io, out_file_name, .{}) catch |er|
return d.fatal("unable to create output file '{s}': {s}", .{ out_file_name, errorDescription(er) });
- defer out_file.close();
+ defer out_file.close(io);
- var file_writer = out_file.writer(&writer_buf);
+ var file_writer = out_file.writer(io, &writer_buf);
obj.finish(&file_writer.interface) catch
return d.fatal("could not output to object file '{s}': {s}", .{ out_file_name, errorDescription(file_writer.err.?) });
}
@@ -1497,7 +1501,7 @@ pub fn invokeLinker(d: *Driver, tc: *Toolchain, comptime fast_exit: bool) Compil
if (d.verbose_linker_args) {
var stdout_buf: [4096]u8 = undefined;
- var stdout = std.fs.File.stdout().writer(&stdout_buf);
+ var stdout = Io.File.stdout().writer(&stdout_buf);
dumpLinkerArgs(&stdout.interface, argv.items) catch {
return d.fatal("unable to dump linker args: {s}", .{errorDescription(stdout.err.?)});
};
diff --git a/lib/compiler/aro/aro/Driver/Filesystem.zig b/lib/compiler/aro/aro/Driver/Filesystem.zig
index 87092cb235..c229dfd831 100644
--- a/lib/compiler/aro/aro/Driver/Filesystem.zig
+++ b/lib/compiler/aro/aro/Driver/Filesystem.zig
@@ -1,8 +1,10 @@
-const std = @import("std");
-const mem = std.mem;
const builtin = @import("builtin");
const is_windows = builtin.os.tag == .windows;
+const std = @import("std");
+const Io = std.Io;
+const mem = std.std.mem;
+
fn readFileFake(entries: []const Filesystem.Entry, path: []const u8, buf: []u8) ?[]const u8 {
@branchHint(.cold);
for (entries) |entry| {
@@ -55,8 +57,8 @@ fn existsFake(entries: []const Filesystem.Entry, path: []const u8) bool {
return false;
}
-fn canExecutePosix(path: []const u8) bool {
- std.posix.access(path, std.posix.X_OK) catch return false;
+fn canExecutePosix(io: Io, path: []const u8) bool {
+ Io.Dir.accessAbsolute(io, path, .{ .execute = true }) catch return false;
// Todo: ensure path is not a directory
return true;
}
@@ -96,7 +98,7 @@ fn findProgramByNamePosix(name: []const u8, path: ?[]const u8, buf: []u8) ?[]con
}
pub const Filesystem = union(enum) {
- real: std.fs.Dir,
+ real: std.Io.Dir,
fake: []const Entry,
const Entry = struct {
@@ -121,7 +123,7 @@ pub const Filesystem = union(enum) {
base: []const u8,
i: usize = 0,
- fn next(self: *@This()) !?std.fs.Dir.Entry {
+ fn next(self: *@This()) !?std.Io.Dir.Entry {
while (self.i < self.entries.len) {
const entry = self.entries[self.i];
self.i += 1;
@@ -130,7 +132,7 @@ pub const Filesystem = union(enum) {
const remaining = entry.path[self.base.len + 1 ..];
if (std.mem.indexOfScalar(u8, remaining, std.fs.path.sep) != null) continue;
const extension = std.fs.path.extension(remaining);
- const kind: std.fs.Dir.Entry.Kind = if (extension.len == 0) .directory else .file;
+ const kind: std.Io.Dir.Entry.Kind = if (extension.len == 0) .directory else .file;
return .{ .name = remaining, .kind = kind };
}
}
@@ -140,7 +142,7 @@ pub const Filesystem = union(enum) {
};
const Dir = union(enum) {
- dir: std.fs.Dir,
+ dir: std.Io.Dir,
fake: FakeDir,
pub fn iterate(self: Dir) Iterator {
@@ -150,19 +152,19 @@ pub const Filesystem = union(enum) {
};
}
- pub fn close(self: *Dir) void {
+ pub fn close(self: *Dir, io: Io) void {
switch (self.*) {
- .dir => |*d| d.close(),
+ .dir => |*d| d.close(io),
.fake => {},
}
}
};
const Iterator = union(enum) {
- iterator: std.fs.Dir.Iterator,
+ iterator: std.Io.Dir.Iterator,
fake: FakeDir.Iterator,
- pub fn next(self: *Iterator) std.fs.Dir.Iterator.Error!?std.fs.Dir.Entry {
+ pub fn next(self: *Iterator) std.Io.Dir.Iterator.Error!?std.Io.Dir.Entry {
return switch (self.*) {
.iterator => |*it| it.next(),
.fake => |*it| it.next(),
@@ -170,10 +172,10 @@ pub const Filesystem = union(enum) {
}
};
- pub fn exists(fs: Filesystem, path: []const u8) bool {
+ pub fn exists(fs: Filesystem, io: Io, path: []const u8) bool {
switch (fs) {
.real => |cwd| {
- cwd.access(path, .{}) catch return false;
+ cwd.access(io, path, .{}) catch return false;
return true;
},
.fake => |paths| return existsFake(paths, path),
@@ -208,11 +210,11 @@ pub const Filesystem = union(enum) {
/// Read the file at `path` into `buf`.
/// Returns null if any errors are encountered
/// Otherwise returns a slice of `buf`. If the file is larger than `buf` partial contents are returned
- pub fn readFile(fs: Filesystem, path: []const u8, buf: []u8) ?[]const u8 {
+ pub fn readFile(fs: Filesystem, io: Io, path: []const u8, buf: []u8) ?[]const u8 {
return switch (fs) {
.real => |cwd| {
- const file = cwd.openFile(path, .{}) catch return null;
- defer file.close();
+ const file = cwd.openFile(io, path, .{}) catch return null;
+ defer file.close(io);
const bytes_read = file.readAll(buf) catch return null;
return buf[0..bytes_read];
@@ -221,9 +223,9 @@ pub const Filesystem = union(enum) {
};
}
- pub fn openDir(fs: Filesystem, dir_name: []const u8) std.fs.Dir.OpenError!Dir {
+ pub fn openDir(fs: Filesystem, io: Io, dir_name: []const u8) std.Io.Dir.OpenError!Dir {
return switch (fs) {
- .real => |cwd| .{ .dir = try cwd.openDir(dir_name, .{ .access_sub_paths = false, .iterate = true }) },
+ .real => |cwd| .{ .dir = try cwd.openDir(io, dir_name, .{ .access_sub_paths = false, .iterate = true }) },
.fake => |entries| .{ .fake = .{ .entries = entries, .path = dir_name } },
};
}
diff --git a/lib/compiler/aro/aro/Parser.zig b/lib/compiler/aro/aro/Parser.zig
index 4a89e0d460..fc21ee4d0b 100644
--- a/lib/compiler/aro/aro/Parser.zig
+++ b/lib/compiler/aro/aro/Parser.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
const Allocator = mem.Allocator;
const assert = std.debug.assert;
@@ -211,7 +212,7 @@ fn checkIdentifierCodepointWarnings(p: *Parser, codepoint: u21, loc: Source.Loca
const prev_total = p.diagnostics.total;
var sf = std.heap.stackFallback(1024, p.comp.gpa);
- var allocating: std.Io.Writer.Allocating = .init(sf.get());
+ var allocating: Io.Writer.Allocating = .init(sf.get());
defer allocating.deinit();
if (!char_info.isC99IdChar(codepoint)) {
@@ -425,7 +426,7 @@ pub fn err(p: *Parser, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype)
if (p.diagnostics.effectiveKind(diagnostic) == .off) return;
var sf = std.heap.stackFallback(1024, p.comp.gpa);
- var allocating: std.Io.Writer.Allocating = .init(sf.get());
+ var allocating: Io.Writer.Allocating = .init(sf.get());
defer allocating.deinit();
p.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory;
@@ -447,7 +448,7 @@ pub fn err(p: *Parser, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype)
}, p.pp.expansionSlice(tok_i), true);
}
-fn formatArgs(p: *Parser, w: *std.Io.Writer, fmt: []const u8, args: anytype) !void {
+fn formatArgs(p: *Parser, w: *Io.Writer, fmt: []const u8, args: anytype) !void {
var i: usize = 0;
inline for (std.meta.fields(@TypeOf(args))) |arg_info| {
const arg = @field(args, arg_info.name);
@@ -476,13 +477,13 @@ fn formatArgs(p: *Parser, w: *std.Io.Writer, fmt: []const u8, args: anytype) !vo
try w.writeAll(fmt[i..]);
}
-fn formatTokenId(w: *std.Io.Writer, fmt: []const u8, tok_id: Tree.Token.Id) !usize {
+fn formatTokenId(w: *Io.Writer, fmt: []const u8, tok_id: Tree.Token.Id) !usize {
const i = Diagnostics.templateIndex(w, fmt, "{tok_id}");
try w.writeAll(tok_id.symbol());
return i;
}
-fn formatQualType(p: *Parser, w: *std.Io.Writer, fmt: []const u8, qt: QualType) !usize {
+fn formatQualType(p: *Parser, w: *Io.Writer, fmt: []const u8, qt: QualType) !usize {
const i = Diagnostics.templateIndex(w, fmt, "{qt}");
try w.writeByte('\'');
try qt.print(p.comp, w);
@@ -501,7 +502,7 @@ fn formatQualType(p: *Parser, w: *std.Io.Writer, fmt: []const u8, qt: QualType)
return i;
}
-fn formatResult(p: *Parser, w: *std.Io.Writer, fmt: []const u8, res: Result) !usize {
+fn formatResult(p: *Parser, w: *Io.Writer, fmt: []const u8, res: Result) !usize {
const i = Diagnostics.templateIndex(w, fmt, "{value}");
switch (res.val.opt_ref) {
.none => try w.writeAll("(none)"),
@@ -524,7 +525,7 @@ const Normalized = struct {
return .{ .str = str };
}
- pub fn format(ctx: Normalized, w: *std.Io.Writer, fmt: []const u8) !usize {
+ pub fn format(ctx: Normalized, w: *Io.Writer, fmt: []const u8) !usize {
const i = Diagnostics.templateIndex(w, fmt, "{normalized}");
var it: std.unicode.Utf8Iterator = .{
.bytes = ctx.str,
@@ -558,7 +559,7 @@ const Codepoint = struct {
return .{ .codepoint = codepoint };
}
- pub fn format(ctx: Codepoint, w: *std.Io.Writer, fmt: []const u8) !usize {
+ pub fn format(ctx: Codepoint, w: *Io.Writer, fmt: []const u8) !usize {
const i = Diagnostics.templateIndex(w, fmt, "{codepoint}");
try w.print("{X:0>4}", .{ctx.codepoint});
return i;
@@ -572,7 +573,7 @@ const Escaped = struct {
return .{ .str = str };
}
- pub fn format(ctx: Escaped, w: *std.Io.Writer, fmt: []const u8) !usize {
+ pub fn format(ctx: Escaped, w: *Io.Writer, fmt: []const u8) !usize {
const i = Diagnostics.templateIndex(w, fmt, "{s}");
try std.zig.stringEscape(ctx.str, w);
return i;
@@ -1453,7 +1454,7 @@ fn decl(p: *Parser) Error!bool {
return true;
}
-fn staticAssertMessage(p: *Parser, cond_node: Node.Index, maybe_message: ?Result, allocating: *std.Io.Writer.Allocating) !?[]const u8 {
+fn staticAssertMessage(p: *Parser, cond_node: Node.Index, maybe_message: ?Result, allocating: *Io.Writer.Allocating) !?[]const u8 {
const w = &allocating.writer;
const cond = cond_node.get(&p.tree);
@@ -1526,7 +1527,7 @@ fn staticAssert(p: *Parser) Error!bool {
} else {
if (!res.val.toBool(p.comp)) {
var sf = std.heap.stackFallback(1024, gpa);
- var allocating: std.Io.Writer.Allocating = .init(sf.get());
+ var allocating: Io.Writer.Allocating = .init(sf.get());
defer allocating.deinit();
if (p.staticAssertMessage(res_node, str, &allocating) catch return error.OutOfMemory) |message| {
@@ -9719,7 +9720,7 @@ fn primaryExpr(p: *Parser) Error!?Result {
qt = some.qt;
} else if (p.func.qt) |func_qt| {
var sf = std.heap.stackFallback(1024, gpa);
- var allocating: std.Io.Writer.Allocating = .init(sf.get());
+ var allocating: Io.Writer.Allocating = .init(sf.get());
defer allocating.deinit();
func_qt.printNamed(p.tokSlice(p.func.name), p.comp, &allocating.writer) catch return error.OutOfMemory;
@@ -10608,7 +10609,7 @@ test "Node locations" {
const arena = arena_state.allocator();
var diagnostics: Diagnostics = .{ .output = .ignore };
- var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, std.fs.cwd());
+ var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, Io.Dir.cwd());
defer comp.deinit();
const file = try comp.addSourceFromBuffer("file.c",
diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig
index 6bd1206aff..6e36703df1 100644
--- a/lib/compiler/aro/aro/Preprocessor.zig
+++ b/lib/compiler/aro/aro/Preprocessor.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
const Allocator = mem.Allocator;
const assert = std.debug.assert;
@@ -1064,11 +1065,13 @@ fn fatalNotFound(pp: *Preprocessor, tok: TokenWithExpansionLocs, filename: []con
fn verboseLog(pp: *Preprocessor, raw: RawToken, comptime fmt: []const u8, args: anytype) void {
@branchHint(.cold);
- const source = pp.comp.getSource(raw.source);
+ const comp = pp.comp;
+ const io = comp.io;
+ const source = comp.getSource(raw.source);
const line_col = source.lineCol(.{ .id = raw.source, .line = raw.line, .byte_offset = raw.start });
var stderr_buf: [4096]u8 = undefined;
- var stderr = std.fs.File.stderr().writer(&stderr_buf);
+ var stderr = Io.File.stderr().writer(io, &stderr_buf);
const w = &stderr.interface;
w.print("{s}:{d}:{d}: ", .{ source.path, line_col.line_no, line_col.col }) catch return;
@@ -3899,7 +3902,7 @@ test "Preserve pragma tokens sometimes" {
defer arena.deinit();
var diagnostics: Diagnostics = .{ .output = .ignore };
- var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd());
+ var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd());
defer comp.deinit();
try comp.addDefaultPragmaHandlers();
@@ -3966,7 +3969,7 @@ test "destringify" {
var arena: std.heap.ArenaAllocator = .init(gpa);
defer arena.deinit();
var diagnostics: Diagnostics = .{ .output = .ignore };
- var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd());
+ var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd());
defer comp.deinit();
var pp = Preprocessor.init(&comp, .default);
defer pp.deinit();
@@ -4029,7 +4032,7 @@ test "Include guards" {
const arena = arena_state.allocator();
var diagnostics: Diagnostics = .{ .output = .ignore };
- var comp = Compilation.init(gpa, arena, std.testing.io, &diagnostics, std.fs.cwd());
+ var comp = Compilation.init(gpa, arena, std.testing.io, &diagnostics, Io.Dir.cwd());
defer comp.deinit();
var pp = Preprocessor.init(&comp, .default);
defer pp.deinit();
diff --git a/lib/compiler/aro/aro/Tokenizer.zig b/lib/compiler/aro/aro/Tokenizer.zig
index c497c5ce82..198d49364a 100644
--- a/lib/compiler/aro/aro/Tokenizer.zig
+++ b/lib/compiler/aro/aro/Tokenizer.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const Compilation = @import("Compilation.zig");
@@ -2326,7 +2327,7 @@ test "Tokenizer fuzz test" {
fn testOne(_: @This(), input_bytes: []const u8) anyerror!void {
var arena: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer arena.deinit();
- var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, std.fs.cwd());
+ var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, Io.Dir.cwd());
defer comp.deinit();
const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes);
@@ -2351,7 +2352,7 @@ test "Tokenizer fuzz test" {
fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, langopts: ?LangOpts) !void {
var arena: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer arena.deinit();
- var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, std.fs.cwd());
+ var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, Io.Dir.cwd());
defer comp.deinit();
if (langopts) |provided| {
comp.langopts = provided;
diff --git a/lib/compiler/aro/aro/Toolchain.zig b/lib/compiler/aro/aro/Toolchain.zig
index 326278cc38..0aa9d76fc8 100644
--- a/lib/compiler/aro/aro/Toolchain.zig
+++ b/lib/compiler/aro/aro/Toolchain.zig
@@ -497,10 +497,11 @@ pub fn addBuiltinIncludeDir(tc: *const Toolchain) !void {
const comp = d.comp;
const gpa = comp.gpa;
const arena = comp.arena;
+ const io = comp.io;
try d.includes.ensureUnusedCapacity(gpa, 1);
if (d.resource_dir) |resource_dir| {
const path = try std.fs.path.join(arena, &.{ resource_dir, "include" });
- comp.cwd.access(path, .{}) catch {
+ comp.cwd.access(io, path, .{}) catch {
return d.fatal("Aro builtin headers not found in provided -resource-dir", .{});
};
d.includes.appendAssumeCapacity(.{ .kind = .system, .path = path });
@@ -508,10 +509,10 @@ pub fn addBuiltinIncludeDir(tc: *const Toolchain) !void {
}
var search_path = d.aro_name;
while (std.fs.path.dirname(search_path)) |dirname| : (search_path = dirname) {
- var base_dir = d.comp.cwd.openDir(dirname, .{}) catch continue;
- defer base_dir.close();
+ var base_dir = d.comp.cwd.openDir(io, dirname, .{}) catch continue;
+ defer base_dir.close(io);
- base_dir.access("include/stddef.h", .{}) catch continue;
+ base_dir.access(io, "include/stddef.h", .{}) catch continue;
const path = try std.fs.path.join(arena, &.{ dirname, "include" });
d.includes.appendAssumeCapacity(.{ .kind = .system, .path = path });
break;
@@ -523,12 +524,14 @@ pub fn addBuiltinIncludeDir(tc: *const Toolchain) !void {
/// Otherwise returns a slice of `buf`. If the file is larger than `buf` partial contents are returned
pub fn readFile(tc: *const Toolchain, path: []const u8, buf: []u8) ?[]const u8 {
const comp = tc.driver.comp;
- return comp.cwd.adaptToNewApi().readFile(comp.io, path, buf) catch null;
+ const io = comp.io;
+ return comp.cwd.readFile(io, path, buf) catch null;
}
pub fn exists(tc: *const Toolchain, path: []const u8) bool {
const comp = tc.driver.comp;
- comp.cwd.adaptToNewApi().access(comp.io, path, .{}) catch return false;
+ const io = comp.io;
+ comp.cwd.access(io, path, .{}) catch return false;
return true;
}
@@ -546,7 +549,8 @@ pub fn canExecute(tc: *const Toolchain, path: []const u8) bool {
}
const comp = tc.driver.comp;
- comp.cwd.adaptToNewApi().access(comp.io, path, .{ .execute = true }) catch return false;
+ const io = comp.io;
+ comp.cwd.access(io, path, .{ .execute = true }) catch return false;
// Todo: ensure path is not a directory
return true;
}
diff --git a/lib/compiler/aro/aro/Value.zig b/lib/compiler/aro/aro/Value.zig
index 25a2d1824f..14949ce03b 100644
--- a/lib/compiler/aro/aro/Value.zig
+++ b/lib/compiler/aro/aro/Value.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
@@ -80,7 +81,7 @@ test "minUnsignedBits" {
defer arena_state.deinit();
const arena = arena_state.allocator();
- var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, std.fs.cwd());
+ var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, Io.Dir.cwd());
defer comp.deinit();
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" });
comp.target = .fromZigTarget(try std.zig.system.resolveTargetQuery(std.testing.io, target_query));
@@ -119,7 +120,7 @@ test "minSignedBits" {
defer arena_state.deinit();
const arena = arena_state.allocator();
- var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, std.fs.cwd());
+ var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, Io.Dir.cwd());
defer comp.deinit();
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" });
comp.target = .fromZigTarget(try std.zig.system.resolveTargetQuery(std.testing.io, target_query));
@@ -1080,7 +1081,7 @@ const NestedPrint = union(enum) {
},
};
-pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void {
+pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w: *Io.Writer) Io.Writer.Error!void {
try w.writeByte('&');
try w.writeAll(base);
if (!offset.isZero(comp)) {
@@ -1089,7 +1090,7 @@ pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w
}
}
-pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!?NestedPrint {
+pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *Io.Writer) Io.Writer.Error!?NestedPrint {
if (qt.is(comp, .bool)) {
try w.writeAll(if (v.isZero(comp)) "false" else "true");
return null;
@@ -1116,7 +1117,7 @@ pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *std.Io.Writer
return null;
}
-pub fn printString(bytes: []const u8, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void {
+pub fn printString(bytes: []const u8, qt: QualType, comp: *const Compilation, w: *Io.Writer) Io.Writer.Error!void {
const size: Compilation.CharUnitSize = @enumFromInt(qt.childType(comp).sizeof(comp));
const without_null = bytes[0 .. bytes.len - @intFromEnum(size)];
try w.writeByte('"');
diff --git a/lib/compiler/aro/backend/Assembly.zig b/lib/compiler/aro/backend/Assembly.zig
index d0d14bdd77..4ec4860b51 100644
--- a/lib/compiler/aro/backend/Assembly.zig
+++ b/lib/compiler/aro/backend/Assembly.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
data: []const u8,
@@ -11,8 +12,8 @@ pub fn deinit(self: *const Assembly, gpa: Allocator) void {
gpa.free(self.text);
}
-pub fn writeToFile(self: Assembly, file: std.fs.File) !void {
- var file_writer = file.writer(&.{});
+pub fn writeToFile(self: Assembly, io: Io, file: Io.File) !void {
+ var file_writer = file.writer(io, &.{});
var buffers = [_][]const u8{ self.data, self.text };
try file_writer.interface.writeSplatAll(&buffers, 1);
diff --git a/lib/compiler/aro/main.zig b/lib/compiler/aro/main.zig
index d3655e43da..ca079c0e5f 100644
--- a/lib/compiler/aro/main.zig
+++ b/lib/compiler/aro/main.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const Allocator = mem.Allocator;
const mem = std.mem;
const process = std.process;
@@ -30,7 +31,7 @@ pub fn main() u8 {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -42,7 +43,7 @@ pub fn main() u8 {
return 1;
};
- const aro_name = std.fs.selfExePathAlloc(gpa) catch {
+ const aro_name = process.executablePathAlloc(io, gpa) catch {
std.debug.print("unable to find Aro executable path\n", .{});
if (fast_exit) process.exit(1);
return 1;
@@ -50,7 +51,7 @@ pub fn main() u8 {
defer gpa.free(aro_name);
var stderr_buf: [1024]u8 = undefined;
- var stderr = std.fs.File.stderr().writer(&stderr_buf);
+ var stderr = Io.File.stderr().writer(&stderr_buf);
var diagnostics: Diagnostics = .{
.output = .{ .to_writer = .{
.color = .detect(stderr.file),
@@ -58,7 +59,7 @@ pub fn main() u8 {
} },
};
- var comp = Compilation.initDefault(gpa, arena, io, &diagnostics, std.fs.cwd()) catch |er| switch (er) {
+ var comp = Compilation.initDefault(gpa, arena, io, &diagnostics, Io.Dir.cwd()) catch |er| switch (er) {
error.OutOfMemory => {
std.debug.print("out of memory\n", .{});
if (fast_exit) process.exit(1);
diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig
index eed48e79ad..135cf2dced 100644
--- a/lib/compiler/build_runner.zig
+++ b/lib/compiler/build_runner.zig
@@ -7,14 +7,13 @@ const assert = std.debug.assert;
const fmt = std.fmt;
const mem = std.mem;
const process = std.process;
-const File = std.fs.File;
+const File = std.Io.File;
const Step = std.Build.Step;
const Watch = std.Build.Watch;
const WebServer = std.Build.WebServer;
const Allocator = std.mem.Allocator;
const fatal = std.process.fatal;
const Writer = std.Io.Writer;
-const tty = std.Io.tty;
pub const root = @import("@build");
pub const dependencies = @import("@dependencies");
@@ -40,7 +39,7 @@ pub fn main() !void {
const args = try process.argsAlloc(arena);
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -53,24 +52,26 @@ pub fn main() !void {
const cache_root = nextArg(args, &arg_idx) orelse fatal("missing cache root directory path", .{});
const global_cache_root = nextArg(args, &arg_idx) orelse fatal("missing global cache root directory path", .{});
+ const cwd: Io.Dir = .cwd();
+
const zig_lib_directory: std.Build.Cache.Directory = .{
.path = zig_lib_dir,
- .handle = try std.fs.cwd().openDir(zig_lib_dir, .{}),
+ .handle = try cwd.openDir(io, zig_lib_dir, .{}),
};
const build_root_directory: std.Build.Cache.Directory = .{
.path = build_root,
- .handle = try std.fs.cwd().openDir(build_root, .{}),
+ .handle = try cwd.openDir(io, build_root, .{}),
};
const local_cache_directory: std.Build.Cache.Directory = .{
.path = cache_root,
- .handle = try std.fs.cwd().makeOpenPath(cache_root, .{}),
+ .handle = try cwd.createDirPathOpen(io, cache_root, .{}),
};
const global_cache_directory: std.Build.Cache.Directory = .{
.path = global_cache_root,
- .handle = try std.fs.cwd().makeOpenPath(global_cache_root, .{}),
+ .handle = try cwd.createDirPathOpen(io, global_cache_root, .{}),
};
var graph: std.Build.Graph = .{
@@ -79,7 +80,7 @@ pub fn main() !void {
.cache = .{
.io = io,
.gpa = arena,
- .manifest_dir = try local_cache_directory.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try local_cache_directory.handle.createDirPathOpen(io, "h", .{}),
},
.zig_exe = zig_exe,
.env_map = try process.getEnvMap(arena),
@@ -92,7 +93,7 @@ pub fn main() !void {
.time_report = false,
};
- graph.cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
+ graph.cache.addPrefix(.{ .path = null, .handle = cwd });
graph.cache.addPrefix(build_root_directory);
graph.cache.addPrefix(local_cache_directory);
graph.cache.addPrefix(global_cache_directory);
@@ -285,8 +286,8 @@ pub fn main() !void {
const next_arg = nextArg(args, &arg_idx) orelse
fatalWithHint("expected u16 after '{s}'", .{arg});
debounce_interval_ms = std.fmt.parseUnsigned(u16, next_arg, 0) catch |err| {
- fatal("unable to parse debounce interval '{s}' as unsigned 16-bit integer: {s}\n", .{
- next_arg, @errorName(err),
+ fatal("unable to parse debounce interval '{s}' as unsigned 16-bit integer: {t}\n", .{
+ next_arg, err,
});
};
} else if (mem.eql(u8, arg, "--webui")) {
@@ -428,14 +429,21 @@ pub fn main() !void {
}
}
+ const NO_COLOR = std.zig.EnvVar.NO_COLOR.isSet();
+ const CLICOLOR_FORCE = std.zig.EnvVar.CLICOLOR_FORCE.isSet();
+
+ graph.stderr_mode = switch (color) {
+ .auto => try .detect(io, .stderr(), NO_COLOR, CLICOLOR_FORCE),
+ .on => .escape_codes,
+ .off => .no_color,
+ };
+
if (webui_listen != null) {
if (watch) fatal("using '--webui' and '--watch' together is not yet supported; consider omitting '--watch' in favour of the web UI \"Rebuild\" button", .{});
if (builtin.single_threaded) fatal("'--webui' is not yet supported on single-threaded hosts", .{});
}
- const ttyconf = color.detectTtyConf();
-
- const main_progress_node = std.Progress.start(.{
+ const main_progress_node = std.Progress.start(io, .{
.disable_printing = (color == .off),
});
defer main_progress_node.end();
@@ -457,7 +465,7 @@ pub fn main() !void {
}
const s = std.fs.path.sep_str;
const tmp_sub_path = "tmp" ++ s ++ (output_tmp_nonce orelse fatal("missing -Z arg", .{}));
- local_cache_directory.handle.writeFile(.{
+ local_cache_directory.handle.writeFile(io, .{
.sub_path = tmp_sub_path,
.data = buffer.items,
.flags = .{ .exclusive = true },
@@ -476,14 +484,14 @@ pub fn main() !void {
validateSystemLibraryOptions(builder);
if (help_menu) {
- var w = initStdoutWriter();
+ var w = initStdoutWriter(io);
printUsage(builder, w) catch return stdout_writer_allocation.err.?;
w.flush() catch return stdout_writer_allocation.err.?;
return;
}
if (steps_menu) {
- var w = initStdoutWriter();
+ var w = initStdoutWriter(io);
printSteps(builder, w) catch return stdout_writer_allocation.err.?;
w.flush() catch return stdout_writer_allocation.err.?;
return;
@@ -507,8 +515,6 @@ pub fn main() !void {
.error_style = error_style,
.multiline_errors = multiline_errors,
.summary = summary orelse if (watch or webui_listen != null) .line else .failures,
-
- .ttyconf = ttyconf,
};
defer {
run.memory_blocked_steps.deinit(gpa);
@@ -522,10 +528,10 @@ pub fn main() !void {
prepare(arena, builder, targets.items, &run, graph.random_seed) catch |err| switch (err) {
error.DependencyLoopDetected => {
- // Perhaps in the future there could be an Advanced Options flag such as
- // --debug-build-runner-leaks which would make this code return instead of
- // calling exit.
- std.debug.lockStdErr();
+ // Perhaps in the future there could be an Advanced Options flag
+ // such as --debug-build-runner-leaks which would make this code
+ // return instead of calling exit.
+ _ = io.lockStderr(&.{}, graph.stderr_mode) catch {};
process.exit(1);
},
else => |e| return e,
@@ -543,7 +549,6 @@ pub fn main() !void {
if (builtin.single_threaded) unreachable; // `fatal` above
break :ws .init(.{
.gpa = gpa,
- .ttyconf = ttyconf,
.graph = &graph,
.all_steps = run.step_stack.keys(),
.root_prog_node = main_progress_node,
@@ -558,9 +563,9 @@ pub fn main() !void {
}
rebuild: while (true) : (if (run.error_style.clearOnUpdate()) {
- const bw, _ = std.debug.lockStderrWriter(&stdio_buffer_allocation);
- defer std.debug.unlockStderrWriter();
- try bw.writeAll("\x1B[2J\x1B[3J\x1B[H");
+ const stderr = try io.lockStderr(&stdio_buffer_allocation, graph.stderr_mode);
+ defer io.unlockStderr();
+ try stderr.file_writer.interface.writeAll("\x1B[2J\x1B[3J\x1B[H");
}) {
if (run.web_server) |*ws| ws.startBuild();
@@ -661,9 +666,6 @@ const Run = struct {
memory_blocked_steps: std.ArrayList(*Step),
/// Allocated into `gpa`.
step_stack: std.AutoArrayHashMapUnmanaged(*Step, void),
- /// Similar to the `tty.Config` returned by `std.debug.lockStderrWriter`,
- /// but also respects the '--color' flag.
- ttyconf: tty.Config,
claimed_rss: usize,
error_style: ErrorStyle,
@@ -737,7 +739,8 @@ fn runStepNames(
fuzz: ?std.Build.Fuzz.Mode,
) !void {
const gpa = run.gpa;
- const io = b.graph.io;
+ const graph = b.graph;
+ const io = graph.io;
const step_stack = &run.step_stack;
{
@@ -822,7 +825,7 @@ fn runStepNames(
}
if (@bitSizeOf(usize) != 64) {
// Current implementation depends on posix.mmap()'s second parameter, `length: usize`,
- // being compatible with `std.fs.getEndPos() u64`'s return value. This is not the case
+ // being compatible with file system's u64 return value. This is not the case
// on 32-bit platforms.
// Affects or affected by issues #5185, #22523, and #22464.
fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)});
@@ -837,7 +840,6 @@ fn runStepNames(
var f = std.Build.Fuzz.init(
gpa,
io,
- run.ttyconf,
step_stack.keys(),
parent_prog_node,
mode,
@@ -864,18 +866,19 @@ fn runStepNames(
.none => break :summary,
}
- const w, _ = std.debug.lockStderrWriter(&stdio_buffer_allocation);
- defer std.debug.unlockStderrWriter();
- const ttyconf = run.ttyconf;
+ const stderr = try io.lockStderr(&stdio_buffer_allocation, graph.stderr_mode);
+ defer io.unlockStderr();
+ const t = stderr.terminal();
+ const w = &stderr.file_writer.interface;
const total_count = success_count + failure_count + pending_count + skipped_count;
- ttyconf.setColor(w, .cyan) catch {};
- ttyconf.setColor(w, .bold) catch {};
+ t.setColor(.cyan) catch {};
+ t.setColor(.bold) catch {};
w.writeAll("Build Summary: ") catch {};
- ttyconf.setColor(w, .reset) catch {};
+ t.setColor(.reset) catch {};
w.print("{d}/{d} steps succeeded", .{ success_count, total_count }) catch {};
{
- ttyconf.setColor(w, .dim) catch {};
+ t.setColor(.dim) catch {};
var first = true;
if (skipped_count > 0) {
w.print("{s}{d} skipped", .{ if (first) " (" else ", ", skipped_count }) catch {};
@@ -886,12 +889,12 @@ fn runStepNames(
first = false;
}
if (!first) w.writeByte(')') catch {};
- ttyconf.setColor(w, .reset) catch {};
+ t.setColor(.reset) catch {};
}
if (test_count > 0) {
w.print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {};
- ttyconf.setColor(w, .dim) catch {};
+ t.setColor(.dim) catch {};
var first = true;
if (test_skip_count > 0) {
w.print("{s}{d} skipped", .{ if (first) " (" else ", ", test_skip_count }) catch {};
@@ -910,7 +913,7 @@ fn runStepNames(
first = false;
}
if (!first) w.writeByte(')') catch {};
- ttyconf.setColor(w, .reset) catch {};
+ t.setColor(.reset) catch {};
}
w.writeAll("\n") catch {};
@@ -924,7 +927,7 @@ fn runStepNames(
var print_node: PrintNode = .{ .parent = null };
if (step_names.len == 0) {
print_node.last = true;
- printTreeStep(b, b.default_step, run, w, ttyconf, &print_node, &step_stack_copy) catch {};
+ printTreeStep(b, b.default_step, run, t, &print_node, &step_stack_copy) catch {};
} else {
const last_index = if (run.summary == .all) b.top_level_steps.count() else blk: {
var i: usize = step_names.len;
@@ -943,7 +946,7 @@ fn runStepNames(
for (step_names, 0..) |step_name, i| {
const tls = b.top_level_steps.get(step_name).?;
print_node.last = i + 1 == last_index;
- printTreeStep(b, &tls.step, run, w, ttyconf, &print_node, &step_stack_copy) catch {};
+ printTreeStep(b, &tls.step, run, t, &print_node, &step_stack_copy) catch {};
}
}
w.writeByte('\n') catch {};
@@ -960,7 +963,7 @@ fn runStepNames(
if (run.error_style.verboseContext()) break :code 1; // failure; print build command
break :code 2; // failure; do not print build command
};
- std.debug.lockStdErr();
+ _ = io.lockStderr(&.{}, graph.stderr_mode) catch {};
process.exit(code);
}
@@ -969,33 +972,30 @@ const PrintNode = struct {
last: bool = false,
};
-fn printPrefix(node: *PrintNode, stderr: *Writer, ttyconf: tty.Config) !void {
+fn printPrefix(node: *PrintNode, stderr: Io.Terminal) !void {
const parent = node.parent orelse return;
+ const writer = stderr.writer;
if (parent.parent == null) return;
- try printPrefix(parent, stderr, ttyconf);
+ try printPrefix(parent, stderr);
if (parent.last) {
- try stderr.writeAll(" ");
+ try writer.writeAll(" ");
} else {
- try stderr.writeAll(switch (ttyconf) {
- .no_color, .windows_api => "| ",
+ try writer.writeAll(switch (stderr.mode) {
.escape_codes => "\x1B\x28\x30\x78\x1B\x28\x42 ", // │
+ else => "| ",
});
}
}
-fn printChildNodePrefix(stderr: *Writer, ttyconf: tty.Config) !void {
- try stderr.writeAll(switch (ttyconf) {
- .no_color, .windows_api => "+- ",
+fn printChildNodePrefix(stderr: Io.Terminal) !void {
+ try stderr.writer.writeAll(switch (stderr.mode) {
.escape_codes => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ", // └─
+ else => "+- ",
});
}
-fn printStepStatus(
- s: *Step,
- stderr: *Writer,
- ttyconf: tty.Config,
- run: *const Run,
-) !void {
+fn printStepStatus(s: *Step, stderr: Io.Terminal, run: *const Run) !void {
+ const writer = stderr.writer;
switch (s.state) {
.precheck_unstarted => unreachable,
.precheck_started => unreachable,
@@ -1003,139 +1003,135 @@ fn printStepStatus(
.running => unreachable,
.dependency_failure => {
- try ttyconf.setColor(stderr, .dim);
- try stderr.writeAll(" transitive failure\n");
- try ttyconf.setColor(stderr, .reset);
+ try stderr.setColor(.dim);
+ try writer.writeAll(" transitive failure\n");
+ try stderr.setColor(.reset);
},
.success => {
- try ttyconf.setColor(stderr, .green);
+ try stderr.setColor(.green);
if (s.result_cached) {
- try stderr.writeAll(" cached");
+ try writer.writeAll(" cached");
} else if (s.test_results.test_count > 0) {
const pass_count = s.test_results.passCount();
assert(s.test_results.test_count == pass_count + s.test_results.skip_count);
- try stderr.print(" {d} pass", .{pass_count});
+ try writer.print(" {d} pass", .{pass_count});
if (s.test_results.skip_count > 0) {
- try ttyconf.setColor(stderr, .reset);
- try stderr.writeAll(", ");
- try ttyconf.setColor(stderr, .yellow);
- try stderr.print("{d} skip", .{s.test_results.skip_count});
+ try stderr.setColor(.reset);
+ try writer.writeAll(", ");
+ try stderr.setColor(.yellow);
+ try writer.print("{d} skip", .{s.test_results.skip_count});
}
- try ttyconf.setColor(stderr, .reset);
- try stderr.print(" ({d} total)", .{s.test_results.test_count});
+ try stderr.setColor(.reset);
+ try writer.print(" ({d} total)", .{s.test_results.test_count});
} else {
- try stderr.writeAll(" success");
+ try writer.writeAll(" success");
}
- try ttyconf.setColor(stderr, .reset);
+ try stderr.setColor(.reset);
if (s.result_duration_ns) |ns| {
- try ttyconf.setColor(stderr, .dim);
+ try stderr.setColor(.dim);
if (ns >= std.time.ns_per_min) {
- try stderr.print(" {d}m", .{ns / std.time.ns_per_min});
+ try writer.print(" {d}m", .{ns / std.time.ns_per_min});
} else if (ns >= std.time.ns_per_s) {
- try stderr.print(" {d}s", .{ns / std.time.ns_per_s});
+ try writer.print(" {d}s", .{ns / std.time.ns_per_s});
} else if (ns >= std.time.ns_per_ms) {
- try stderr.print(" {d}ms", .{ns / std.time.ns_per_ms});
+ try writer.print(" {d}ms", .{ns / std.time.ns_per_ms});
} else if (ns >= std.time.ns_per_us) {
- try stderr.print(" {d}us", .{ns / std.time.ns_per_us});
+ try writer.print(" {d}us", .{ns / std.time.ns_per_us});
} else {
- try stderr.print(" {d}ns", .{ns});
+ try writer.print(" {d}ns", .{ns});
}
- try ttyconf.setColor(stderr, .reset);
+ try stderr.setColor(.reset);
}
if (s.result_peak_rss != 0) {
const rss = s.result_peak_rss;
- try ttyconf.setColor(stderr, .dim);
+ try stderr.setColor(.dim);
if (rss >= 1000_000_000) {
- try stderr.print(" MaxRSS:{d}G", .{rss / 1000_000_000});
+ try writer.print(" MaxRSS:{d}G", .{rss / 1000_000_000});
} else if (rss >= 1000_000) {
- try stderr.print(" MaxRSS:{d}M", .{rss / 1000_000});
+ try writer.print(" MaxRSS:{d}M", .{rss / 1000_000});
} else if (rss >= 1000) {
- try stderr.print(" MaxRSS:{d}K", .{rss / 1000});
+ try writer.print(" MaxRSS:{d}K", .{rss / 1000});
} else {
- try stderr.print(" MaxRSS:{d}B", .{rss});
+ try writer.print(" MaxRSS:{d}B", .{rss});
}
- try ttyconf.setColor(stderr, .reset);
+ try stderr.setColor(.reset);
}
- try stderr.writeAll("\n");
+ try writer.writeAll("\n");
},
.skipped, .skipped_oom => |skip| {
- try ttyconf.setColor(stderr, .yellow);
- try stderr.writeAll(" skipped");
+ try stderr.setColor(.yellow);
+ try writer.writeAll(" skipped");
if (skip == .skipped_oom) {
- try stderr.writeAll(" (not enough memory)");
- try ttyconf.setColor(stderr, .dim);
- try stderr.print(" upper bound of {d} exceeded runner limit ({d})", .{ s.max_rss, run.max_rss });
- try ttyconf.setColor(stderr, .yellow);
+ try writer.writeAll(" (not enough memory)");
+ try stderr.setColor(.dim);
+ try writer.print(" upper bound of {d} exceeded runner limit ({d})", .{ s.max_rss, run.max_rss });
+ try stderr.setColor(.yellow);
}
- try stderr.writeAll("\n");
- try ttyconf.setColor(stderr, .reset);
+ try writer.writeAll("\n");
+ try stderr.setColor(.reset);
},
.failure => {
- try printStepFailure(s, stderr, ttyconf, false);
- try ttyconf.setColor(stderr, .reset);
+ try printStepFailure(s, stderr, false);
+ try stderr.setColor(.reset);
},
}
}
-fn printStepFailure(
- s: *Step,
- stderr: *Writer,
- ttyconf: tty.Config,
- dim: bool,
-) !void {
+fn printStepFailure(s: *Step, stderr: Io.Terminal, dim: bool) !void {
+ const w = stderr.writer;
if (s.result_error_bundle.errorMessageCount() > 0) {
- try ttyconf.setColor(stderr, .red);
- try stderr.print(" {d} errors\n", .{
+ try stderr.setColor(.red);
+ try w.print(" {d} errors\n", .{
s.result_error_bundle.errorMessageCount(),
});
} else if (!s.test_results.isSuccess()) {
// These first values include all of the test "statuses". Every test is either passsed,
// skipped, failed, crashed, or timed out.
- try ttyconf.setColor(stderr, .green);
- try stderr.print(" {d} pass", .{s.test_results.passCount()});
- try ttyconf.setColor(stderr, .reset);
- if (dim) try ttyconf.setColor(stderr, .dim);
+ try stderr.setColor(.green);
+ try w.print(" {d} pass", .{s.test_results.passCount()});
+ try stderr.setColor(.reset);
+ if (dim) try stderr.setColor(.dim);
if (s.test_results.skip_count > 0) {
- try stderr.writeAll(", ");
- try ttyconf.setColor(stderr, .yellow);
- try stderr.print("{d} skip", .{s.test_results.skip_count});
- try ttyconf.setColor(stderr, .reset);
- if (dim) try ttyconf.setColor(stderr, .dim);
+ try w.writeAll(", ");
+ try stderr.setColor(.yellow);
+ try w.print("{d} skip", .{s.test_results.skip_count});
+ try stderr.setColor(.reset);
+ if (dim) try stderr.setColor(.dim);
}
if (s.test_results.fail_count > 0) {
- try stderr.writeAll(", ");
- try ttyconf.setColor(stderr, .red);
- try stderr.print("{d} fail", .{s.test_results.fail_count});
- try ttyconf.setColor(stderr, .reset);
- if (dim) try ttyconf.setColor(stderr, .dim);
+ try w.writeAll(", ");
+ try stderr.setColor(.red);
+ try w.print("{d} fail", .{s.test_results.fail_count});
+ try stderr.setColor(.reset);
+ if (dim) try stderr.setColor(.dim);
}
if (s.test_results.crash_count > 0) {
- try stderr.writeAll(", ");
- try ttyconf.setColor(stderr, .red);
- try stderr.print("{d} crash", .{s.test_results.crash_count});
- try ttyconf.setColor(stderr, .reset);
- if (dim) try ttyconf.setColor(stderr, .dim);
+ try w.writeAll(", ");
+ try stderr.setColor(.red);
+ try w.print("{d} crash", .{s.test_results.crash_count});
+ try stderr.setColor(.reset);
+ if (dim) try stderr.setColor(.dim);
}
if (s.test_results.timeout_count > 0) {
- try stderr.writeAll(", ");
- try ttyconf.setColor(stderr, .red);
- try stderr.print("{d} timeout", .{s.test_results.timeout_count});
- try ttyconf.setColor(stderr, .reset);
- if (dim) try ttyconf.setColor(stderr, .dim);
+ try w.writeAll(", ");
+ try stderr.setColor(.red);
+ try w.print("{d} timeout", .{s.test_results.timeout_count});
+ try stderr.setColor(.reset);
+ if (dim) try stderr.setColor(.dim);
}
- try stderr.print(" ({d} total)", .{s.test_results.test_count});
+ try w.print(" ({d} total)", .{s.test_results.test_count});
// Memory leaks are intentionally written after the total, because is isn't a test *status*,
// but just a flag that any tests -- even passed ones -- can have. We also use a different
// separator, so it looks like:
// 2 pass, 1 skip, 2 fail (5 total); 2 leaks
if (s.test_results.leak_count > 0) {
- try stderr.writeAll("; ");
- try ttyconf.setColor(stderr, .red);
- try stderr.print("{d} leaks", .{s.test_results.leak_count});
- try ttyconf.setColor(stderr, .reset);
- if (dim) try ttyconf.setColor(stderr, .dim);
+ try w.writeAll("; ");
+ try stderr.setColor(.red);
+ try w.print("{d} leaks", .{s.test_results.leak_count});
+ try stderr.setColor(.reset);
+ if (dim) try stderr.setColor(.dim);
}
// It's usually not helpful to know how many error logs there were because they tend to
@@ -1148,21 +1144,21 @@ fn printStepFailure(
break :show alt_results.isSuccess();
};
if (show_err_logs) {
- try stderr.writeAll("; ");
- try ttyconf.setColor(stderr, .red);
- try stderr.print("{d} error logs", .{s.test_results.log_err_count});
- try ttyconf.setColor(stderr, .reset);
- if (dim) try ttyconf.setColor(stderr, .dim);
+ try w.writeAll("; ");
+ try stderr.setColor(.red);
+ try w.print("{d} error logs", .{s.test_results.log_err_count});
+ try stderr.setColor(.reset);
+ if (dim) try stderr.setColor(.dim);
}
- try stderr.writeAll("\n");
+ try w.writeAll("\n");
} else if (s.result_error_msgs.items.len > 0) {
- try ttyconf.setColor(stderr, .red);
- try stderr.writeAll(" failure\n");
+ try stderr.setColor(.red);
+ try w.writeAll(" failure\n");
} else {
assert(s.result_stderr.len > 0);
- try ttyconf.setColor(stderr, .red);
- try stderr.writeAll(" stderr\n");
+ try stderr.setColor(.red);
+ try w.writeAll(" w\n");
}
}
@@ -1170,11 +1166,11 @@ fn printTreeStep(
b: *std.Build,
s: *Step,
run: *const Run,
- stderr: *Writer,
- ttyconf: tty.Config,
+ stderr: Io.Terminal,
parent_node: *PrintNode,
step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
) !void {
+ const writer = stderr.writer;
const first = step_stack.swapRemove(s);
const summary = run.summary;
const skip = switch (summary) {
@@ -1184,26 +1180,26 @@ fn printTreeStep(
.failures => s.state == .success,
};
if (skip) return;
- try printPrefix(parent_node, stderr, ttyconf);
+ try printPrefix(parent_node, stderr);
if (parent_node.parent != null) {
if (parent_node.last) {
- try printChildNodePrefix(stderr, ttyconf);
+ try printChildNodePrefix(stderr);
} else {
- try stderr.writeAll(switch (ttyconf) {
- .no_color, .windows_api => "+- ",
+ try writer.writeAll(switch (stderr.mode) {
.escape_codes => "\x1B\x28\x30\x74\x71\x1B\x28\x42 ", // ├─
+ else => "+- ",
});
}
}
- if (!first) try ttyconf.setColor(stderr, .dim);
+ if (!first) try stderr.setColor(.dim);
// dep_prefix omitted here because it is redundant with the tree.
- try stderr.writeAll(s.name);
+ try writer.writeAll(s.name);
if (first) {
- try printStepStatus(s, stderr, ttyconf, run);
+ try printStepStatus(s, stderr, run);
const last_index = if (summary == .all) s.dependencies.items.len -| 1 else blk: {
var i: usize = s.dependencies.items.len;
@@ -1225,17 +1221,17 @@ fn printTreeStep(
.parent = parent_node,
.last = i == last_index,
};
- try printTreeStep(b, dep, run, stderr, ttyconf, &print_node, step_stack);
+ try printTreeStep(b, dep, run, stderr, &print_node, step_stack);
}
} else {
if (s.dependencies.items.len == 0) {
- try stderr.writeAll(" (reused)\n");
+ try writer.writeAll(" (reused)\n");
} else {
- try stderr.print(" (+{d} more reused dependencies)\n", .{
+ try writer.print(" (+{d} more reused dependencies)\n", .{
s.dependencies.items.len,
});
}
- try ttyconf.setColor(stderr, .reset);
+ try stderr.setColor(.reset);
}
}
@@ -1306,7 +1302,8 @@ fn workerMakeOneStep(
prog_node: std.Progress.Node,
run: *Run,
) void {
- const io = b.graph.io;
+ const graph = b.graph;
+ const io = graph.io;
const gpa = run.gpa;
// First, check the conditions for running this step. If they are not met,
@@ -1366,7 +1363,6 @@ fn workerMakeOneStep(
.progress_node = sub_prog_node,
.watch = run.watch,
.web_server = if (run.web_server) |*ws| ws else null,
- .ttyconf = run.ttyconf,
.unit_test_timeout_ns = run.unit_test_timeout_ns,
.gpa = gpa,
});
@@ -1376,10 +1372,11 @@ fn workerMakeOneStep(
const show_error_msgs = s.result_error_msgs.items.len > 0;
const show_stderr = s.result_stderr.len > 0;
if (show_error_msgs or show_compile_errors or show_stderr) {
- const bw, _ = std.debug.lockStderrWriter(&stdio_buffer_allocation);
- defer std.debug.unlockStderrWriter();
- const ttyconf = run.ttyconf;
- printErrorMessages(gpa, s, .{}, bw, ttyconf, run.error_style, run.multiline_errors) catch {};
+ const stderr = io.lockStderr(&stdio_buffer_allocation, graph.stderr_mode) catch |err| switch (err) {
+ error.Canceled => return,
+ };
+ defer io.unlockStderr();
+ printErrorMessages(gpa, s, .{}, stderr.terminal(), run.error_style, run.multiline_errors) catch {};
}
handle_result: {
@@ -1446,11 +1443,11 @@ pub fn printErrorMessages(
gpa: Allocator,
failing_step: *Step,
options: std.zig.ErrorBundle.RenderOptions,
- stderr: *Writer,
- ttyconf: tty.Config,
+ stderr: Io.Terminal,
error_style: ErrorStyle,
multiline_errors: MultilineErrors,
) !void {
+ const writer = stderr.writer;
if (error_style.verboseContext()) {
// Provide context for where these error messages are coming from by
// printing the corresponding Step subtree.
@@ -1462,70 +1459,70 @@ pub fn printErrorMessages(
}
// Now, `step_stack` has the subtree that we want to print, in reverse order.
- try ttyconf.setColor(stderr, .dim);
+ try stderr.setColor(.dim);
var indent: usize = 0;
while (step_stack.pop()) |s| : (indent += 1) {
if (indent > 0) {
- try stderr.splatByteAll(' ', (indent - 1) * 3);
- try printChildNodePrefix(stderr, ttyconf);
+ try writer.splatByteAll(' ', (indent - 1) * 3);
+ try printChildNodePrefix(stderr);
}
- try stderr.writeAll(s.name);
+ try writer.writeAll(s.name);
if (s == failing_step) {
- try printStepFailure(s, stderr, ttyconf, true);
+ try printStepFailure(s, stderr, true);
} else {
- try stderr.writeAll("\n");
+ try writer.writeAll("\n");
}
}
- try ttyconf.setColor(stderr, .reset);
+ try stderr.setColor(.reset);
} else {
// Just print the failing step itself.
- try ttyconf.setColor(stderr, .dim);
- try stderr.writeAll(failing_step.name);
- try printStepFailure(failing_step, stderr, ttyconf, true);
- try ttyconf.setColor(stderr, .reset);
+ try stderr.setColor(.dim);
+ try writer.writeAll(failing_step.name);
+ try printStepFailure(failing_step, stderr, true);
+ try stderr.setColor(.reset);
}
if (failing_step.result_stderr.len > 0) {
- try stderr.writeAll(failing_step.result_stderr);
+ try writer.writeAll(failing_step.result_stderr);
if (!mem.endsWith(u8, failing_step.result_stderr, "\n")) {
- try stderr.writeAll("\n");
+ try writer.writeAll("\n");
}
}
- try failing_step.result_error_bundle.renderToWriter(options, stderr, ttyconf);
+ try failing_step.result_error_bundle.renderToTerminal(options, stderr);
for (failing_step.result_error_msgs.items) |msg| {
- try ttyconf.setColor(stderr, .red);
- try stderr.writeAll("error:");
- try ttyconf.setColor(stderr, .reset);
+ try stderr.setColor(.red);
+ try writer.writeAll("error:");
+ try stderr.setColor(.reset);
if (std.mem.indexOfScalar(u8, msg, '\n') == null) {
- try stderr.print(" {s}\n", .{msg});
+ try writer.print(" {s}\n", .{msg});
} else switch (multiline_errors) {
.indent => {
var it = std.mem.splitScalar(u8, msg, '\n');
- try stderr.print(" {s}\n", .{it.first()});
+ try writer.print(" {s}\n", .{it.first()});
while (it.next()) |line| {
- try stderr.print(" {s}\n", .{line});
+ try writer.print(" {s}\n", .{line});
}
},
- .newline => try stderr.print("\n{s}\n", .{msg}),
- .none => try stderr.print(" {s}\n", .{msg}),
+ .newline => try writer.print("\n{s}\n", .{msg}),
+ .none => try writer.print(" {s}\n", .{msg}),
}
}
if (error_style.verboseContext()) {
if (failing_step.result_failed_command) |cmd_str| {
- try ttyconf.setColor(stderr, .red);
- try stderr.writeAll("failed command: ");
- try ttyconf.setColor(stderr, .reset);
- try stderr.writeAll(cmd_str);
- try stderr.writeByte('\n');
+ try stderr.setColor(.red);
+ try writer.writeAll("failed command: ");
+ try stderr.setColor(.reset);
+ try writer.writeAll(cmd_str);
+ try writer.writeByte('\n');
}
}
- try stderr.writeByte('\n');
+ try writer.writeByte('\n');
}
fn printSteps(builder: *std.Build, w: *Writer) !void {
@@ -1843,9 +1840,9 @@ fn createModuleDependenciesForStep(step: *Step) Allocator.Error!void {
}
var stdio_buffer_allocation: [256]u8 = undefined;
-var stdout_writer_allocation: std.fs.File.Writer = undefined;
+var stdout_writer_allocation: Io.File.Writer = undefined;
-fn initStdoutWriter() *Writer {
- stdout_writer_allocation = std.fs.File.stdout().writerStreaming(&stdio_buffer_allocation);
+fn initStdoutWriter(io: Io) *Writer {
+ stdout_writer_allocation = Io.File.stdout().writerStreaming(io, &stdio_buffer_allocation);
return &stdout_writer_allocation.interface;
}
diff --git a/lib/compiler/libc.zig b/lib/compiler/libc.zig
index a18a7a0e06..eb4614c95d 100644
--- a/lib/compiler/libc.zig
+++ b/lib/compiler/libc.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
const LibCInstallation = std.zig.LibCInstallation;
@@ -29,7 +30,7 @@ pub fn main() !void {
const arena = arena_instance.allocator();
const gpa = arena;
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -39,7 +40,7 @@ pub fn main() !void {
var input_file: ?[]const u8 = null;
var target_arch_os_abi: []const u8 = "native";
var print_includes: bool = false;
- var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(&stdout_buffer);
const stdout = &stdout_writer.interface;
{
var i: usize = 2;
@@ -49,7 +50,7 @@ pub fn main() !void {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
try stdout.writeAll(usage_libc);
try stdout.flush();
- return std.process.cleanExit();
+ return std.process.cleanExit(io);
} else if (mem.eql(u8, arg, "-target")) {
if (i + 1 >= args.len) fatal("expected parameter after {s}", .{arg});
i += 1;
@@ -77,7 +78,7 @@ pub fn main() !void {
if (input_file) |libc_file| {
const libc = try arena.create(LibCInstallation);
libc.* = LibCInstallation.parse(arena, libc_file, &target) catch |err| {
- fatal("unable to parse libc file at path {s}: {s}", .{ libc_file, @errorName(err) });
+ fatal("unable to parse libc file at path {s}: {t}", .{ libc_file, err });
};
break :libc libc;
} else {
@@ -96,7 +97,7 @@ pub fn main() !void {
libc_installation,
) catch |err| {
const zig_target = try target.zigTriple(arena);
- fatal("unable to detect libc for target {s}: {s}", .{ zig_target, @errorName(err) });
+ fatal("unable to detect libc for target {s}: {t}", .{ zig_target, err });
};
if (libc_dirs.libc_include_dir_list.len == 0) {
@@ -109,24 +110,23 @@ pub fn main() !void {
try stdout.writeByte('\n');
}
try stdout.flush();
- return std.process.cleanExit();
+ return std.process.cleanExit(io);
}
if (input_file) |libc_file| {
var libc = LibCInstallation.parse(gpa, libc_file, &target) catch |err| {
- fatal("unable to parse libc file at path {s}: {s}", .{ libc_file, @errorName(err) });
+ fatal("unable to parse libc file at path {s}: {t}", .{ libc_file, err });
};
defer libc.deinit(gpa);
} else {
if (!target_query.canDetectLibC()) {
fatal("unable to detect libc for non-native target", .{});
}
- var libc = LibCInstallation.findNative(.{
- .allocator = gpa,
+ var libc = LibCInstallation.findNative(gpa, io, .{
.verbose = true,
.target = &target,
}) catch |err| {
- fatal("unable to detect native libc: {s}", .{@errorName(err)});
+ fatal("unable to detect native libc: {t}", .{err});
};
defer libc.deinit(gpa);
diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig
index 7cf0f14e42..77b60c9b37 100644
--- a/lib/compiler/objcopy.zig
+++ b/lib/compiler/objcopy.zig
@@ -1,12 +1,13 @@
const builtin = @import("builtin");
+
const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
const fs = std.fs;
const elf = std.elf;
const Allocator = std.mem.Allocator;
-const File = std.fs.File;
+const File = std.Io.File;
const assert = std.debug.assert;
-
const fatal = std.process.fatal;
const Server = std.zig.Server;
@@ -24,11 +25,15 @@ pub fn main() !void {
var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
const gpa = general_purpose_allocator.allocator();
+ var threaded: std.Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const args = try std.process.argsAlloc(arena);
- return cmdObjCopy(gpa, arena, args[1..]);
+ return cmdObjCopy(arena, io, args[1..]);
}
-fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
+fn cmdObjCopy(arena: Allocator, io: Io, args: []const []const u8) !void {
var i: usize = 0;
var opt_out_fmt: ?std.Target.ObjectFormat = null;
var opt_input: ?[]const u8 = null;
@@ -56,7 +61,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
fatal("unexpected positional argument: '{s}'", .{arg});
}
} else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- return std.fs.File.stdout().writeAll(usage);
+ return Io.File.stdout().writeStreamingAll(io, usage);
} else if (mem.eql(u8, arg, "-O") or mem.eql(u8, arg, "--output-target")) {
i += 1;
if (i >= args.len) fatal("expected another argument after '{s}'", .{arg});
@@ -147,16 +152,12 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
const input = opt_input orelse fatal("expected input parameter", .{});
const output = opt_output orelse fatal("expected output parameter", .{});
- var threaded: std.Io.Threaded = .init(gpa);
- defer threaded.deinit();
- const io = threaded.io();
-
- const input_file = fs.cwd().openFile(input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err });
- defer input_file.close();
+ const input_file = Io.Dir.cwd().openFile(io, input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err });
+ defer input_file.close(io);
- const stat = input_file.stat() catch |err| fatal("failed to stat {s}: {t}", .{ input, err });
+ const stat = input_file.stat(io) catch |err| fatal("failed to stat {s}: {t}", .{ input, err });
- var in: File.Reader = .initSize(input_file.adaptToNewApi(), io, &input_buffer, stat.size);
+ var in: File.Reader = .initSize(input_file, io, &input_buffer, stat.size);
const elf_hdr = std.elf.Header.read(&in.interface) catch |err| switch (err) {
error.ReadFailed => fatal("unable to read {s}: {t}", .{ input, in.err.? }),
@@ -177,12 +178,12 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
}
};
- const mode = if (out_fmt != .elf or only_keep_debug) fs.File.default_mode else stat.mode;
+ const permissions: Io.File.Permissions = if (out_fmt != .elf or only_keep_debug) .default_file else stat.permissions;
- var output_file = try fs.cwd().createFile(output, .{ .mode = mode });
- defer output_file.close();
+ var output_file = try Io.Dir.cwd().createFile(io, output, .{ .permissions = permissions });
+ defer output_file.close(io);
- var out = output_file.writer(&output_buffer);
+ var out = output_file.writer(io, &output_buffer);
switch (out_fmt) {
.hex, .raw => {
@@ -221,8 +222,8 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
try out.end();
if (listen) {
- var stdin_reader = fs.File.stdin().reader(io, &stdin_buffer);
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var stdin_reader = Io.File.stdin().reader(io, &stdin_buffer);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
var server = try Server.init(.{
.in = &stdin_reader.interface,
.out = &stdout_writer.interface,
@@ -234,7 +235,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
const hdr = try server.receiveMessage();
switch (hdr.tag) {
.exit => {
- return std.process.cleanExit();
+ return std.process.cleanExit(io);
},
.update => {
if (seen_update) fatal("zig objcopy only supports 1 update for now", .{});
@@ -249,7 +250,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
}
}
}
- return std.process.cleanExit();
+ return std.process.cleanExit(io);
}
const usage =
@@ -675,8 +676,9 @@ fn containsValidAddressRange(segments: []*BinaryElfSegment) bool {
}
fn padFile(out: *File.Writer, opt_size: ?u64) !void {
+ const io = out.io;
const size = opt_size orelse return;
- try out.file.setEndPos(size);
+ try out.file.setLength(io, size);
}
test "HexWriter.Record.Address has correct payload and checksum" {
diff --git a/lib/compiler/reduce.zig b/lib/compiler/reduce.zig
index 28305e801b..922031d008 100644
--- a/lib/compiler/reduce.zig
+++ b/lib/compiler/reduce.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
@@ -54,6 +55,10 @@ pub fn main() !void {
var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
const gpa = general_purpose_allocator.allocator();
+ var threaded: std.Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const args = try std.process.argsAlloc(arena);
var opt_checker_path: ?[]const u8 = null;
@@ -68,9 +73,9 @@ pub fn main() !void {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- const stdout = std.fs.File.stdout();
+ const stdout = Io.File.stdout();
try stdout.writeAll(usage);
- return std.process.cleanExit();
+ return std.process.cleanExit(io);
} else if (mem.eql(u8, arg, "--")) {
argv = args[i + 1 ..];
break;
@@ -87,9 +92,7 @@ pub fn main() !void {
if (i >= args.len) fatal("expected 32-bit integer after {s}", .{arg});
const next_arg = args[i];
seed = std.fmt.parseUnsigned(u32, next_arg, 0) catch |err| {
- fatal("unable to parse seed '{s}' as 32-bit integer: {s}", .{
- next_arg, @errorName(err),
- });
+ fatal("unable to parse seed '{s}' as 32-bit integer: {t}", .{ next_arg, err });
};
} else {
fatal("unrecognized parameter: '{s}'", .{arg});
@@ -120,7 +123,7 @@ pub fn main() !void {
var astgen_input: std.Io.Writer.Allocating = .init(gpa);
defer astgen_input.deinit();
- var tree = try parse(gpa, root_source_file_path);
+ var tree = try parse(gpa, io, root_source_file_path);
defer {
gpa.free(tree.source);
tree.deinit(gpa);
@@ -185,7 +188,7 @@ pub fn main() !void {
std.debug.print("{s} ", .{@tagName(t)});
}
std.debug.print("\n", .{});
- try transformationsToFixups(gpa, arena, root_source_file_path, this_set, &fixups);
+ try transformationsToFixups(gpa, arena, io, root_source_file_path, this_set, &fixups);
rendered.clearRetainingCapacity();
try tree.render(gpa, &rendered.writer, fixups);
@@ -232,16 +235,16 @@ pub fn main() !void {
}
}
- try std.fs.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() });
+ try Io.Dir.cwd().writeFile(io, .{ .sub_path = root_source_file_path, .data = rendered.written() });
// std.debug.print("trying this code:\n{s}\n", .{rendered.items});
const interestingness = try runCheck(arena, interestingness_argv.items);
- std.debug.print("{d} random transformations: {s}. {d}/{d}\n", .{
- subset_size, @tagName(interestingness), start_index, transformations.items.len,
+ std.debug.print("{d} random transformations: {t}. {d}/{d}\n", .{
+ subset_size, interestingness, start_index, transformations.items.len,
});
switch (interestingness) {
.interesting => {
- const new_tree = try parse(gpa, root_source_file_path);
+ const new_tree = try parse(gpa, io, root_source_file_path);
gpa.free(tree.source);
tree.deinit(gpa);
tree = new_tree;
@@ -273,12 +276,12 @@ pub fn main() !void {
fixups.clearRetainingCapacity();
rendered.clearRetainingCapacity();
try tree.render(gpa, &rendered.writer, fixups);
- try std.fs.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() });
+ try Io.Dir.cwd().writeFile(io, .{ .sub_path = root_source_file_path, .data = rendered.written() });
- return std.process.cleanExit();
+ return std.process.cleanExit(io);
}
std.debug.print("no more transformations found\n", .{});
- return std.process.cleanExit();
+ return std.process.cleanExit(io);
}
fn sortTransformations(transformations: []Walk.Transformation, rng: std.Random) void {
@@ -302,11 +305,8 @@ fn termToInteresting(term: std.process.Child.Term) Interestingness {
};
}
-fn runCheck(arena: std.mem.Allocator, argv: []const []const u8) !Interestingness {
- const result = try std.process.Child.run(.{
- .allocator = arena,
- .argv = argv,
- });
+fn runCheck(arena: Allocator, io: Io, argv: []const []const u8) !Interestingness {
+ const result = try std.process.Child.run(arena, io, .{ .argv = argv });
if (result.stderr.len != 0)
std.debug.print("{s}", .{result.stderr});
return termToInteresting(result.term);
@@ -315,6 +315,7 @@ fn runCheck(arena: std.mem.Allocator, argv: []const []const u8) !Interestingness
fn transformationsToFixups(
gpa: Allocator,
arena: Allocator,
+ io: Io,
root_source_file_path: []const u8,
transforms: []const Walk.Transformation,
fixups: *Ast.Render.Fixups,
@@ -352,7 +353,7 @@ fn transformationsToFixups(
inline_imported_file.imported_string,
});
defer gpa.free(full_imported_path);
- var other_file_ast = try parse(gpa, full_imported_path);
+ var other_file_ast = try parse(gpa, io, full_imported_path);
defer {
gpa.free(other_file_ast.source);
other_file_ast.deinit(gpa);
@@ -396,8 +397,9 @@ fn transformationsToFixups(
};
}
-fn parse(gpa: Allocator, file_path: []const u8) !Ast {
- const source_code = std.fs.cwd().readFileAllocOptions(
+fn parse(gpa: Allocator, io: Io, file_path: []const u8) !Ast {
+ const source_code = Io.Dir.cwd().readFileAllocOptions(
+ io,
file_path,
gpa,
.limited(std.math.maxInt(u32)),
diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig
index 59568e9cef..59843585a2 100644
--- a/lib/compiler/resinator/cli.zig
+++ b/lib/compiler/resinator/cli.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const code_pages = @import("code_pages.zig");
const SupportedCodePage = code_pages.SupportedCodePage;
const lang = @import("lang.zig");
@@ -124,15 +125,15 @@ pub const Diagnostics = struct {
try self.errors.append(self.allocator, error_details);
}
- pub fn renderToStdErr(self: *Diagnostics, args: []const []const u8) void {
- const stderr, const ttyconf = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- self.renderToWriter(args, stderr, ttyconf) catch return;
+ pub fn renderToStderr(self: *Diagnostics, io: Io, args: []const []const u8) Io.Cancelable!void {
+ const stderr = try io.lockStderr(&.{}, null);
+ defer io.unlockStderr();
+ self.renderToWriter(args, stderr.terminal()) catch return;
}
- pub fn renderToWriter(self: *Diagnostics, args: []const []const u8, writer: *std.Io.Writer, config: std.Io.tty.Config) !void {
+ pub fn renderToWriter(self: *Diagnostics, args: []const []const u8, t: Io.Terminal) !void {
for (self.errors.items) |err_details| {
- try renderErrorMessage(writer, config, err_details, args);
+ try renderErrorMessage(t, err_details, args);
}
}
@@ -169,7 +170,7 @@ pub const Options = struct {
coff_options: cvtres.CoffOptions = .{},
pub const IoSource = union(enum) {
- stdio: std.fs.File,
+ stdio: Io.File,
filename: []const u8,
};
pub const AutoIncludes = enum { any, msvc, gnu, none };
@@ -249,13 +250,13 @@ pub const Options = struct {
/// worlds' situation where we'll be compatible with most use-cases
/// of the .rc extension being omitted from the CLI args, but still
/// work fine if the file itself does not have an extension.
- pub fn maybeAppendRC(options: *Options, cwd: std.fs.Dir) !void {
+ pub fn maybeAppendRC(options: *Options, io: Io, cwd: Io.Dir) !void {
switch (options.input_source) {
.stdio => return,
.filename => {},
}
if (options.input_format == .rc and std.fs.path.extension(options.input_source.filename).len == 0) {
- cwd.access(options.input_source.filename, .{}) catch |err| switch (err) {
+ cwd.access(io, options.input_source.filename, .{}) catch |err| switch (err) {
error.FileNotFound => {
var filename_bytes = try options.allocator.alloc(u8, options.input_source.filename.len + 3);
@memcpy(filename_bytes[0..options.input_source.filename.len], options.input_source.filename);
@@ -418,7 +419,7 @@ pub const Arg = struct {
};
}
- pub fn looksLikeFilepath(self: Arg) bool {
+ pub fn looksLikeFilepath(self: Arg, io: Io) bool {
const meets_min_requirements = self.prefix == .slash and isSupportedInputExtension(std.fs.path.extension(self.full));
if (!meets_min_requirements) return false;
@@ -437,7 +438,7 @@ pub const Arg = struct {
// It's still possible for a file path to look like a /fo option but not actually
// be one, e.g. `/foo/bar.rc`. As a last ditch effort to reduce false negatives,
// check if the file path exists and, if so, then we ignore the 'could be /fo option'-ness
- std.fs.accessAbsolute(self.full, .{}) catch return false;
+ Io.Dir.accessAbsolute(io, self.full, .{}) catch return false;
return true;
}
@@ -489,7 +490,7 @@ pub const ParseError = error{ParseError} || Allocator.Error;
/// Note: Does not run `Options.maybeAppendRC` automatically. If that behavior is desired,
/// it must be called separately.
-pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagnostics) ParseError!Options {
+pub fn parse(allocator: Allocator, io: Io, args: []const []const u8, diagnostics: *Diagnostics) ParseError!Options {
var options = Options{ .allocator = allocator };
errdefer options.deinit();
@@ -529,7 +530,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
}
const args_remaining = args.len - arg_i;
- if (args_remaining <= 2 and arg.looksLikeFilepath()) {
+ if (args_remaining <= 2 and arg.looksLikeFilepath(io)) {
var err_details = Diagnostics.ErrorDetails{ .type = .note, .print_args = true, .arg_index = arg_i };
try err_details.msg.appendSlice(allocator, "this argument was inferred to be a filepath, so argument parsing was terminated");
try diagnostics.append(err_details);
@@ -1343,41 +1344,42 @@ test parsePercent {
try std.testing.expectError(error.InvalidFormat, parsePercent("~1"));
}
-pub fn renderErrorMessage(writer: *std.Io.Writer, config: std.Io.tty.Config, err_details: Diagnostics.ErrorDetails, args: []const []const u8) !void {
- try config.setColor(writer, .dim);
+pub fn renderErrorMessage(t: Io.Terminal, err_details: Diagnostics.ErrorDetails, args: []const []const u8) !void {
+ const writer = t.writer;
+ try t.setColor(.dim);
try writer.writeAll("<cli>");
- try config.setColor(writer, .reset);
- try config.setColor(writer, .bold);
+ try t.setColor(.reset);
+ try t.setColor(.bold);
try writer.writeAll(": ");
switch (err_details.type) {
.err => {
- try config.setColor(writer, .red);
+ try t.setColor(.red);
try writer.writeAll("error: ");
},
.warning => {
- try config.setColor(writer, .yellow);
+ try t.setColor(.yellow);
try writer.writeAll("warning: ");
},
.note => {
- try config.setColor(writer, .cyan);
+ try t.setColor(.cyan);
try writer.writeAll("note: ");
},
}
- try config.setColor(writer, .reset);
- try config.setColor(writer, .bold);
+ try t.setColor(.reset);
+ try t.setColor(.bold);
try writer.writeAll(err_details.msg.items);
try writer.writeByte('\n');
- try config.setColor(writer, .reset);
+ try t.setColor(.reset);
if (!err_details.print_args) {
try writer.writeByte('\n');
return;
}
- try config.setColor(writer, .dim);
+ try t.setColor(.dim);
const prefix = " ... ";
try writer.writeAll(prefix);
- try config.setColor(writer, .reset);
+ try t.setColor(.reset);
const arg_with_name = args[err_details.arg_index];
const prefix_slice = arg_with_name[0..err_details.arg_span.prefix_len];
@@ -1388,15 +1390,15 @@ pub fn renderErrorMessage(writer: *std.Io.Writer, config: std.Io.tty.Config, err
try writer.writeAll(prefix_slice);
if (before_name_slice.len > 0) {
- try config.setColor(writer, .dim);
+ try t.setColor(.dim);
try writer.writeAll(before_name_slice);
- try config.setColor(writer, .reset);
+ try t.setColor(.reset);
}
try writer.writeAll(name_slice);
if (after_name_slice.len > 0) {
- try config.setColor(writer, .dim);
+ try t.setColor(.dim);
try writer.writeAll(after_name_slice);
- try config.setColor(writer, .reset);
+ try t.setColor(.reset);
}
var next_arg_len: usize = 0;
@@ -1414,13 +1416,13 @@ pub fn renderErrorMessage(writer: *std.Io.Writer, config: std.Io.tty.Config, err
if (err_details.arg_span.value_offset >= arg_with_name.len) {
try writer.writeByte(' ');
}
- try config.setColor(writer, .dim);
+ try t.setColor(.dim);
try writer.writeAll(" ...");
- try config.setColor(writer, .reset);
+ try t.setColor(.reset);
}
try writer.writeByte('\n');
- try config.setColor(writer, .green);
+ try t.setColor(.green);
try writer.splatByteAll(' ', prefix.len);
// Special case for when the option is *only* a prefix (e.g. invalid option: -)
if (err_details.arg_span.prefix_len == arg_with_name.len) {
@@ -1446,7 +1448,7 @@ pub fn renderErrorMessage(writer: *std.Io.Writer, config: std.Io.tty.Config, err
}
}
try writer.writeByte('\n');
- try config.setColor(writer, .reset);
+ try t.setColor(.reset);
}
fn testParse(args: []const []const u8) !Options {
@@ -1991,6 +1993,8 @@ test "parse: input and output formats" {
}
test "maybeAppendRC" {
+ const io = std.testing.io;
+
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
@@ -2000,21 +2004,21 @@ test "maybeAppendRC" {
// Create the file so that it's found. In this scenario, .rc should not get
// appended.
- var file = try tmp.dir.createFile("foo", .{});
- file.close();
- try options.maybeAppendRC(tmp.dir);
+ var file = try tmp.dir.createFile(io, "foo", .{});
+ file.close(io);
+ try options.maybeAppendRC(io, tmp.dir);
try std.testing.expectEqualStrings("foo", options.input_source.filename);
// Now delete the file and try again. But this time change the input format
// to non-rc.
- try tmp.dir.deleteFile("foo");
+ try tmp.dir.deleteFile(io, "foo");
options.input_format = .res;
- try options.maybeAppendRC(tmp.dir);
+ try options.maybeAppendRC(io, tmp.dir);
try std.testing.expectEqualStrings("foo", options.input_source.filename);
// Finally, reset the input format to rc. Since the verbatim name is no longer found
// and the input filename does not have an extension, .rc should get appended.
options.input_format = .rc;
- try options.maybeAppendRC(tmp.dir);
+ try options.maybeAppendRC(io, tmp.dir);
try std.testing.expectEqualStrings("foo.rc", options.input_source.filename);
}
diff --git a/lib/compiler/resinator/compile.zig b/lib/compiler/resinator/compile.zig
index 08e161e505..4f75fca18a 100644
--- a/lib/compiler/resinator/compile.zig
+++ b/lib/compiler/resinator/compile.zig
@@ -34,7 +34,7 @@ const code_pages = @import("code_pages.zig");
const errors = @import("errors.zig");
pub const CompileOptions = struct {
- cwd: std.fs.Dir,
+ cwd: std.Io.Dir,
diagnostics: *Diagnostics,
source_mappings: ?*SourceMappings = null,
/// List of paths (absolute or relative to `cwd`) for every file that the resources within the .rc file depend on.
@@ -96,7 +96,7 @@ pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io
var search_dirs: std.ArrayList(SearchDir) = .empty;
defer {
for (search_dirs.items) |*search_dir| {
- search_dir.deinit(allocator);
+ search_dir.deinit(allocator, io);
}
search_dirs.deinit(allocator);
}
@@ -106,13 +106,13 @@ pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io
// If dirname returns null, then the root path will be the same as
// the cwd so we don't need to add it as a distinct search path.
if (std.fs.path.dirname(root_path)) |root_dir_path| {
- var root_dir = try options.cwd.openDir(root_dir_path, .{});
- errdefer root_dir.close();
+ var root_dir = try options.cwd.openDir(io, root_dir_path, .{});
+ errdefer root_dir.close(io);
try search_dirs.append(allocator, .{ .dir = root_dir, .path = try allocator.dupe(u8, root_dir_path) });
}
}
- // Re-open the passed in cwd since we want to be able to close it (std.fs.cwd() shouldn't be closed)
- const cwd_dir = options.cwd.openDir(".", .{}) catch |err| {
+ // Re-open the passed in cwd since we want to be able to close it (Io.Dir.cwd() shouldn't be closed)
+ const cwd_dir = options.cwd.openDir(io, ".", .{}) catch |err| {
try options.diagnostics.append(.{
.err = .failed_to_open_cwd,
.token = .{
@@ -132,19 +132,19 @@ pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io
};
try search_dirs.append(allocator, .{ .dir = cwd_dir, .path = null });
for (options.extra_include_paths) |extra_include_path| {
- var dir = openSearchPathDir(options.cwd, extra_include_path) catch {
+ var dir = openSearchPathDir(options.cwd, io, extra_include_path) catch {
// TODO: maybe a warning that the search path is skipped?
continue;
};
- errdefer dir.close();
+ errdefer dir.close(io);
try search_dirs.append(allocator, .{ .dir = dir, .path = try allocator.dupe(u8, extra_include_path) });
}
for (options.system_include_paths) |system_include_path| {
- var dir = openSearchPathDir(options.cwd, system_include_path) catch {
+ var dir = openSearchPathDir(options.cwd, io, system_include_path) catch {
// TODO: maybe a warning that the search path is skipped?
continue;
};
- errdefer dir.close();
+ errdefer dir.close(io);
try search_dirs.append(allocator, .{ .dir = dir, .path = try allocator.dupe(u8, system_include_path) });
}
if (!options.ignore_include_env_var) {
@@ -159,8 +159,8 @@ pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io
};
var it = std.mem.tokenizeScalar(u8, INCLUDE, delimiter);
while (it.next()) |search_path| {
- var dir = openSearchPathDir(options.cwd, search_path) catch continue;
- errdefer dir.close();
+ var dir = openSearchPathDir(options.cwd, io, search_path) catch continue;
+ errdefer dir.close(io);
try search_dirs.append(allocator, .{ .dir = dir, .path = try allocator.dupe(u8, search_path) });
}
}
@@ -196,7 +196,7 @@ pub const Compiler = struct {
arena: Allocator,
allocator: Allocator,
io: Io,
- cwd: std.fs.Dir,
+ cwd: std.Io.Dir,
state: State = .{},
diagnostics: *Diagnostics,
dependencies: ?*Dependencies,
@@ -388,7 +388,9 @@ pub const Compiler = struct {
/// matching file is invalid. That is, it does not do the `cmd` PATH searching
/// thing of continuing to look for matching files until it finds a valid
/// one if a matching file is invalid.
- fn searchForFile(self: *Compiler, path: []const u8) !std.fs.File {
+ fn searchForFile(self: *Compiler, path: []const u8) !std.Io.File {
+ const io = self.io;
+
// If the path is absolute, then it is not resolved relative to any search
// paths, so there's no point in checking them.
//
@@ -404,8 +406,8 @@ pub const Compiler = struct {
// `/test.bin` relative to include paths and instead only treats it as
// an absolute path.
if (std.fs.path.isAbsolute(path)) {
- const file = try utils.openFileNotDir(std.fs.cwd(), path, .{});
- errdefer file.close();
+ const file = try utils.openFileNotDir(Io.Dir.cwd(), io, path, .{});
+ errdefer file.close(io);
if (self.dependencies) |dependencies| {
const duped_path = try dependencies.allocator.dupe(u8, path);
@@ -414,10 +416,10 @@ pub const Compiler = struct {
}
}
- var first_error: ?(std.fs.File.OpenError || std.fs.File.StatError) = null;
+ var first_error: ?(std.Io.File.OpenError || std.Io.File.StatError) = null;
for (self.search_dirs) |search_dir| {
- if (utils.openFileNotDir(search_dir.dir, path, .{})) |file| {
- errdefer file.close();
+ if (utils.openFileNotDir(search_dir.dir, io, path, .{})) |file| {
+ errdefer file.close(io);
if (self.dependencies) |dependencies| {
const searched_file_path = try std.fs.path.join(dependencies.allocator, &.{
@@ -587,7 +589,7 @@ pub const Compiler = struct {
});
},
};
- defer file_handle.close();
+ defer file_handle.close(io);
var file_buffer: [2048]u8 = undefined;
var file_reader = file_handle.reader(io, &file_buffer);
@@ -2892,13 +2894,13 @@ pub const Compiler = struct {
}
};
-pub const OpenSearchPathError = std.fs.Dir.OpenError;
+pub const OpenSearchPathError = std.Io.Dir.OpenError;
-fn openSearchPathDir(dir: std.fs.Dir, path: []const u8) OpenSearchPathError!std.fs.Dir {
+fn openSearchPathDir(dir: std.Io.Dir, io: Io, path: []const u8) OpenSearchPathError!std.Io.Dir {
// Validate the search path to avoid possible unreachable on invalid paths,
// see https://github.com/ziglang/zig/issues/15607 for why this is currently necessary.
try validateSearchPath(path);
- return dir.openDir(path, .{});
+ return dir.openDir(io, path, .{});
}
/// Very crude attempt at validating a path. This is imperfect
@@ -2927,11 +2929,11 @@ fn validateSearchPath(path: []const u8) error{BadPathName}!void {
}
pub const SearchDir = struct {
- dir: std.fs.Dir,
+ dir: std.Io.Dir,
path: ?[]const u8,
- pub fn deinit(self: *SearchDir, allocator: Allocator) void {
- self.dir.close();
+ pub fn deinit(self: *SearchDir, allocator: Allocator, io: Io) void {
+ self.dir.close(io);
if (self.path) |path| {
allocator.free(path);
}
diff --git a/lib/compiler/resinator/errors.zig b/lib/compiler/resinator/errors.zig
index 0060990ab6..4f41a5b56d 100644
--- a/lib/compiler/resinator/errors.zig
+++ b/lib/compiler/resinator/errors.zig
@@ -67,12 +67,12 @@ pub const Diagnostics = struct {
return @intCast(index);
}
- pub fn renderToStdErr(self: *Diagnostics, cwd: std.fs.Dir, source: []const u8, source_mappings: ?SourceMappings) void {
+ pub fn renderToStderr(self: *Diagnostics, cwd: Io.Dir, source: []const u8, source_mappings: ?SourceMappings) Io.Cancelable!void {
const io = self.io;
- const stderr, const ttyconf = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
+ const stderr = try io.lockStderr(&.{}, null);
+ defer io.unlockStderr();
for (self.errors.items) |err_details| {
- renderErrorMessage(io, stderr, ttyconf, cwd, err_details, source, self.strings.items, source_mappings) catch return;
+ renderErrorMessage(io, stderr.terminal(), cwd, err_details, source, self.strings.items, source_mappings) catch return;
}
}
@@ -169,9 +169,9 @@ pub const ErrorDetails = struct {
filename_string_index: FilenameStringIndex,
pub const FilenameStringIndex = std.meta.Int(.unsigned, 32 - @bitSizeOf(FileOpenErrorEnum));
- pub const FileOpenErrorEnum = std.meta.FieldEnum(std.fs.File.OpenError || std.fs.File.StatError);
+ pub const FileOpenErrorEnum = std.meta.FieldEnum(Io.File.OpenError || Io.File.StatError);
- pub fn enumFromError(err: (std.fs.File.OpenError || std.fs.File.StatError)) FileOpenErrorEnum {
+ pub fn enumFromError(err: (Io.File.OpenError || Io.File.StatError)) FileOpenErrorEnum {
return switch (err) {
inline else => |e| @field(ErrorDetails.FileOpenError.FileOpenErrorEnum, @errorName(e)),
};
@@ -901,9 +901,8 @@ const truncated_str = "<...truncated...>";
pub fn renderErrorMessage(
io: Io,
- writer: *std.Io.Writer,
- tty_config: std.Io.tty.Config,
- cwd: std.fs.Dir,
+ t: Io.Terminal,
+ cwd: Io.Dir,
err_details: ErrorDetails,
source: []const u8,
strings: []const []const u8,
@@ -927,36 +926,37 @@ pub fn renderErrorMessage(
const err_line = if (corresponding_span) |span| span.start_line else err_details.token.line_number;
- try tty_config.setColor(writer, .bold);
+ const writer = t.writer;
+ try t.setColor(.bold);
if (corresponding_file) |file| {
try writer.writeAll(file);
} else {
- try tty_config.setColor(writer, .dim);
+ try t.setColor(.dim);
try writer.writeAll("<after preprocessor>");
- try tty_config.setColor(writer, .reset);
- try tty_config.setColor(writer, .bold);
+ try t.setColor(.reset);
+ try t.setColor(.bold);
}
try writer.print(":{d}:{d}: ", .{ err_line, column });
switch (err_details.type) {
.err => {
- try tty_config.setColor(writer, .red);
+ try t.setColor(.red);
try writer.writeAll("error: ");
},
.warning => {
- try tty_config.setColor(writer, .yellow);
+ try t.setColor(.yellow);
try writer.writeAll("warning: ");
},
.note => {
- try tty_config.setColor(writer, .cyan);
+ try t.setColor(.cyan);
try writer.writeAll("note: ");
},
.hint => unreachable,
}
- try tty_config.setColor(writer, .reset);
- try tty_config.setColor(writer, .bold);
+ try t.setColor(.reset);
+ try t.setColor(.bold);
try err_details.render(writer, source, strings);
try writer.writeByte('\n');
- try tty_config.setColor(writer, .reset);
+ try t.setColor(.reset);
if (!err_details.print_source_line) {
try writer.writeByte('\n');
@@ -983,20 +983,20 @@ pub fn renderErrorMessage(
try writer.writeAll(source_line_for_display.line);
if (source_line_for_display.truncated) {
- try tty_config.setColor(writer, .dim);
+ try t.setColor(.dim);
try writer.writeAll(truncated_str);
- try tty_config.setColor(writer, .reset);
+ try t.setColor(.reset);
}
try writer.writeByte('\n');
- try tty_config.setColor(writer, .green);
+ try t.setColor(.green);
const num_spaces = truncated_visual_info.point_offset - truncated_visual_info.before_len;
try writer.splatByteAll(' ', num_spaces);
try writer.splatByteAll('~', truncated_visual_info.before_len);
try writer.writeByte('^');
try writer.splatByteAll('~', truncated_visual_info.after_len);
try writer.writeByte('\n');
- try tty_config.setColor(writer, .reset);
+ try t.setColor(.reset);
if (corresponding_span != null and corresponding_file != null) {
var worth_printing_lines: bool = true;
@@ -1021,22 +1021,22 @@ pub fn renderErrorMessage(
break :blk null;
},
};
- defer if (corresponding_lines) |*cl| cl.deinit();
+ defer if (corresponding_lines) |*cl| cl.deinit(io);
- try tty_config.setColor(writer, .bold);
+ try t.setColor(.bold);
if (corresponding_file) |file| {
try writer.writeAll(file);
} else {
- try tty_config.setColor(writer, .dim);
+ try t.setColor(.dim);
try writer.writeAll("<after preprocessor>");
- try tty_config.setColor(writer, .reset);
- try tty_config.setColor(writer, .bold);
+ try t.setColor(.reset);
+ try t.setColor(.bold);
}
try writer.print(":{d}:{d}: ", .{ err_line, column });
- try tty_config.setColor(writer, .cyan);
+ try t.setColor(.cyan);
try writer.writeAll("note: ");
- try tty_config.setColor(writer, .reset);
- try tty_config.setColor(writer, .bold);
+ try t.setColor(.reset);
+ try t.setColor(.bold);
try writer.writeAll("this line originated from line");
if (corresponding_span.?.start_line != corresponding_span.?.end_line) {
try writer.print("s {}-{}", .{ corresponding_span.?.start_line, corresponding_span.?.end_line });
@@ -1044,7 +1044,7 @@ pub fn renderErrorMessage(
try writer.print(" {}", .{corresponding_span.?.start_line});
}
try writer.print(" of file '{s}'\n", .{corresponding_file.?});
- try tty_config.setColor(writer, .reset);
+ try t.setColor(.reset);
if (!worth_printing_lines) return;
@@ -1055,21 +1055,21 @@ pub fn renderErrorMessage(
}) |display_line| {
try writer.writeAll(display_line.line);
if (display_line.truncated) {
- try tty_config.setColor(writer, .dim);
+ try t.setColor(.dim);
try writer.writeAll(truncated_str);
- try tty_config.setColor(writer, .reset);
+ try t.setColor(.reset);
}
try writer.writeByte('\n');
}
break :write_lines null;
};
if (write_lines_err) |err| {
- try tty_config.setColor(writer, .red);
+ try t.setColor(.red);
try writer.writeAll(" | ");
- try tty_config.setColor(writer, .reset);
- try tty_config.setColor(writer, .dim);
+ try t.setColor(.reset);
+ try t.setColor(.dim);
try writer.print("unable to print line(s) from file: {s}\n", .{@errorName(err)});
- try tty_config.setColor(writer, .reset);
+ try t.setColor(.reset);
}
try writer.writeByte('\n');
}
@@ -1094,13 +1094,13 @@ const CorrespondingLines = struct {
last_byte: u8 = 0,
at_eof: bool = false,
span: SourceMappings.CorrespondingSpan,
- file: std.fs.File,
- file_reader: std.fs.File.Reader,
+ file: Io.File,
+ file_reader: Io.File.Reader,
code_page: SupportedCodePage,
pub fn init(
io: Io,
- cwd: std.fs.Dir,
+ cwd: Io.Dir,
err_details: ErrorDetails,
line_for_comparison: []const u8,
corresponding_span: SourceMappings.CorrespondingSpan,
@@ -1120,12 +1120,12 @@ const CorrespondingLines = struct {
var corresponding_lines = CorrespondingLines{
.span = corresponding_span,
- .file = try utils.openFileNotDir(cwd, corresponding_file, .{}),
+ .file = try utils.openFileNotDir(cwd, io, corresponding_file, .{}),
.code_page = err_details.code_page,
.file_reader = undefined,
};
corresponding_lines.file_reader = corresponding_lines.file.reader(io, file_reader_buf);
- errdefer corresponding_lines.deinit();
+ errdefer corresponding_lines.deinit(io);
try corresponding_lines.writeLineFromStreamVerbatim(
&corresponding_lines.file_reader.interface,
@@ -1221,8 +1221,8 @@ const CorrespondingLines = struct {
};
}
- pub fn deinit(self: *CorrespondingLines) void {
- self.file.close();
+ pub fn deinit(self: *CorrespondingLines, io: Io) void {
+ self.file.close(io);
}
};
diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig
index 6d6819f45a..e286a5f4b9 100644
--- a/lib/compiler/resinator/main.zig
+++ b/lib/compiler/resinator/main.zig
@@ -24,6 +24,10 @@ pub fn main() !void {
defer std.debug.assert(debug_allocator.deinit() == .ok);
const gpa = debug_allocator.allocator();
+ var threaded: std.Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
var arena_state = std.heap.ArenaAllocator.init(gpa);
defer arena_state.deinit();
const arena = arena_state.allocator();
@@ -31,8 +35,8 @@ pub fn main() !void {
const args = try std.process.argsAlloc(arena);
if (args.len < 2) {
- const w, const ttyconf = std.debug.lockStderrWriter(&.{});
- try renderErrorMessage(w, ttyconf, .err, "expected zig lib dir as first argument", .{});
+ const stderr = try io.lockStderr(&.{}, null);
+ try renderErrorMessage(stderr.terminal(), .err, "expected zig lib dir as first argument", .{});
std.process.exit(1);
}
const zig_lib_dir = args[1];
@@ -45,7 +49,7 @@ pub fn main() !void {
}
var stdout_buffer: [1024]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
const stdout = &stdout_writer.interface;
var error_handler: ErrorHandler = switch (zig_integration) {
true => .{
@@ -60,35 +64,31 @@ pub fn main() !void {
var options = options: {
var cli_diagnostics = cli.Diagnostics.init(gpa);
defer cli_diagnostics.deinit();
- var options = cli.parse(gpa, cli_args, &cli_diagnostics) catch |err| switch (err) {
+ var options = cli.parse(gpa, io, cli_args, &cli_diagnostics) catch |err| switch (err) {
error.ParseError => {
- try error_handler.emitCliDiagnostics(gpa, cli_args, &cli_diagnostics);
+ try error_handler.emitCliDiagnostics(gpa, io, cli_args, &cli_diagnostics);
std.process.exit(1);
},
else => |e| return e,
};
- try options.maybeAppendRC(std.fs.cwd());
+ try options.maybeAppendRC(io, Io.Dir.cwd());
if (!zig_integration) {
// print any warnings/notes
- cli_diagnostics.renderToStdErr(cli_args);
+ try cli_diagnostics.renderToStderr(io, cli_args);
// If there was something printed, then add an extra newline separator
// so that there is a clear separation between the cli diagnostics and whatever
// gets printed after
if (cli_diagnostics.errors.items.len > 0) {
- const stderr, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- try stderr.writeByte('\n');
+ const stderr = try io.lockStderr(&.{}, null);
+ defer io.unlockStderr();
+ try stderr.file_writer.interface.writeByte('\n');
}
}
break :options options;
};
defer options.deinit();
- var threaded: std.Io.Threaded = .init(gpa);
- defer threaded.deinit();
- const io = threaded.io();
-
if (options.print_help_and_exit) {
try cli.writeUsage(stdout, "zig rc");
try stdout.flush();
@@ -130,18 +130,15 @@ pub fn main() !void {
var stderr_buf: [512]u8 = undefined;
var diagnostics: aro.Diagnostics = .{ .output = output: {
if (zig_integration) break :output .{ .to_list = .{ .arena = .init(gpa) } };
- const w, const ttyconf = std.debug.lockStderrWriter(&stderr_buf);
- break :output .{ .to_writer = .{
- .writer = w,
- .color = ttyconf,
- } };
+ const stderr = try io.lockStderr(&stderr_buf, null);
+ break :output .{ .to_writer = stderr.terminal() };
} };
defer {
diagnostics.deinit();
- if (!zig_integration) std.debug.unlockStderrWriter();
+ if (!zig_integration) std.debug.unlockStderr();
}
- var comp = aro.Compilation.init(aro_arena, aro_arena, io, &diagnostics, std.fs.cwd());
+ var comp = aro.Compilation.init(aro_arena, aro_arena, io, &diagnostics, Io.Dir.cwd());
defer comp.deinit();
var argv: std.ArrayList([]const u8) = .empty;
@@ -175,11 +172,11 @@ pub fn main() !void {
std.process.exit(1);
},
error.FileTooBig => {
- try error_handler.emitMessage(gpa, .err, "failed during preprocessing: maximum file size exceeded", .{});
+ try error_handler.emitMessage(gpa, io, .err, "failed during preprocessing: maximum file size exceeded", .{});
std.process.exit(1);
},
error.WriteFailed => {
- try error_handler.emitMessage(gpa, .err, "failed during preprocessing: error writing the preprocessed output", .{});
+ try error_handler.emitMessage(gpa, io, .err, "failed during preprocessing: error writing the preprocessed output", .{});
std.process.exit(1);
},
error.OutOfMemory => |e| return e,
@@ -191,13 +188,13 @@ pub fn main() !void {
.stdio => |file| {
var file_reader = file.reader(io, &.{});
break :full_input file_reader.interface.allocRemaining(gpa, .unlimited) catch |err| {
- try error_handler.emitMessage(gpa, .err, "unable to read input from stdin: {s}", .{@errorName(err)});
+ try error_handler.emitMessage(gpa, io, .err, "unable to read input from stdin: {s}", .{@errorName(err)});
std.process.exit(1);
};
},
.filename => |input_filename| {
- break :full_input std.fs.cwd().readFileAlloc(input_filename, gpa, .unlimited) catch |err| {
- try error_handler.emitMessage(gpa, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) });
+ break :full_input Io.Dir.cwd().readFileAlloc(io, input_filename, gpa, .unlimited) catch |err| {
+ try error_handler.emitMessage(gpa, io, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) });
std.process.exit(1);
};
},
@@ -209,10 +206,10 @@ pub fn main() !void {
if (options.preprocess == .only) {
switch (options.output_source) {
.stdio => |output_file| {
- try output_file.writeAll(full_input);
+ try output_file.writeStreamingAll(io, full_input);
},
.filename => |output_filename| {
- try std.fs.cwd().writeFile(.{ .sub_path = output_filename, .data = full_input });
+ try Io.Dir.cwd().writeFile(io, .{ .sub_path = output_filename, .data = full_input });
},
}
return;
@@ -227,16 +224,16 @@ pub fn main() !void {
.source = .{ .memory = .empty },
}
else if (options.input_format == .res)
- IoStream.fromIoSource(options.input_source, .input) catch |err| {
- try error_handler.emitMessage(gpa, .err, "unable to read res file path '{s}': {s}", .{ options.input_source.filename, @errorName(err) });
+ IoStream.fromIoSource(io, options.input_source, .input) catch |err| {
+ try error_handler.emitMessage(gpa, io, .err, "unable to read res file path '{s}': {s}", .{ options.input_source.filename, @errorName(err) });
std.process.exit(1);
}
else
- IoStream.fromIoSource(options.output_source, .output) catch |err| {
- try error_handler.emitMessage(gpa, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) });
+ IoStream.fromIoSource(io, options.output_source, .output) catch |err| {
+ try error_handler.emitMessage(gpa, io, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) });
std.process.exit(1);
};
- defer res_stream.deinit(gpa);
+ defer res_stream.deinit(gpa, io);
const res_data = res_data: {
if (options.input_format != .res) {
@@ -246,17 +243,17 @@ pub fn main() !void {
var mapping_results = parseAndRemoveLineCommands(gpa, full_input, full_input, .{ .initial_filename = options.input_source.filename }) catch |err| switch (err) {
error.InvalidLineCommand => {
// TODO: Maybe output the invalid line command
- try error_handler.emitMessage(gpa, .err, "invalid line command in the preprocessed source", .{});
+ try error_handler.emitMessage(gpa, io, .err, "invalid line command in the preprocessed source", .{});
if (options.preprocess == .no) {
- try error_handler.emitMessage(gpa, .note, "line commands must be of the format: #line <num> \"<path>\"", .{});
+ try error_handler.emitMessage(gpa, io, .note, "line commands must be of the format: #line <num> \"<path>\"", .{});
} else {
- try error_handler.emitMessage(gpa, .note, "this is likely to be a bug, please report it", .{});
+ try error_handler.emitMessage(gpa, io, .note, "this is likely to be a bug, please report it", .{});
}
std.process.exit(1);
},
error.LineNumberOverflow => {
// TODO: Better error message
- try error_handler.emitMessage(gpa, .err, "line number count exceeded maximum of {}", .{std.math.maxInt(usize)});
+ try error_handler.emitMessage(gpa, io, .err, "line number count exceeded maximum of {}", .{std.math.maxInt(usize)});
std.process.exit(1);
},
error.OutOfMemory => |e| return e,
@@ -272,12 +269,12 @@ pub fn main() !void {
defer diagnostics.deinit();
var output_buffer: [4096]u8 = undefined;
- var res_stream_writer = res_stream.source.writer(gpa, &output_buffer);
+ var res_stream_writer = res_stream.source.writer(gpa, io, &output_buffer);
defer res_stream_writer.deinit(&res_stream.source);
const output_buffered_stream = res_stream_writer.interface();
compile(gpa, io, final_input, output_buffered_stream, .{
- .cwd = std.fs.cwd(),
+ .cwd = Io.Dir.cwd(),
.diagnostics = &diagnostics,
.source_mappings = &mapping_results.mappings,
.dependencies = maybe_dependencies,
@@ -294,9 +291,9 @@ pub fn main() !void {
.warn_instead_of_error_on_invalid_code_page = options.warn_instead_of_error_on_invalid_code_page,
}) catch |err| switch (err) {
error.ParseError, error.CompileError => {
- try error_handler.emitDiagnostics(gpa, std.fs.cwd(), final_input, &diagnostics, mapping_results.mappings);
+ try error_handler.emitDiagnostics(gpa, Io.Dir.cwd(), final_input, &diagnostics, mapping_results.mappings);
// Delete the output file on error
- res_stream.cleanupAfterError();
+ res_stream.cleanupAfterError(io);
std.process.exit(1);
},
else => |e| return e,
@@ -306,19 +303,19 @@ pub fn main() !void {
// print any warnings/notes
if (!zig_integration) {
- diagnostics.renderToStdErr(std.fs.cwd(), final_input, mapping_results.mappings);
+ try diagnostics.renderToStderr(Io.Dir.cwd(), final_input, mapping_results.mappings);
}
// write the depfile
if (options.depfile_path) |depfile_path| {
- var depfile = std.fs.cwd().createFile(depfile_path, .{}) catch |err| {
- try error_handler.emitMessage(gpa, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) });
+ var depfile = Io.Dir.cwd().createFile(io, depfile_path, .{}) catch |err| {
+ try error_handler.emitMessage(gpa, io, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) });
std.process.exit(1);
};
- defer depfile.close();
+ defer depfile.close(io);
var depfile_buffer: [1024]u8 = undefined;
- var depfile_writer = depfile.writer(&depfile_buffer);
+ var depfile_writer = depfile.writer(io, &depfile_buffer);
switch (options.depfile_fmt) {
.json => {
var write_stream: std.json.Stringify = .{
@@ -340,7 +337,7 @@ pub fn main() !void {
if (options.output_format != .coff) return;
break :res_data res_stream.source.readAll(gpa, io) catch |err| {
- try error_handler.emitMessage(gpa, .err, "unable to read res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
+ try error_handler.emitMessage(gpa, io, .err, "unable to read res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
std.process.exit(1);
};
};
@@ -353,27 +350,27 @@ pub fn main() !void {
var res_reader: std.Io.Reader = .fixed(res_data.bytes);
break :resources cvtres.parseRes(gpa, &res_reader, .{ .max_size = res_data.bytes.len }) catch |err| {
// TODO: Better errors
- try error_handler.emitMessage(gpa, .err, "unable to parse res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
+ try error_handler.emitMessage(gpa, io, .err, "unable to parse res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
std.process.exit(1);
};
};
defer resources.deinit();
- var coff_stream = IoStream.fromIoSource(options.output_source, .output) catch |err| {
- try error_handler.emitMessage(gpa, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) });
+ var coff_stream = IoStream.fromIoSource(io, options.output_source, .output) catch |err| {
+ try error_handler.emitMessage(gpa, io, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) });
std.process.exit(1);
};
- defer coff_stream.deinit(gpa);
+ defer coff_stream.deinit(gpa, io);
var coff_output_buffer: [4096]u8 = undefined;
- var coff_output_buffered_stream = coff_stream.source.writer(gpa, &coff_output_buffer);
+ var coff_output_buffered_stream = coff_stream.source.writer(gpa, io, &coff_output_buffer);
var cvtres_diagnostics: cvtres.Diagnostics = .{ .none = {} };
cvtres.writeCoff(gpa, coff_output_buffered_stream.interface(), resources.list.items, options.coff_options, &cvtres_diagnostics) catch |err| {
switch (err) {
error.DuplicateResource => {
const duplicate_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
- try error_handler.emitMessage(gpa, .err, "duplicate resource [id: {f}, type: {f}, language: {f}]", .{
+ try error_handler.emitMessage(gpa, io, .err, "duplicate resource [id: {f}, type: {f}, language: {f}]", .{
duplicate_resource.name_value,
fmtResourceType(duplicate_resource.type_value),
duplicate_resource.language,
@@ -381,8 +378,8 @@ pub fn main() !void {
},
error.ResourceDataTooLong => {
const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
- try error_handler.emitMessage(gpa, .err, "resource has a data length that is too large to be written into a coff section", .{});
- try error_handler.emitMessage(gpa, .note, "the resource with the invalid size is [id: {f}, type: {f}, language: {f}]", .{
+ try error_handler.emitMessage(gpa, io, .err, "resource has a data length that is too large to be written into a coff section", .{});
+ try error_handler.emitMessage(gpa, io, .note, "the resource with the invalid size is [id: {f}, type: {f}, language: {f}]", .{
overflow_resource.name_value,
fmtResourceType(overflow_resource.type_value),
overflow_resource.language,
@@ -390,19 +387,19 @@ pub fn main() !void {
},
error.TotalResourceDataTooLong => {
const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
- try error_handler.emitMessage(gpa, .err, "total resource data exceeds the maximum of the coff 'size of raw data' field", .{});
- try error_handler.emitMessage(gpa, .note, "size overflow occurred when attempting to write this resource: [id: {f}, type: {f}, language: {f}]", .{
+ try error_handler.emitMessage(gpa, io, .err, "total resource data exceeds the maximum of the coff 'size of raw data' field", .{});
+ try error_handler.emitMessage(gpa, io, .note, "size overflow occurred when attempting to write this resource: [id: {f}, type: {f}, language: {f}]", .{
overflow_resource.name_value,
fmtResourceType(overflow_resource.type_value),
overflow_resource.language,
});
},
else => {
- try error_handler.emitMessage(gpa, .err, "unable to write coff output file '{s}': {s}", .{ coff_stream.name, @errorName(err) });
+ try error_handler.emitMessage(gpa, io, .err, "unable to write coff output file '{s}': {s}", .{ coff_stream.name, @errorName(err) });
},
}
// Delete the output file on error
- coff_stream.cleanupAfterError();
+ coff_stream.cleanupAfterError(io);
std.process.exit(1);
};
@@ -416,58 +413,58 @@ const IoStream = struct {
pub const IoDirection = enum { input, output };
- pub fn fromIoSource(source: cli.Options.IoSource, io: IoDirection) !IoStream {
+ pub fn fromIoSource(io: Io, source: cli.Options.IoSource, io_direction: IoDirection) !IoStream {
return .{
.name = switch (source) {
.filename => |filename| filename,
- .stdio => switch (io) {
+ .stdio => switch (io_direction) {
.input => "<stdin>",
.output => "<stdout>",
},
},
.intermediate = false,
- .source = try Source.fromIoSource(source, io),
+ .source = try Source.fromIoSource(io, source, io_direction),
};
}
- pub fn deinit(self: *IoStream, allocator: Allocator) void {
- self.source.deinit(allocator);
+ pub fn deinit(self: *IoStream, allocator: Allocator, io: Io) void {
+ self.source.deinit(allocator, io);
}
- pub fn cleanupAfterError(self: *IoStream) void {
+ pub fn cleanupAfterError(self: *IoStream, io: Io) void {
switch (self.source) {
.file => |file| {
// Delete the output file on error
- file.close();
+ file.close(io);
// Failing to delete is not really a big deal, so swallow any errors
- std.fs.cwd().deleteFile(self.name) catch {};
+ Io.Dir.cwd().deleteFile(io, self.name) catch {};
},
.stdio, .memory, .closed => return,
}
}
pub const Source = union(enum) {
- file: std.fs.File,
- stdio: std.fs.File,
+ file: Io.File,
+ stdio: Io.File,
memory: std.ArrayList(u8),
/// The source has been closed and any usage of the Source in this state is illegal (except deinit).
closed: void,
- pub fn fromIoSource(source: cli.Options.IoSource, io: IoDirection) !Source {
+ pub fn fromIoSource(io: Io, source: cli.Options.IoSource, io_direction: IoDirection) !Source {
switch (source) {
.filename => |filename| return .{
- .file = switch (io) {
- .input => try openFileNotDir(std.fs.cwd(), filename, .{}),
- .output => try std.fs.cwd().createFile(filename, .{}),
+ .file = switch (io_direction) {
+ .input => try openFileNotDir(Io.Dir.cwd(), io, filename, .{}),
+ .output => try Io.Dir.cwd().createFile(io, filename, .{}),
},
},
.stdio => |file| return .{ .stdio = file },
}
}
- pub fn deinit(self: *Source, allocator: Allocator) void {
+ pub fn deinit(self: *Source, allocator: Allocator, io: Io) void {
switch (self.*) {
- .file => |file| file.close(),
+ .file => |file| file.close(io),
.stdio => {},
.memory => |*list| list.deinit(allocator),
.closed => {},
@@ -500,10 +497,10 @@ const IoStream = struct {
}
pub const Writer = union(enum) {
- file: std.fs.File.Writer,
+ file: Io.File.Writer,
allocating: std.Io.Writer.Allocating,
- pub const Error = Allocator.Error || std.fs.File.WriteError;
+ pub const Error = Allocator.Error || Io.File.WriteError;
pub fn interface(this: *@This()) *std.Io.Writer {
return switch (this.*) {
@@ -521,9 +518,9 @@ const IoStream = struct {
}
};
- pub fn writer(source: *Source, allocator: Allocator, buffer: []u8) Writer {
+ pub fn writer(source: *Source, allocator: Allocator, io: Io, buffer: []u8) Writer {
return switch (source.*) {
- .file, .stdio => |file| .{ .file = file.writer(buffer) },
+ .file, .stdio => |file| .{ .file = file.writer(io, buffer) },
.memory => |*list| .{ .allocating = .fromArrayList(allocator, list) },
.closed => unreachable,
};
@@ -550,16 +547,16 @@ const LazyIncludePaths = struct {
else => |e| {
switch (e) {
error.UnsupportedAutoIncludesMachineType => {
- try error_handler.emitMessage(self.arena, .err, "automatic include path detection is not supported for target '{s}'", .{@tagName(self.target_machine_type)});
+ try error_handler.emitMessage(self.arena, io, .err, "automatic include path detection is not supported for target '{s}'", .{@tagName(self.target_machine_type)});
},
error.MsvcIncludesNotFound => {
- try error_handler.emitMessage(self.arena, .err, "MSVC include paths could not be automatically detected", .{});
+ try error_handler.emitMessage(self.arena, io, .err, "MSVC include paths could not be automatically detected", .{});
},
error.MingwIncludesNotFound => {
- try error_handler.emitMessage(self.arena, .err, "MinGW include paths could not be automatically detected", .{});
+ try error_handler.emitMessage(self.arena, io, .err, "MinGW include paths could not be automatically detected", .{});
},
}
- try error_handler.emitMessage(self.arena, .note, "to disable auto includes, use the option /:auto-includes none", .{});
+ try error_handler.emitMessage(self.arena, io, .note, "to disable auto includes, use the option /:auto-includes none", .{});
std.process.exit(1);
},
};
@@ -618,7 +615,7 @@ fn getIncludePaths(
};
const target = std.zig.resolveTargetQueryOrFatal(io, target_query);
const is_native_abi = target_query.isNativeAbi();
- const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, &target, is_native_abi, true, null) catch {
+ const detected_libc = std.zig.LibCDirs.detect(arena, io, zig_lib_dir, &target, is_native_abi, true, null) catch {
if (includes == .any) {
// fall back to mingw
includes = .gnu;
@@ -644,7 +641,7 @@ fn getIncludePaths(
};
const target = std.zig.resolveTargetQueryOrFatal(io, target_query);
const is_native_abi = target_query.isNativeAbi();
- const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, &target, is_native_abi, true, null) catch |err| switch (err) {
+ const detected_libc = std.zig.LibCDirs.detect(arena, io, zig_lib_dir, &target, is_native_abi, true, null) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => return error.MingwIncludesNotFound,
};
@@ -664,6 +661,7 @@ const ErrorHandler = union(enum) {
pub fn emitCliDiagnostics(
self: *ErrorHandler,
allocator: Allocator,
+ io: Io,
args: []const []const u8,
diagnostics: *cli.Diagnostics,
) !void {
@@ -674,7 +672,7 @@ const ErrorHandler = union(enum) {
try server.serveErrorBundle(error_bundle);
},
- .stderr => diagnostics.renderToStdErr(args),
+ .stderr => return diagnostics.renderToStderr(io, args),
}
}
@@ -684,6 +682,7 @@ const ErrorHandler = union(enum) {
fail_msg: []const u8,
comp: *aro.Compilation,
) !void {
+ const io = comp.io;
switch (self.*) {
.server => |*server| {
var error_bundle = try compiler_util.aroDiagnosticsToErrorBundle(
@@ -697,9 +696,9 @@ const ErrorHandler = union(enum) {
},
.stderr => {
// aro errors have already been emitted
- const stderr, const ttyconf = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- try renderErrorMessage(stderr, ttyconf, .err, "{s}", .{fail_msg});
+ const stderr = try io.lockStderr(&.{}, null);
+ defer io.unlockStderr();
+ try renderErrorMessage(stderr.terminal(), .err, "{s}", .{fail_msg});
},
}
}
@@ -707,7 +706,7 @@ const ErrorHandler = union(enum) {
pub fn emitDiagnostics(
self: *ErrorHandler,
allocator: Allocator,
- cwd: std.fs.Dir,
+ cwd: Io.Dir,
source: []const u8,
diagnostics: *Diagnostics,
mappings: SourceMappings,
@@ -719,13 +718,14 @@ const ErrorHandler = union(enum) {
try server.serveErrorBundle(error_bundle);
},
- .stderr => diagnostics.renderToStdErr(cwd, source, mappings),
+ .stderr => return diagnostics.renderToStderr(cwd, source, mappings),
}
}
pub fn emitMessage(
self: *ErrorHandler,
allocator: Allocator,
+ io: Io,
msg_type: @import("utils.zig").ErrorMessageType,
comptime format: []const u8,
args: anytype,
@@ -741,9 +741,9 @@ const ErrorHandler = union(enum) {
try server.serveErrorBundle(error_bundle);
},
.stderr => {
- const stderr, const ttyconf = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- try renderErrorMessage(stderr, ttyconf, msg_type, format, args);
+ const stderr = try io.lockStderr(&.{}, null);
+ defer io.unlockStderr();
+ try renderErrorMessage(stderr.terminal(), msg_type, format, args);
},
}
}
diff --git a/lib/compiler/resinator/utils.zig b/lib/compiler/resinator/utils.zig
index 021b8cf4de..42d3cc5e31 100644
--- a/lib/compiler/resinator/utils.zig
+++ b/lib/compiler/resinator/utils.zig
@@ -1,6 +1,8 @@
-const std = @import("std");
const builtin = @import("builtin");
+const std = @import("std");
+const Io = std.Io;
+
pub const UncheckedSliceWriter = struct {
const Self = @This();
@@ -23,19 +25,20 @@ pub const UncheckedSliceWriter = struct {
}
};
-/// Cross-platform 'std.fs.Dir.openFile' wrapper that will always return IsDir if
+/// Cross-platform 'Io.Dir.openFile' wrapper that will always return IsDir if
/// a directory is attempted to be opened.
/// TODO: Remove once https://github.com/ziglang/zig/issues/5732 is addressed.
pub fn openFileNotDir(
- cwd: std.fs.Dir,
+ cwd: Io.Dir,
+ io: Io,
path: []const u8,
- flags: std.fs.File.OpenFlags,
-) (std.fs.File.OpenError || std.fs.File.StatError)!std.fs.File {
- const file = try cwd.openFile(path, flags);
- errdefer file.close();
+ flags: Io.File.OpenFlags,
+) (Io.File.OpenError || Io.File.StatError)!Io.File {
+ const file = try cwd.openFile(io, path, flags);
+ errdefer file.close(io);
// https://github.com/ziglang/zig/issues/5732
if (builtin.os.tag != .windows) {
- const stat = try file.stat();
+ const stat = try file.stat(io);
if (stat.kind == .directory)
return error.IsDir;
@@ -89,31 +92,32 @@ pub const ErrorMessageType = enum { err, warning, note };
/// Used for generic colored errors/warnings/notes, more context-specific error messages
/// are handled elsewhere.
-pub fn renderErrorMessage(writer: *std.Io.Writer, config: std.Io.tty.Config, msg_type: ErrorMessageType, comptime format: []const u8, args: anytype) !void {
+pub fn renderErrorMessage(t: Io.Terminal, msg_type: ErrorMessageType, comptime format: []const u8, args: anytype) !void {
+ const writer = t.writer;
switch (msg_type) {
.err => {
- try config.setColor(writer, .bold);
- try config.setColor(writer, .red);
+ try t.setColor(.bold);
+ try t.setColor(.red);
try writer.writeAll("error: ");
},
.warning => {
- try config.setColor(writer, .bold);
- try config.setColor(writer, .yellow);
+ try t.setColor(.bold);
+ try t.setColor(.yellow);
try writer.writeAll("warning: ");
},
.note => {
- try config.setColor(writer, .reset);
- try config.setColor(writer, .cyan);
+ try t.setColor(.reset);
+ try t.setColor(.cyan);
try writer.writeAll("note: ");
},
}
- try config.setColor(writer, .reset);
+ try t.setColor(.reset);
if (msg_type == .err) {
- try config.setColor(writer, .bold);
+ try t.setColor(.bold);
}
try writer.print(format, args);
try writer.writeByte('\n');
- try config.setColor(writer, .reset);
+ try t.setColor(.reset);
}
pub fn isLineEndingPair(first: u8, second: u8) bool {
diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig
index 12825146a2..e2b58b8668 100644
--- a/lib/compiler/std-docs.zig
+++ b/lib/compiler/std-docs.zig
@@ -1,12 +1,14 @@
const builtin = @import("builtin");
+
const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Cache = std.Build.Cache;
-fn usage() noreturn {
- std.fs.File.stdout().writeAll(
+fn usage(io: Io) noreturn {
+ Io.File.stdout().writeStreamingAll(io,
\\Usage: zig std [options]
\\
\\Options:
@@ -27,6 +29,10 @@ pub fn main() !void {
var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
const gpa = general_purpose_allocator.allocator();
+ var threaded: Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
var argv = try std.process.argsWithAllocator(arena);
defer argv.deinit();
assert(argv.skip());
@@ -34,18 +40,18 @@ pub fn main() !void {
const zig_exe_path = argv.next().?;
const global_cache_path = argv.next().?;
- var lib_dir = try std.fs.cwd().openDir(zig_lib_directory, .{});
- defer lib_dir.close();
+ var lib_dir = try Io.Dir.cwd().openDir(io, zig_lib_directory, .{});
+ defer lib_dir.close(io);
var listen_port: u16 = 0;
var force_open_browser: ?bool = null;
while (argv.next()) |arg| {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- usage();
+ usage(io);
} else if (mem.eql(u8, arg, "-p") or mem.eql(u8, arg, "--port")) {
- listen_port = std.fmt.parseInt(u16, argv.next() orelse usage(), 10) catch |err| {
+ listen_port = std.fmt.parseInt(u16, argv.next() orelse usage(io), 10) catch |err| {
std.log.err("expected port number: {}", .{err});
- usage();
+ usage(io);
};
} else if (mem.eql(u8, arg, "--open-browser")) {
force_open_browser = true;
@@ -53,69 +59,70 @@ pub fn main() !void {
force_open_browser = false;
} else {
std.log.err("unrecognized argument: {s}", .{arg});
- usage();
+ usage(io);
}
}
const should_open_browser = force_open_browser orelse (listen_port == 0);
- const address = std.net.Address.parseIp("127.0.0.1", listen_port) catch unreachable;
- var http_server = try address.listen(.{
+ const address = Io.net.IpAddress.parse("127.0.0.1", listen_port) catch unreachable;
+ var http_server = try address.listen(io, .{
.reuse_address = true,
});
- const port = http_server.listen_address.in.getPort();
+ const port = http_server.socket.address.getPort();
const url_with_newline = try std.fmt.allocPrint(arena, "http://127.0.0.1:{d}/\n", .{port});
- std.fs.File.stdout().writeAll(url_with_newline) catch {};
+ Io.File.stdout().writeStreamingAll(io, url_with_newline) catch {};
if (should_open_browser) {
- openBrowserTab(gpa, url_with_newline[0 .. url_with_newline.len - 1 :'\n']) catch |err| {
- std.log.err("unable to open browser: {s}", .{@errorName(err)});
+ openBrowserTab(gpa, io, url_with_newline[0 .. url_with_newline.len - 1 :'\n']) catch |err| {
+ std.log.err("unable to open browser: {t}", .{err});
};
}
var context: Context = .{
.gpa = gpa,
+ .io = io,
.zig_exe_path = zig_exe_path,
.global_cache_path = global_cache_path,
.lib_dir = lib_dir,
.zig_lib_directory = zig_lib_directory,
};
+ var group: Io.Group = .init;
+ defer group.cancel(io);
+
while (true) {
- const connection = try http_server.accept();
- _ = std.Thread.spawn(.{}, accept, .{ &context, connection }) catch |err| {
- std.log.err("unable to accept connection: {s}", .{@errorName(err)});
- connection.stream.close();
- continue;
- };
+ const stream = try http_server.accept(io);
+ group.async(io, accept, .{ &context, stream });
}
}
-fn accept(context: *Context, connection: std.net.Server.Connection) void {
- defer connection.stream.close();
+fn accept(context: *Context, stream: Io.net.Stream) void {
+ const io = context.io;
+ defer stream.close(io);
var recv_buffer: [4000]u8 = undefined;
var send_buffer: [4000]u8 = undefined;
- var conn_reader = connection.stream.reader(&recv_buffer);
- var conn_writer = connection.stream.writer(&send_buffer);
- var server = std.http.Server.init(conn_reader.interface(), &conn_writer.interface);
+ var conn_reader = stream.reader(io, &recv_buffer);
+ var conn_writer = stream.writer(io, &send_buffer);
+ var server = std.http.Server.init(&conn_reader.interface, &conn_writer.interface);
while (server.reader.state == .ready) {
var request = server.receiveHead() catch |err| switch (err) {
error.HttpConnectionClosing => return,
else => {
- std.log.err("closing http connection: {s}", .{@errorName(err)});
+ std.log.err("closing http connection: {t}", .{err});
return;
},
};
serveRequest(&request, context) catch |err| switch (err) {
error.WriteFailed => {
if (conn_writer.err) |e| {
- std.log.err("unable to serve {s}: {s}", .{ request.head.target, @errorName(e) });
+ std.log.err("unable to serve {s}: {t}", .{ request.head.target, e });
} else {
- std.log.err("unable to serve {s}: {s}", .{ request.head.target, @errorName(err) });
+ std.log.err("unable to serve {s}: {t}", .{ request.head.target, err });
}
return;
},
else => {
- std.log.err("unable to serve {s}: {s}", .{ request.head.target, @errorName(err) });
+ std.log.err("unable to serve {s}: {t}", .{ request.head.target, err });
return;
},
};
@@ -124,7 +131,8 @@ fn accept(context: *Context, connection: std.net.Server.Connection) void {
const Context = struct {
gpa: Allocator,
- lib_dir: std.fs.Dir,
+ io: Io,
+ lib_dir: Io.Dir,
zig_lib_directory: []const u8,
zig_exe_path: []const u8,
global_cache_path: []const u8,
@@ -170,10 +178,11 @@ fn serveDocsFile(
content_type: []const u8,
) !void {
const gpa = context.gpa;
+ const io = context.io;
// The desired API is actually sendfile, which will require enhancing std.http.Server.
// We load the file with every request so that the user can make changes to the file
// and refresh the HTML page without restarting this server.
- const file_contents = try context.lib_dir.readFileAlloc(name, gpa, .limited(10 * 1024 * 1024));
+ const file_contents = try context.lib_dir.readFileAlloc(io, name, gpa, .limited(10 * 1024 * 1024));
defer gpa.free(file_contents);
try request.respond(file_contents, .{
.extra_headers = &.{
@@ -185,6 +194,7 @@ fn serveDocsFile(
fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void {
const gpa = context.gpa;
+ const io = context.io;
var send_buffer: [0x4000]u8 = undefined;
var response = try request.respondStreaming(&send_buffer, .{
@@ -196,8 +206,8 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void {
},
});
- var std_dir = try context.lib_dir.openDir("std", .{ .iterate = true });
- defer std_dir.close();
+ var std_dir = try context.lib_dir.openDir(io, "std", .{ .iterate = true });
+ defer std_dir.close(io);
var walker = try std_dir.walk(gpa);
defer walker.deinit();
@@ -205,7 +215,7 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void {
var archiver: std.tar.Writer = .{ .underlying_writer = &response.writer };
archiver.prefix = "std";
- while (try walker.next()) |entry| {
+ while (try walker.next(io)) |entry| {
switch (entry.kind) {
.file => {
if (!std.mem.endsWith(u8, entry.basename, ".zig"))
@@ -215,15 +225,16 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void {
},
else => continue,
}
- var file = try entry.dir.openFile(entry.basename, .{});
- defer file.close();
- const stat = try file.stat();
- var file_reader: std.fs.File.Reader = .{
+ var file = try entry.dir.openFile(io, entry.basename, .{});
+ defer file.close(io);
+ const stat = try file.stat(io);
+ var file_reader: Io.File.Reader = .{
+ .io = io,
.file = file,
- .interface = std.fs.File.Reader.initInterface(&.{}),
+ .interface = Io.File.Reader.initInterface(&.{}),
.size = stat.size,
};
- try archiver.writeFile(entry.path, &file_reader, stat.mtime);
+ try archiver.writeFileTimestamp(entry.path, &file_reader, stat.mtime);
}
{
@@ -245,6 +256,7 @@ fn serveWasm(
optimize_mode: std.builtin.OptimizeMode,
) !void {
const gpa = context.gpa;
+ const io = context.io;
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
@@ -255,7 +267,7 @@ fn serveWasm(
const wasm_base_path = try buildWasmBinary(arena, context, optimize_mode);
const bin_name = try std.zig.binNameAlloc(arena, .{
.root_name = autodoc_root_name,
- .target = &(std.zig.system.resolveTargetQuery(std.Build.parseTargetQuery(.{
+ .target = &(std.zig.system.resolveTargetQuery(io, std.Build.parseTargetQuery(.{
.arch_os_abi = autodoc_arch_os_abi,
.cpu_features = autodoc_cpu_features,
}) catch unreachable) catch unreachable),
@@ -263,7 +275,7 @@ fn serveWasm(
});
// std.http.Server does not have a sendfile API yet.
const bin_path = try wasm_base_path.join(arena, bin_name);
- const file_contents = try bin_path.root_dir.handle.readFileAlloc(bin_path.sub_path, gpa, .limited(10 * 1024 * 1024));
+ const file_contents = try bin_path.root_dir.handle.readFileAlloc(io, bin_path.sub_path, gpa, .limited(10 * 1024 * 1024));
defer gpa.free(file_contents);
try request.respond(file_contents, .{
.extra_headers = &.{
@@ -283,6 +295,7 @@ fn buildWasmBinary(
optimize_mode: std.builtin.OptimizeMode,
) !Cache.Path {
const gpa = context.gpa;
+ const io = context.io;
var argv: std.ArrayList([]const u8) = .empty;
@@ -315,16 +328,16 @@ fn buildWasmBinary(
child.stdin_behavior = .Pipe;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Pipe;
- try child.spawn();
+ try child.spawn(io);
- var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{
+ var poller = Io.poll(gpa, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
defer poller.deinit();
- try sendMessage(child.stdin.?, .update);
- try sendMessage(child.stdin.?, .exit);
+ try sendMessage(io, child.stdin.?, .update);
+ try sendMessage(io, child.stdin.?, .exit);
var result: ?Cache.Path = null;
var result_error_bundle = std.zig.ErrorBundle.empty;
@@ -348,7 +361,7 @@ fn buildWasmBinary(
result_error_bundle = try std.zig.Server.allocErrorBundle(arena, body);
},
.emit_digest => {
- var r: std.Io.Reader = .fixed(body);
+ var r: Io.Reader = .fixed(body);
const emit_digest = r.takeStruct(std.zig.Server.Message.EmitDigest, .little) catch unreachable;
if (!emit_digest.flags.cache_hit) {
std.log.info("source changes detected; rebuilt wasm component", .{});
@@ -371,10 +384,10 @@ fn buildWasmBinary(
}
// Send EOF to stdin.
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
- switch (try child.wait()) {
+ switch (try child.wait(io)) {
.Exited => |code| {
if (code != 0) {
std.log.err(
@@ -394,7 +407,7 @@ fn buildWasmBinary(
}
if (result_error_bundle.errorMessageCount() > 0) {
- result_error_bundle.renderToStdErr(.{}, true);
+ try result_error_bundle.renderToStderr(io, .{}, .auto);
std.log.err("the following command failed with {d} compilation errors:\n{s}", .{
result_error_bundle.errorMessageCount(),
try std.Build.Step.allocPrintCmd(arena, null, argv.items),
@@ -410,24 +423,24 @@ fn buildWasmBinary(
};
}
-fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
+fn sendMessage(io: Io, file: Io.File, tag: std.zig.Client.Message.Tag) !void {
const header: std.zig.Client.Message.Header = .{
.tag = tag,
.bytes_len = 0,
};
- var w = file.writer(&.{});
+ var w = file.writer(io, &.{});
w.interface.writeStruct(header, .little) catch |err| switch (err) {
error.WriteFailed => return w.err.?,
};
}
-fn openBrowserTab(gpa: Allocator, url: []const u8) !void {
+fn openBrowserTab(gpa: Allocator, io: Io, url: []const u8) !void {
// Until https://github.com/ziglang/zig/issues/19205 is implemented, we
// spawn a thread for this child process.
- _ = try std.Thread.spawn(.{}, openBrowserTabThread, .{ gpa, url });
+ _ = try std.Thread.spawn(.{}, openBrowserTabThread, .{ gpa, io, url });
}
-fn openBrowserTabThread(gpa: Allocator, url: []const u8) !void {
+fn openBrowserTabThread(gpa: Allocator, io: Io, url: []const u8) !void {
const main_exe = switch (builtin.os.tag) {
.windows => "explorer",
.macos => "open",
@@ -437,6 +450,6 @@ fn openBrowserTabThread(gpa: Allocator, url: []const u8) !void {
child.stdin_behavior = .Ignore;
child.stdout_behavior = .Ignore;
child.stderr_behavior = .Ignore;
- try child.spawn();
- _ = try child.wait();
+ try child.spawn(io);
+ _ = try child.wait(io);
}
diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig
index 72ed3e7677..da3da684d9 100644
--- a/lib/compiler/test_runner.zig
+++ b/lib/compiler/test_runner.zig
@@ -17,7 +17,7 @@ var fba: std.heap.FixedBufferAllocator = .init(&fba_buffer);
var fba_buffer: [8192]u8 = undefined;
var stdin_buffer: [4096]u8 = undefined;
var stdout_buffer: [4096]u8 = undefined;
-var runner_threaded_io: Io.Threaded = .init_single_threaded;
+const runner_threaded_io: Io = Io.Threaded.global_single_threaded.ioBasic();
/// Keep in sync with logic in `std.Build.addRunArtifact` which decides whether
/// the test runner will communicate with the build runner via `std.zig.Server`.
@@ -74,8 +74,8 @@ pub fn main() void {
fn mainServer() !void {
@disableInstrumentation();
- var stdin_reader = std.fs.File.stdin().readerStreaming(runner_threaded_io.io(), &stdin_buffer);
- var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdin_reader = Io.File.stdin().readerStreaming(runner_threaded_io, &stdin_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(runner_threaded_io, &stdout_buffer);
var server = try std.zig.Server.init(.{
.in = &stdin_reader.interface,
.out = &stdout_writer.interface,
@@ -131,7 +131,7 @@ fn mainServer() !void {
.run_test => {
testing.allocator_instance = .{};
- testing.io_instance = .init(testing.allocator);
+ testing.io_instance = .init(testing.allocator, .{});
log_err_count = 0;
const index = try server.receiveBody_u32();
const test_fn = builtin.test_functions[index];
@@ -224,16 +224,16 @@ fn mainTerminal() void {
var skip_count: usize = 0;
var fail_count: usize = 0;
var fuzz_count: usize = 0;
- const root_node = if (builtin.fuzz) std.Progress.Node.none else std.Progress.start(.{
+ const root_node = if (builtin.fuzz) std.Progress.Node.none else std.Progress.start(runner_threaded_io, .{
.root_name = "Test",
.estimated_total_items = test_fn_list.len,
});
- const have_tty = std.fs.File.stderr().isTty();
+ const have_tty = Io.File.stderr().isTty(runner_threaded_io) catch unreachable;
var leaks: usize = 0;
for (test_fn_list, 0..) |test_fn, i| {
testing.allocator_instance = .{};
- testing.io_instance = .init(testing.allocator);
+ testing.io_instance = .init(testing.allocator, .{});
defer {
testing.io_instance.deinit();
if (testing.allocator_instance.deinit() == .leak) leaks += 1;
@@ -318,7 +318,7 @@ pub fn log(
/// work-in-progress backends can handle it.
pub fn mainSimple() anyerror!void {
@disableInstrumentation();
- // is the backend capable of calling `std.fs.File.writeAll`?
+ // is the backend capable of calling `Io.File.writeAll`?
const enable_write = switch (builtin.zig_backend) {
.stage2_aarch64, .stage2_riscv64 => true,
else => false,
@@ -329,35 +329,37 @@ pub fn mainSimple() anyerror!void {
else => false,
};
+ testing.io_instance = .init(testing.allocator, .{});
+
var passed: u64 = 0;
var skipped: u64 = 0;
var failed: u64 = 0;
// we don't want to bring in File and Writer if the backend doesn't support it
- const stdout = if (enable_write) std.fs.File.stdout() else {};
+ const stdout = if (enable_write) Io.File.stdout() else {};
for (builtin.test_functions) |test_fn| {
if (enable_write) {
- stdout.writeAll(test_fn.name) catch {};
- stdout.writeAll("... ") catch {};
+ stdout.writeStreamingAll(runner_threaded_io, test_fn.name) catch {};
+ stdout.writeStreamingAll(runner_threaded_io, "... ") catch {};
}
if (test_fn.func()) |_| {
- if (enable_write) stdout.writeAll("PASS\n") catch {};
+ if (enable_write) stdout.writeStreamingAll(runner_threaded_io, "PASS\n") catch {};
} else |err| {
if (err != error.SkipZigTest) {
- if (enable_write) stdout.writeAll("FAIL\n") catch {};
+ if (enable_write) stdout.writeStreamingAll(runner_threaded_io, "FAIL\n") catch {};
failed += 1;
if (!enable_write) return err;
continue;
}
- if (enable_write) stdout.writeAll("SKIP\n") catch {};
+ if (enable_write) stdout.writeStreamingAll(runner_threaded_io, "SKIP\n") catch {};
skipped += 1;
continue;
}
passed += 1;
}
if (enable_print) {
- var stdout_writer = stdout.writer(&.{});
+ var stdout_writer = stdout.writer(runner_threaded_io, &.{});
stdout_writer.interface.print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {};
}
if (failed != 0) std.process.exit(1);
@@ -405,15 +407,19 @@ pub fn fuzz(
testOne(ctx, input.toSlice()) catch |err| switch (err) {
error.SkipZigTest => return,
else => {
- std.debug.lockStdErr();
- if (@errorReturnTrace()) |trace| std.debug.dumpStackTrace(trace);
- std.debug.print("failed with error.{t}\n", .{err});
+ const stderr = std.debug.lockStderr(&.{}, null).terminal();
+ p: {
+ if (@errorReturnTrace()) |trace| {
+ std.debug.writeStackTrace(trace, stderr) catch break :p;
+ }
+ stderr.writer.print("failed with error.{t}\n", .{err}) catch break :p;
+ }
std.process.exit(1);
},
};
if (log_err_count != 0) {
- std.debug.lockStdErr();
- std.debug.print("error logs detected\n", .{});
+ const stderr = std.debug.lockStderr(&.{}, .no_color);
+ stderr.interface.print("error logs detected\n", .{}) catch {};
std.process.exit(1);
}
}
diff --git a/lib/compiler/translate-c/main.zig b/lib/compiler/translate-c/main.zig
index b0d7a5d9bd..ee50df422a 100644
--- a/lib/compiler/translate-c/main.zig
+++ b/lib/compiler/translate-c/main.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const mem = std.mem;
const process = std.process;
@@ -18,7 +19,7 @@ pub fn main() u8 {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -33,11 +34,14 @@ pub fn main() u8 {
zig_integration = true;
}
+ const NO_COLOR = std.zig.EnvVar.NO_COLOR.isSet();
+ const CLICOLOR_FORCE = std.zig.EnvVar.CLICOLOR_FORCE.isSet();
+
var stderr_buf: [1024]u8 = undefined;
- var stderr = std.fs.File.stderr().writer(&stderr_buf);
+ var stderr = Io.File.stderr().writer(io, &stderr_buf);
var diagnostics: aro.Diagnostics = switch (zig_integration) {
false => .{ .output = .{ .to_writer = .{
- .color = .detect(stderr.file),
+ .mode = Io.Terminal.Mode.detect(io, stderr.file, NO_COLOR, CLICOLOR_FORCE) catch unreachable,
.writer = &stderr.interface,
} } },
true => .{ .output = .{ .to_list = .{
@@ -46,7 +50,7 @@ pub fn main() u8 {
};
defer diagnostics.deinit();
- var comp = aro.Compilation.initDefault(gpa, arena, io, &diagnostics, std.fs.cwd()) catch |err| switch (err) {
+ var comp = aro.Compilation.initDefault(gpa, arena, io, &diagnostics, Io.Dir.cwd()) catch |err| switch (err) {
error.OutOfMemory => {
std.debug.print("ran out of memory initializing C compilation\n", .{});
if (fast_exit) process.exit(1);
@@ -68,7 +72,7 @@ pub fn main() u8 {
return 1;
},
error.FatalError => if (zig_integration) {
- serveErrorBundle(arena, &diagnostics) catch |bundle_err| {
+ serveErrorBundle(arena, io, &diagnostics) catch |bundle_err| {
std.debug.print("unable to serve error bundle: {}\n", .{bundle_err});
if (fast_exit) process.exit(1);
return 1;
@@ -92,14 +96,14 @@ pub fn main() u8 {
return @intFromBool(comp.diagnostics.errors != 0);
}
-fn serveErrorBundle(arena: std.mem.Allocator, diagnostics: *const aro.Diagnostics) !void {
+fn serveErrorBundle(arena: std.mem.Allocator, io: Io, diagnostics: *const aro.Diagnostics) !void {
const error_bundle = try compiler_util.aroDiagnosticsToErrorBundle(
diagnostics,
arena,
"translation failure",
);
var stdout_buffer: [1024]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
var server: std.zig.Server = .{
.out = &stdout_writer.interface,
.in = undefined,
@@ -121,6 +125,7 @@ pub const usage =
fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration: bool) !void {
const gpa = d.comp.gpa;
+ const io = d.comp.io;
const aro_args = args: {
var i: usize = 0;
@@ -128,13 +133,13 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration
args[i] = arg;
if (mem.eql(u8, arg, "--help")) {
var stdout_buf: [512]u8 = undefined;
- var stdout = std.fs.File.stdout().writer(&stdout_buf);
+ var stdout = Io.File.stdout().writer(io, &stdout_buf);
try stdout.interface.print(usage, .{args[0]});
try stdout.interface.flush();
return;
} else if (mem.eql(u8, arg, "--version")) {
var stdout_buf: [512]u8 = undefined;
- var stdout = std.fs.File.stdout().writer(&stdout_buf);
+ var stdout = Io.File.stdout().writer(io, &stdout_buf);
// TODO add version
try stdout.interface.writeAll("0.0.0-dev\n");
try stdout.interface.flush();
@@ -224,13 +229,13 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration
const dep_file_name = try d.getDepFileName(source, out_buf[0..std.fs.max_name_bytes]);
const file = if (dep_file_name) |path|
- d.comp.cwd.createFile(path, .{}) catch |er|
+ d.comp.cwd.createFile(io, path, .{}) catch |er|
return d.fatal("unable to create dependency file '{s}': {s}", .{ path, aro.Driver.errorDescription(er) })
else
- std.fs.File.stdout();
- defer if (dep_file_name != null) file.close();
+ Io.File.stdout();
+ defer if (dep_file_name != null) file.close(io);
- var file_writer = file.writer(&out_buf);
+ var file_writer = file.writer(io, &out_buf);
dep_file.write(&file_writer.interface) catch
return d.fatal("unable to write dependency file: {s}", .{aro.Driver.errorDescription(file_writer.err.?)});
}
@@ -245,23 +250,23 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration
var close_out_file = false;
var out_file_path: []const u8 = "<stdout>";
- var out_file: std.fs.File = .stdout();
- defer if (close_out_file) out_file.close();
+ var out_file: Io.File = .stdout();
+ defer if (close_out_file) out_file.close(io);
if (d.output_name) |path| blk: {
if (std.mem.eql(u8, path, "-")) break :blk;
if (std.fs.path.dirname(path)) |dirname| {
- std.fs.cwd().makePath(dirname) catch |err|
+ Io.Dir.cwd().createDirPath(io, dirname) catch |err|
return d.fatal("failed to create path to '{s}': {s}", .{ path, aro.Driver.errorDescription(err) });
}
- out_file = std.fs.cwd().createFile(path, .{}) catch |err| {
+ out_file = Io.Dir.cwd().createFile(io, path, .{}) catch |err| {
return d.fatal("failed to create output file '{s}': {s}", .{ path, aro.Driver.errorDescription(err) });
};
close_out_file = true;
out_file_path = path;
}
- var out_writer = out_file.writer(&out_buf);
+ var out_writer = out_file.writer(io, &out_buf);
out_writer.interface.writeAll(rendered_zig) catch {};
out_writer.interface.flush() catch {};
if (out_writer.err) |write_err|
diff --git a/lib/compiler_rt/emutls.zig b/lib/compiler_rt/emutls.zig
index 0bb427b651..d081aa9294 100644
--- a/lib/compiler_rt/emutls.zig
+++ b/lib/compiler_rt/emutls.zig
@@ -7,7 +7,7 @@ const std = @import("std");
const builtin = @import("builtin");
const common = @import("common.zig");
-const abort = std.posix.abort;
+const abort = std.process.abort;
const assert = std.debug.assert;
const expect = std.testing.expect;
diff --git a/lib/fuzzer.zig b/lib/fuzzer.zig
index 3a48360bf8..a2414744ca 100644
--- a/lib/fuzzer.zig
+++ b/lib/fuzzer.zig
@@ -1,18 +1,22 @@
const builtin = @import("builtin");
+const native_endian = builtin.cpu.arch.endian();
+
const std = @import("std");
+const Io = std.Io;
const fatal = std.process.fatal;
const mem = std.mem;
const math = std.math;
-const Allocator = mem.Allocator;
+const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const panic = std.debug.panic;
const abi = std.Build.abi.fuzz;
-const native_endian = builtin.cpu.arch.endian();
pub const std_options = std.Options{
.logFn = logOverride,
};
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
+
fn logOverride(
comptime level: std.log.Level,
comptime scope: @EnumLiteral(),
@@ -21,12 +25,12 @@ fn logOverride(
) void {
const f = log_f orelse
panic("attempt to use log before initialization, message:\n" ++ format, args);
- f.lock(.exclusive) catch |e| panic("failed to lock logging file: {t}", .{e});
- defer f.unlock();
+ f.lock(io, .exclusive) catch |e| panic("failed to lock logging file: {t}", .{e});
+ defer f.unlock(io);
var buf: [256]u8 = undefined;
- var fw = f.writer(&buf);
- const end = f.getEndPos() catch |e| panic("failed to get fuzzer log file end: {t}", .{e});
+ var fw = f.writer(io, &buf);
+ const end = f.length(io) catch |e| panic("failed to get fuzzer log file end: {t}", .{e});
fw.seekTo(end) catch |e| panic("failed to seek to fuzzer log file end: {t}", .{e});
const prefix1 = comptime level.asText();
@@ -45,7 +49,7 @@ const gpa = switch (builtin.mode) {
};
/// Part of `exec`, however seperate to allow it to be set before `exec` is.
-var log_f: ?std.fs.File = null;
+var log_f: ?Io.File = null;
var exec: Executable = .preinit;
var inst: Instrumentation = .preinit;
var fuzzer: Fuzzer = undefined;
@@ -59,7 +63,7 @@ const Executable = struct {
/// Tracks the hit count for each pc as updated by the process's instrumentation.
pc_counters: []u8,
- cache_f: std.fs.Dir,
+ cache_f: Io.Dir,
/// Shared copy of all pcs that have been hit stored in a memory-mapped file that can viewed
/// while the fuzzer is running.
shared_seen_pcs: MemoryMappedList,
@@ -76,16 +80,16 @@ const Executable = struct {
.pc_digest = undefined,
};
- fn getCoverageFile(cache_dir: std.fs.Dir, pcs: []const usize, pc_digest: u64) MemoryMappedList {
+ fn getCoverageFile(cache_dir: Io.Dir, pcs: []const usize, pc_digest: u64) MemoryMappedList {
const pc_bitset_usizes = bitsetUsizes(pcs.len);
const coverage_file_name = std.fmt.hex(pc_digest);
comptime assert(abi.SeenPcsHeader.trailing[0] == .pc_bits_usize);
comptime assert(abi.SeenPcsHeader.trailing[1] == .pc_addr);
- var v = cache_dir.makeOpenPath("v", .{}) catch |e|
+ var v = cache_dir.createDirPathOpen(io, "v", .{}) catch |e|
panic("failed to create directory 'v': {t}", .{e});
- defer v.close();
- const coverage_file, const populate = if (v.createFile(&coverage_file_name, .{
+ defer v.close(io);
+ const coverage_file, const populate = if (v.createFile(io, &coverage_file_name, .{
.read = true,
// If we create the file, we want to block other processes while we populate it
.lock = .exclusive,
@@ -93,7 +97,7 @@ const Executable = struct {
})) |f|
.{ f, true }
else |e| switch (e) {
- error.PathAlreadyExists => .{ v.openFile(&coverage_file_name, .{
+ error.PathAlreadyExists => .{ v.openFile(io, &coverage_file_name, .{
.mode = .read_write,
.lock = .shared,
}) catch |e2| panic(
@@ -108,7 +112,7 @@ const Executable = struct {
pcs.len * @sizeOf(usize);
if (populate) {
- defer coverage_file.lock(.shared) catch |e| panic(
+ defer coverage_file.lock(io, .shared) catch |e| panic(
"failed to demote lock for coverage file '{s}': {t}",
.{ &coverage_file_name, e },
);
@@ -130,10 +134,8 @@ const Executable = struct {
}
return map;
} else {
- const size = coverage_file.getEndPos() catch |e| panic(
- "failed to stat coverage file '{s}': {t}",
- .{ &coverage_file_name, e },
- );
+ const size = coverage_file.length(io) catch |e|
+ panic("failed to stat coverage file '{s}': {t}", .{ &coverage_file_name, e });
if (size != coverage_file_len) panic(
"incompatible existing coverage file '{s}' (differing lengths: {} != {})",
.{ &coverage_file_name, size, coverage_file_len },
@@ -165,13 +167,11 @@ const Executable = struct {
pub fn init(cache_dir_path: []const u8) Executable {
var self: Executable = undefined;
- const cache_dir = std.fs.cwd().makeOpenPath(cache_dir_path, .{}) catch |e| panic(
- "failed to open directory '{s}': {t}",
- .{ cache_dir_path, e },
- );
- log_f = cache_dir.createFile("tmp/libfuzzer.log", .{ .truncate = false }) catch |e|
+ const cache_dir = Io.Dir.cwd().createDirPathOpen(io, cache_dir_path, .{}) catch |e|
+ panic("failed to open directory '{s}': {t}", .{ cache_dir_path, e });
+ log_f = cache_dir.createFile(io, "tmp/libfuzzer.log", .{ .truncate = false }) catch |e|
panic("failed to create file 'tmp/libfuzzer.log': {t}", .{e});
- self.cache_f = cache_dir.makeOpenPath("f", .{}) catch |e|
+ self.cache_f = cache_dir.createDirPathOpen(io, "f", .{}) catch |e|
panic("failed to open directory 'f': {t}", .{e});
// Linkers are expected to automatically add symbols prefixed with these for the start and
@@ -391,7 +391,7 @@ const Fuzzer = struct {
mutations: std.ArrayList(Mutation) = .empty,
/// Filesystem directory containing found inputs for future runs
- corpus_dir: std.fs.Dir,
+ corpus_dir: Io.Dir,
corpus_dir_idx: usize = 0,
pub fn init(test_one: abi.TestOne, unit_test_name: []const u8) Fuzzer {
@@ -405,10 +405,10 @@ const Fuzzer = struct {
};
const arena = self.arena_ctx.allocator();
- self.corpus_dir = exec.cache_f.makeOpenPath(unit_test_name, .{}) catch |e|
+ self.corpus_dir = exec.cache_f.createDirPathOpen(io, unit_test_name, .{}) catch |e|
panic("failed to open directory '{s}': {t}", .{ unit_test_name, e });
self.input = in: {
- const f = self.corpus_dir.createFile("in", .{
+ const f = self.corpus_dir.createFile(io, "in", .{
.read = true,
.truncate = false,
// In case any other fuzz tests are running under the same test name,
@@ -419,7 +419,7 @@ const Fuzzer = struct {
error.WouldBlock => @panic("input file 'in' is in use by another fuzzing process"),
else => panic("failed to create input file 'in': {t}", .{e}),
};
- const size = f.getEndPos() catch |e| panic("failed to stat input file 'in': {t}", .{e});
+ const size = f.length(io) catch |e| panic("failed to stat input file 'in': {t}", .{e});
const map = (if (size < std.heap.page_size_max)
MemoryMappedList.create(f, 8, std.heap.page_size_max)
else
@@ -445,6 +445,7 @@ const Fuzzer = struct {
while (true) {
var name_buf: [@sizeOf(usize) * 2]u8 = undefined;
const bytes = self.corpus_dir.readFileAlloc(
+ io,
std.fmt.bufPrint(&name_buf, "{x}", .{self.corpus_dir_idx}) catch unreachable,
arena,
.unlimited,
@@ -466,7 +467,7 @@ const Fuzzer = struct {
self.input.deinit();
self.corpus.deinit(gpa);
self.mutations.deinit(gpa);
- self.corpus_dir.close();
+ self.corpus_dir.close(io);
self.arena_ctx.deinit();
self.* = undefined;
}
@@ -573,17 +574,10 @@ const Fuzzer = struct {
// Write new corpus to cache
var name_buf: [@sizeOf(usize) * 2]u8 = undefined;
- self.corpus_dir.writeFile(.{
- .sub_path = std.fmt.bufPrint(
- &name_buf,
- "{x}",
- .{self.corpus_dir_idx},
- ) catch unreachable,
+ self.corpus_dir.writeFile(io, .{
+ .sub_path = std.fmt.bufPrint(&name_buf, "{x}", .{self.corpus_dir_idx}) catch unreachable,
.data = bytes,
- }) catch |e| panic(
- "failed to write corpus file '{x}': {t}",
- .{ self.corpus_dir_idx, e },
- );
+ }) catch |e| panic("failed to write corpus file '{x}': {t}", .{ self.corpus_dir_idx, e });
self.corpus_dir_idx += 1;
}
}
@@ -1320,9 +1314,9 @@ pub const MemoryMappedList = struct {
/// How many bytes this list can hold without allocating additional memory.
capacity: usize,
/// The file is kept open so that it can be resized.
- file: std.fs.File,
+ file: Io.File,
- pub fn init(file: std.fs.File, length: usize, capacity: usize) !MemoryMappedList {
+ pub fn init(file: Io.File, length: usize, capacity: usize) !MemoryMappedList {
const ptr = try std.posix.mmap(
null,
capacity,
@@ -1338,13 +1332,13 @@ pub const MemoryMappedList = struct {
};
}
- pub fn create(file: std.fs.File, length: usize, capacity: usize) !MemoryMappedList {
- try file.setEndPos(capacity);
+ pub fn create(file: Io.File, length: usize, capacity: usize) !MemoryMappedList {
+ try file.setLength(io, capacity);
return init(file, length, capacity);
}
pub fn deinit(l: *MemoryMappedList) void {
- l.file.close();
+ l.file.close(io);
std.posix.munmap(@volatileCast(l.items.ptr[0..l.capacity]));
l.* = undefined;
}
@@ -1369,7 +1363,7 @@ pub const MemoryMappedList = struct {
if (l.capacity >= new_capacity) return;
std.posix.munmap(@volatileCast(l.items.ptr[0..l.capacity]));
- try l.file.setEndPos(new_capacity);
+ try l.file.setLength(io, new_capacity);
l.* = try init(l.file, l.items.len, new_capacity);
}
diff --git a/lib/init/src/main.zig b/lib/init/src/main.zig
index 88dd8348e1..865e6c5322 100644
--- a/lib/init/src/main.zig
+++ b/lib/init/src/main.zig
@@ -1,10 +1,32 @@
const std = @import("std");
+const Io = std.Io;
+
const _NAME = @import(".NAME");
pub fn main() !void {
- // Prints to stderr, ignoring potential errors.
+ // Prints to stderr, unbuffered, ignoring potential errors.
std.debug.print("All your {s} are belong to us.\n", .{"codebase"});
- try _NAME.bufferedPrint();
+
+ // In order to allocate memory we must construct an `Allocator` instance.
+ var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
+ defer _ = debug_allocator.deinit(); // This checks for leaks.
+ const gpa = debug_allocator.allocator();
+
+ // In order to do I/O operations we must construct an `Io` instance.
+ var threaded: std.Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
+ // Stdout is for the actual output of your application, for example if you
+ // are implementing gzip, then only the compressed bytes should be sent to
+ // stdout, not any debugging messages.
+ var stdout_buffer: [1024]u8 = undefined;
+ var stdout_file_writer: Io.File.Writer = .init(.stdout(), io, &stdout_buffer);
+ const stdout_writer = &stdout_file_writer.interface;
+
+ try _NAME.printAnotherMessage(stdout_writer);
+
+ try stdout_writer.flush(); // Don't forget to flush!
}
test "simple test" {
diff --git a/lib/init/src/root.zig b/lib/init/src/root.zig
index 94c7cd0119..5a7125032b 100644
--- a/lib/init/src/root.zig
+++ b/lib/init/src/root.zig
@@ -1,17 +1,12 @@
-//! By convention, root.zig is the root source file when making a library.
+//! By convention, root.zig is the root source file when making a package.
const std = @import("std");
+const Io = std.Io;
-pub fn bufferedPrint() !void {
- // Stdout is for the actual output of your application, for example if you
- // are implementing gzip, then only the compressed bytes should be sent to
- // stdout, not any debugging messages.
- var stdout_buffer: [1024]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
- const stdout = &stdout_writer.interface;
-
- try stdout.print("Run `zig build test` to run the tests.\n", .{});
-
- try stdout.flush(); // Don't forget to flush!
+/// This is a documentation comment to explain the `printAnotherMessage` function below.
+///
+/// Accepting an `Io.Writer` instance is a handy way to write reusable code.
+pub fn printAnotherMessage(writer: *Io.Writer) Io.Writer.Error!void {
+ try writer.print("Run `zig build test` to run the tests.\n", .{});
}
pub fn add(a: i32, b: i32) i32 {
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index 50a2804938..317e4600a4 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -1,21 +1,20 @@
+const Build = @This();
const builtin = @import("builtin");
const std = @import("std.zig");
const Io = std.Io;
const fs = std.fs;
const mem = std.mem;
-const debug = std.debug;
const panic = std.debug.panic;
-const assert = debug.assert;
+const assert = std.debug.assert;
const log = std.log;
const StringHashMap = std.StringHashMap;
-const Allocator = mem.Allocator;
+const Allocator = std.mem.Allocator;
const Target = std.Target;
const process = std.process;
const EnvMap = std.process.EnvMap;
-const File = fs.File;
+const File = std.Io.File;
const Sha256 = std.crypto.hash.sha2.Sha256;
-const Build = @This();
const ArrayList = std.ArrayList;
pub const Cache = @import("Build/Cache.zig");
@@ -130,6 +129,9 @@ pub const Graph = struct {
dependency_cache: InitializedDepMap = .empty,
allow_so_scripts: ?bool = null,
time_report: bool,
+ /// Similar to the `Io.Terminal.Mode` returned by `Io.lockStderr`, but also
+ /// respects the '--color' flag.
+ stderr_mode: ?Io.Terminal.Mode = null,
};
const AvailableDeps = []const struct { []const u8, []const u8 };
@@ -1699,21 +1701,20 @@ pub fn addCheckFile(
return Step.CheckFile.create(b, file_source, options);
}
-pub fn truncateFile(b: *Build, dest_path: []const u8) (fs.Dir.MakeError || fs.Dir.StatFileError)!void {
- if (b.verbose) {
- log.info("truncate {s}", .{dest_path});
- }
- const cwd = fs.cwd();
- var src_file = cwd.createFile(dest_path, .{}) catch |err| switch (err) {
+pub fn truncateFile(b: *Build, dest_path: []const u8) (Io.Dir.CreateDirError || Io.Dir.StatFileError)!void {
+ const io = b.graph.io;
+ if (b.verbose) log.info("truncate {s}", .{dest_path});
+ const cwd = Io.Dir.cwd();
+ var src_file = cwd.createFile(io, dest_path, .{}) catch |err| switch (err) {
error.FileNotFound => blk: {
if (fs.path.dirname(dest_path)) |dirname| {
- try cwd.makePath(dirname);
+ try cwd.createDirPath(io, dirname);
}
- break :blk try cwd.createFile(dest_path, .{});
+ break :blk try cwd.createFile(io, dest_path, .{});
},
else => |e| return e,
};
- src_file.close();
+ src_file.close(io);
}
/// References a file or directory relative to the source root.
@@ -1761,7 +1762,10 @@ fn supportedWindowsProgramExtension(ext: []const u8) bool {
}
fn tryFindProgram(b: *Build, full_path: []const u8) ?[]const u8 {
- if (fs.realpathAlloc(b.allocator, full_path)) |p| {
+ const io = b.graph.io;
+ const arena = b.allocator;
+
+ if (Io.Dir.realPathFileAbsoluteAlloc(io, full_path, arena)) |p| {
return p;
} else |err| switch (err) {
error.OutOfMemory => @panic("OOM"),
@@ -1775,7 +1779,11 @@ fn tryFindProgram(b: *Build, full_path: []const u8) ?[]const u8 {
while (it.next()) |ext| {
if (!supportedWindowsProgramExtension(ext)) continue;
- return fs.realpathAlloc(b.allocator, b.fmt("{s}{s}", .{ full_path, ext })) catch |err| switch (err) {
+ return Io.Dir.realPathFileAbsoluteAlloc(
+ io,
+ b.fmt("{s}{s}", .{ full_path, ext }),
+ arena,
+ ) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"),
else => continue,
};
@@ -1839,7 +1847,7 @@ pub fn runAllowFail(
child.env_map = &b.graph.env_map;
try Step.handleVerbose2(b, null, child.env_map, argv);
- try child.spawn();
+ try child.spawn(io);
var stdout_reader = child.stdout.?.readerStreaming(io, &.{});
const stdout = stdout_reader.interface.allocRemaining(b.allocator, .limited(max_output_size)) catch {
@@ -1847,7 +1855,7 @@ pub fn runAllowFail(
};
errdefer b.allocator.free(stdout);
- const term = try child.wait();
+ const term = try child.wait(io);
switch (term) {
.Exited => |code| {
if (code != 0) {
@@ -2091,7 +2099,7 @@ pub fn dependencyFromBuildZig(
}
const full_path = b.pathFromRoot("build.zig.zon");
- debug.panic("'{}' is not a build.zig struct of a dependency in '{s}'", .{ build_zig, full_path });
+ std.debug.panic("'{}' is not a build.zig struct of a dependency in '{s}'", .{ build_zig, full_path });
}
fn userValuesAreSame(lhs: UserValue, rhs: UserValue) bool {
@@ -2185,6 +2193,7 @@ fn dependencyInner(
pkg_deps: AvailableDeps,
args: anytype,
) *Dependency {
+ const io = b.graph.io;
const user_input_options = userInputOptionsFromArgs(b.allocator, args);
if (b.graph.dependency_cache.getContext(.{
.build_root_string = build_root_string,
@@ -2194,7 +2203,7 @@ fn dependencyInner(
const build_root: std.Build.Cache.Directory = .{
.path = build_root_string,
- .handle = fs.cwd().openDir(build_root_string, .{}) catch |err| {
+ .handle = Io.Dir.cwd().openDir(io, build_root_string, .{}) catch |err| {
std.debug.print("unable to open '{s}': {s}\n", .{
build_root_string, @errorName(err),
});
@@ -2239,7 +2248,7 @@ pub const GeneratedFile = struct {
/// This value must be set in the `fn make()` of the `step` and must not be `null` afterwards.
path: ?[]const u8 = null,
- /// Deprecated, see `getPath2`.
+ /// Deprecated, see `getPath3`.
pub fn getPath(gen: GeneratedFile) []const u8 {
return gen.step.owner.pathFromCwd(gen.path orelse std.debug.panic(
"getPath() was called on a GeneratedFile that wasn't built yet. Is there a missing Step dependency on step '{s}'?",
@@ -2247,11 +2256,19 @@ pub const GeneratedFile = struct {
));
}
+ /// Deprecated, see `getPath3`.
pub fn getPath2(gen: GeneratedFile, src_builder: *Build, asking_step: ?*Step) []const u8 {
+ return getPath3(gen, src_builder, asking_step) catch |err| switch (err) {
+ error.Canceled => std.process.exit(1),
+ };
+ }
+
+ pub fn getPath3(gen: GeneratedFile, src_builder: *Build, asking_step: ?*Step) Io.Cancelable![]const u8 {
return gen.path orelse {
- const w, const ttyconf = debug.lockStderrWriter(&.{});
- dumpBadGetPathHelp(gen.step, w, ttyconf, src_builder, asking_step) catch {};
- debug.unlockStderrWriter();
+ const graph = gen.step.owner.graph;
+ const io = graph.io;
+ const stderr = try io.lockStderr(&.{}, graph.stderr_mode);
+ dumpBadGetPathHelp(gen.step, stderr.terminal(), src_builder, asking_step) catch {};
@panic("misconfigured build script");
};
}
@@ -2426,22 +2443,29 @@ pub const LazyPath = union(enum) {
}
}
- /// Deprecated, see `getPath3`.
+ /// Deprecated, see `getPath4`.
pub fn getPath(lazy_path: LazyPath, src_builder: *Build) []const u8 {
return getPath2(lazy_path, src_builder, null);
}
- /// Deprecated, see `getPath3`.
+ /// Deprecated, see `getPath4`.
pub fn getPath2(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) []const u8 {
const p = getPath3(lazy_path, src_builder, asking_step);
return src_builder.pathResolve(&.{ p.root_dir.path orelse ".", p.sub_path });
}
+ /// Deprecated, see `getPath4`.
+ pub fn getPath3(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) Cache.Path {
+ return getPath4(lazy_path, src_builder, asking_step) catch |err| switch (err) {
+ error.Canceled => std.process.exit(1),
+ };
+ }
+
/// Intended to be used during the make phase only.
///
/// `asking_step` is only used for debugging purposes; it's the step being
/// run that is asking for the path.
- pub fn getPath3(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) Cache.Path {
+ pub fn getPath4(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) Io.Cancelable!Cache.Path {
switch (lazy_path) {
.src_path => |sp| return .{
.root_dir = sp.owner.build_root,
@@ -2455,12 +2479,15 @@ pub const LazyPath = union(enum) {
// TODO make gen.file.path not be absolute and use that as the
// basis for not traversing up too many directories.
+ const graph = src_builder.graph;
+
var file_path: Cache.Path = .{
.root_dir = Cache.Directory.cwd(),
.sub_path = gen.file.path orelse {
- const w, const ttyconf = debug.lockStderrWriter(&.{});
- dumpBadGetPathHelp(gen.file.step, w, ttyconf, src_builder, asking_step) catch {};
- debug.unlockStderrWriter();
+ const io = graph.io;
+ const stderr = try io.lockStderr(&.{}, graph.stderr_mode);
+ dumpBadGetPathHelp(gen.file.step, stderr.terminal(), src_builder, asking_step) catch {};
+ io.unlockStderr();
@panic("misconfigured build script");
},
};
@@ -2550,40 +2577,36 @@ fn dumpBadDirnameHelp(
comptime msg: []const u8,
args: anytype,
) anyerror!void {
- const w, const tty_config = debug.lockStderrWriter(&.{});
- defer debug.unlockStderrWriter();
+ const stderr = std.debug.lockStderr(&.{}).terminal();
+ defer std.debug.unlockStderr();
+ const w = stderr.writer;
try w.print(msg, args);
if (fail_step) |s| {
- tty_config.setColor(w, .red) catch {};
+ stderr.setColor(.red) catch {};
try w.writeAll(" The step was created by this stack trace:\n");
- tty_config.setColor(w, .reset) catch {};
+ stderr.setColor(.reset) catch {};
- s.dump(w, tty_config);
+ s.dump(stderr);
}
if (asking_step) |as| {
- tty_config.setColor(w, .red) catch {};
+ stderr.setColor(.red) catch {};
try w.print(" The step '{s}' that is missing a dependency on the above step was created by this stack trace:\n", .{as.name});
- tty_config.setColor(w, .reset) catch {};
+ stderr.setColor(.reset) catch {};
- as.dump(w, tty_config);
+ as.dump(stderr);
}
- tty_config.setColor(w, .red) catch {};
- try w.writeAll(" Hope that helps. Proceeding to panic.\n");
- tty_config.setColor(w, .reset) catch {};
+ stderr.setColor(.red) catch {};
+ try w.writeAll(" Proceeding to panic.\n");
+ stderr.setColor(.reset) catch {};
}
/// In this function the stderr mutex has already been locked.
-pub fn dumpBadGetPathHelp(
- s: *Step,
- w: *std.Io.Writer,
- tty_config: std.Io.tty.Config,
- src_builder: *Build,
- asking_step: ?*Step,
-) anyerror!void {
+pub fn dumpBadGetPathHelp(s: *Step, t: Io.Terminal, src_builder: *Build, asking_step: ?*Step) anyerror!void {
+ const w = t.writer;
try w.print(
\\getPath() was called on a GeneratedFile that wasn't built yet.
\\ source package path: {s}
@@ -2594,21 +2617,21 @@ pub fn dumpBadGetPathHelp(
s.name,
});
- tty_config.setColor(w, .red) catch {};
+ t.setColor(.red) catch {};
try w.writeAll(" The step was created by this stack trace:\n");
- tty_config.setColor(w, .reset) catch {};
+ t.setColor(.reset) catch {};
- s.dump(w, tty_config);
+ s.dump(t);
if (asking_step) |as| {
- tty_config.setColor(w, .red) catch {};
+ t.setColor(.red) catch {};
try w.print(" The step '{s}' that is missing a dependency on the above step was created by this stack trace:\n", .{as.name});
- tty_config.setColor(w, .reset) catch {};
+ t.setColor(.reset) catch {};
- as.dump(w, tty_config);
+ as.dump(t);
}
- tty_config.setColor(w, .red) catch {};
- try w.writeAll(" Hope that helps. Proceeding to panic.\n");
- tty_config.setColor(w, .reset) catch {};
+ t.setColor(.red) catch {};
+ try w.writeAll(" Proceeding to panic.\n");
+ t.setColor(.reset) catch {};
}
pub const InstallDir = union(enum) {
@@ -2634,13 +2657,12 @@ pub const InstallDir = union(enum) {
/// source of API breakage in the future, so keep that in mind when using this
/// function.
pub fn makeTempPath(b: *Build) []const u8 {
+ const io = b.graph.io;
const rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
const result_path = b.cache_root.join(b.allocator, &.{tmp_dir_sub_path}) catch @panic("OOM");
- b.cache_root.handle.makePath(tmp_dir_sub_path) catch |err| {
- std.debug.print("unable to make tmp path '{s}': {s}\n", .{
- result_path, @errorName(err),
- });
+ b.cache_root.handle.createDirPath(io, tmp_dir_sub_path) catch |err| {
+ std.debug.print("unable to make tmp path '{s}': {t}\n", .{ result_path, err });
};
return result_path;
}
diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig
index 5e8412cfcf..b384ab13ff 100644
--- a/lib/std/Build/Cache.zig
+++ b/lib/std/Build/Cache.zig
@@ -8,7 +8,6 @@ const builtin = @import("builtin");
const std = @import("std");
const Io = std.Io;
const crypto = std.crypto;
-const fs = std.fs;
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
@@ -18,7 +17,7 @@ const log = std.log.scoped(.cache);
gpa: Allocator,
io: Io,
-manifest_dir: fs.Dir,
+manifest_dir: Io.Dir,
hash: HashHelper = .{},
/// This value is accessed from multiple threads, protected by mutex.
recent_problematic_timestamp: Io.Timestamp = .zero,
@@ -71,7 +70,7 @@ const PrefixedPath = struct {
fn findPrefix(cache: *const Cache, file_path: []const u8) !PrefixedPath {
const gpa = cache.gpa;
- const resolved_path = try fs.path.resolve(gpa, &.{file_path});
+ const resolved_path = try std.fs.path.resolve(gpa, &.{file_path});
errdefer gpa.free(resolved_path);
return findPrefixResolved(cache, resolved_path);
}
@@ -102,9 +101,9 @@ fn findPrefixResolved(cache: *const Cache, resolved_path: []u8) !PrefixedPath {
}
fn getPrefixSubpath(allocator: Allocator, prefix: []const u8, path: []u8) ![]u8 {
- const relative = try fs.path.relative(allocator, prefix, path);
+ const relative = try std.fs.path.relative(allocator, prefix, path);
errdefer allocator.free(relative);
- var component_iterator = fs.path.NativeComponentIterator.init(relative);
+ var component_iterator = std.fs.path.NativeComponentIterator.init(relative);
if (component_iterator.root() != null) {
return error.NotASubPath;
}
@@ -145,17 +144,17 @@ pub const File = struct {
max_file_size: ?usize,
/// Populated if the user calls `addOpenedFile`.
/// The handle is not owned here.
- handle: ?fs.File,
+ handle: ?Io.File,
stat: Stat,
bin_digest: BinDigest,
contents: ?[]const u8,
pub const Stat = struct {
- inode: fs.File.INode,
+ inode: Io.File.INode,
size: u64,
mtime: Io.Timestamp,
- pub fn fromFs(fs_stat: fs.File.Stat) Stat {
+ pub fn fromFs(fs_stat: Io.File.Stat) Stat {
return .{
.inode = fs_stat.inode,
.size = fs_stat.size,
@@ -178,7 +177,7 @@ pub const File = struct {
file.max_file_size = if (file.max_file_size) |old| @max(old, new) else new;
}
- pub fn updateHandle(file: *File, new_handle: ?fs.File) void {
+ pub fn updateHandle(file: *File, new_handle: ?Io.File) void {
const handle = new_handle orelse return;
file.handle = handle;
}
@@ -293,16 +292,16 @@ pub fn binToHex(bin_digest: BinDigest) HexDigest {
}
pub const Lock = struct {
- manifest_file: fs.File,
+ manifest_file: Io.File,
- pub fn release(lock: *Lock) void {
+ pub fn release(lock: *Lock, io: Io) void {
if (builtin.os.tag == .windows) {
// Windows does not guarantee that locks are immediately unlocked when
// the file handle is closed. See LockFileEx documentation.
- lock.manifest_file.unlock();
+ lock.manifest_file.unlock(io);
}
- lock.manifest_file.close();
+ lock.manifest_file.close(io);
lock.* = undefined;
}
};
@@ -311,7 +310,7 @@ pub const Manifest = struct {
cache: *Cache,
/// Current state for incremental hashing.
hash: HashHelper,
- manifest_file: ?fs.File,
+ manifest_file: ?Io.File,
manifest_dirty: bool,
/// Set this flag to true before calling hit() in order to indicate that
/// upon a cache hit, the code using the cache will not modify the files
@@ -332,9 +331,9 @@ pub const Manifest = struct {
pub const Diagnostic = union(enum) {
none,
- manifest_create: fs.File.OpenError,
- manifest_read: fs.File.ReadError,
- manifest_lock: fs.File.LockError,
+ manifest_create: Io.File.OpenError,
+ manifest_read: Io.File.Reader.Error,
+ manifest_lock: Io.File.LockError,
file_open: FileOp,
file_stat: FileOp,
file_read: FileOp,
@@ -393,10 +392,10 @@ pub const Manifest = struct {
}
/// Same as `addFilePath` except the file has already been opened.
- pub fn addOpenedFile(m: *Manifest, path: Path, handle: ?fs.File, max_file_size: ?usize) !usize {
+ pub fn addOpenedFile(m: *Manifest, path: Path, handle: ?Io.File, max_file_size: ?usize) !usize {
const gpa = m.cache.gpa;
try m.files.ensureUnusedCapacity(gpa, 1);
- const resolved_path = try fs.path.resolve(gpa, &.{
+ const resolved_path = try std.fs.path.resolve(gpa, &.{
path.root_dir.path orelse ".",
path.subPathOrDot(),
});
@@ -417,7 +416,7 @@ pub const Manifest = struct {
return addFileInner(self, prefixed_path, null, max_file_size);
}
- fn addFileInner(self: *Manifest, prefixed_path: PrefixedPath, handle: ?fs.File, max_file_size: ?usize) usize {
+ fn addFileInner(self: *Manifest, prefixed_path: PrefixedPath, handle: ?Io.File, max_file_size: ?usize) usize {
const gop = self.files.getOrPutAssumeCapacityAdapted(prefixed_path, FilesAdapter{});
if (gop.found_existing) {
self.cache.gpa.free(prefixed_path.sub_path);
@@ -460,7 +459,7 @@ pub const Manifest = struct {
}
}
- pub fn addDepFile(self: *Manifest, dir: fs.Dir, dep_file_sub_path: []const u8) !void {
+ pub fn addDepFile(self: *Manifest, dir: Io.Dir, dep_file_sub_path: []const u8) !void {
assert(self.manifest_file == null);
return self.addDepFileMaybePost(dir, dep_file_sub_path);
}
@@ -503,11 +502,13 @@ pub const Manifest = struct {
@memcpy(manifest_file_path[0..self.hex_digest.len], &self.hex_digest);
manifest_file_path[hex_digest_len..][0..ext.len].* = ext.*;
+ const io = self.cache.io;
+
// We'll try to open the cache with an exclusive lock, but if that would block
// and `want_shared_lock` is set, a shared lock might be sufficient, so we'll
// open with a shared lock instead.
while (true) {
- if (self.cache.manifest_dir.createFile(&manifest_file_path, .{
+ if (self.cache.manifest_dir.createFile(io, &manifest_file_path, .{
.read = true,
.truncate = false,
.lock = .exclusive,
@@ -518,7 +519,7 @@ pub const Manifest = struct {
break;
} else |err| switch (err) {
error.WouldBlock => {
- self.manifest_file = self.cache.manifest_dir.openFile(&manifest_file_path, .{
+ self.manifest_file = self.cache.manifest_dir.openFile(io, &manifest_file_path, .{
.mode = .read_write,
.lock = .shared,
}) catch |e| {
@@ -542,7 +543,7 @@ pub const Manifest = struct {
return error.CacheCheckFailed;
}
- if (self.cache.manifest_dir.createFile(&manifest_file_path, .{
+ if (self.cache.manifest_dir.createFile(io, &manifest_file_path, .{
.read = true,
.truncate = false,
.lock = .exclusive,
@@ -702,7 +703,7 @@ pub const Manifest = struct {
const file_path = iter.rest();
const stat_size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat;
- const stat_inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat;
+ const stat_inode = fmt.parseInt(Io.File.INode, inode, 10) catch return error.InvalidFormat;
const stat_mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat;
const file_bin_digest = b: {
if (digest_str.len != hex_digest_len) return error.InvalidFormat;
@@ -758,7 +759,7 @@ pub const Manifest = struct {
const pp = cache_hash_file.prefixed_path;
const dir = self.cache.prefixes()[pp.prefix].handle;
- const this_file = dir.openFile(pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) {
+ const this_file = dir.openFile(io, pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) {
error.FileNotFound => {
// Every digest before this one has been populated successfully.
return .{ .miss = .{ .file_digests_populated = idx } };
@@ -772,9 +773,9 @@ pub const Manifest = struct {
return error.CacheCheckFailed;
},
};
- defer this_file.close();
+ defer this_file.close(io);
- const actual_stat = this_file.stat() catch |err| {
+ const actual_stat = this_file.stat(io) catch |err| {
self.diagnostic = .{ .file_stat = .{
.file_index = idx,
.err = err,
@@ -799,7 +800,7 @@ pub const Manifest = struct {
}
var actual_digest: BinDigest = undefined;
- hashFile(this_file, &actual_digest) catch |err| {
+ hashFile(io, this_file, &actual_digest) catch |err| {
self.diagnostic = .{ .file_read = .{
.file_index = idx,
.err = err,
@@ -872,17 +873,17 @@ pub const Manifest = struct {
if (man.want_refresh_timestamp) {
man.want_refresh_timestamp = false;
- var file = man.cache.manifest_dir.createFile("timestamp", .{
+ var file = man.cache.manifest_dir.createFile(io, "timestamp", .{
.read = true,
.truncate = true,
}) catch |err| switch (err) {
error.Canceled => return error.Canceled,
else => return true,
};
- defer file.close();
+ defer file.close(io);
// Save locally and also save globally (we still hold the global lock).
- const stat = file.stat() catch |err| switch (err) {
+ const stat = file.stat(io) catch |err| switch (err) {
error.Canceled => return error.Canceled,
else => return true,
};
@@ -894,19 +895,24 @@ pub const Manifest = struct {
}
fn populateFileHash(self: *Manifest, ch_file: *File) !void {
+ const io = self.cache.io;
+
if (ch_file.handle) |handle| {
return populateFileHashHandle(self, ch_file, handle);
} else {
const pp = ch_file.prefixed_path;
const dir = self.cache.prefixes()[pp.prefix].handle;
- const handle = try dir.openFile(pp.sub_path, .{});
- defer handle.close();
+ const handle = try dir.openFile(io, pp.sub_path, .{});
+ defer handle.close(io);
return populateFileHashHandle(self, ch_file, handle);
}
}
- fn populateFileHashHandle(self: *Manifest, ch_file: *File, handle: fs.File) !void {
- const actual_stat = try handle.stat();
+ fn populateFileHashHandle(self: *Manifest, ch_file: *File, io_file: Io.File) !void {
+ const io = self.cache.io;
+ const gpa = self.cache.gpa;
+
+ const actual_stat = try io_file.stat(io);
ch_file.stat = .{
.size = actual_stat.size,
.mtime = actual_stat.mtime,
@@ -920,19 +926,17 @@ pub const Manifest = struct {
}
if (ch_file.max_file_size) |max_file_size| {
- if (ch_file.stat.size > max_file_size) {
- return error.FileTooBig;
- }
+ if (ch_file.stat.size > max_file_size) return error.FileTooBig;
- const contents = try self.cache.gpa.alloc(u8, @as(usize, @intCast(ch_file.stat.size)));
- errdefer self.cache.gpa.free(contents);
+ // Hash while reading from disk, to keep the contents in the cpu
+ // cache while doing hashing.
+ const contents = try gpa.alloc(u8, @intCast(ch_file.stat.size));
+ errdefer gpa.free(contents);
- // Hash while reading from disk, to keep the contents in the cpu cache while
- // doing hashing.
var hasher = hasher_init;
var off: usize = 0;
while (true) {
- const bytes_read = try handle.pread(contents[off..], off);
+ const bytes_read = try io_file.readPositional(io, &.{contents[off..]}, off);
if (bytes_read == 0) break;
hasher.update(contents[off..][0..bytes_read]);
off += bytes_read;
@@ -941,7 +945,7 @@ pub const Manifest = struct {
ch_file.contents = contents;
} else {
- try hashFile(handle, &ch_file.bin_digest);
+ try hashFile(io, io_file, &ch_file.bin_digest);
}
self.hash.hasher.update(&ch_file.bin_digest);
@@ -1064,14 +1068,15 @@ pub const Manifest = struct {
self.hash.hasher.update(&new_file.bin_digest);
}
- pub fn addDepFilePost(self: *Manifest, dir: fs.Dir, dep_file_sub_path: []const u8) !void {
+ pub fn addDepFilePost(self: *Manifest, dir: Io.Dir, dep_file_sub_path: []const u8) !void {
assert(self.manifest_file != null);
return self.addDepFileMaybePost(dir, dep_file_sub_path);
}
- fn addDepFileMaybePost(self: *Manifest, dir: fs.Dir, dep_file_sub_path: []const u8) !void {
+ fn addDepFileMaybePost(self: *Manifest, dir: Io.Dir, dep_file_sub_path: []const u8) !void {
const gpa = self.cache.gpa;
- const dep_file_contents = try dir.readFileAlloc(dep_file_sub_path, gpa, .limited(manifest_file_size_max));
+ const io = self.cache.io;
+ const dep_file_contents = try dir.readFileAlloc(io, dep_file_sub_path, gpa, .limited(manifest_file_size_max));
defer gpa.free(dep_file_contents);
var error_buf: std.ArrayList(u8) = .empty;
@@ -1130,13 +1135,13 @@ pub const Manifest = struct {
/// lock from exclusive to shared.
pub fn writeManifest(self: *Manifest) !void {
assert(self.have_exclusive_lock);
-
+ const io = self.cache.io;
const manifest_file = self.manifest_file.?;
if (self.manifest_dirty) {
self.manifest_dirty = false;
var buffer: [4000]u8 = undefined;
- var fw = manifest_file.writer(&buffer);
+ var fw = manifest_file.writer(io, &buffer);
writeDirtyManifestToStream(self, &fw) catch |err| switch (err) {
error.WriteFailed => return fw.err.?,
else => |e| return e,
@@ -1148,7 +1153,7 @@ pub const Manifest = struct {
}
}
- fn writeDirtyManifestToStream(self: *Manifest, fw: *fs.File.Writer) !void {
+ fn writeDirtyManifestToStream(self: *Manifest, fw: *Io.File.Writer) !void {
try fw.interface.writeAll(manifest_header ++ "\n");
for (self.files.keys()) |file| {
try fw.interface.print("{d} {d} {d} {x} {d} {s}\n", .{
@@ -1165,13 +1170,11 @@ pub const Manifest = struct {
fn downgradeToSharedLock(self: *Manifest) !void {
if (!self.have_exclusive_lock) return;
+ const io = self.cache.io;
- // WASI does not currently support flock, so we bypass it here.
- // TODO: If/when flock is supported on WASI, this check should be removed.
- // See https://github.com/WebAssembly/wasi-filesystem/issues/2
- if (builtin.os.tag != .wasi or std.process.can_spawn or !builtin.single_threaded) {
+ if (std.process.can_spawn or !builtin.single_threaded) {
const manifest_file = self.manifest_file.?;
- try manifest_file.downgradeLock();
+ try manifest_file.downgradeLock(io);
}
self.have_exclusive_lock = false;
@@ -1180,16 +1183,14 @@ pub const Manifest = struct {
fn upgradeToExclusiveLock(self: *Manifest) error{CacheCheckFailed}!bool {
if (self.have_exclusive_lock) return false;
assert(self.manifest_file != null);
+ const io = self.cache.io;
- // WASI does not currently support flock, so we bypass it here.
- // TODO: If/when flock is supported on WASI, this check should be removed.
- // See https://github.com/WebAssembly/wasi-filesystem/issues/2
- if (builtin.os.tag != .wasi or std.process.can_spawn or !builtin.single_threaded) {
+ if (std.process.can_spawn or !builtin.single_threaded) {
const manifest_file = self.manifest_file.?;
// Here we intentionally have a period where the lock is released, in case there are
// other processes holding a shared lock.
- manifest_file.unlock();
- manifest_file.lock(.exclusive) catch |err| {
+ manifest_file.unlock(io);
+ manifest_file.lock(io, .exclusive) catch |err| {
self.diagnostic = .{ .manifest_lock = err };
return error.CacheCheckFailed;
};
@@ -1202,25 +1203,23 @@ pub const Manifest = struct {
/// The `Manifest` remains safe to deinit.
/// Don't forget to call `writeManifest` before this!
pub fn toOwnedLock(self: *Manifest) Lock {
- const lock: Lock = .{
- .manifest_file = self.manifest_file.?,
- };
-
- self.manifest_file = null;
- return lock;
+ defer self.manifest_file = null;
+ return .{ .manifest_file = self.manifest_file.? };
}
/// Releases the manifest file and frees any memory the Manifest was using.
/// `Manifest.hit` must be called first.
/// Don't forget to call `writeManifest` before this!
pub fn deinit(self: *Manifest) void {
+ const io = self.cache.io;
+
if (self.manifest_file) |file| {
if (builtin.os.tag == .windows) {
// See Lock.release for why this is required on Windows
- file.unlock();
+ file.unlock(io);
}
- file.close();
+ file.close(io);
}
for (self.files.keys()) |*file| {
file.deinit(self.cache.gpa);
@@ -1278,57 +1277,33 @@ pub const Manifest = struct {
}
};
-/// On operating systems that support symlinks, does a readlink. On other operating systems,
-/// uses the file contents. Windows supports symlinks but only with elevated privileges, so
-/// it is treated as not supporting symlinks.
-pub fn readSmallFile(dir: fs.Dir, sub_path: []const u8, buffer: []u8) ![]u8 {
- if (builtin.os.tag == .windows) {
- return dir.readFile(sub_path, buffer);
- } else {
- return dir.readLink(sub_path, buffer);
- }
-}
-
-/// On operating systems that support symlinks, does a symlink. On other operating systems,
-/// uses the file contents. Windows supports symlinks but only with elevated privileges, so
-/// it is treated as not supporting symlinks.
-/// `data` must be a valid UTF-8 encoded file path and 255 bytes or fewer.
-pub fn writeSmallFile(dir: fs.Dir, sub_path: []const u8, data: []const u8) !void {
- assert(data.len <= 255);
- if (builtin.os.tag == .windows) {
- return dir.writeFile(.{ .sub_path = sub_path, .data = data });
- } else {
- return dir.symLink(data, sub_path, .{});
- }
-}
-
-fn hashFile(file: fs.File, bin_digest: *[Hasher.mac_length]u8) fs.File.PReadError!void {
- var buf: [1024]u8 = undefined;
+fn hashFile(io: Io, file: Io.File, bin_digest: *[Hasher.mac_length]u8) Io.File.ReadPositionalError!void {
+ var buffer: [2048]u8 = undefined;
var hasher = hasher_init;
- var off: u64 = 0;
+ var offset: u64 = 0;
while (true) {
- const bytes_read = try file.pread(&buf, off);
- if (bytes_read == 0) break;
- hasher.update(buf[0..bytes_read]);
- off += bytes_read;
+ const n = try file.readPositional(io, &.{&buffer}, offset);
+ if (n == 0) break;
+ hasher.update(buffer[0..n]);
+ offset += n;
}
hasher.final(bin_digest);
}
// Create/Write a file, close it, then grab its stat.mtime timestamp.
-fn testGetCurrentFileTimestamp(dir: fs.Dir) !Io.Timestamp {
+fn testGetCurrentFileTimestamp(io: Io, dir: Io.Dir) !Io.Timestamp {
const test_out_file = "test-filetimestamp.tmp";
- var file = try dir.createFile(test_out_file, .{
+ var file = try dir.createFile(io, test_out_file, .{
.read = true,
.truncate = true,
});
defer {
- file.close();
- dir.deleteFile(test_out_file) catch {};
+ file.close(io);
+ dir.deleteFile(io, test_out_file) catch {};
}
- return (try file.stat()).mtime;
+ return (try file.stat(io)).mtime;
}
test "cache file and then recall it" {
@@ -1340,11 +1315,11 @@ test "cache file and then recall it" {
const temp_file = "test.txt";
const temp_manifest_dir = "temp_manifest_dir";
- try tmp.dir.writeFile(.{ .sub_path = temp_file, .data = "Hello, world!\n" });
+ try tmp.dir.writeFile(io, .{ .sub_path = temp_file, .data = "Hello, world!\n" });
// Wait for file timestamps to tick
- const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
- while ((try testGetCurrentFileTimestamp(tmp.dir)).nanoseconds == initial_time.nanoseconds) {
+ const initial_time = try testGetCurrentFileTimestamp(io, tmp.dir);
+ while ((try testGetCurrentFileTimestamp(io, tmp.dir)).nanoseconds == initial_time.nanoseconds) {
try std.Io.Clock.Duration.sleep(.{ .clock = .boot, .raw = .fromNanoseconds(1) }, io);
}
@@ -1355,10 +1330,10 @@ test "cache file and then recall it" {
var cache: Cache = .{
.io = io,
.gpa = testing.allocator,
- .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
+ .manifest_dir = try tmp.dir.createDirPathOpen(io, temp_manifest_dir, .{}),
};
cache.addPrefix(.{ .path = null, .handle = tmp.dir });
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
{
var ch = cache.obtain();
@@ -1406,11 +1381,11 @@ test "check that changing a file makes cache fail" {
const original_temp_file_contents = "Hello, world!\n";
const updated_temp_file_contents = "Hello, world; but updated!\n";
- try tmp.dir.writeFile(.{ .sub_path = temp_file, .data = original_temp_file_contents });
+ try tmp.dir.writeFile(io, .{ .sub_path = temp_file, .data = original_temp_file_contents });
// Wait for file timestamps to tick
- const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
- while ((try testGetCurrentFileTimestamp(tmp.dir)).nanoseconds == initial_time.nanoseconds) {
+ const initial_time = try testGetCurrentFileTimestamp(io, tmp.dir);
+ while ((try testGetCurrentFileTimestamp(io, tmp.dir)).nanoseconds == initial_time.nanoseconds) {
try std.Io.Clock.Duration.sleep(.{ .clock = .boot, .raw = .fromNanoseconds(1) }, io);
}
@@ -1421,10 +1396,10 @@ test "check that changing a file makes cache fail" {
var cache: Cache = .{
.io = io,
.gpa = testing.allocator,
- .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
+ .manifest_dir = try tmp.dir.createDirPathOpen(io, temp_manifest_dir, .{}),
};
cache.addPrefix(.{ .path = null, .handle = tmp.dir });
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
{
var ch = cache.obtain();
@@ -1443,7 +1418,7 @@ test "check that changing a file makes cache fail" {
try ch.writeManifest();
}
- try tmp.dir.writeFile(.{ .sub_path = temp_file, .data = updated_temp_file_contents });
+ try tmp.dir.writeFile(io, .{ .sub_path = temp_file, .data = updated_temp_file_contents });
{
var ch = cache.obtain();
@@ -1481,10 +1456,10 @@ test "no file inputs" {
var cache: Cache = .{
.io = io,
.gpa = testing.allocator,
- .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
+ .manifest_dir = try tmp.dir.createDirPathOpen(io, temp_manifest_dir, .{}),
};
cache.addPrefix(.{ .path = null, .handle = tmp.dir });
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
{
var man = cache.obtain();
@@ -1523,12 +1498,12 @@ test "Manifest with files added after initial hash work" {
const temp_file2 = "cache_hash_post_file_test2.txt";
const temp_manifest_dir = "cache_hash_post_file_manifest_dir";
- try tmp.dir.writeFile(.{ .sub_path = temp_file1, .data = "Hello, world!\n" });
- try tmp.dir.writeFile(.{ .sub_path = temp_file2, .data = "Hello world the second!\n" });
+ try tmp.dir.writeFile(io, .{ .sub_path = temp_file1, .data = "Hello, world!\n" });
+ try tmp.dir.writeFile(io, .{ .sub_path = temp_file2, .data = "Hello world the second!\n" });
// Wait for file timestamps to tick
- const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
- while ((try testGetCurrentFileTimestamp(tmp.dir)).nanoseconds == initial_time.nanoseconds) {
+ const initial_time = try testGetCurrentFileTimestamp(io, tmp.dir);
+ while ((try testGetCurrentFileTimestamp(io, tmp.dir)).nanoseconds == initial_time.nanoseconds) {
try std.Io.Clock.Duration.sleep(.{ .clock = .boot, .raw = .fromNanoseconds(1) }, io);
}
@@ -1540,10 +1515,10 @@ test "Manifest with files added after initial hash work" {
var cache: Cache = .{
.io = io,
.gpa = testing.allocator,
- .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
+ .manifest_dir = try tmp.dir.createDirPathOpen(io, temp_manifest_dir, .{}),
};
cache.addPrefix(.{ .path = null, .handle = tmp.dir });
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
{
var ch = cache.obtain();
@@ -1575,11 +1550,11 @@ test "Manifest with files added after initial hash work" {
try testing.expect(mem.eql(u8, &digest1, &digest2));
// Modify the file added after initial hash
- try tmp.dir.writeFile(.{ .sub_path = temp_file2, .data = "Hello world the second, updated\n" });
+ try tmp.dir.writeFile(io, .{ .sub_path = temp_file2, .data = "Hello world the second, updated\n" });
// Wait for file timestamps to tick
- const initial_time2 = try testGetCurrentFileTimestamp(tmp.dir);
- while ((try testGetCurrentFileTimestamp(tmp.dir)).nanoseconds == initial_time2.nanoseconds) {
+ const initial_time2 = try testGetCurrentFileTimestamp(io, tmp.dir);
+ while ((try testGetCurrentFileTimestamp(io, tmp.dir)).nanoseconds == initial_time2.nanoseconds) {
try std.Io.Clock.Duration.sleep(.{ .clock = .boot, .raw = .fromNanoseconds(1) }, io);
}
diff --git a/lib/std/Build/Cache/Directory.zig b/lib/std/Build/Cache/Directory.zig
index a105a91ed6..ce5f5b02bb 100644
--- a/lib/std/Build/Cache/Directory.zig
+++ b/lib/std/Build/Cache/Directory.zig
@@ -1,7 +1,9 @@
const Directory = @This();
+
const std = @import("../../std.zig");
-const assert = std.debug.assert;
+const Io = std.Io;
const fs = std.fs;
+const assert = std.debug.assert;
const fmt = std.fmt;
const Allocator = std.mem.Allocator;
@@ -9,7 +11,7 @@ const Allocator = std.mem.Allocator;
/// directly, but it is needed when passing the directory to a child process.
/// `null` means cwd.
path: ?[]const u8,
-handle: fs.Dir,
+handle: Io.Dir,
pub fn clone(d: Directory, arena: Allocator) Allocator.Error!Directory {
return .{
@@ -21,7 +23,7 @@ pub fn clone(d: Directory, arena: Allocator) Allocator.Error!Directory {
pub fn cwd() Directory {
return .{
.path = null,
- .handle = fs.cwd(),
+ .handle = .cwd(),
};
}
@@ -50,8 +52,8 @@ pub fn joinZ(self: Directory, allocator: Allocator, paths: []const []const u8) !
/// Whether or not the handle should be closed, or the path should be freed
/// is determined by usage, however this function is provided for convenience
/// if it happens to be what the caller needs.
-pub fn closeAndFree(self: *Directory, gpa: Allocator) void {
- self.handle.close();
+pub fn closeAndFree(self: *Directory, gpa: Allocator, io: Io) void {
+ self.handle.close(io);
if (self.path) |p| gpa.free(p);
self.* = undefined;
}
@@ -64,5 +66,5 @@ pub fn format(self: Directory, writer: *std.Io.Writer) std.Io.Writer.Error!void
}
pub fn eql(self: Directory, other: Directory) bool {
- return self.handle.fd == other.handle.fd;
+ return self.handle.handle == other.handle.handle;
}
diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig
index 92290cfdf4..2b7814c544 100644
--- a/lib/std/Build/Cache/Path.zig
+++ b/lib/std/Build/Cache/Path.zig
@@ -2,8 +2,8 @@ const Path = @This();
const std = @import("../../std.zig");
const Io = std.Io;
-const assert = std.debug.assert;
const fs = std.fs;
+const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
@@ -59,58 +59,56 @@ pub fn joinStringZ(p: Path, gpa: Allocator, sub_path: []const u8) Allocator.Erro
return p.root_dir.joinZ(gpa, parts);
}
-pub fn openFile(
- p: Path,
- sub_path: []const u8,
- flags: fs.File.OpenFlags,
-) !fs.File {
+pub fn openFile(p: Path, io: Io, sub_path: []const u8, flags: Io.File.OpenFlags) !Io.File {
var buf: [fs.max_path_bytes]u8 = undefined;
const joined_path = if (p.sub_path.len == 0) sub_path else p: {
break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
p.sub_path, sub_path,
}) catch return error.NameTooLong;
};
- return p.root_dir.handle.openFile(joined_path, flags);
+ return p.root_dir.handle.openFile(io, joined_path, flags);
}
pub fn openDir(
p: Path,
+ io: Io,
sub_path: []const u8,
- args: fs.Dir.OpenOptions,
-) fs.Dir.OpenError!fs.Dir {
+ args: Io.Dir.OpenOptions,
+) Io.Dir.OpenError!Io.Dir {
var buf: [fs.max_path_bytes]u8 = undefined;
const joined_path = if (p.sub_path.len == 0) sub_path else p: {
break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
p.sub_path, sub_path,
}) catch return error.NameTooLong;
};
- return p.root_dir.handle.openDir(joined_path, args);
+ return p.root_dir.handle.openDir(io, joined_path, args);
}
-pub fn makeOpenPath(p: Path, sub_path: []const u8, opts: fs.Dir.OpenOptions) !fs.Dir {
+pub fn createDirPathOpen(p: Path, io: Io, sub_path: []const u8, opts: Io.Dir.OpenOptions) !Io.Dir {
var buf: [fs.max_path_bytes]u8 = undefined;
const joined_path = if (p.sub_path.len == 0) sub_path else p: {
break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
p.sub_path, sub_path,
}) catch return error.NameTooLong;
};
- return p.root_dir.handle.makeOpenPath(joined_path, opts);
+ return p.root_dir.handle.createDirPathOpen(io, joined_path, opts);
}
-pub fn statFile(p: Path, sub_path: []const u8) !fs.Dir.Stat {
+pub fn statFile(p: Path, io: Io, sub_path: []const u8) !Io.Dir.Stat {
var buf: [fs.max_path_bytes]u8 = undefined;
const joined_path = if (p.sub_path.len == 0) sub_path else p: {
break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
p.sub_path, sub_path,
}) catch return error.NameTooLong;
};
- return p.root_dir.handle.statFile(joined_path);
+ return p.root_dir.handle.statFile(io, joined_path, .{});
}
pub fn atomicFile(
p: Path,
+ io: Io,
sub_path: []const u8,
- options: fs.Dir.AtomicFileOptions,
+ options: Io.Dir.AtomicFileOptions,
buf: *[fs.max_path_bytes]u8,
) !fs.AtomicFile {
const joined_path = if (p.sub_path.len == 0) sub_path else p: {
@@ -118,27 +116,27 @@ pub fn atomicFile(
p.sub_path, sub_path,
}) catch return error.NameTooLong;
};
- return p.root_dir.handle.atomicFile(joined_path, options);
+ return p.root_dir.handle.atomicFile(io, joined_path, options);
}
-pub fn access(p: Path, sub_path: []const u8, flags: Io.Dir.AccessOptions) !void {
+pub fn access(p: Path, io: Io, sub_path: []const u8, flags: Io.Dir.AccessOptions) !void {
var buf: [fs.max_path_bytes]u8 = undefined;
const joined_path = if (p.sub_path.len == 0) sub_path else p: {
break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
p.sub_path, sub_path,
}) catch return error.NameTooLong;
};
- return p.root_dir.handle.access(joined_path, flags);
+ return p.root_dir.handle.access(io, joined_path, flags);
}
-pub fn makePath(p: Path, sub_path: []const u8) !void {
+pub fn createDirPath(p: Path, io: Io, sub_path: []const u8) !void {
var buf: [fs.max_path_bytes]u8 = undefined;
const joined_path = if (p.sub_path.len == 0) sub_path else p: {
break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
p.sub_path, sub_path,
}) catch return error.NameTooLong;
};
- return p.root_dir.handle.makePath(joined_path);
+ return p.root_dir.handle.createDirPath(io, joined_path);
}
pub fn toString(p: Path, allocator: Allocator) Allocator.Error![]u8 {
@@ -180,7 +178,7 @@ pub fn formatEscapeChar(path: Path, writer: *Io.Writer) Io.Writer.Error!void {
}
pub fn format(self: Path, writer: *Io.Writer) Io.Writer.Error!void {
- if (std.fs.path.isAbsolute(self.sub_path)) {
+ if (fs.path.isAbsolute(self.sub_path)) {
try writer.writeAll(self.sub_path);
return;
}
@@ -225,9 +223,9 @@ pub const TableAdapter = struct {
pub fn hash(self: TableAdapter, a: Cache.Path) u32 {
_ = self;
- const seed = switch (@typeInfo(@TypeOf(a.root_dir.handle.fd))) {
- .pointer => @intFromPtr(a.root_dir.handle.fd),
- .int => @as(u32, @bitCast(a.root_dir.handle.fd)),
+ const seed = switch (@typeInfo(@TypeOf(a.root_dir.handle.handle))) {
+ .pointer => @intFromPtr(a.root_dir.handle.handle),
+ .int => @as(u32, @bitCast(a.root_dir.handle.handle)),
else => @compileError("unimplemented hash function"),
};
return @truncate(Hash.hash(seed, a.sub_path));
diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig
index 2897b29969..d308efdf70 100644
--- a/lib/std/Build/Fuzz.zig
+++ b/lib/std/Build/Fuzz.zig
@@ -9,14 +9,12 @@ const Allocator = std.mem.Allocator;
const log = std.log;
const Coverage = std.debug.Coverage;
const abi = Build.abi.fuzz;
-const tty = std.Io.tty;
const Fuzz = @This();
const build_runner = @import("root");
gpa: Allocator,
io: Io,
-ttyconf: tty.Config,
mode: Mode,
/// Allocated into `gpa`.
@@ -77,7 +75,6 @@ const CoverageMap = struct {
pub fn init(
gpa: Allocator,
io: Io,
- ttyconf: tty.Config,
all_steps: []const *Build.Step,
root_prog_node: std.Progress.Node,
mode: Mode,
@@ -95,7 +92,7 @@ pub fn init(
if (run.producer == null) continue;
if (run.fuzz_tests.items.len == 0) continue;
try steps.append(gpa, run);
- rebuild_group.async(io, rebuildTestsWorkerRun, .{ run, gpa, ttyconf, rebuild_node });
+ rebuild_group.async(io, rebuildTestsWorkerRun, .{ run, gpa, rebuild_node });
}
if (steps.items.len == 0) fatal("no fuzz tests found", .{});
@@ -115,7 +112,6 @@ pub fn init(
return .{
.gpa = gpa,
.io = io,
- .ttyconf = ttyconf,
.mode = mode,
.run_steps = run_steps,
.group = .init,
@@ -154,14 +150,16 @@ pub fn deinit(fuzz: *Fuzz) void {
fuzz.gpa.free(fuzz.run_steps);
}
-fn rebuildTestsWorkerRun(run: *Step.Run, gpa: Allocator, ttyconf: tty.Config, parent_prog_node: std.Progress.Node) void {
- rebuildTestsWorkerRunFallible(run, gpa, ttyconf, parent_prog_node) catch |err| {
+fn rebuildTestsWorkerRun(run: *Step.Run, gpa: Allocator, parent_prog_node: std.Progress.Node) void {
+ rebuildTestsWorkerRunFallible(run, gpa, parent_prog_node) catch |err| {
const compile = run.producer.?;
log.err("step '{s}': failed to rebuild in fuzz mode: {t}", .{ compile.step.name, err });
};
}
-fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, ttyconf: tty.Config, parent_prog_node: std.Progress.Node) !void {
+fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, parent_prog_node: std.Progress.Node) !void {
+ const graph = run.step.owner.graph;
+ const io = graph.io;
const compile = run.producer.?;
const prog_node = parent_prog_node.start(compile.step.name, 0);
defer prog_node.end();
@@ -174,9 +172,9 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, ttyconf: tty.Co
if (show_error_msgs or show_compile_errors or show_stderr) {
var buf: [256]u8 = undefined;
- const w, _ = std.debug.lockStderrWriter(&buf);
- defer std.debug.unlockStderrWriter();
- build_runner.printErrorMessages(gpa, &compile.step, .{}, w, ttyconf, .verbose, .indent) catch {};
+ const stderr = try io.lockStderr(&buf, graph.stderr_mode);
+ defer io.unlockStderr();
+ build_runner.printErrorMessages(gpa, &compile.step, .{}, stderr.terminal(), .verbose, .indent) catch {};
}
const rebuilt_bin_path = result catch |err| switch (err) {
@@ -186,12 +184,11 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, ttyconf: tty.Co
run.rebuilt_executable = try rebuilt_bin_path.join(gpa, compile.out_filename);
}
-fn fuzzWorkerRun(
- fuzz: *Fuzz,
- run: *Step.Run,
- unit_test_index: u32,
-) void {
- const gpa = run.step.owner.allocator;
+fn fuzzWorkerRun(fuzz: *Fuzz, run: *Step.Run, unit_test_index: u32) void {
+ const owner = run.step.owner;
+ const gpa = owner.allocator;
+ const graph = owner.graph;
+ const io = graph.io;
const test_name = run.cached_test_metadata.?.testName(unit_test_index);
const prog_node = fuzz.prog_node.start(test_name, 0);
@@ -200,9 +197,11 @@ fn fuzzWorkerRun(
run.rerunInFuzzMode(fuzz, unit_test_index, prog_node) catch |err| switch (err) {
error.MakeFailed => {
var buf: [256]u8 = undefined;
- const w, _ = std.debug.lockStderrWriter(&buf);
- defer std.debug.unlockStderrWriter();
- build_runner.printErrorMessages(gpa, &run.step, .{}, w, fuzz.ttyconf, .verbose, .indent) catch {};
+ const stderr = io.lockStderr(&buf, graph.stderr_mode) catch |e| switch (e) {
+ error.Canceled => return,
+ };
+ defer io.unlockStderr();
+ build_runner.printErrorMessages(gpa, &run.step, .{}, stderr.terminal(), .verbose, .indent) catch {};
return;
},
else => {
@@ -360,12 +359,13 @@ fn coverageRunCancelable(fuzz: *Fuzz) Io.Cancelable!void {
fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutOfMemory, AlreadyReported, Canceled }!void {
assert(fuzz.mode == .forever);
const ws = fuzz.mode.forever.ws;
+ const gpa = fuzz.gpa;
const io = fuzz.io;
try fuzz.coverage_mutex.lock(io);
defer fuzz.coverage_mutex.unlock(io);
- const gop = try fuzz.coverage_files.getOrPut(fuzz.gpa, coverage_id);
+ const gop = try fuzz.coverage_files.getOrPut(gpa, coverage_id);
if (gop.found_existing) {
// We are fuzzing the same executable with multiple threads.
// Perhaps the same unit test; perhaps a different one. In any
@@ -383,12 +383,13 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
.entry_points = .{},
.start_timestamp = ws.now(),
};
- errdefer gop.value_ptr.coverage.deinit(fuzz.gpa);
+ errdefer gop.value_ptr.coverage.deinit(gpa);
const rebuilt_exe_path = run_step.rebuilt_executable.?;
const target = run_step.producer.?.rootModuleTarget();
var debug_info = std.debug.Info.load(
- fuzz.gpa,
+ gpa,
+ io,
rebuilt_exe_path,
&gop.value_ptr.coverage,
target.ofmt,
@@ -399,21 +400,21 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
});
return error.AlreadyReported;
};
- defer debug_info.deinit(fuzz.gpa);
+ defer debug_info.deinit(gpa);
const coverage_file_path: Build.Cache.Path = .{
.root_dir = run_step.step.owner.cache_root,
.sub_path = "v/" ++ std.fmt.hex(coverage_id),
};
- var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| {
+ var coverage_file = coverage_file_path.root_dir.handle.openFile(io, coverage_file_path.sub_path, .{}) catch |err| {
log.err("step '{s}': failed to load coverage file '{f}': {t}", .{
run_step.step.name, coverage_file_path, err,
});
return error.AlreadyReported;
};
- defer coverage_file.close();
+ defer coverage_file.close(io);
- const file_size = coverage_file.getEndPos() catch |err| {
+ const file_size = coverage_file.length(io) catch |err| {
log.err("unable to check len of coverage file '{f}': {t}", .{ coverage_file_path, err });
return error.AlreadyReported;
};
@@ -433,14 +434,14 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
const header: *const abi.SeenPcsHeader = @ptrCast(mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
const pcs = header.pcAddrs();
- const source_locations = try fuzz.gpa.alloc(Coverage.SourceLocation, pcs.len);
- errdefer fuzz.gpa.free(source_locations);
+ const source_locations = try gpa.alloc(Coverage.SourceLocation, pcs.len);
+ errdefer gpa.free(source_locations);
// Unfortunately the PCs array that LLVM gives us from the 8-bit PC
// counters feature is not sorted.
var sorted_pcs: std.MultiArrayList(struct { pc: u64, index: u32, sl: Coverage.SourceLocation }) = .{};
- defer sorted_pcs.deinit(fuzz.gpa);
- try sorted_pcs.resize(fuzz.gpa, pcs.len);
+ defer sorted_pcs.deinit(gpa);
+ try sorted_pcs.resize(gpa, pcs.len);
@memcpy(sorted_pcs.items(.pc), pcs);
for (sorted_pcs.items(.index), 0..) |*v, i| v.* = @intCast(i);
sorted_pcs.sortUnstable(struct {
@@ -451,7 +452,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
}
}{ .addrs = sorted_pcs.items(.pc) });
- debug_info.resolveAddresses(fuzz.gpa, sorted_pcs.items(.pc), sorted_pcs.items(.sl)) catch |err| {
+ debug_info.resolveAddresses(gpa, io, sorted_pcs.items(.pc), sorted_pcs.items(.sl)) catch |err| {
log.err("failed to resolve addresses to source locations: {t}", .{err});
return error.AlreadyReported;
};
@@ -528,12 +529,12 @@ pub fn waitAndPrintReport(fuzz: *Fuzz) void {
.root_dir = cov.run.step.owner.cache_root,
.sub_path = "v/" ++ std.fmt.hex(cov.id),
};
- var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| {
+ var coverage_file = coverage_file_path.root_dir.handle.openFile(io, coverage_file_path.sub_path, .{}) catch |err| {
fatal("step '{s}': failed to load coverage file '{f}': {t}", .{
cov.run.step.name, coverage_file_path, err,
});
};
- defer coverage_file.close();
+ defer coverage_file.close(io);
const fuzz_abi = std.Build.abi.fuzz;
var rbuf: [0x1000]u8 = undefined;
diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index c247e69461..243dee8604 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -117,7 +117,6 @@ pub const MakeOptions = struct {
// it currently breaks because `std.net.Address` doesn't work there. Work around for now.
.wasm32 => void,
},
- ttyconf: std.Io.tty.Config,
/// If set, this is a timeout to enforce on all individual unit tests, in nanoseconds.
unit_test_timeout_ns: ?u64,
/// Not to be confused with `Build.allocator`, which is an alias of `Build.graph.arena`.
@@ -329,16 +328,17 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
}
/// For debugging purposes, prints identifying information about this Step.
-pub fn dump(step: *Step, w: *Io.Writer, tty_config: Io.tty.Config) void {
+pub fn dump(step: *Step, t: Io.Terminal) void {
+ const w = t.writer;
if (step.debug_stack_trace.instruction_addresses.len > 0) {
w.print("name: '{s}'. creation stack trace:\n", .{step.name}) catch {};
- std.debug.writeStackTrace(&step.debug_stack_trace, w, tty_config) catch {};
+ std.debug.writeStackTrace(&step.debug_stack_trace, t) catch {};
} else {
const field = "debug_stack_frames_count";
comptime assert(@hasField(Build, field));
- tty_config.setColor(w, .yellow) catch {};
+ t.setColor(.yellow) catch {};
w.print("name: '{s}'. no stack trace collected for this step, see std.Build." ++ field ++ "\n", .{step.name}) catch {};
- tty_config.setColor(w, .reset) catch {};
+ t.setColor(.reset) catch {};
}
}
@@ -350,6 +350,7 @@ pub fn captureChildProcess(
argv: []const []const u8,
) !std.process.Child.RunResult {
const arena = s.owner.allocator;
+ const io = s.owner.graph.io;
// If an error occurs, it's happened in this command:
assert(s.result_failed_command == null);
@@ -358,8 +359,7 @@ pub fn captureChildProcess(
try handleChildProcUnsupported(s);
try handleVerbose(s.owner, null, argv);
- const result = std.process.Child.run(.{
- .allocator = arena,
+ const result = std.process.Child.run(arena, io, .{
.argv = argv,
.progress_node = progress_node,
}) catch |err| return s.fail("failed to run {s}: {t}", .{ argv[0], err });
@@ -401,6 +401,9 @@ pub fn evalZigProcess(
web_server: ?*Build.WebServer,
gpa: Allocator,
) !?Path {
+ const b = s.owner;
+ const io = b.graph.io;
+
// If an error occurs, it's happened in this command:
assert(s.result_failed_command == null);
s.result_failed_command = try allocPrintCmd(gpa, null, argv);
@@ -411,7 +414,7 @@ pub fn evalZigProcess(
const result = zigProcessUpdate(s, zp, watch, web_server, gpa) catch |err| switch (err) {
error.BrokenPipe => {
// Process restart required.
- const term = zp.child.wait() catch |e| {
+ const term = zp.child.wait(io) catch |e| {
return s.fail("unable to wait for {s}: {t}", .{ argv[0], e });
};
_ = term;
@@ -427,7 +430,7 @@ pub fn evalZigProcess(
if (s.result_error_msgs.items.len > 0 and result == null) {
// Crash detected.
- const term = zp.child.wait() catch |e| {
+ const term = zp.child.wait(io) catch |e| {
return s.fail("unable to wait for {s}: {t}", .{ argv[0], e });
};
s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0;
@@ -439,7 +442,6 @@ pub fn evalZigProcess(
return result;
}
assert(argv.len != 0);
- const b = s.owner;
const arena = b.allocator;
try handleChildProcUnsupported(s);
@@ -453,7 +455,7 @@ pub fn evalZigProcess(
child.request_resource_usage_statistics = true;
child.progress_node = prog_node;
- child.spawn() catch |err| return s.fail("failed to spawn zig compiler {s}: {t}", .{ argv[0], err });
+ child.spawn(io) catch |err| return s.fail("failed to spawn zig compiler {s}: {t}", .{ argv[0], err });
const zp = try gpa.create(ZigProcess);
zp.* = .{
@@ -474,10 +476,10 @@ pub fn evalZigProcess(
if (!watch) {
// Send EOF to stdin.
- zp.child.stdin.?.close();
+ zp.child.stdin.?.close(io);
zp.child.stdin = null;
- const term = zp.child.wait() catch |err| {
+ const term = zp.child.wait(io) catch |err| {
return s.fail("unable to wait for {s}: {t}", .{ argv[0], err });
};
s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0;
@@ -504,36 +506,34 @@ pub fn evalZigProcess(
return result;
}
-/// Wrapper around `std.fs.Dir.updateFile` that handles verbose and error output.
+/// Wrapper around `Io.Dir.updateFile` that handles verbose and error output.
pub fn installFile(s: *Step, src_lazy_path: Build.LazyPath, dest_path: []const u8) !Io.Dir.PrevStatus {
const b = s.owner;
const io = b.graph.io;
const src_path = src_lazy_path.getPath3(b, s);
try handleVerbose(b, null, &.{ "install", "-C", b.fmt("{f}", .{src_path}), dest_path });
- return Io.Dir.updateFile(src_path.root_dir.handle.adaptToNewApi(), io, src_path.sub_path, .cwd(), dest_path, .{}) catch |err| {
- return s.fail("unable to update file from '{f}' to '{s}': {t}", .{
- src_path, dest_path, err,
- });
- };
+ return Io.Dir.updateFile(src_path.root_dir.handle, io, src_path.sub_path, .cwd(), dest_path, .{}) catch |err|
+ return s.fail("unable to update file from '{f}' to '{s}': {t}", .{ src_path, dest_path, err });
}
-/// Wrapper around `std.fs.Dir.makePathStatus` that handles verbose and error output.
-pub fn installDir(s: *Step, dest_path: []const u8) !std.fs.Dir.MakePathStatus {
+/// Wrapper around `Io.Dir.createDirPathStatus` that handles verbose and error output.
+pub fn installDir(s: *Step, dest_path: []const u8) !Io.Dir.CreatePathStatus {
const b = s.owner;
+ const io = b.graph.io;
try handleVerbose(b, null, &.{ "install", "-d", dest_path });
- return std.fs.cwd().makePathStatus(dest_path) catch |err| {
+ return Io.Dir.cwd().createDirPathStatus(io, dest_path, .default_dir) catch |err|
return s.fail("unable to create dir '{s}': {t}", .{ dest_path, err });
- };
}
fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build.WebServer, gpa: Allocator) !?Path {
const b = s.owner;
const arena = b.allocator;
+ const io = b.graph.io;
var timer = try std.time.Timer.start();
- try sendMessage(zp.child.stdin.?, .update);
- if (!watch) try sendMessage(zp.child.stdin.?, .exit);
+ try sendMessage(io, zp.child.stdin.?, .update);
+ if (!watch) try sendMessage(io, zp.child.stdin.?, .exit);
var result: ?Path = null;
@@ -670,12 +670,12 @@ fn clearZigProcess(s: *Step, gpa: Allocator) void {
}
}
-fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
+fn sendMessage(io: Io, file: Io.File, tag: std.zig.Client.Message.Tag) !void {
const header: std.zig.Client.Message.Header = .{
.tag = tag,
.bytes_len = 0,
};
- var w = file.writer(&.{});
+ var w = file.writer(io, &.{});
w.interface.writeStruct(header, .little) catch |err| switch (err) {
error.WriteFailed => return w.err.?,
};
@@ -898,7 +898,7 @@ pub fn addWatchInput(step: *Step, lazy_file: Build.LazyPath) Allocator.Error!voi
try addWatchInputFromPath(step, .{
.root_dir = .{
.path = null,
- .handle = std.fs.cwd(),
+ .handle = Io.Dir.cwd(),
},
.sub_path = std.fs.path.dirname(path_string) orelse "",
}, std.fs.path.basename(path_string));
@@ -923,7 +923,7 @@ pub fn addDirectoryWatchInput(step: *Step, lazy_directory: Build.LazyPath) Alloc
try addDirectoryWatchInputFromPath(step, .{
.root_dir = .{
.path = null,
- .handle = std.fs.cwd(),
+ .handle = Io.Dir.cwd(),
},
.sub_path = path_string,
});
diff --git a/lib/std/Build/Step/CheckFile.zig b/lib/std/Build/Step/CheckFile.zig
index efeedc8b80..1c3813ca82 100644
--- a/lib/std/Build/Step/CheckFile.zig
+++ b/lib/std/Build/Step/CheckFile.zig
@@ -3,7 +3,9 @@
//! TODO: generalize the code in std.testing.expectEqualStrings and make this
//! CheckFile step produce those helpful diagnostics when there is not a match.
const CheckFile = @This();
+
const std = @import("std");
+const Io = std.Io;
const Step = std.Build.Step;
const fs = std.fs;
const mem = std.mem;
@@ -49,11 +51,12 @@ pub fn setName(check_file: *CheckFile, name: []const u8) void {
fn make(step: *Step, options: Step.MakeOptions) !void {
_ = options;
const b = step.owner;
+ const io = b.graph.io;
const check_file: *CheckFile = @fieldParentPtr("step", step);
try step.singleUnchangingWatchInput(check_file.source);
const src_path = check_file.source.getPath2(b, step);
- const contents = fs.cwd().readFileAlloc(src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| {
+ const contents = Io.Dir.cwd().readFileAlloc(io, src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| {
return step.fail("unable to read '{s}': {s}", .{
src_path, @errorName(err),
});
diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig
index c6c11ce2b9..ac8fafaa3f 100644
--- a/lib/std/Build/Step/CheckObject.zig
+++ b/lib/std/Build/Step/CheckObject.zig
@@ -547,12 +547,14 @@ pub fn checkComputeCompare(
fn make(step: *Step, make_options: Step.MakeOptions) !void {
_ = make_options;
const b = step.owner;
+ const io = b.graph.io;
const gpa = b.allocator;
const check_object: *CheckObject = @fieldParentPtr("step", step);
try step.singleUnchangingWatchInput(check_object.source);
const src_path = check_object.source.getPath3(b, step);
const contents = src_path.root_dir.handle.readFileAllocOptions(
+ io,
src_path.sub_path,
gpa,
.limited(check_object.max_bytes),
diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig
index c57c7750be..0454e5b79d 100644
--- a/lib/std/Build/Step/Compile.zig
+++ b/lib/std/Build/Step/Compile.zig
@@ -1,12 +1,15 @@
+const Compile = @This();
const builtin = @import("builtin");
+
const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
const fs = std.fs;
const assert = std.debug.assert;
const panic = std.debug.panic;
const StringHashMap = std.StringHashMap;
const Sha256 = std.crypto.hash.sha2.Sha256;
-const Allocator = mem.Allocator;
+const Allocator = std.mem.Allocator;
const Step = std.Build.Step;
const LazyPath = std.Build.LazyPath;
const PkgConfigPkg = std.Build.PkgConfigPkg;
@@ -15,7 +18,6 @@ const RunError = std.Build.RunError;
const Module = std.Build.Module;
const InstallDir = std.Build.InstallDir;
const GeneratedFile = std.Build.GeneratedFile;
-const Compile = @This();
const Path = std.Build.Cache.Path;
pub const base_id: Step.Id = .compile;
@@ -920,20 +922,24 @@ const CliNamedModules = struct {
}
};
-fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking_step: ?*Step) []const u8 {
+fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking_step: ?*Step) ![]const u8 {
+ const step = &compile.step;
+ const b = step.owner;
+ const graph = b.graph;
+ const io = graph.io;
const maybe_path: ?*GeneratedFile = @field(compile, tag_name);
const generated_file = maybe_path orelse {
- const w, const ttyconf = std.debug.lockStderrWriter(&.{});
- std.Build.dumpBadGetPathHelp(&compile.step, w, ttyconf, compile.step.owner, asking_step) catch {};
- std.debug.unlockStderrWriter();
+ const stderr = try io.lockStderr(&.{}, graph.stderr_mode);
+ std.Build.dumpBadGetPathHelp(&compile.step, stderr.terminal(), compile.step.owner, asking_step) catch {};
+ io.unlockStderr();
@panic("missing emit option for " ++ tag_name);
};
const path = generated_file.path orelse {
- const w, const ttyconf = std.debug.lockStderrWriter(&.{});
- std.Build.dumpBadGetPathHelp(&compile.step, w, ttyconf, compile.step.owner, asking_step) catch {};
- std.debug.unlockStderrWriter();
+ const stderr = try io.lockStderr(&.{}, graph.stderr_mode);
+ std.Build.dumpBadGetPathHelp(&compile.step, stderr.terminal(), compile.step.owner, asking_step) catch {};
+ io.unlockStderr();
@panic(tag_name ++ " is null. Is there a missing step dependency?");
};
@@ -1147,9 +1153,9 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
// For everything else, we directly link
// against the library file.
const full_path_lib = if (other_produces_implib)
- other.getGeneratedFilePath("generated_implib", &compile.step)
+ try other.getGeneratedFilePath("generated_implib", &compile.step)
else
- other.getGeneratedFilePath("generated_bin", &compile.step);
+ try other.getGeneratedFilePath("generated_bin", &compile.step);
try zig_args.append(full_path_lib);
total_linker_objects += 1;
@@ -1561,19 +1567,22 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
}
// -I and -L arguments that appear after the last --mod argument apply to all modules.
+ const cwd: Io.Dir = .cwd();
+ const io = b.graph.io;
+
for (b.search_prefixes.items) |search_prefix| {
- var prefix_dir = fs.cwd().openDir(search_prefix, .{}) catch |err| {
+ var prefix_dir = cwd.openDir(io, search_prefix, .{}) catch |err| {
return step.fail("unable to open prefix directory '{s}': {s}", .{
search_prefix, @errorName(err),
});
};
- defer prefix_dir.close();
+ defer prefix_dir.close(io);
// Avoid passing -L and -I flags for nonexistent directories.
// This prevents a warning, that should probably be upgraded to an error in Zig's
// CLI parsing code, when the linker sees an -L directory that does not exist.
- if (prefix_dir.access("lib", .{})) |_| {
+ if (prefix_dir.access(io, "lib", .{})) |_| {
try zig_args.appendSlice(&.{
"-L", b.pathJoin(&.{ search_prefix, "lib" }),
});
@@ -1584,7 +1593,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
}),
}
- if (prefix_dir.access("include", .{})) |_| {
+ if (prefix_dir.access(io, "include", .{})) |_| {
try zig_args.appendSlice(&.{
"-I", b.pathJoin(&.{ search_prefix, "include" }),
});
@@ -1660,7 +1669,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
args_length += arg.len + 1; // +1 to account for null terminator
}
if (args_length >= 30 * 1024) {
- try b.cache_root.handle.makePath("args");
+ try b.cache_root.handle.createDirPath(io, "args");
const args_to_escape = zig_args.items[2..];
var escaped_args = try std.array_list.Managed([]const u8).initCapacity(arena, args_to_escape.len);
@@ -1693,18 +1702,18 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
_ = try std.fmt.bufPrint(&args_hex_hash, "{x}", .{&args_hash});
const args_file = "args" ++ fs.path.sep_str ++ args_hex_hash;
- if (b.cache_root.handle.access(args_file, .{})) |_| {
+ if (b.cache_root.handle.access(io, args_file, .{})) |_| {
// The args file is already present from a previous run.
} else |err| switch (err) {
error.FileNotFound => {
- try b.cache_root.handle.makePath("tmp");
+ try b.cache_root.handle.createDirPath(io, "tmp");
const rand_int = std.crypto.random.int(u64);
const tmp_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
- try b.cache_root.handle.writeFile(.{ .sub_path = tmp_path, .data = args });
- defer b.cache_root.handle.deleteFile(tmp_path) catch {
+ try b.cache_root.handle.writeFile(io, .{ .sub_path = tmp_path, .data = args });
+ defer b.cache_root.handle.deleteFile(io, tmp_path) catch {
// It's fine if the temporary file can't be cleaned up.
};
- b.cache_root.handle.rename(tmp_path, args_file) catch |rename_err| switch (rename_err) {
+ b.cache_root.handle.rename(tmp_path, b.cache_root.handle, args_file, io) catch |rename_err| switch (rename_err) {
error.PathAlreadyExists => {
// The args file was created by another concurrent build process.
},
@@ -1816,18 +1825,20 @@ pub fn doAtomicSymLinks(
filename_name_only: []const u8,
) !void {
const b = step.owner;
+ const io = b.graph.io;
const out_dir = fs.path.dirname(output_path) orelse ".";
const out_basename = fs.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3
const major_only_path = b.pathJoin(&.{ out_dir, filename_major_only });
- fs.cwd().atomicSymLink(out_basename, major_only_path, .{}) catch |err| {
+ const cwd: Io.Dir = .cwd();
+ cwd.symLinkAtomic(io, out_basename, major_only_path, .{}) catch |err| {
return step.fail("unable to symlink {s} -> {s}: {s}", .{
major_only_path, out_basename, @errorName(err),
});
};
// sym link for libfoo.so to libfoo.so.1
const name_only_path = b.pathJoin(&.{ out_dir, filename_name_only });
- fs.cwd().atomicSymLink(filename_major_only, name_only_path, .{}) catch |err| {
+ cwd.symLinkAtomic(io, filename_major_only, name_only_path, .{}) catch |err| {
return step.fail("Unable to symlink {s} -> {s}: {s}", .{
name_only_path, filename_major_only, @errorName(err),
});
@@ -1897,7 +1908,7 @@ fn checkCompileErrors(compile: *Compile) !void {
try actual_eb.renderToWriter(.{
.include_reference_trace = false,
.include_source_line = false,
- }, &aw.writer, .no_color);
+ }, &aw.writer);
break :ae try aw.toOwnedSlice();
};
diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig
index df2419764d..b55efc0da4 100644
--- a/lib/std/Build/Step/ConfigHeader.zig
+++ b/lib/std/Build/Step/ConfigHeader.zig
@@ -1,5 +1,7 @@
-const std = @import("std");
const ConfigHeader = @This();
+
+const std = @import("std");
+const Io = std.Io;
const Step = std.Build.Step;
const Allocator = std.mem.Allocator;
const Writer = std.Io.Writer;
@@ -182,6 +184,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const gpa = b.allocator;
const arena = b.allocator;
+ const io = b.graph.io;
var man = b.graph.cache.obtain();
defer man.deinit();
@@ -205,7 +208,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
.autoconf_undef, .autoconf_at => |file_source| {
try bw.writeAll(c_generated_line);
const src_path = file_source.getPath2(b, step);
- const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| {
+ const contents = Io.Dir.cwd().readFileAlloc(io, src_path, arena, .limited(config_header.max_bytes)) catch |err| {
return step.fail("unable to read autoconf input file '{s}': {s}", .{
src_path, @errorName(err),
});
@@ -219,7 +222,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
.cmake => |file_source| {
try bw.writeAll(c_generated_line);
const src_path = file_source.getPath2(b, step);
- const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| {
+ const contents = Io.Dir.cwd().readFileAlloc(io, src_path, arena, .limited(config_header.max_bytes)) catch |err| {
return step.fail("unable to read cmake input file '{s}': {s}", .{
src_path, @errorName(err),
});
@@ -255,13 +258,13 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const sub_path = b.pathJoin(&.{ "o", &digest, config_header.include_path });
const sub_path_dirname = std.fs.path.dirname(sub_path).?;
- b.cache_root.handle.makePath(sub_path_dirname) catch |err| {
+ b.cache_root.handle.createDirPath(io, sub_path_dirname) catch |err| {
return step.fail("unable to make path '{f}{s}': {s}", .{
b.cache_root, sub_path_dirname, @errorName(err),
});
};
- b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = output }) catch |err| {
+ b.cache_root.handle.writeFile(io, .{ .sub_path = sub_path, .data = output }) catch |err| {
return step.fail("unable to write file '{f}{s}': {s}", .{
b.cache_root, sub_path, @errorName(err),
});
diff --git a/lib/std/Build/Step/InstallArtifact.zig b/lib/std/Build/Step/InstallArtifact.zig
index c203ae924b..019d465f01 100644
--- a/lib/std/Build/Step/InstallArtifact.zig
+++ b/lib/std/Build/Step/InstallArtifact.zig
@@ -119,6 +119,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
_ = options;
const install_artifact: *InstallArtifact = @fieldParentPtr("step", step);
const b = step.owner;
+ const io = b.graph.io;
var all_cached = true;
@@ -163,15 +164,15 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const src_dir_path = dir.source.getPath3(b, step);
const full_h_prefix = b.getInstallPath(h_dir, dir.dest_rel_path);
- var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| {
+ var src_dir = src_dir_path.root_dir.handle.openDir(io, src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| {
return step.fail("unable to open source directory '{f}': {s}", .{
src_dir_path, @errorName(err),
});
};
- defer src_dir.close();
+ defer src_dir.close(io);
var it = try src_dir.walk(b.allocator);
- next_entry: while (try it.next()) |entry| {
+ next_entry: while (try it.next(io)) |entry| {
for (dir.options.exclude_extensions) |ext| {
if (std.mem.endsWith(u8, entry.path, ext)) continue :next_entry;
}
diff --git a/lib/std/Build/Step/InstallDir.zig b/lib/std/Build/Step/InstallDir.zig
index fd8a7d113f..d03e72ca75 100644
--- a/lib/std/Build/Step/InstallDir.zig
+++ b/lib/std/Build/Step/InstallDir.zig
@@ -58,21 +58,20 @@ pub fn create(owner: *std.Build, options: Options) *InstallDir {
fn make(step: *Step, options: Step.MakeOptions) !void {
_ = options;
const b = step.owner;
+ const io = b.graph.io;
const install_dir: *InstallDir = @fieldParentPtr("step", step);
step.clearWatchInputs();
const arena = b.allocator;
const dest_prefix = b.getInstallPath(install_dir.options.install_dir, install_dir.options.install_subdir);
const src_dir_path = install_dir.options.source_dir.getPath3(b, step);
const need_derived_inputs = try step.addDirectoryWatchInput(install_dir.options.source_dir);
- var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| {
- return step.fail("unable to open source directory '{f}': {s}", .{
- src_dir_path, @errorName(err),
- });
+ var src_dir = src_dir_path.root_dir.handle.openDir(io, src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| {
+ return step.fail("unable to open source directory '{f}': {t}", .{ src_dir_path, err });
};
- defer src_dir.close();
+ defer src_dir.close(io);
var it = try src_dir.walk(arena);
var all_cached = true;
- next_entry: while (try it.next()) |entry| {
+ next_entry: while (try it.next(io)) |entry| {
for (install_dir.options.exclude_extensions) |ext| {
if (mem.endsWith(u8, entry.path, ext)) continue :next_entry;
}
diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig
index b5f058ddfc..ea0714adf9 100644
--- a/lib/std/Build/Step/ObjCopy.zig
+++ b/lib/std/Build/Step/ObjCopy.zig
@@ -3,7 +3,7 @@ const ObjCopy = @This();
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
-const File = std.fs.File;
+const File = std.Io.File;
const InstallDir = std.Build.InstallDir;
const Step = std.Build.Step;
const elf = std.elf;
@@ -143,6 +143,7 @@ pub fn getOutputSeparatedDebug(objcopy: *const ObjCopy) ?std.Build.LazyPath {
fn make(step: *Step, options: Step.MakeOptions) !void {
const prog_node = options.progress_node;
const b = step.owner;
+ const io = b.graph.io;
const objcopy: *ObjCopy = @fieldParentPtr("step", step);
try step.singleUnchangingWatchInput(objcopy.input_file);
@@ -176,7 +177,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const cache_path = "o" ++ fs.path.sep_str ++ digest;
const full_dest_path = try b.cache_root.join(b.allocator, &.{ cache_path, objcopy.basename });
const full_dest_path_debug = try b.cache_root.join(b.allocator, &.{ cache_path, b.fmt("{s}.debug", .{objcopy.basename}) });
- b.cache_root.handle.makePath(cache_path) catch |err| {
+ b.cache_root.handle.createDirPath(io, cache_path) catch |err| {
return step.fail("unable to make path {s}: {s}", .{ cache_path, @errorName(err) });
};
diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig
index 441928d5b8..610d417aea 100644
--- a/lib/std/Build/Step/Options.zig
+++ b/lib/std/Build/Step/Options.zig
@@ -1,12 +1,13 @@
-const std = @import("std");
+const Options = @This();
const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
const fs = std.fs;
const Step = std.Build.Step;
const GeneratedFile = std.Build.GeneratedFile;
const LazyPath = std.Build.LazyPath;
-const Options = @This();
-
pub const base_id: Step.Id = .options;
step: Step,
@@ -441,6 +442,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
_ = make_options;
const b = step.owner;
+ const io = b.graph.io;
const options: *Options = @fieldParentPtr("step", step);
for (options.args.items) |item| {
@@ -468,18 +470,15 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
// Optimize for the hot path. Stat the file, and if it already exists,
// cache hit.
- if (b.cache_root.handle.access(sub_path, .{})) |_| {
+ if (b.cache_root.handle.access(io, sub_path, .{})) |_| {
// This is the hot path, success.
step.result_cached = true;
return;
} else |outer_err| switch (outer_err) {
error.FileNotFound => {
const sub_dirname = fs.path.dirname(sub_path).?;
- b.cache_root.handle.makePath(sub_dirname) catch |e| {
- return step.fail("unable to make path '{f}{s}': {s}", .{
- b.cache_root, sub_dirname, @errorName(e),
- });
- };
+ b.cache_root.handle.createDirPath(io, sub_dirname) catch |e|
+ return step.fail("unable to make path '{f}{s}': {t}", .{ b.cache_root, sub_dirname, e });
const rand_int = std.crypto.random.int(u64);
const tmp_sub_path = "tmp" ++ fs.path.sep_str ++
@@ -487,40 +486,40 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
basename;
const tmp_sub_path_dirname = fs.path.dirname(tmp_sub_path).?;
- b.cache_root.handle.makePath(tmp_sub_path_dirname) catch |err| {
- return step.fail("unable to make temporary directory '{f}{s}': {s}", .{
- b.cache_root, tmp_sub_path_dirname, @errorName(err),
+ b.cache_root.handle.createDirPath(io, tmp_sub_path_dirname) catch |err| {
+ return step.fail("unable to make temporary directory '{f}{s}': {t}", .{
+ b.cache_root, tmp_sub_path_dirname, err,
});
};
- b.cache_root.handle.writeFile(.{ .sub_path = tmp_sub_path, .data = options.contents.items }) catch |err| {
- return step.fail("unable to write options to '{f}{s}': {s}", .{
- b.cache_root, tmp_sub_path, @errorName(err),
+ b.cache_root.handle.writeFile(io, .{ .sub_path = tmp_sub_path, .data = options.contents.items }) catch |err| {
+ return step.fail("unable to write options to '{f}{s}': {t}", .{
+ b.cache_root, tmp_sub_path, err,
});
};
- b.cache_root.handle.rename(tmp_sub_path, sub_path) catch |err| switch (err) {
+ b.cache_root.handle.rename(tmp_sub_path, b.cache_root.handle, sub_path, io) catch |err| switch (err) {
error.PathAlreadyExists => {
// Other process beat us to it. Clean up the temp file.
- b.cache_root.handle.deleteFile(tmp_sub_path) catch |e| {
- try step.addError("warning: unable to delete temp file '{f}{s}': {s}", .{
- b.cache_root, tmp_sub_path, @errorName(e),
+ b.cache_root.handle.deleteFile(io, tmp_sub_path) catch |e| {
+ try step.addError("warning: unable to delete temp file '{f}{s}': {t}", .{
+ b.cache_root, tmp_sub_path, e,
});
};
step.result_cached = true;
return;
},
else => {
- return step.fail("unable to rename options from '{f}{s}' to '{f}{s}': {s}", .{
- b.cache_root, tmp_sub_path,
- b.cache_root, sub_path,
- @errorName(err),
+ return step.fail("unable to rename options from '{f}{s}' to '{f}{s}': {t}", .{
+ b.cache_root, tmp_sub_path,
+ b.cache_root, sub_path,
+ err,
});
},
};
},
- else => |e| return step.fail("unable to access options file '{f}{s}': {s}", .{
- b.cache_root, sub_path, @errorName(e),
+ else => |e| return step.fail("unable to access options file '{f}{s}': {t}", .{
+ b.cache_root, sub_path, e,
}),
}
}
@@ -544,11 +543,11 @@ test Options {
.cache = .{
.io = io,
.gpa = arena.allocator(),
- .manifest_dir = std.fs.cwd(),
+ .manifest_dir = Io.Dir.cwd(),
},
.zig_exe = "test",
.env_map = std.process.EnvMap.init(arena.allocator()),
- .global_cache_root = .{ .path = "test", .handle = std.fs.cwd() },
+ .global_cache_root = .{ .path = "test", .handle = Io.Dir.cwd() },
.host = .{
.query = .{},
.result = try std.zig.system.resolveTargetQuery(io, .{}),
@@ -559,8 +558,8 @@ test Options {
var builder = try std.Build.create(
&graph,
- .{ .path = "test", .handle = std.fs.cwd() },
- .{ .path = "test", .handle = std.fs.cwd() },
+ .{ .path = "test", .handle = Io.Dir.cwd() },
+ .{ .path = "test", .handle = Io.Dir.cwd() },
&.{},
);
diff --git a/lib/std/Build/Step/RemoveDir.zig b/lib/std/Build/Step/RemoveDir.zig
index e2d4c02abc..6f933da9ee 100644
--- a/lib/std/Build/Step/RemoveDir.zig
+++ b/lib/std/Build/Step/RemoveDir.zig
@@ -27,6 +27,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
_ = options;
const b = step.owner;
+ const io = b.graph.io;
const remove_dir: *RemoveDir = @fieldParentPtr("step", step);
step.clearWatchInputs();
@@ -34,15 +35,11 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const full_doomed_path = remove_dir.doomed_path.getPath2(b, step);
- b.build_root.handle.deleteTree(full_doomed_path) catch |err| {
+ b.build_root.handle.deleteTree(io, full_doomed_path) catch |err| {
if (b.build_root.path) |base| {
- return step.fail("unable to recursively delete path '{s}/{s}': {s}", .{
- base, full_doomed_path, @errorName(err),
- });
+ return step.fail("unable to recursively delete path '{s}/{s}': {t}", .{ base, full_doomed_path, err });
} else {
- return step.fail("unable to recursively delete path '{s}': {s}", .{
- full_doomed_path, @errorName(err),
- });
+ return step.fail("unable to recursively delete path '{s}': {t}", .{ full_doomed_path, err });
}
};
}
diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig
index 28c09e1faf..c0ba7f0cee 100644
--- a/lib/std/Build/Step/Run.zig
+++ b/lib/std/Build/Step/Run.zig
@@ -1,15 +1,16 @@
-const std = @import("std");
+const Run = @This();
const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
const Build = std.Build;
-const Step = Build.Step;
-const fs = std.fs;
+const Step = std.Build.Step;
+const Dir = std.Io.Dir;
const mem = std.mem;
const process = std.process;
-const EnvMap = process.EnvMap;
+const EnvMap = std.process.EnvMap;
const assert = std.debug.assert;
-const Path = Build.Cache.Path;
-
-const Run = @This();
+const Path = std.Build.Cache.Path;
pub const base_id: Step.Id = .run;
@@ -25,19 +26,7 @@ cwd: ?Build.LazyPath,
env_map: ?*EnvMap,
/// Controls the `NO_COLOR` and `CLICOLOR_FORCE` environment variables.
-color: enum {
- /// `CLICOLOR_FORCE` is set, and `NO_COLOR` is unset.
- enable,
- /// `NO_COLOR` is set, and `CLICOLOR_FORCE` is unset.
- disable,
- /// If the build runner is using color, equivalent to `.enable`. Otherwise, equivalent to `.disable`.
- inherit,
- /// If stderr is captured or checked, equivalent to `.disable`. Otherwise, equivalent to `.inherit`.
- auto,
- /// The build runner does not modify the `CLICOLOR_FORCE` or `NO_COLOR` environment variables.
- /// They are treated like normal variables, so can be controlled through `setEnvironmentVariable`.
- manual,
-} = .auto,
+color: Color = .auto,
/// When `true` prevents `ZIG_PROGRESS` environment variable from being passed
/// to the child process, which otherwise would be used for the child to send
@@ -111,6 +100,20 @@ rebuilt_executable: ?Path,
/// If this Run step was produced by a Compile step, it is tracked here.
producer: ?*Step.Compile,
+pub const Color = enum {
+ /// `CLICOLOR_FORCE` is set, and `NO_COLOR` is unset.
+ enable,
+ /// `NO_COLOR` is set, and `CLICOLOR_FORCE` is unset.
+ disable,
+ /// If the build runner is using color, equivalent to `.enable`. Otherwise, equivalent to `.disable`.
+ inherit,
+ /// If stderr is captured or checked, equivalent to `.disable`. Otherwise, equivalent to `.inherit`.
+ auto,
+ /// The build runner does not modify the `CLICOLOR_FORCE` or `NO_COLOR` environment variables.
+ /// They are treated like normal variables, so can be controlled through `setEnvironmentVariable`.
+ manual,
+};
+
pub const StdIn = union(enum) {
none,
bytes: []const u8,
@@ -564,7 +567,7 @@ pub fn addPathDir(run: *Run, search_path: []const u8) void {
if (prev_path) |pp| {
const new_path = b.fmt("{s}{c}{s}", .{
pp,
- if (use_wine) fs.path.delimiter_windows else fs.path.delimiter,
+ if (use_wine) Dir.path.delimiter_windows else Dir.path.delimiter,
search_path,
});
env_map.put(key, new_path) catch @panic("OOM");
@@ -747,7 +750,7 @@ fn checksContainStderr(checks: []const StdIo.Check) bool {
fn convertPathArg(run: *Run, path: Build.Cache.Path) []const u8 {
const b = run.step.owner;
const path_str = path.toString(b.graph.arena) catch @panic("OOM");
- if (std.fs.path.isAbsolute(path_str)) {
+ if (Dir.path.isAbsolute(path_str)) {
// Absolute paths don't need changing.
return path_str;
}
@@ -755,19 +758,19 @@ fn convertPathArg(run: *Run, path: Build.Cache.Path) []const u8 {
const child_lazy_cwd = run.cwd orelse break :rel path_str;
const child_cwd = child_lazy_cwd.getPath3(b, &run.step).toString(b.graph.arena) catch @panic("OOM");
// Convert it from relative to *our* cwd, to relative to the *child's* cwd.
- break :rel std.fs.path.relative(b.graph.arena, child_cwd, path_str) catch @panic("OOM");
+ break :rel Dir.path.relative(b.graph.arena, child_cwd, path_str) catch @panic("OOM");
};
// Not every path can be made relative, e.g. if the path and the child cwd are on different
// disk designators on Windows. In that case, `relative` will return an absolute path which we can
// just return.
- if (std.fs.path.isAbsolute(child_cwd_rel)) {
+ if (Dir.path.isAbsolute(child_cwd_rel)) {
return child_cwd_rel;
}
// We're not done yet. In some cases this path must be prefixed with './':
// * On POSIX, the executable name cannot be a single component like 'foo'
// * Some executables might treat a leading '-' like a flag, which we must avoid
// There's no harm in it, so just *always* apply this prefix.
- return std.fs.path.join(b.graph.arena, &.{ ".", child_cwd_rel }) catch @panic("OOM");
+ return Dir.path.join(b.graph.arena, &.{ ".", child_cwd_rel }) catch @panic("OOM");
}
const IndexedOutput = struct {
@@ -845,13 +848,13 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
errdefer result.deinit();
result.writer.writeAll(file_plp.prefix) catch return error.OutOfMemory;
- const file = file_path.root_dir.handle.openFile(file_path.subPathOrDot(), .{}) catch |err| {
+ const file = file_path.root_dir.handle.openFile(io, file_path.subPathOrDot(), .{}) catch |err| {
return step.fail(
"unable to open input file '{f}': {t}",
.{ file_path, err },
);
};
- defer file.close();
+ defer file.close(io);
var buf: [1024]u8 = undefined;
var file_reader = file.reader(io, &buf);
@@ -964,15 +967,15 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
&digest,
);
- const output_dir_path = "o" ++ fs.path.sep_str ++ &digest;
+ const output_dir_path = "o" ++ Dir.path.sep_str ++ &digest;
for (output_placeholders.items) |placeholder| {
const output_sub_path = b.pathJoin(&.{ output_dir_path, placeholder.output.basename });
const output_sub_dir_path = switch (placeholder.tag) {
- .output_file => fs.path.dirname(output_sub_path).?,
+ .output_file => Dir.path.dirname(output_sub_path).?,
.output_directory => output_sub_path,
else => unreachable,
};
- b.cache_root.handle.makePath(output_sub_dir_path) catch |err| {
+ b.cache_root.handle.createDirPath(io, output_sub_dir_path) catch |err| {
return step.fail("unable to make path '{f}{s}': {s}", .{
b.cache_root, output_sub_dir_path, @errorName(err),
});
@@ -994,17 +997,17 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
// We do not know the final output paths yet, use temp paths to run the command.
const rand_int = std.crypto.random.int(u64);
- const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
+ const tmp_dir_path = "tmp" ++ Dir.path.sep_str ++ std.fmt.hex(rand_int);
for (output_placeholders.items) |placeholder| {
const output_components = .{ tmp_dir_path, placeholder.output.basename };
const output_sub_path = b.pathJoin(&output_components);
const output_sub_dir_path = switch (placeholder.tag) {
- .output_file => fs.path.dirname(output_sub_path).?,
+ .output_file => Dir.path.dirname(output_sub_path).?,
.output_directory => output_sub_path,
else => unreachable,
};
- b.cache_root.handle.makePath(output_sub_dir_path) catch |err| {
+ b.cache_root.handle.createDirPath(io, output_sub_dir_path) catch |err| {
return step.fail("unable to make path '{f}{s}': {s}", .{
b.cache_root, output_sub_dir_path, @errorName(err),
});
@@ -1022,7 +1025,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, options, null);
- const dep_file_dir = std.fs.cwd();
+ const dep_file_dir = Dir.cwd();
const dep_file_basename = dep_output_file.generated_file.getPath2(b, step);
if (has_side_effects)
try man.addDepFile(dep_file_dir, dep_file_basename)
@@ -1039,29 +1042,23 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
// Rename into place
if (any_output) {
- const o_sub_path = "o" ++ fs.path.sep_str ++ &digest;
+ const o_sub_path = "o" ++ Dir.path.sep_str ++ &digest;
- b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |err| {
+ b.cache_root.handle.rename(tmp_dir_path, b.cache_root.handle, o_sub_path, io) catch |err| {
if (err == error.PathAlreadyExists) {
- b.cache_root.handle.deleteTree(o_sub_path) catch |del_err| {
- return step.fail("unable to remove dir '{f}'{s}: {s}", .{
- b.cache_root,
- tmp_dir_path,
- @errorName(del_err),
+ b.cache_root.handle.deleteTree(io, o_sub_path) catch |del_err| {
+ return step.fail("unable to remove dir '{f}'{s}: {t}", .{
+ b.cache_root, tmp_dir_path, del_err,
});
};
- b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |retry_err| {
- return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {s}", .{
- b.cache_root, tmp_dir_path,
- b.cache_root, o_sub_path,
- @errorName(retry_err),
+ b.cache_root.handle.rename(tmp_dir_path, b.cache_root.handle, o_sub_path, io) catch |retry_err| {
+ return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {t}", .{
+ b.cache_root, tmp_dir_path, b.cache_root, o_sub_path, retry_err,
});
};
} else {
- return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {s}", .{
- b.cache_root, tmp_dir_path,
- b.cache_root, o_sub_path,
- @errorName(err),
+ return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {t}", .{
+ b.cache_root, tmp_dir_path, b.cache_root, o_sub_path, err,
});
}
};
@@ -1110,8 +1107,8 @@ pub fn rerunInFuzzMode(
errdefer result.deinit();
result.writer.writeAll(file_plp.prefix) catch return error.OutOfMemory;
- const file = try file_path.root_dir.handle.openFile(file_path.subPathOrDot(), .{});
- defer file.close();
+ const file = try file_path.root_dir.handle.openFile(io, file_path.subPathOrDot(), .{});
+ defer file.close(io);
var buf: [1024]u8 = undefined;
var file_reader = file.reader(io, &buf);
@@ -1144,12 +1141,11 @@ pub fn rerunInFuzzMode(
const has_side_effects = false;
const rand_int = std.crypto.random.int(u64);
- const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
+ const tmp_dir_path = "tmp" ++ Dir.path.sep_str ++ std.fmt.hex(rand_int);
try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, .{
.progress_node = prog_node,
.watch = undefined, // not used by `runCommand`
.web_server = null, // only needed for time reports
- .ttyconf = fuzz.ttyconf,
.unit_test_timeout_ns = null, // don't time out fuzz tests for now
.gpa = fuzz.gpa,
}, .{
@@ -1240,6 +1236,7 @@ fn runCommand(
const b = step.owner;
const arena = b.allocator;
const gpa = options.gpa;
+ const io = b.graph.io;
const cwd: ?[]const u8 = if (run.cwd) |lazy_cwd| lazy_cwd.getPath2(b, step) else null;
@@ -1260,33 +1257,6 @@ fn runCommand(
};
defer env_map.deinit();
- color: switch (run.color) {
- .manual => {},
- .enable => {
- try env_map.put("CLICOLOR_FORCE", "1");
- env_map.remove("NO_COLOR");
- },
- .disable => {
- try env_map.put("NO_COLOR", "1");
- env_map.remove("CLICOLOR_FORCE");
- },
- .inherit => switch (options.ttyconf) {
- .no_color, .windows_api => continue :color .disable,
- .escape_codes => continue :color .enable,
- },
- .auto => {
- const capture_stderr = run.captured_stderr != null or switch (run.stdio) {
- .check => |checks| checksContainStderr(checks.items),
- .infer_from_args, .inherit, .zig_test => false,
- };
- if (capture_stderr) {
- continue :color .disable;
- } else {
- continue :color .inherit;
- }
- },
- }
-
const opt_generic_result = spawnChildAndCollect(run, argv, &env_map, has_side_effects, options, fuzz_context) catch |err| term: {
// InvalidExe: cpu arch mismatch
// FileNotFound: can happen with a wrong dynamic linker path
@@ -1308,7 +1278,7 @@ fn runCommand(
const need_cross_libc = exe.is_linking_libc and
(root_target.isGnuLibC() or (root_target.isMuslLibC() and exe.linkage == .dynamic));
const other_target = exe.root_module.resolved_target.?.result;
- switch (std.zig.system.getExternalExecutor(&b.graph.host.result, &other_target, .{
+ switch (std.zig.system.getExternalExecutor(io, &b.graph.host.result, &other_target, .{
.qemu_fixes_dl = need_cross_libc and b.libc_runtimes_dir != null,
.link_libc = exe.is_linking_libc,
})) {
@@ -1468,8 +1438,8 @@ fn runCommand(
captured.output.generated_file.path = output_path;
const sub_path = b.pathJoin(&output_components);
- const sub_path_dirname = fs.path.dirname(sub_path).?;
- b.cache_root.handle.makePath(sub_path_dirname) catch |err| {
+ const sub_path_dirname = Dir.path.dirname(sub_path).?;
+ b.cache_root.handle.createDirPath(io, sub_path_dirname) catch |err| {
return step.fail("unable to make path '{f}{s}': {s}", .{
b.cache_root, sub_path_dirname, @errorName(err),
});
@@ -1480,7 +1450,7 @@ fn runCommand(
.leading => mem.trimStart(u8, stream.bytes.?, &std.ascii.whitespace),
.trailing => mem.trimEnd(u8, stream.bytes.?, &std.ascii.whitespace),
};
- b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = data }) catch |err| {
+ b.cache_root.handle.writeFile(io, .{ .sub_path = sub_path, .data = data }) catch |err| {
return step.fail("unable to write file '{f}{s}': {s}", .{
b.cache_root, sub_path, @errorName(err),
});
@@ -1589,6 +1559,8 @@ fn spawnChildAndCollect(
) !?EvalGenericResult {
const b = run.step.owner;
const arena = b.allocator;
+ const graph = b.graph;
+ const io = graph.io;
if (fuzz_context != null) {
assert(!has_side_effects);
@@ -1654,8 +1626,12 @@ fn spawnChildAndCollect(
if (!run.disable_zig_progress and !inherit) {
child.progress_node = options.progress_node;
}
- if (inherit) std.debug.lockStdErr();
- defer if (inherit) std.debug.unlockStdErr();
+ const terminal_mode: Io.Terminal.Mode = if (inherit) m: {
+ const stderr = try io.lockStderr(&.{}, graph.stderr_mode);
+ break :m stderr.terminal_mode;
+ } else .no_color;
+ defer if (inherit) io.unlockStderr();
+ try setColorEnvironmentVariables(run, env_map, terminal_mode);
var timer = try std.time.Timer.start();
const res = try evalGeneric(run, &child);
run.step.result_duration_ns = timer.read();
@@ -1663,6 +1639,35 @@ fn spawnChildAndCollect(
}
}
+fn setColorEnvironmentVariables(run: *Run, env_map: *EnvMap, terminal_mode: Io.Terminal.Mode) !void {
+ color: switch (run.color) {
+ .manual => {},
+ .enable => {
+ try env_map.put("CLICOLOR_FORCE", "1");
+ env_map.remove("NO_COLOR");
+ },
+ .disable => {
+ try env_map.put("NO_COLOR", "1");
+ env_map.remove("CLICOLOR_FORCE");
+ },
+ .inherit => switch (terminal_mode) {
+ .no_color, .windows_api => continue :color .disable,
+ .escape_codes => continue :color .enable,
+ },
+ .auto => {
+ const capture_stderr = run.captured_stderr != null or switch (run.stdio) {
+ .check => |checks| checksContainStderr(checks.items),
+ .infer_from_args, .inherit, .zig_test => false,
+ };
+ if (capture_stderr) {
+ continue :color .disable;
+ } else {
+ continue :color .inherit;
+ }
+ },
+ }
+}
+
const StdioPollEnum = enum { stdout, stderr };
fn evalZigTest(
@@ -1671,8 +1676,10 @@ fn evalZigTest(
options: Step.MakeOptions,
fuzz_context: ?FuzzContext,
) !EvalZigTestResult {
- const gpa = run.step.owner.allocator;
- const arena = run.step.owner.allocator;
+ const step_owner = run.step.owner;
+ const gpa = step_owner.allocator;
+ const arena = step_owner.allocator;
+ const io = step_owner.graph.io;
// We will update this every time a child runs.
run.step.result_peak_rss = 0;
@@ -1691,14 +1698,14 @@ fn evalZigTest(
};
while (true) {
- try child.spawn();
+ try child.spawn(io);
var poller = std.Io.poll(gpa, StdioPollEnum, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
var child_killed = false;
defer if (!child_killed) {
- _ = child.kill() catch {};
+ _ = child.kill(io) catch {};
poller.deinit();
run.step.result_peak_rss = @max(
run.step.result_peak_rss,
@@ -1724,11 +1731,11 @@ fn evalZigTest(
run.step.result_stderr = try arena.dupe(u8, poller.reader(.stderr).buffered());
// Clean up everything and wait for the child to exit.
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
poller.deinit();
child_killed = true;
- const term = try child.wait();
+ const term = try child.wait(io);
run.step.result_peak_rss = @max(
run.step.result_peak_rss,
child.resource_usage_statistics.getMaxRss() orelse 0,
@@ -1744,11 +1751,11 @@ fn evalZigTest(
poller.reader(.stderr).tossBuffered();
// Clean up everything and wait for the child to exit.
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
poller.deinit();
child_killed = true;
- const term = try child.wait();
+ const term = try child.wait(io);
run.step.result_peak_rss = @max(
run.step.result_peak_rss,
child.resource_usage_statistics.getMaxRss() orelse 0,
@@ -1836,6 +1843,7 @@ fn pollZigTest(
switch (ctx.fuzz.mode) {
.forever => {
sendRunFuzzTestMessage(
+ io,
child.stdin.?,
ctx.unit_test_index,
.forever,
@@ -1844,6 +1852,7 @@ fn pollZigTest(
},
.limit => |limit| {
sendRunFuzzTestMessage(
+ io,
child.stdin.?,
ctx.unit_test_index,
.iterations,
@@ -1853,11 +1862,11 @@ fn pollZigTest(
}
} else if (opt_metadata.*) |*md| {
// Previous unit test process died or was killed; we're continuing where it left off
- requestNextTest(child.stdin.?, md, &sub_prog_node) catch |err| return .{ .write_failed = err };
+ requestNextTest(io, child.stdin.?, md, &sub_prog_node) catch |err| return .{ .write_failed = err };
} else {
// Running unit tests normally
run.fuzz_tests.clearRetainingCapacity();
- sendMessage(child.stdin.?, .query_test_metadata) catch |err| return .{ .write_failed = err };
+ sendMessage(io, child.stdin.?, .query_test_metadata) catch |err| return .{ .write_failed = err };
}
var active_test_index: ?u32 = null;
@@ -1973,7 +1982,7 @@ fn pollZigTest(
active_test_index = null;
if (timer) |*t| t.reset();
- requestNextTest(child.stdin.?, &opt_metadata.*.?, &sub_prog_node) catch |err| return .{ .write_failed = err };
+ requestNextTest(io, child.stdin.?, &opt_metadata.*.?, &sub_prog_node) catch |err| return .{ .write_failed = err };
},
.test_started => {
active_test_index = opt_metadata.*.?.next_index - 1;
@@ -2022,7 +2031,7 @@ fn pollZigTest(
active_test_index = null;
if (timer) |*t| md.ns_per_test[tr_hdr.index] = t.lap();
- requestNextTest(child.stdin.?, md, &sub_prog_node) catch |err| return .{ .write_failed = err };
+ requestNextTest(io, child.stdin.?, md, &sub_prog_node) catch |err| return .{ .write_failed = err };
},
.coverage_id => {
coverage_id = body_r.takeInt(u64, .little) catch unreachable;
@@ -2093,7 +2102,7 @@ pub const CachedTestMetadata = struct {
}
};
-fn requestNextTest(in: fs.File, metadata: *TestMetadata, sub_prog_node: *?std.Progress.Node) !void {
+fn requestNextTest(io: Io, in: Io.File, metadata: *TestMetadata, sub_prog_node: *?std.Progress.Node) !void {
while (metadata.next_index < metadata.names.len) {
const i = metadata.next_index;
metadata.next_index += 1;
@@ -2104,31 +2113,31 @@ fn requestNextTest(in: fs.File, metadata: *TestMetadata, sub_prog_node: *?std.Pr
if (sub_prog_node.*) |n| n.end();
sub_prog_node.* = metadata.prog_node.start(name, 0);
- try sendRunTestMessage(in, .run_test, i);
+ try sendRunTestMessage(io, in, .run_test, i);
return;
} else {
metadata.next_index = std.math.maxInt(u32); // indicate that all tests are done
- try sendMessage(in, .exit);
+ try sendMessage(io, in, .exit);
}
}
-fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
+fn sendMessage(io: Io, file: Io.File, tag: std.zig.Client.Message.Tag) !void {
const header: std.zig.Client.Message.Header = .{
.tag = tag,
.bytes_len = 0,
};
- var w = file.writer(&.{});
+ var w = file.writer(io, &.{});
w.interface.writeStruct(header, .little) catch |err| switch (err) {
error.WriteFailed => return w.err.?,
};
}
-fn sendRunTestMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag, index: u32) !void {
+fn sendRunTestMessage(io: Io, file: Io.File, tag: std.zig.Client.Message.Tag, index: u32) !void {
const header: std.zig.Client.Message.Header = .{
.tag = tag,
.bytes_len = 4,
};
- var w = file.writer(&.{});
+ var w = file.writer(io, &.{});
w.interface.writeStruct(header, .little) catch |err| switch (err) {
error.WriteFailed => return w.err.?,
};
@@ -2138,7 +2147,8 @@ fn sendRunTestMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag, index:
}
fn sendRunFuzzTestMessage(
- file: std.fs.File,
+ io: Io,
+ file: Io.File,
index: u32,
kind: std.Build.abi.fuzz.LimitKind,
amount_or_instance: u64,
@@ -2147,7 +2157,7 @@ fn sendRunFuzzTestMessage(
.tag = .start_fuzzing,
.bytes_len = 4 + 1 + 8,
};
- var w = file.writer(&.{});
+ var w = file.writer(io, &.{});
w.interface.writeStruct(header, .little) catch |err| switch (err) {
error.WriteFailed => return w.err.?,
};
@@ -2167,30 +2177,30 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
const io = b.graph.io;
const arena = b.allocator;
- try child.spawn();
- errdefer _ = child.kill() catch {};
+ try child.spawn(io);
+ errdefer _ = child.kill(io) catch {};
try child.waitForSpawn();
switch (run.stdin) {
.bytes => |bytes| {
- child.stdin.?.writeAll(bytes) catch |err| {
- return run.step.fail("unable to write stdin: {s}", .{@errorName(err)});
+ child.stdin.?.writeStreamingAll(io, bytes) catch |err| {
+ return run.step.fail("unable to write stdin: {t}", .{err});
};
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
},
.lazy_path => |lazy_path| {
const path = lazy_path.getPath3(b, &run.step);
- const file = path.root_dir.handle.openFile(path.subPathOrDot(), .{}) catch |err| {
- return run.step.fail("unable to open stdin file: {s}", .{@errorName(err)});
+ const file = path.root_dir.handle.openFile(io, path.subPathOrDot(), .{}) catch |err| {
+ return run.step.fail("unable to open stdin file: {t}", .{err});
};
- defer file.close();
+ defer file.close(io);
// TODO https://github.com/ziglang/zig/issues/23955
var read_buffer: [1024]u8 = undefined;
var file_reader = file.reader(io, &read_buffer);
var write_buffer: [1024]u8 = undefined;
- var stdin_writer = child.stdin.?.writer(&write_buffer);
+ var stdin_writer = child.stdin.?.writer(io, &write_buffer);
_ = stdin_writer.interface.sendFileAll(&file_reader, .unlimited) catch |err| switch (err) {
error.ReadFailed => return run.step.fail("failed to read from {f}: {t}", .{
path, file_reader.err.?,
@@ -2204,7 +2214,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
stdin_writer.err.?,
}),
};
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
},
.none => {},
@@ -2263,7 +2273,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
run.step.result_peak_rss = child.resource_usage_statistics.getMaxRss() orelse 0;
return .{
- .term = try child.wait(),
+ .term = try child.wait(io),
.stdout = stdout_bytes,
.stderr = stderr_bytes,
};
@@ -2276,7 +2286,7 @@ fn addPathForDynLibs(run: *Run, artifact: *Step.Compile) void {
if (compile.root_module.resolved_target.?.result.os.tag == .windows and
compile.isDynamicLibrary())
{
- addPathDir(run, fs.path.dirname(compile.getEmittedBin().getPath2(b, &run.step)).?);
+ addPathDir(run, Dir.path.dirname(compile.getEmittedBin().getPath2(b, &run.step)).?);
}
}
}
diff --git a/lib/std/Build/Step/UpdateSourceFiles.zig b/lib/std/Build/Step/UpdateSourceFiles.zig
index 7cdb521d21..1c4c94f9cf 100644
--- a/lib/std/Build/Step/UpdateSourceFiles.zig
+++ b/lib/std/Build/Step/UpdateSourceFiles.zig
@@ -78,13 +78,13 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
var any_miss = false;
for (usf.output_source_files.items) |output_source_file| {
if (fs.path.dirname(output_source_file.sub_path)) |dirname| {
- b.build_root.handle.makePath(dirname) catch |err| {
+ b.build_root.handle.createDirPath(io, dirname) catch |err| {
return step.fail("unable to make path '{f}{s}': {t}", .{ b.build_root, dirname, err });
};
}
switch (output_source_file.contents) {
.bytes => |bytes| {
- b.build_root.handle.writeFile(.{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| {
+ b.build_root.handle.writeFile(io, .{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| {
return step.fail("unable to write file '{f}{s}': {t}", .{
b.build_root, output_source_file.sub_path, err,
});
@@ -99,7 +99,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
.cwd(),
io,
source_path,
- b.build_root.handle.adaptToNewApi(),
+ b.build_root.handle,
output_source_file.sub_path,
.{},
) catch |err| {
diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig
index 030c7c6811..145c7f9bb3 100644
--- a/lib/std/Build/Step/WriteFile.zig
+++ b/lib/std/Build/Step/WriteFile.zig
@@ -206,9 +206,9 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
}
}
- const open_dir_cache = try arena.alloc(fs.Dir, write_file.directories.items.len);
+ const open_dir_cache = try arena.alloc(Io.Dir, write_file.directories.items.len);
var open_dirs_count: usize = 0;
- defer closeDirs(open_dir_cache[0..open_dirs_count]);
+ defer Io.Dir.closeMany(io, open_dir_cache[0..open_dirs_count]);
for (write_file.directories.items, open_dir_cache) |dir, *open_dir_cache_elem| {
man.hash.addBytes(dir.sub_path);
@@ -218,7 +218,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const need_derived_inputs = try step.addDirectoryWatchInput(dir.source);
const src_dir_path = dir.source.getPath3(b, step);
- var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| {
+ var src_dir = src_dir_path.root_dir.handle.openDir(io, src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| {
return step.fail("unable to open source directory '{f}': {s}", .{
src_dir_path, @errorName(err),
});
@@ -228,7 +228,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
var it = try src_dir.walk(gpa);
defer it.deinit();
- while (try it.next()) |entry| {
+ while (try it.next(io)) |entry| {
if (!dir.options.pathIncluded(entry.path)) continue;
switch (entry.kind) {
@@ -259,16 +259,13 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
write_file.generated_directory.path = try b.cache_root.join(arena, &.{ "o", &digest });
- var cache_dir = b.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| {
- return step.fail("unable to make path '{f}{s}': {s}", .{
- b.cache_root, cache_path, @errorName(err),
- });
- };
- defer cache_dir.close();
+ var cache_dir = b.cache_root.handle.createDirPathOpen(io, cache_path, .{}) catch |err|
+ return step.fail("unable to make path '{f}{s}': {t}", .{ b.cache_root, cache_path, err });
+ defer cache_dir.close(io);
for (write_file.files.items) |file| {
if (fs.path.dirname(file.sub_path)) |dirname| {
- cache_dir.makePath(dirname) catch |err| {
+ cache_dir.createDirPath(io, dirname) catch |err| {
return step.fail("unable to make path '{f}{s}{c}{s}': {t}", .{
b.cache_root, cache_path, fs.path.sep, dirname, err,
});
@@ -276,7 +273,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
}
switch (file.contents) {
.bytes => |bytes| {
- cache_dir.writeFile(.{ .sub_path = file.sub_path, .data = bytes }) catch |err| {
+ cache_dir.writeFile(io, .{ .sub_path = file.sub_path, .data = bytes }) catch |err| {
return step.fail("unable to write file '{f}{s}{c}{s}': {t}", .{
b.cache_root, cache_path, fs.path.sep, file.sub_path, err,
});
@@ -284,7 +281,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
},
.copy => |file_source| {
const source_path = file_source.getPath2(b, step);
- const prev_status = Io.Dir.updateFile(.cwd(), io, source_path, cache_dir.adaptToNewApi(), file.sub_path, .{}) catch |err| {
+ const prev_status = Io.Dir.updateFile(.cwd(), io, source_path, cache_dir, file.sub_path, .{}) catch |err| {
return step.fail("unable to update file from '{s}' to '{f}{s}{c}{s}': {t}", .{
source_path, b.cache_root, cache_path, fs.path.sep, file.sub_path, err,
});
@@ -303,7 +300,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const dest_dirname = dir.sub_path;
if (dest_dirname.len != 0) {
- cache_dir.makePath(dest_dirname) catch |err| {
+ cache_dir.createDirPath(io, dest_dirname) catch |err| {
return step.fail("unable to make path '{f}{s}{c}{s}': {s}", .{
b.cache_root, cache_path, fs.path.sep, dest_dirname, @errorName(err),
});
@@ -312,19 +309,19 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
var it = try already_open_dir.walk(gpa);
defer it.deinit();
- while (try it.next()) |entry| {
+ while (try it.next(io)) |entry| {
if (!dir.options.pathIncluded(entry.path)) continue;
const src_entry_path = try src_dir_path.join(arena, entry.path);
const dest_path = b.pathJoin(&.{ dest_dirname, entry.path });
switch (entry.kind) {
- .directory => try cache_dir.makePath(dest_path),
+ .directory => try cache_dir.createDirPath(io, dest_path),
.file => {
const prev_status = Io.Dir.updateFile(
- src_entry_path.root_dir.handle.adaptToNewApi(),
+ src_entry_path.root_dir.handle,
io,
src_entry_path.sub_path,
- cache_dir.adaptToNewApi(),
+ cache_dir,
dest_path,
.{},
) catch |err| {
@@ -341,7 +338,3 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
try step.writeManifest(&man);
}
-
-fn closeDirs(dirs: []fs.Dir) void {
- for (dirs) |*d| d.close();
-}
diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig
index ca01376e02..9042241b55 100644
--- a/lib/std/Build/Watch.zig
+++ b/lib/std/Build/Watch.zig
@@ -122,7 +122,7 @@ const Os = switch (builtin.os.tag) {
}) catch return error.NameTooLong;
const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer);
stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle);
- try posix.name_to_handle_at(path.root_dir.handle.fd, adjusted_path, stack_ptr, mount_id, std.os.linux.AT.HANDLE_FID);
+ try posix.name_to_handle_at(path.root_dir.handle.handle, adjusted_path, stack_ptr, mount_id, std.os.linux.AT.HANDLE_FID);
const stack_lfh: FileHandle = .{ .handle = stack_ptr };
return stack_lfh.clone(gpa);
}
@@ -222,7 +222,7 @@ const Os = switch (builtin.os.tag) {
posix.fanotify_mark(fan_fd, .{
.ADD = true,
.ONLYDIR = true,
- }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| {
+ }, fan_mask, path.root_dir.handle.handle, path.subPathOrDot()) catch |err| {
fatal("unable to watch {f}: {s}", .{ path, @errorName(err) });
};
}
@@ -275,7 +275,7 @@ const Os = switch (builtin.os.tag) {
posix.fanotify_mark(fan_fd, .{
.REMOVE = true,
.ONLYDIR = true,
- }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| switch (err) {
+ }, fan_mask, path.root_dir.handle.handle, path.subPathOrDot()) catch |err| switch (err) {
error.FileNotFound => {}, // Expected, harmless.
else => |e| std.log.warn("unable to unwatch '{f}': {s}", .{ path, @errorName(e) }),
};
@@ -350,10 +350,10 @@ const Os = switch (builtin.os.tag) {
}
fn init(gpa: Allocator, path: Cache.Path) !*@This() {
- // The following code is a drawn out NtCreateFile call. (mostly adapted from std.fs.Dir.makeOpenDirAccessMaskW)
+ // The following code is a drawn out NtCreateFile call. (mostly adapted from Io.Dir.makeOpenDirAccessMaskW)
// It's necessary in order to get the specific flags that are required when calling ReadDirectoryChangesW.
var dir_handle: windows.HANDLE = undefined;
- const root_fd = path.root_dir.handle.fd;
+ const root_fd = path.root_dir.handle.handle;
const sub_path = path.subPathOrDot();
const sub_path_w = try windows.sliceToPrefixedFileW(root_fd, sub_path);
const path_len_bytes = std.math.cast(u16, sub_path_w.len * 2) orelse return error.NameTooLong;
@@ -681,10 +681,10 @@ const Os = switch (builtin.os.tag) {
if (!gop.found_existing) {
const skip_open_dir = path.sub_path.len == 0;
const dir_fd = if (skip_open_dir)
- path.root_dir.handle.fd
+ path.root_dir.handle.handle
else
- posix.openat(path.root_dir.handle.fd, path.sub_path, dir_open_flags, 0) catch |err| {
- fatal("failed to open directory {f}: {s}", .{ path, @errorName(err) });
+ posix.openat(path.root_dir.handle.handle, path.sub_path, dir_open_flags, 0) catch |err| {
+ fatal("failed to open directory {f}: {t}", .{ path, err });
};
// Empirically the dir has to stay open or else no events are triggered.
errdefer if (!skip_open_dir) posix.close(dir_fd);
@@ -750,7 +750,7 @@ const Os = switch (builtin.os.tag) {
// to access that data via the dir_fd field.
const path = w.dir_table.keys()[i];
const dir_fd = if (path.sub_path.len == 0)
- path.root_dir.handle.fd
+ path.root_dir.handle.handle
else
handles.items(.dir_fd)[i];
assert(dir_fd != -1);
@@ -761,7 +761,7 @@ const Os = switch (builtin.os.tag) {
const last_dir_fd = fd: {
const last_path = w.dir_table.keys()[handles.len - 1];
const last_dir_fd = if (last_path.sub_path.len == 0)
- last_path.root_dir.handle.fd
+ last_path.root_dir.handle.handle
else
handles.items(.dir_fd)[handles.len - 1];
assert(last_dir_fd != -1);
diff --git a/lib/std/Build/Watch/FsEvents.zig b/lib/std/Build/Watch/FsEvents.zig
index 6131663993..2a48534b3a 100644
--- a/lib/std/Build/Watch/FsEvents.zig
+++ b/lib/std/Build/Watch/FsEvents.zig
@@ -102,10 +102,10 @@ pub fn init() error{ OpenFrameworkFailed, MissingCoreServicesSymbol }!FsEvents {
};
}
-pub fn deinit(fse: *FsEvents, gpa: Allocator) void {
+pub fn deinit(fse: *FsEvents, gpa: Allocator, io: Io) void {
dispatch_release(fse.waiting_semaphore);
dispatch_release(fse.dispatch_queue);
- fse.core_services.close();
+ fse.core_services.close(io);
gpa.free(fse.watch_roots);
fse.watch_paths.deinit(gpa);
@@ -487,6 +487,7 @@ const FSEventStreamEventFlags = packed struct(u32) {
};
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const watch_log = std.log.scoped(.watch);
diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig
index 2c865a8889..a2b35e3522 100644
--- a/lib/std/Build/WebServer.zig
+++ b/lib/std/Build/WebServer.zig
@@ -2,7 +2,6 @@ gpa: Allocator,
graph: *const Build.Graph,
all_steps: []const *Build.Step,
listen_address: net.IpAddress,
-ttyconf: Io.tty.Config,
root_prog_node: std.Progress.Node,
watch: bool,
@@ -52,7 +51,6 @@ pub fn notifyUpdate(ws: *WebServer) void {
pub const Options = struct {
gpa: Allocator,
- ttyconf: Io.tty.Config,
graph: *const std.Build.Graph,
all_steps: []const *Build.Step,
root_prog_node: std.Progress.Node,
@@ -98,7 +96,6 @@ pub fn init(opts: Options) WebServer {
return .{
.gpa = opts.gpa,
- .ttyconf = opts.ttyconf,
.graph = opts.graph,
.all_steps = all_steps,
.listen_address = opts.listen_address,
@@ -129,6 +126,7 @@ pub fn init(opts: Options) WebServer {
}
pub fn deinit(ws: *WebServer) void {
const gpa = ws.gpa;
+ const io = ws.graph.io;
gpa.free(ws.step_names_trailing);
gpa.free(ws.step_status_bits);
@@ -139,7 +137,7 @@ pub fn deinit(ws: *WebServer) void {
gpa.free(ws.time_report_update_times);
if (ws.serve_thread) |t| {
- if (ws.tcp_server) |*s| s.stream.close();
+ if (ws.tcp_server) |*s| s.stream.close(io);
t.join();
}
if (ws.tcp_server) |*s| s.deinit();
@@ -217,9 +215,9 @@ pub fn finishBuild(ws: *WebServer, opts: struct {
else => {},
}
if (@bitSizeOf(usize) != 64) {
- // Current implementation depends on posix.mmap()'s second parameter, `length: usize`,
- // being compatible with `std.fs.getEndPos() u64`'s return value. This is not the case
- // on 32-bit platforms.
+ // Current implementation depends on posix.mmap()'s second
+ // parameter, `length: usize`, being compatible with file system's
+ // u64 return value. This is not the case on 32-bit platforms.
// Affects or affected by issues #5185, #22523, and #22464.
std.process.fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)});
}
@@ -232,7 +230,6 @@ pub fn finishBuild(ws: *WebServer, opts: struct {
ws.fuzz = Fuzz.init(
ws.gpa,
ws.graph.io,
- ws.ttyconf,
ws.all_steps,
ws.root_prog_node,
.{ .forever = .{ .ws = ws } },
@@ -468,11 +465,12 @@ pub fn serveFile(
content_type: []const u8,
) !void {
const gpa = ws.gpa;
+ const io = ws.graph.io;
// The desired API is actually sendfile, which will require enhancing http.Server.
// We load the file with every request so that the user can make changes to the file
// and refresh the HTML page without restarting this server.
- const file_contents = path.root_dir.handle.readFileAlloc(path.sub_path, gpa, .limited(10 * 1024 * 1024)) catch |err| {
- log.err("failed to read '{f}': {s}", .{ path, @errorName(err) });
+ const file_contents = path.root_dir.handle.readFileAlloc(io, path.sub_path, gpa, .limited(10 * 1024 * 1024)) catch |err| {
+ log.err("failed to read '{f}': {t}", .{ path, err });
return error.AlreadyReported;
};
defer gpa.free(file_contents);
@@ -503,14 +501,14 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons
var archiver: std.tar.Writer = .{ .underlying_writer = &response.writer };
for (paths) |path| {
- var file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| {
+ var file = path.root_dir.handle.openFile(io, path.sub_path, .{}) catch |err| {
log.err("failed to open '{f}': {s}", .{ path, @errorName(err) });
continue;
};
- defer file.close();
- const stat = try file.stat();
+ defer file.close(io);
+ const stat = try file.stat(io);
var read_buffer: [1024]u8 = undefined;
- var file_reader: Io.File.Reader = .initSize(file.adaptToNewApi(), io, &read_buffer, stat.size);
+ var file_reader: Io.File.Reader = .initSize(file, io, &read_buffer, stat.size);
// TODO: this logic is completely bogus -- obviously so, because `path.root_dir.path` can
// be cwd-relative. This is also related to why linkification doesn't work in the fuzzer UI:
@@ -578,7 +576,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim
child.stdin_behavior = .Pipe;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Pipe;
- try child.spawn();
+ try child.spawn(io);
var poller = Io.poll(gpa, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
@@ -586,7 +584,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim
});
defer poller.deinit();
- try child.stdin.?.writeAll(@ptrCast(@as([]const std.zig.Client.Message.Header, &.{
+ try child.stdin.?.writeStreamingAll(io, @ptrCast(@as([]const std.zig.Client.Message.Header, &.{
.{ .tag = .update, .bytes_len = 0 },
.{ .tag = .exit, .bytes_len = 0 },
})));
@@ -634,10 +632,10 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim
}
// Send EOF to stdin.
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
- switch (try child.wait()) {
+ switch (try child.wait(io)) {
.Exited => |code| {
if (code != 0) {
log.err(
@@ -657,7 +655,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim
}
if (result_error_bundle.errorMessageCount() > 0) {
- result_error_bundle.renderToStdErr(.{}, .auto);
+ try result_error_bundle.renderToStderr(io, .{}, .auto);
log.err("the following command failed with {d} compilation errors:\n{s}", .{
result_error_bundle.errorMessageCount(),
try Build.Step.allocPrintCmd(arena, null, argv.items),
diff --git a/lib/std/Io.zig b/lib/std/Io.zig
index dbf544c6d5..162fedca5f 100644
--- a/lib/std/Io.zig
+++ b/lib/std/Io.zig
@@ -82,8 +82,6 @@ pub const Limit = enum(usize) {
pub const Reader = @import("Io/Reader.zig");
pub const Writer = @import("Io/Writer.zig");
-pub const tty = @import("Io/tty.zig");
-
pub fn poll(
gpa: Allocator,
comptime StreamEnum: type,
@@ -528,14 +526,13 @@ pub fn Poller(comptime StreamEnum: type) type {
/// Given an enum, returns a struct with fields of that enum, each field
/// representing an I/O stream for polling.
pub fn PollFiles(comptime StreamEnum: type) type {
- return @Struct(.auto, null, std.meta.fieldNames(StreamEnum), &@splat(std.fs.File), &@splat(.{}));
+ return @Struct(.auto, null, std.meta.fieldNames(StreamEnum), &@splat(Io.File), &@splat(.{}));
}
test {
_ = net;
_ = Reader;
_ = Writer;
- _ = tty;
_ = Evented;
_ = Threaded;
_ = @import("Io/test.zig");
@@ -662,27 +659,66 @@ pub const VTable = struct {
futexWaitUncancelable: *const fn (?*anyopaque, ptr: *const u32, expected: u32) void,
futexWake: *const fn (?*anyopaque, ptr: *const u32, max_waiters: u32) void,
- dirMake: *const fn (?*anyopaque, Dir, sub_path: []const u8, Dir.Mode) Dir.MakeError!void,
- dirMakePath: *const fn (?*anyopaque, Dir, sub_path: []const u8, Dir.Mode) Dir.MakeError!void,
- dirMakeOpenPath: *const fn (?*anyopaque, Dir, sub_path: []const u8, Dir.OpenOptions) Dir.MakeOpenPathError!Dir,
+ dirCreateDir: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirError!void,
+ dirCreateDirPath: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirPathError!Dir.CreatePathStatus,
+ dirCreateDirPathOpen: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions, Dir.OpenOptions) Dir.CreateDirPathOpenError!Dir,
+ dirOpenDir: *const fn (?*anyopaque, Dir, []const u8, Dir.OpenOptions) Dir.OpenError!Dir,
dirStat: *const fn (?*anyopaque, Dir) Dir.StatError!Dir.Stat,
- dirStatPath: *const fn (?*anyopaque, Dir, sub_path: []const u8, Dir.StatPathOptions) Dir.StatPathError!File.Stat,
- dirAccess: *const fn (?*anyopaque, Dir, sub_path: []const u8, Dir.AccessOptions) Dir.AccessError!void,
- dirCreateFile: *const fn (?*anyopaque, Dir, sub_path: []const u8, File.CreateFlags) File.OpenError!File,
- dirOpenFile: *const fn (?*anyopaque, Dir, sub_path: []const u8, File.OpenFlags) File.OpenError!File,
- dirOpenDir: *const fn (?*anyopaque, Dir, sub_path: []const u8, Dir.OpenOptions) Dir.OpenError!Dir,
- dirClose: *const fn (?*anyopaque, Dir) void,
+ dirStatFile: *const fn (?*anyopaque, Dir, []const u8, Dir.StatFileOptions) Dir.StatFileError!File.Stat,
+ dirAccess: *const fn (?*anyopaque, Dir, []const u8, Dir.AccessOptions) Dir.AccessError!void,
+ dirCreateFile: *const fn (?*anyopaque, Dir, []const u8, File.CreateFlags) File.OpenError!File,
+ dirOpenFile: *const fn (?*anyopaque, Dir, []const u8, File.OpenFlags) File.OpenError!File,
+ dirClose: *const fn (?*anyopaque, []const Dir) void,
+ dirRead: *const fn (?*anyopaque, *Dir.Reader, []Dir.Entry) Dir.Reader.Error!usize,
+ dirRealPath: *const fn (?*anyopaque, Dir, out_buffer: []u8) Dir.RealPathError!usize,
+ dirRealPathFile: *const fn (?*anyopaque, Dir, path_name: []const u8, out_buffer: []u8) Dir.RealPathFileError!usize,
+ dirDeleteFile: *const fn (?*anyopaque, Dir, []const u8) Dir.DeleteFileError!void,
+ dirDeleteDir: *const fn (?*anyopaque, Dir, []const u8) Dir.DeleteDirError!void,
+ dirRename: *const fn (?*anyopaque, old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8) Dir.RenameError!void,
+ dirSymLink: *const fn (?*anyopaque, Dir, target_path: []const u8, sym_link_path: []const u8, Dir.SymLinkFlags) Dir.SymLinkError!void,
+ dirReadLink: *const fn (?*anyopaque, Dir, sub_path: []const u8, buffer: []u8) Dir.ReadLinkError!usize,
+ dirSetOwner: *const fn (?*anyopaque, Dir, ?File.Uid, ?File.Gid) Dir.SetOwnerError!void,
+ dirSetFileOwner: *const fn (?*anyopaque, Dir, []const u8, ?File.Uid, ?File.Gid, Dir.SetFileOwnerOptions) Dir.SetFileOwnerError!void,
+ dirSetPermissions: *const fn (?*anyopaque, Dir, Dir.Permissions) Dir.SetPermissionsError!void,
+ dirSetFilePermissions: *const fn (?*anyopaque, Dir, []const u8, File.Permissions, Dir.SetFilePermissionsOptions) Dir.SetFilePermissionsError!void,
+ dirSetTimestamps: *const fn (?*anyopaque, Dir, []const u8, last_accessed: Timestamp, last_modified: Timestamp, Dir.SetTimestampsOptions) Dir.SetTimestampsError!void,
+ dirSetTimestampsNow: *const fn (?*anyopaque, Dir, []const u8, Dir.SetTimestampsOptions) Dir.SetTimestampsError!void,
+ dirHardLink: *const fn (?*anyopaque, old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8, Dir.HardLinkOptions) Dir.HardLinkError!void,
+
fileStat: *const fn (?*anyopaque, File) File.StatError!File.Stat,
- fileClose: *const fn (?*anyopaque, File) void,
- fileWriteStreaming: *const fn (?*anyopaque, File, buffer: [][]const u8) File.WriteStreamingError!usize,
- fileWritePositional: *const fn (?*anyopaque, File, buffer: [][]const u8, offset: u64) File.WritePositionalError!usize,
+ fileLength: *const fn (?*anyopaque, File) File.LengthError!u64,
+ fileClose: *const fn (?*anyopaque, []const File) void,
+ fileWriteStreaming: *const fn (?*anyopaque, File, header: []const u8, data: []const []const u8, splat: usize) File.Writer.Error!usize,
+ fileWritePositional: *const fn (?*anyopaque, File, header: []const u8, data: []const []const u8, splat: usize, offset: u64) File.WritePositionalError!usize,
+ fileWriteFileStreaming: *const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit) File.Writer.WriteFileError!usize,
+ fileWriteFilePositional: *const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit, offset: u64) File.WriteFilePositionalError!usize,
/// Returns 0 on end of stream.
- fileReadStreaming: *const fn (?*anyopaque, File, data: [][]u8) File.Reader.Error!usize,
+ fileReadStreaming: *const fn (?*anyopaque, File, data: []const []u8) File.Reader.Error!usize,
/// Returns 0 on end of stream.
- fileReadPositional: *const fn (?*anyopaque, File, data: [][]u8, offset: u64) File.ReadPositionalError!usize,
+ fileReadPositional: *const fn (?*anyopaque, File, data: []const []u8, offset: u64) File.ReadPositionalError!usize,
fileSeekBy: *const fn (?*anyopaque, File, relative_offset: i64) File.SeekError!void,
fileSeekTo: *const fn (?*anyopaque, File, absolute_offset: u64) File.SeekError!void,
- openSelfExe: *const fn (?*anyopaque, File.OpenFlags) File.OpenSelfExeError!File,
+ fileSync: *const fn (?*anyopaque, File) File.SyncError!void,
+ fileIsTty: *const fn (?*anyopaque, File) Cancelable!bool,
+ fileEnableAnsiEscapeCodes: *const fn (?*anyopaque, File) File.EnableAnsiEscapeCodesError!void,
+ fileSupportsAnsiEscapeCodes: *const fn (?*anyopaque, File) Cancelable!bool,
+ fileSetLength: *const fn (?*anyopaque, File, u64) File.SetLengthError!void,
+ fileSetOwner: *const fn (?*anyopaque, File, ?File.Uid, ?File.Gid) File.SetOwnerError!void,
+ fileSetPermissions: *const fn (?*anyopaque, File, File.Permissions) File.SetPermissionsError!void,
+ fileSetTimestamps: *const fn (?*anyopaque, File, last_accessed: Timestamp, last_modified: Timestamp) File.SetTimestampsError!void,
+ fileSetTimestampsNow: *const fn (?*anyopaque, File) File.SetTimestampsError!void,
+ fileLock: *const fn (?*anyopaque, File, File.Lock) File.LockError!void,
+ fileTryLock: *const fn (?*anyopaque, File, File.Lock) File.LockError!bool,
+ fileUnlock: *const fn (?*anyopaque, File) void,
+ fileDowngradeLock: *const fn (?*anyopaque, File) File.DowngradeLockError!void,
+ fileRealPath: *const fn (?*anyopaque, File, out_buffer: []u8) File.RealPathError!usize,
+
+ processExecutableOpen: *const fn (?*anyopaque, File.OpenFlags) std.process.OpenExecutableError!File,
+ processExecutablePath: *const fn (?*anyopaque, buffer: []u8) std.process.ExecutablePathError!usize,
+ lockStderr: *const fn (?*anyopaque, buffer: []u8, ?Terminal.Mode) Cancelable!LockedStderr,
+ tryLockStderr: *const fn (?*anyopaque, buffer: []u8, ?Terminal.Mode) Cancelable!?LockedStderr,
+ unlockStderr: *const fn (?*anyopaque) void,
+ processSetCurrentDir: *const fn (?*anyopaque, Dir) std.process.SetCurrentDirError!void,
now: *const fn (?*anyopaque, Clock) Clock.Error!Timestamp,
sleep: *const fn (?*anyopaque, Timeout) SleepError!void,
@@ -698,7 +734,8 @@ pub const VTable = struct {
/// Returns 0 on end of stream.
netRead: *const fn (?*anyopaque, src: net.Socket.Handle, data: [][]u8) net.Stream.Reader.Error!usize,
netWrite: *const fn (?*anyopaque, dest: net.Socket.Handle, header: []const u8, data: []const []const u8, splat: usize) net.Stream.Writer.Error!usize,
- netClose: *const fn (?*anyopaque, handle: net.Socket.Handle) void,
+ netWriteFile: *const fn (?*anyopaque, net.Socket.Handle, header: []const u8, *Io.File.Reader, Io.Limit) net.Stream.Writer.WriteFileError!usize,
+ netClose: *const fn (?*anyopaque, handle: []const net.Socket.Handle) void,
netInterfaceNameResolve: *const fn (?*anyopaque, *const net.Interface.Name) net.Interface.Name.ResolveError!net.Interface,
netInterfaceName: *const fn (?*anyopaque, net.Interface) net.Interface.NameError!net.Interface.Name,
netLookup: *const fn (?*anyopaque, net.HostName, *Queue(net.HostName.LookupResult), net.HostName.LookupOptions) net.HostName.LookupError!void,
@@ -723,6 +760,7 @@ pub const UnexpectedError = error{
pub const Dir = @import("Io/Dir.zig");
pub const File = @import("Io/File.zig");
+pub const Terminal = @import("Io/Terminal.zig");
pub const Clock = enum {
/// A settable system-wide clock that measures real (i.e. wall-clock)
@@ -1277,17 +1315,21 @@ pub fn futexWait(io: Io, comptime T: type, ptr: *align(@alignOf(u32)) const T, e
/// wakeups are possible. It remains the caller's responsibility to differentiate between these
/// three possible wake-up reasons if necessary.
pub fn futexWaitTimeout(io: Io, comptime T: type, ptr: *align(@alignOf(u32)) const T, expected: T, timeout: Timeout) Cancelable!void {
- comptime assert(@sizeOf(T) == 4);
- const expected_raw: *align(1) const u32 = @ptrCast(&expected);
- return io.vtable.futexWait(io.userdata, @ptrCast(ptr), expected_raw.*, timeout);
+ const expected_int: u32 = switch (@typeInfo(T)) {
+ .@"enum" => @bitCast(@intFromEnum(expected)),
+ else => @bitCast(expected),
+ };
+ return io.vtable.futexWait(io.userdata, @ptrCast(ptr), expected_int, timeout);
}
/// Same as `futexWait`, except does not introduce a cancelation point.
///
/// For a description of cancelation and cancelation points, see `Future.cancel`.
pub fn futexWaitUncancelable(io: Io, comptime T: type, ptr: *align(@alignOf(u32)) const T, expected: T) void {
- comptime assert(@sizeOf(T) == @sizeOf(u32));
- const expected_raw: *align(1) const u32 = @ptrCast(&expected);
- io.vtable.futexWaitUncancelable(io.userdata, @ptrCast(ptr), expected_raw.*);
+ const expected_int: u32 = switch (@typeInfo(T)) {
+ .@"enum" => @bitCast(@intFromEnum(expected)),
+ else => @bitCast(expected),
+ };
+ io.vtable.futexWaitUncancelable(io.userdata, @ptrCast(ptr), expected_int);
}
/// Unblocks pending futex waits on `ptr`, up to a limit of `max_waiters` calls.
pub fn futexWake(io: Io, comptime T: type, ptr: *align(@alignOf(u32)) const T, max_waiters: u32) void {
@@ -1539,10 +1581,12 @@ pub const Event = enum(u32) {
}
}
+ pub const WaitTimeoutError = error{Timeout} || Cancelable;
+
/// Blocks the calling thread until either the logical boolean is set, the timeout expires, or a
/// spurious wakeup occurs. If the timeout expires or a spurious wakeup occurs, `error.Timeout`
/// is returned.
- pub fn waitTimeout(event: *Event, io: Io, timeout: Timeout) (error{Timeout} || Cancelable)!void {
+ pub fn waitTimeout(event: *Event, io: Io, timeout: Timeout) WaitTimeoutError!void {
if (@cmpxchgStrong(Event, event, .unset, .waiting, .acquire, .acquire)) |prev| switch (prev) {
.unset => unreachable,
.waiting => assert(!builtin.single_threaded), // invalid state
@@ -1555,7 +1599,7 @@ pub const Event = enum(u32) {
// waiters would wake up when a *new waiter* was added. So it's easiest to just leave
// the state at `.waiting`---at worst it causes one redundant call to `futexWake`.
}
- io.futexWaitTimeout(Event, event, .waiting, timeout);
+ try io.futexWaitTimeout(Event, event, .waiting, timeout);
switch (@atomicLoad(Event, event, .acquire)) {
.unset => unreachable, // `reset` called before pending `wait` returned
.waiting => return error.Timeout,
@@ -2136,3 +2180,35 @@ pub fn select(io: Io, s: anytype) Cancelable!SelectUnion(@TypeOf(s)) {
else => unreachable,
}
}
+
+pub const LockedStderr = struct {
+ file_writer: *File.Writer,
+ terminal_mode: Terminal.Mode,
+
+ pub fn terminal(ls: LockedStderr) Terminal {
+ return .{
+ .writer = &ls.file_writer.interface,
+ .mode = ls.terminal_mode,
+ };
+ }
+};
+
+/// For doing application-level writes to the standard error stream.
+/// Coordinates also with debug-level writes that are ignorant of Io interface
+/// and implementations. When this returns, `std.process.stderr_thread_mutex`
+/// will be locked.
+///
+/// See also:
+/// * `tryLockStderr`
+pub fn lockStderr(io: Io, buffer: []u8, terminal_mode: ?Terminal.Mode) Cancelable!LockedStderr {
+ return io.vtable.lockStderr(io.userdata, buffer, terminal_mode);
+}
+
+/// Same as `lockStderr` but non-blocking.
+pub fn tryLockStderr(io: Io, buffer: []u8, terminal_mode: ?Terminal.Mode) Cancelable!?LockedStderr {
+ return io.vtable.tryLockStderr(io.userdata, buffer, terminal_mode);
+}
+
+pub fn unlockStderr(io: Io) void {
+ return io.vtable.unlockStderr(io.userdata);
+}
diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig
index 3429fa6fdf..e7b1806616 100644
--- a/lib/std/Io/Dir.zig
+++ b/lib/std/Io/Dir.zig
@@ -1,4 +1,5 @@
const Dir = @This();
+const root = @import("root");
const builtin = @import("builtin");
const native_os = builtin.os.tag;
@@ -6,11 +7,71 @@ const native_os = builtin.os.tag;
const std = @import("../std.zig");
const Io = std.Io;
const File = Io.File;
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
handle: Handle,
-pub const Mode = Io.File.Mode;
-pub const default_mode: Mode = 0o755;
+pub const path = std.fs.path;
+
+/// The maximum length of a file path that the operating system will accept.
+///
+/// Paths, including those returned from file system operations, may be longer
+/// than this length, but such paths cannot be successfully passed back in
+/// other file system operations. However, all path components returned by file
+/// system operations are assumed to fit into a `u8` array of this length.
+///
+/// The byte count includes room for a null sentinel byte.
+///
+/// * On Windows, `[]u8` file paths are encoded as
+/// [WTF-8](https://wtf-8.codeberg.page/).
+/// * On WASI, `[]u8` file paths are encoded as valid UTF-8.
+/// * On other platforms, `[]u8` file paths are opaque sequences of bytes with
+/// no particular encoding.
+pub const max_path_bytes = switch (native_os) {
+ .linux, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .freebsd, .openbsd, .netbsd, .dragonfly, .haiku, .illumos, .plan9, .emscripten, .wasi, .serenity => std.posix.PATH_MAX,
+ // Each WTF-16LE code unit may be expanded to 3 WTF-8 bytes.
+ // If it would require 4 WTF-8 bytes, then there would be a surrogate
+ // pair in the WTF-16LE, and we (over)account 3 bytes for it that way.
+ // +1 for the null byte at the end, which can be encoded in 1 byte.
+ .windows => std.os.windows.PATH_MAX_WIDE * 3 + 1,
+ else => if (@hasDecl(root, "os") and @hasDecl(root.os, "PATH_MAX"))
+ root.os.PATH_MAX
+ else
+ @compileError("PATH_MAX not implemented for " ++ @tagName(native_os)),
+};
+
+/// This represents the maximum size of a `[]u8` file name component that
+/// the platform's common file systems support. File name components returned by file system
+/// operations are likely to fit into a `u8` array of this length, but
+/// (depending on the platform) this assumption may not hold for every configuration.
+/// The byte count does not include a null sentinel byte.
+/// On Windows, `[]u8` file name components are encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, file name components are encoded as valid UTF-8.
+/// On other platforms, `[]u8` components are an opaque sequence of bytes with no particular encoding.
+pub const max_name_bytes = switch (native_os) {
+ .linux, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .freebsd, .openbsd, .netbsd, .dragonfly, .illumos, .serenity => std.posix.NAME_MAX,
+ // Haiku's NAME_MAX includes the null terminator, so subtract one.
+ .haiku => std.posix.NAME_MAX - 1,
+ // Each WTF-16LE character may be expanded to 3 WTF-8 bytes.
+ // If it would require 4 WTF-8 bytes, then there would be a surrogate
+ // pair in the WTF-16LE, and we (over)account 3 bytes for it that way.
+ .windows => std.os.windows.NAME_MAX * 3,
+ // For WASI, the MAX_NAME will depend on the host OS, so it needs to be
+ // as large as the largest max_name_bytes (Windows) in order to work on any host OS.
+ // TODO determine if this is a reasonable approach
+ .wasi => std.os.windows.NAME_MAX * 3,
+ else => if (@hasDecl(root, "os") and @hasDecl(root.os, "NAME_MAX"))
+ root.os.NAME_MAX
+ else
+ @compileError("NAME_MAX not implemented for " ++ @tagName(native_os)),
+};
+
+pub const Entry = struct {
+ name: []const u8,
+ kind: File.Kind,
+ inode: File.INode,
+};
/// Returns a handle to the current working directory.
///
@@ -20,6 +81,8 @@ pub const default_mode: Mode = 0o755;
/// Closing the returned `Dir` is checked illegal behavior.
///
/// On POSIX targets, this function is comptime-callable.
+///
+/// On WASI, the value this returns is application-configurable.
pub fn cwd() Dir {
return switch (native_os) {
.windows => .{ .handle = std.os.windows.peb().ProcessParameters.CurrentDirectory.Handle },
@@ -28,9 +91,314 @@ pub fn cwd() Dir {
};
}
+pub const Reader = struct {
+ dir: Dir,
+ state: State,
+ /// Stores I/O implementation specific data.
+ buffer: []align(@alignOf(usize)) u8,
+ /// Index of next entry in `buffer`.
+ index: usize,
+ /// Fill position of `buffer`.
+ end: usize,
+
+ /// A length for `buffer` that allows all implementations to function.
+ pub const min_buffer_len = switch (native_os) {
+ .linux => std.mem.alignForward(usize, @sizeOf(std.os.linux.dirent64), 8) +
+ std.mem.alignForward(usize, max_name_bytes, 8),
+ .windows => len: {
+ const max_info_len = @sizeOf(std.os.windows.FILE_BOTH_DIR_INFORMATION) + std.os.windows.NAME_MAX * 2;
+ const info_align = @alignOf(std.os.windows.FILE_BOTH_DIR_INFORMATION);
+ const reserved_len = std.mem.alignForward(usize, max_name_bytes, info_align) - max_info_len;
+ break :len std.mem.alignForward(usize, reserved_len, info_align) + max_info_len;
+ },
+ .wasi => @sizeOf(std.os.wasi.dirent_t) +
+ std.mem.alignForward(usize, max_name_bytes, @alignOf(std.os.wasi.dirent_t)),
+ else => if (builtin.link_libc) @sizeOf(std.c.dirent) else std.mem.alignForward(usize, max_name_bytes, @alignOf(usize)),
+ };
+
+ pub const State = enum {
+ /// Indicates the next call to `read` should rewind and start over the
+ /// directory listing.
+ reset,
+ reading,
+ finished,
+ };
+
+ pub const Error = error{
+ AccessDenied,
+ PermissionDenied,
+ SystemResources,
+ } || Io.UnexpectedError || Io.Cancelable;
+
+ /// Asserts that `buffer` has length at least `min_buffer_len`.
+ pub fn init(dir: Dir, buffer: []align(@alignOf(usize)) u8) Reader {
+ assert(buffer.len >= min_buffer_len);
+ return .{
+ .dir = dir,
+ .state = .reset,
+ .index = 0,
+ .end = 0,
+ .buffer = buffer,
+ };
+ }
+
+ /// All `Entry.name` are invalidated with the next call to `read` or
+ /// `next`.
+ pub fn read(r: *Reader, io: Io, buffer: []Entry) Error!usize {
+ return io.vtable.dirRead(io.userdata, r, buffer);
+ }
+
+ /// `Entry.name` is invalidated with the next call to `read` or `next`.
+ pub fn next(r: *Reader, io: Io) Error!?Entry {
+ var buffer: [1]Entry = undefined;
+ while (true) {
+ const n = try read(r, io, &buffer);
+ if (n == 1) return buffer[0];
+ if (r.state == .finished) return null;
+ }
+ }
+
+ pub fn reset(r: *Reader) void {
+ r.state = .reset;
+ r.index = 0;
+ r.end = 0;
+ }
+};
+
+/// This API is designed for convenience rather than performance:
+/// * It chooses a buffer size rather than allowing the user to provide one.
+/// * It is movable by only requesting one `Entry` at a time from the `Io`
+/// implementation rather than doing batch operations.
+///
+/// Still, it will do a decent job of minimizing syscall overhead. For a
+/// lower level abstraction, see `Reader`. For a higher level abstraction,
+/// see `Walker`.
+pub const Iterator = struct {
+ reader: Reader,
+ reader_buffer: [reader_buffer_len]u8 align(@alignOf(usize)),
+
+ pub const reader_buffer_len = 2048;
+
+ comptime {
+ assert(reader_buffer_len >= Reader.min_buffer_len);
+ }
+
+ pub const Error = Reader.Error;
+
+ pub fn init(dir: Dir, reader_state: Reader.State) Iterator {
+ return .{
+ .reader = .{
+ .dir = dir,
+ .state = reader_state,
+ .index = 0,
+ .end = 0,
+ .buffer = undefined,
+ },
+ .reader_buffer = undefined,
+ };
+ }
+
+ pub fn next(it: *Iterator, io: Io) Error!?Entry {
+ it.reader.buffer = &it.reader_buffer;
+ return it.reader.next(io);
+ }
+};
+
+pub fn iterate(dir: Dir) Iterator {
+ return .init(dir, .reset);
+}
+
+/// Like `iterate`, but will not reset the directory cursor before the first
+/// iteration. This should only be used in cases where it is known that the
+/// `Dir` has not had its cursor modified yet (e.g. it was just opened).
+pub fn iterateAssumeFirstIteration(dir: Dir) Iterator {
+ return .init(dir, .reading);
+}
+
+pub const SelectiveWalker = struct {
+ stack: std.ArrayList(StackItem),
+ name_buffer: std.ArrayList(u8),
+ allocator: Allocator,
+
+ pub const Error = Iterator.Error || Allocator.Error;
+
+ const StackItem = struct {
+ iter: Iterator,
+ dirname_len: usize,
+ };
+
+ /// After each call to this function, and on deinit(), the memory returned
+ /// from this function becomes invalid. A copy must be made in order to keep
+ /// a reference to the path.
+ pub fn next(self: *SelectiveWalker, io: Io) Error!?Walker.Entry {
+ while (self.stack.items.len > 0) {
+ const top = &self.stack.items[self.stack.items.len - 1];
+ var dirname_len = top.dirname_len;
+ if (top.iter.next(io) catch |err| {
+ // If we get an error, then we want the user to be able to continue
+ // walking if they want, which means that we need to pop the directory
+ // that errored from the stack. Otherwise, all future `next` calls would
+ // likely just fail with the same error.
+ var item = self.stack.pop().?;
+ if (self.stack.items.len != 0) {
+ item.iter.reader.dir.close(io);
+ }
+ return err;
+ }) |entry| {
+ self.name_buffer.shrinkRetainingCapacity(dirname_len);
+ if (self.name_buffer.items.len != 0) {
+ try self.name_buffer.append(self.allocator, path.sep);
+ dirname_len += 1;
+ }
+ try self.name_buffer.ensureUnusedCapacity(self.allocator, entry.name.len + 1);
+ self.name_buffer.appendSliceAssumeCapacity(entry.name);
+ self.name_buffer.appendAssumeCapacity(0);
+ const walker_entry: Walker.Entry = .{
+ .dir = top.iter.reader.dir,
+ .basename = self.name_buffer.items[dirname_len .. self.name_buffer.items.len - 1 :0],
+ .path = self.name_buffer.items[0 .. self.name_buffer.items.len - 1 :0],
+ .kind = entry.kind,
+ };
+ return walker_entry;
+ } else {
+ var item = self.stack.pop().?;
+ if (self.stack.items.len != 0) {
+ item.iter.reader.dir.close(io);
+ }
+ }
+ }
+ return null;
+ }
+
+ /// Traverses into the directory, continuing walking one level down.
+ pub fn enter(self: *SelectiveWalker, io: Io, entry: Walker.Entry) !void {
+ if (entry.kind != .directory) {
+ @branchHint(.cold);
+ return;
+ }
+
+ var new_dir = entry.dir.openDir(io, entry.basename, .{ .iterate = true }) catch |err| {
+ switch (err) {
+ error.NameTooLong => unreachable,
+ else => |e| return e,
+ }
+ };
+ errdefer new_dir.close(io);
+
+ try self.stack.append(self.allocator, .{
+ .iter = new_dir.iterateAssumeFirstIteration(),
+ .dirname_len = self.name_buffer.items.len - 1,
+ });
+ }
+
+ pub fn deinit(self: *SelectiveWalker) void {
+ self.name_buffer.deinit(self.allocator);
+ self.stack.deinit(self.allocator);
+ }
+
+ /// Leaves the current directory, continuing walking one level up.
+ /// If the current entry is a directory entry, then the "current directory"
+ /// will pertain to that entry if `enter` is called before `leave`.
+ pub fn leave(self: *SelectiveWalker, io: Io) void {
+ var item = self.stack.pop().?;
+ if (self.stack.items.len != 0) {
+ @branchHint(.likely);
+ item.iter.reader.dir.close(io);
+ }
+ }
+};
+
+/// Recursively iterates over a directory, but requires the user to
+/// opt-in to recursing into each directory entry.
+///
+/// `dir` must have been opened with `OpenOptions.iterate` set to `true`.
+///
+/// `Walker.deinit` releases allocated memory and directory handles.
+///
+/// The order of returned file system entries is undefined.
+///
+/// `dir` will not be closed after walking it.
+///
+/// See also `walk`.
+pub fn walkSelectively(dir: Dir, allocator: Allocator) !SelectiveWalker {
+ var stack: std.ArrayList(SelectiveWalker.StackItem) = .empty;
+
+ try stack.append(allocator, .{
+ .iter = dir.iterate(),
+ .dirname_len = 0,
+ });
+
+ return .{
+ .stack = stack,
+ .name_buffer = .{},
+ .allocator = allocator,
+ };
+}
+
+pub const Walker = struct {
+ inner: SelectiveWalker,
+
+ pub const Entry = struct {
+ /// The containing directory. This can be used to operate directly on `basename`
+ /// rather than `path`, avoiding `error.NameTooLong` for deeply nested paths.
+ /// The directory remains open until `next` or `deinit` is called.
+ dir: Dir,
+ basename: [:0]const u8,
+ path: [:0]const u8,
+ kind: File.Kind,
+
+ /// Returns the depth of the entry relative to the initial directory.
+ /// Returns 1 for a direct child of the initial directory, 2 for an entry
+ /// within a direct child of the initial directory, etc.
+ pub fn depth(self: Walker.Entry) usize {
+ return std.mem.countScalar(u8, self.path, path.sep) + 1;
+ }
+ };
+
+ /// After each call to this function, and on deinit(), the memory returned
+ /// from this function becomes invalid. A copy must be made in order to keep
+ /// a reference to the path.
+ pub fn next(self: *Walker, io: Io) !?Walker.Entry {
+ const entry = try self.inner.next(io);
+ if (entry != null and entry.?.kind == .directory) {
+ try self.inner.enter(io, entry.?);
+ }
+ return entry;
+ }
+
+ pub fn deinit(self: *Walker) void {
+ self.inner.deinit();
+ }
+
+ /// Leaves the current directory, continuing walking one level up.
+ /// If the current entry is a directory entry, then the "current directory"
+ /// is the directory pertaining to the current entry.
+ pub fn leave(self: *Walker) void {
+ self.inner.leave();
+ }
+};
+
+/// Recursively iterates over a directory.
+///
+/// `dir` must have been opened with `OpenOptions.iterate` set to `true`.
+///
+/// `Walker.deinit` releases allocated memory and directory handles.
+///
+/// The order of returned file system entries is undefined.
+///
+/// `dir` will not be closed after walking it.
+///
+/// See also:
+/// * `walkSelectively`
+pub fn walk(dir: Dir, allocator: Allocator) Allocator.Error!Walker {
+ return .{ .inner = try walkSelectively(dir, allocator) };
+}
+
pub const Handle = std.posix.fd_t;
pub const PathNameError = error{
+ /// Returned when an insufficient buffer is provided that cannot fit the
+ /// path name.
NameTooLong,
/// File system cannot encode the requested file name bytes.
/// Could be due to invalid WTF-8 on Windows, invalid UTF-8 on WASI,
@@ -69,6 +437,11 @@ pub fn access(dir: Dir, io: Io, sub_path: []const u8, options: AccessOptions) Ac
return io.vtable.dirAccess(io.userdata, dir, sub_path, options);
}
+pub fn accessAbsolute(io: Io, absolute_path: []const u8, options: AccessOptions) AccessError!void {
+ assert(path.isAbsolute(absolute_path));
+ return access(.cwd(), io, absolute_path, options);
+}
+
pub const OpenError = error{
FileNotFound,
NotDir,
@@ -108,8 +481,17 @@ pub fn openDir(dir: Dir, io: Io, sub_path: []const u8, options: OpenOptions) Ope
return io.vtable.dirOpenDir(io.userdata, dir, sub_path, options);
}
+pub fn openDirAbsolute(io: Io, absolute_path: []const u8, options: OpenOptions) OpenError!Dir {
+ assert(path.isAbsolute(absolute_path));
+ return openDir(.cwd(), io, absolute_path, options);
+}
+
pub fn close(dir: Dir, io: Io) void {
- return io.vtable.dirClose(io.userdata, dir);
+ return io.vtable.dirClose(io.userdata, (&dir)[0..1]);
+}
+
+pub fn closeMany(io: Io, dirs: []const Dir) void {
+ return io.vtable.dirClose(io.userdata, dirs);
}
/// Opens a file for reading or writing, without attempting to create a new file.
@@ -125,6 +507,11 @@ pub fn openFile(dir: Dir, io: Io, sub_path: []const u8, flags: File.OpenFlags) F
return io.vtable.dirOpenFile(io.userdata, dir, sub_path, flags);
}
+pub fn openFileAbsolute(io: Io, absolute_path: []const u8, flags: File.OpenFlags) File.OpenError!File {
+ assert(path.isAbsolute(absolute_path));
+ return openFile(.cwd(), io, absolute_path, flags);
+}
+
/// Creates, opens, or overwrites a file with write access.
///
/// Allocates a resource to be dellocated with `File.close`.
@@ -136,6 +523,10 @@ pub fn createFile(dir: Dir, io: Io, sub_path: []const u8, flags: File.CreateFlag
return io.vtable.dirCreateFile(io.userdata, dir, sub_path, flags);
}
+pub fn createFileAbsolute(io: Io, absolute_path: []const u8, flags: File.CreateFlags) File.OpenError!File {
+ return createFile(.cwd(), io, absolute_path, flags);
+}
+
pub const WriteFileOptions = struct {
/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
@@ -145,13 +536,13 @@ pub const WriteFileOptions = struct {
flags: File.CreateFlags = .{},
};
-pub const WriteFileError = File.WriteError || File.OpenError || Io.Cancelable;
+pub const WriteFileError = File.Writer.Error || File.OpenError;
/// Writes content to the file system, using the file creation flags provided.
pub fn writeFile(dir: Dir, io: Io, options: WriteFileOptions) WriteFileError!void {
var file = try dir.createFile(io, options.sub_path, options.flags);
defer file.close(io);
- try file.writeAll(io, options.data);
+ try file.writeStreamingAll(io, options.data);
}
pub const PrevStatus = enum {
@@ -161,10 +552,10 @@ pub const PrevStatus = enum {
pub const UpdateFileError = File.OpenError;
-/// Check the file size, mtime, and mode of `source_path` and `dest_path`. If
+/// Check the file size, mtime, and permissions of `source_path` and `dest_path`. If
/// they are equal, does nothing. Otherwise, atomically copies `source_path` to
/// `dest_path`, creating the parent directory hierarchy as needed. The
-/// destination file gains the mtime, atime, and mode of the source file so
+/// destination file gains the mtime, atime, and permissions of the source file so
/// that the next call to `updateFile` will not need a copy.
///
/// Returns the previous status of the file before updating.
@@ -179,13 +570,13 @@ pub fn updateFile(
dest_dir: Dir,
/// If directories in this path do not exist, they are created.
dest_path: []const u8,
- options: std.fs.Dir.CopyFileOptions,
+ options: CopyFileOptions,
) !PrevStatus {
var src_file = try source_dir.openFile(io, source_path, .{});
defer src_file.close(io);
const src_stat = try src_file.stat(io);
- const actual_mode = options.override_mode orelse src_stat.mode;
+ const actual_permissions = options.permissions orelse src_stat.permissions;
check_dest_stat: {
const dest_stat = blk: {
var dest_file = dest_dir.openFile(io, dest_path, .{}) catch |err| switch (err) {
@@ -199,19 +590,19 @@ pub fn updateFile(
if (src_stat.size == dest_stat.size and
src_stat.mtime.nanoseconds == dest_stat.mtime.nanoseconds and
- actual_mode == dest_stat.mode)
+ actual_permissions == dest_stat.permissions)
{
return .fresh;
}
}
- if (std.fs.path.dirname(dest_path)) |dirname| {
- try dest_dir.makePath(io, dirname);
+ if (path.dirname(dest_path)) |dirname| {
+ try dest_dir.createDirPath(io, dirname);
}
var buffer: [1000]u8 = undefined; // Used only when direct fd-to-fd is not available.
- var atomic_file = try std.fs.Dir.atomicFile(.adaptFromNewApi(dest_dir), dest_path, .{
- .mode = actual_mode,
+ var atomic_file = try dest_dir.atomicFile(io, dest_path, .{
+ .permissions = actual_permissions,
.write_buffer = &buffer,
});
defer atomic_file.deinit();
@@ -224,7 +615,7 @@ pub fn updateFile(
error.WriteFailed => return atomic_file.file_writer.err.?,
};
try atomic_file.flush();
- try atomic_file.file_writer.file.updateTimes(src_stat.atime, src_stat.mtime);
+ try atomic_file.file_writer.file.setTimestamps(io, src_stat.atime, src_stat.mtime);
try atomic_file.renameIntoPlace();
return .stale;
}
@@ -242,7 +633,11 @@ pub const ReadFileError = File.OpenError || File.Reader.Error;
/// * On WASI, `file_path` should be encoded as valid UTF-8.
/// * On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
pub fn readFile(dir: Dir, io: Io, file_path: []const u8, buffer: []u8) ReadFileError![]u8 {
- var file = try dir.openFile(io, file_path, .{});
+ var file = try dir.openFile(io, file_path, .{
+ // We can take advantage of this on Windows since it doesn't involve any extra syscalls,
+ // so we can get error.IsDir during open rather than during the read.
+ .allow_directory = if (native_os == .windows) false else true,
+ });
defer file.close(io);
var reader = file.reader(io, &.{});
@@ -253,7 +648,7 @@ pub fn readFile(dir: Dir, io: Io, file_path: []const u8, buffer: []u8) ReadFileE
return buffer[0..n];
}
-pub const MakeError = error{
+pub const CreateDirError = error{
/// In WASI, this error may occur when the file descriptor does
/// not hold the required rights to create a new directory relative to it.
AccessDenied,
@@ -279,21 +674,36 @@ pub const MakeError = error{
/// * On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
///
/// Related:
-/// * `makePath`
-/// * `makeDirAbsolute`
-pub fn makeDir(dir: Dir, io: Io, sub_path: []const u8) MakeError!void {
- return io.vtable.dirMake(io.userdata, dir, sub_path, default_mode);
+/// * `createDirPath`
+/// * `createDirAbsolute`
+pub fn createDir(dir: Dir, io: Io, sub_path: []const u8, permissions: Permissions) CreateDirError!void {
+ return io.vtable.dirCreateDir(io.userdata, dir, sub_path, permissions);
+}
+
+/// Create a new directory, based on an absolute path.
+///
+/// Asserts that the path is absolute. See `createDir` for a function that
+/// operates on both absolute and relative paths.
+///
+/// On Windows, `absolute_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, `absolute_path` should be encoded as valid UTF-8.
+/// On other platforms, `absolute_path` is an opaque sequence of bytes with no particular encoding.
+pub fn createDirAbsolute(io: Io, absolute_path: []const u8, permissions: Permissions) CreateDirError!void {
+ assert(path.isAbsolute(absolute_path));
+ return createDir(.cwd(), io, absolute_path, permissions);
}
-pub const MakePathError = MakeError || StatPathError;
+test createDirAbsolute {}
-/// Calls makeDir iteratively to make an entire path, creating any parent
-/// directories that do not exist.
+pub const CreateDirPathError = CreateDirError || StatFileError;
+
+/// Creates parent directories with default permissions as necessary to ensure
+/// `sub_path` exists as a directory.
///
/// Returns success if the path already exists and is a directory.
///
-/// This function is not atomic, and if it returns an error, the file system
-/// may have been modified regardless.
+/// This function may not be atomic. If it returns an error, the file system
+/// may have been modified.
///
/// Fails on an empty path with `error.BadPathName` as that is not a path that
/// can be created.
@@ -309,48 +719,29 @@ pub const MakePathError = MakeError || StatPathError;
/// - On other platforms, `..` are not resolved before the path is passed to `mkdirat`,
/// meaning a `sub_path` like "first/../second" will create both a `./first`
/// and a `./second` directory.
-pub fn makePath(dir: Dir, io: Io, sub_path: []const u8) MakePathError!void {
- _ = try makePathStatus(dir, io, sub_path);
+///
+/// See also:
+/// * `createDirPathStatus`
+pub fn createDirPath(dir: Dir, io: Io, sub_path: []const u8) CreateDirPathError!void {
+ _ = try io.vtable.dirCreateDirPath(io.userdata, dir, sub_path, .default_dir);
}
-pub const MakePathStatus = enum { existed, created };
+pub const CreatePathStatus = enum { existed, created };
-/// Same as `makePath` except returns whether the path already existed or was
+/// Same as `createDirPath` except returns whether the path already existed or was
/// successfully created.
-pub fn makePathStatus(dir: Dir, io: Io, sub_path: []const u8) MakePathError!MakePathStatus {
- var it = std.fs.path.componentIterator(sub_path);
- var status: MakePathStatus = .existed;
- var component = it.last() orelse return error.BadPathName;
- while (true) {
- if (makeDir(dir, io, component.path)) {
- status = .created;
- } else |err| switch (err) {
- error.PathAlreadyExists => {
- // stat the file and return an error if it's not a directory
- // this is important because otherwise a dangling symlink
- // could cause an infinite loop
- check_dir: {
- // workaround for windows, see https://github.com/ziglang/zig/issues/16738
- const fstat = statPath(dir, io, component.path, .{}) catch |stat_err| switch (stat_err) {
- error.IsDir => break :check_dir,
- else => |e| return e,
- };
- if (fstat.kind != .directory) return error.NotDir;
- }
- },
- error.FileNotFound => |e| {
- component = it.previous() orelse return e;
- continue;
- },
- else => |e| return e,
- }
- component = it.next() orelse return status;
- }
+pub fn createDirPathStatus(dir: Dir, io: Io, sub_path: []const u8, permissions: Permissions) CreateDirPathError!CreatePathStatus {
+ return io.vtable.dirCreateDirPath(io.userdata, dir, sub_path, permissions);
}
-pub const MakeOpenPathError = MakeError || OpenError || StatPathError;
+pub const CreateDirPathOpenError = CreateDirError || OpenError || StatFileError;
+
+pub const CreateDirPathOpenOptions = struct {
+ open_options: OpenOptions = .{},
+ permissions: Permissions = .default_dir,
+};
-/// Performs the equivalent of `makePath` followed by `openDir`, atomically if possible.
+/// Performs the equivalent of `createDirPath` followed by `openDir`, atomically if possible.
///
/// When this operation is canceled, it may leave the file system in a
/// partially modified state.
@@ -358,8 +749,8 @@ pub const MakeOpenPathError = MakeError || OpenError || StatPathError;
/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
-pub fn makeOpenPath(dir: Dir, io: Io, sub_path: []const u8, options: OpenOptions) MakeOpenPathError!Dir {
- return io.vtable.dirMakeOpenPath(io.userdata, dir, sub_path, options);
+pub fn createDirPathOpen(dir: Dir, io: Io, sub_path: []const u8, options: CreateDirPathOpenOptions) CreateDirPathOpenError!Dir {
+ return io.vtable.dirCreateDirPathOpen(io.userdata, dir, sub_path, options.permissions, options.open_options);
}
pub const Stat = File.Stat;
@@ -369,9 +760,9 @@ pub fn stat(dir: Dir, io: Io) StatError!Stat {
return io.vtable.dirStat(io.userdata, dir);
}
-pub const StatPathError = File.OpenError || File.StatError;
+pub const StatFileError = File.OpenError || File.StatError;
-pub const StatPathOptions = struct {
+pub const StatFileOptions = struct {
follow_symlinks: bool = true,
};
@@ -387,6 +778,1074 @@ pub const StatPathOptions = struct {
/// * On Windows, `sub_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// * On WASI, `sub_path` should be encoded as valid UTF-8.
/// * On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
-pub fn statPath(dir: Dir, io: Io, sub_path: []const u8, options: StatPathOptions) StatPathError!Stat {
- return io.vtable.dirStatPath(io.userdata, dir, sub_path, options);
+pub fn statFile(dir: Dir, io: Io, sub_path: []const u8, options: StatFileOptions) StatFileError!Stat {
+ return io.vtable.dirStatFile(io.userdata, dir, sub_path, options);
+}
+
+pub const RealPathError = File.RealPathError;
+
+/// Obtains the canonicalized absolute path name of `sub_path` relative to this
+/// `Dir`. If `sub_path` is absolute, ignores this `Dir` handle and obtains the
+/// canonicalized absolute pathname of `sub_path` argument.
+///
+/// This function has limited platform support, and using it can lead to
+/// unnecessary failures and race conditions. It is generally advisable to
+/// avoid this function entirely.
+pub fn realPath(dir: Dir, io: Io, out_buffer: []u8) RealPathError!usize {
+ return io.vtable.dirRealPath(io.userdata, dir, out_buffer);
+}
+
+pub const RealPathFileError = RealPathError || PathNameError;
+
+/// Obtains the canonicalized absolute path name of `sub_path` relative to this
+/// `Dir`. If `sub_path` is absolute, ignores this `Dir` handle and obtains the
+/// canonicalized absolute pathname of `sub_path` argument.
+///
+/// This function has limited platform support, and using it can lead to
+/// unnecessary failures and race conditions. It is generally advisable to
+/// avoid this function entirely.
+///
+/// See also:
+/// * `realPathFileAlloc`.
+/// * `realPathFileAbsolute`.
+pub fn realPathFile(dir: Dir, io: Io, sub_path: []const u8, out_buffer: []u8) RealPathFileError!usize {
+ return io.vtable.dirRealPathFile(io.userdata, dir, sub_path, out_buffer);
+}
+
+pub const RealPathFileAllocError = RealPathFileError || Allocator.Error;
+
+/// Same as `realPathFile` except allocates result.
+///
+/// This function has limited platform support, and using it can lead to
+/// unnecessary failures and race conditions. It is generally advisable to
+/// avoid this function entirely.
+///
+/// See also:
+/// * `realPathFile`.
+/// * `realPathFileAbsolute`.
+pub fn realPathFileAlloc(dir: Dir, io: Io, sub_path: []const u8, allocator: Allocator) RealPathFileAllocError![:0]u8 {
+ var buffer: [max_path_bytes]u8 = undefined;
+ const n = try realPathFile(dir, io, sub_path, &buffer);
+ return allocator.dupeZ(u8, buffer[0..n]);
+}
+
+/// Same as `realPathFile` except `absolute_path` is asserted to be an absolute
+/// path.
+///
+/// This function has limited platform support, and using it can lead to
+/// unnecessary failures and race conditions. It is generally advisable to
+/// avoid this function entirely.
+///
+/// See also:
+/// * `realPathFile`.
+/// * `realPathFileAlloc`.
+pub fn realPathFileAbsolute(io: Io, absolute_path: []const u8, out_buffer: []u8) RealPathFileError!usize {
+ assert(path.isAbsolute(absolute_path));
+ return io.vtable.dirRealPathFile(io.userdata, .cwd(), absolute_path, out_buffer);
+}
+
+/// Same as `realPathFileAbsolute` except allocates result.
+///
+/// This function has limited platform support, and using it can lead to
+/// unnecessary failures and race conditions. It is generally advisable to
+/// avoid this function entirely.
+///
+/// See also:
+/// * `realPathFileAbsolute`.
+/// * `realPathFile`.
+pub fn realPathFileAbsoluteAlloc(io: Io, absolute_path: []const u8, allocator: Allocator) RealPathFileAllocError![:0]u8 {
+ var buffer: [max_path_bytes]u8 = undefined;
+ const n = try realPathFileAbsolute(io, absolute_path, &buffer);
+ return allocator.dupeZ(u8, buffer[0..n]);
+}
+
+pub const DeleteFileError = error{
+ FileNotFound,
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to unlink a resource by path relative to it.
+ AccessDenied,
+ PermissionDenied,
+ FileBusy,
+ FileSystem,
+ IsDir,
+ SymLinkLoop,
+ NotDir,
+ SystemResources,
+ ReadOnlyFileSystem,
+ /// On Windows, `\\server` or `\\server\share` was not found.
+ NetworkNotFound,
+} || PathNameError || Io.Cancelable || Io.UnexpectedError;
+
+/// Delete a file name and possibly the file it refers to, based on an open directory handle.
+///
+/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, `sub_path` should be encoded as valid UTF-8.
+/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
+///
+/// Asserts that the path parameter has no null bytes.
+pub fn deleteFile(dir: Dir, io: Io, sub_path: []const u8) DeleteFileError!void {
+ return io.vtable.dirDeleteFile(io.userdata, dir, sub_path);
+}
+
+pub fn deleteFileAbsolute(io: Io, absolute_path: []const u8) DeleteFileError!void {
+ assert(path.isAbsolute(absolute_path));
+ return deleteFile(.cwd(), io, absolute_path);
+}
+
+test deleteFileAbsolute {}
+
+pub const DeleteDirError = error{
+ DirNotEmpty,
+ FileNotFound,
+ AccessDenied,
+ PermissionDenied,
+ FileBusy,
+ FileSystem,
+ SymLinkLoop,
+ NotDir,
+ SystemResources,
+ ReadOnlyFileSystem,
+ /// On Windows, `\\server` or `\\server\share` was not found.
+ NetworkNotFound,
+} || PathNameError || Io.Cancelable || Io.UnexpectedError;
+
+/// Returns `error.DirNotEmpty` if the directory is not empty.
+///
+/// To delete a directory recursively, see `deleteTree`.
+///
+/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, `sub_path` should be encoded as valid UTF-8.
+/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
+pub fn deleteDir(dir: Dir, io: Io, sub_path: []const u8) DeleteDirError!void {
+ return io.vtable.dirDeleteDir(io.userdata, dir, sub_path);
+}
+
+/// Same as `deleteDir` except the path is absolute.
+///
+/// On Windows, `dir_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, `dir_path` should be encoded as valid UTF-8.
+/// On other platforms, `dir_path` is an opaque sequence of bytes with no particular encoding.
+pub fn deleteDirAbsolute(io: Io, absolute_path: []const u8) DeleteDirError!void {
+ assert(path.isAbsolute(absolute_path));
+ return deleteDir(.cwd(), io, absolute_path);
+}
+
+pub const RenameError = error{
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to rename a resource by path relative to it.
+ ///
+ /// On Windows, this error may be returned instead of PathAlreadyExists when
+ /// renaming a directory over an existing directory.
+ AccessDenied,
+ PermissionDenied,
+ FileBusy,
+ DiskQuota,
+ IsDir,
+ SymLinkLoop,
+ LinkQuotaExceeded,
+ FileNotFound,
+ NotDir,
+ SystemResources,
+ NoSpaceLeft,
+ PathAlreadyExists,
+ ReadOnlyFileSystem,
+ RenameAcrossMountPoints,
+ NoDevice,
+ SharingViolation,
+ PipeBusy,
+ /// On Windows, `\\server` or `\\server\share` was not found.
+ NetworkNotFound,
+ /// On Windows, antivirus software is enabled by default. It can be
+ /// disabled, but Windows Update sometimes ignores the user's preference
+ /// and re-enables it. When enabled, antivirus software on Windows
+ /// intercepts file system operations and makes them significantly slower
+ /// in addition to possibly failing with this error code.
+ AntivirusInterference,
+} || PathNameError || Io.Cancelable || Io.UnexpectedError;
+
+/// Change the name or location of a file or directory.
+///
+/// If `new_sub_path` already exists, it will be replaced.
+///
+/// Renaming a file over an existing directory or a directory over an existing
+/// file will fail with `error.IsDir` or `error.NotDir`
+///
+/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, both paths should be encoded as valid UTF-8.
+/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
+pub fn rename(
+ old_dir: Dir,
+ old_sub_path: []const u8,
+ new_dir: Dir,
+ new_sub_path: []const u8,
+ io: Io,
+) RenameError!void {
+ return io.vtable.dirRename(io.userdata, old_dir, old_sub_path, new_dir, new_sub_path);
+}
+
+pub fn renameAbsolute(old_path: []const u8, new_path: []const u8, io: Io) RenameError!void {
+ assert(path.isAbsolute(old_path));
+ assert(path.isAbsolute(new_path));
+ const my_cwd = cwd();
+ return io.vtable.dirRename(io.userdata, my_cwd, old_path, my_cwd, new_path);
+}
+
+pub const HardLinkOptions = struct {
+ follow_symlinks: bool = true,
+};
+
+pub const HardLinkError = error{
+ AccessDenied,
+ PermissionDenied,
+ DiskQuota,
+ PathAlreadyExists,
+ HardwareFailure,
+ /// Either the OS or the filesystem does not support hard links.
+ OperationUnsupported,
+ SymLinkLoop,
+ LinkQuotaExceeded,
+ FileNotFound,
+ SystemResources,
+ NoSpaceLeft,
+ ReadOnlyFileSystem,
+ NotSameFileSystem,
+ NotDir,
+} || Io.Cancelable || PathNameError || Io.UnexpectedError;
+
+pub fn hardLink(
+ old_dir: Dir,
+ old_sub_path: []const u8,
+ new_dir: Dir,
+ new_sub_path: []const u8,
+ io: Io,
+ options: HardLinkOptions,
+) HardLinkError!void {
+ return io.vtable.dirHardLink(io.userdata, old_dir, old_sub_path, new_dir, new_sub_path, options);
+}
+
+/// Use with `symLink`, `symLinkAtomic`, and `symLinkAbsolute` to
+/// specify whether the symlink will point to a file or a directory. This value
+/// is ignored on all hosts except Windows where creating symlinks to different
+/// resource types, requires different flags. By default, `symLinkAbsolute` is
+/// assumed to point to a file.
+pub const SymLinkFlags = struct {
+ is_directory: bool = false,
+};
+
+pub const SymLinkError = error{
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to create a new symbolic link relative to it.
+ AccessDenied,
+ PermissionDenied,
+ DiskQuota,
+ PathAlreadyExists,
+ FileSystem,
+ SymLinkLoop,
+ FileNotFound,
+ SystemResources,
+ NoSpaceLeft,
+ /// On Windows, `\\server` or `\\server\share` was not found.
+ NetworkNotFound,
+ ReadOnlyFileSystem,
+ NotDir,
+} || PathNameError || Io.Cancelable || Io.UnexpectedError;
+
+/// Creates a symbolic link named `sym_link_path` which contains the string `target_path`.
+///
+/// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent
+/// one; the latter case is known as a dangling link.
+///
+/// If `sym_link_path` exists, it will not be overwritten.
+///
+/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, both paths should be encoded as valid UTF-8.
+/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
+pub fn symLink(
+ dir: Dir,
+ io: Io,
+ target_path: []const u8,
+ sym_link_path: []const u8,
+ flags: SymLinkFlags,
+) SymLinkError!void {
+ return io.vtable.dirSymLink(io.userdata, dir, target_path, sym_link_path, flags);
+}
+
+pub fn symLinkAbsolute(
+ io: Io,
+ target_path: []const u8,
+ sym_link_path: []const u8,
+ flags: SymLinkFlags,
+) SymLinkError!void {
+ assert(path.isAbsolute(target_path));
+ assert(path.isAbsolute(sym_link_path));
+ return symLink(.cwd(), io, target_path, sym_link_path, flags);
+}
+
+/// Same as `symLink`, except tries to create the symbolic link until it
+/// succeeds or encounters an error other than `error.PathAlreadyExists`.
+///
+/// * On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// * On WASI, both paths should be encoded as valid UTF-8.
+/// * On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
+pub fn symLinkAtomic(
+ dir: Dir,
+ io: Io,
+ target_path: []const u8,
+ sym_link_path: []const u8,
+ flags: SymLinkFlags,
+) !void {
+ if (dir.symLink(io, target_path, sym_link_path, flags)) {
+ return;
+ } else |err| switch (err) {
+ error.PathAlreadyExists => {},
+ else => |e| return e,
+ }
+
+ const dirname = path.dirname(sym_link_path) orelse ".";
+
+ const rand_len = @sizeOf(u64) * 2;
+ const temp_path_len = dirname.len + 1 + rand_len;
+ var temp_path_buf: [max_path_bytes]u8 = undefined;
+
+ if (temp_path_len > temp_path_buf.len) return error.NameTooLong;
+ @memcpy(temp_path_buf[0..dirname.len], dirname);
+ temp_path_buf[dirname.len] = path.sep;
+
+ const temp_path = temp_path_buf[0..temp_path_len];
+
+ while (true) {
+ const random_integer = std.crypto.random.int(u64);
+ temp_path[dirname.len + 1 ..][0..rand_len].* = std.fmt.hex(random_integer);
+
+ if (dir.symLink(io, target_path, temp_path, flags)) {
+ return dir.rename(temp_path, dir, sym_link_path, io);
+ } else |err| switch (err) {
+ error.PathAlreadyExists => continue,
+ else => |e| return e,
+ }
+ }
+}
+
+pub const ReadLinkError = error{
+ /// In WASI, this error may occur when the file descriptor does
+ /// not hold the required rights to read value of a symbolic link relative to it.
+ AccessDenied,
+ PermissionDenied,
+ FileSystem,
+ SymLinkLoop,
+ FileNotFound,
+ SystemResources,
+ NotLink,
+ NotDir,
+ /// Windows-only. This error may occur if the opened reparse point is
+ /// of unsupported type.
+ UnsupportedReparsePointType,
+ /// On Windows, `\\server` or `\\server\share` was not found.
+ NetworkNotFound,
+ /// On Windows, antivirus software is enabled by default. It can be
+ /// disabled, but Windows Update sometimes ignores the user's preference
+ /// and re-enables it. When enabled, antivirus software on Windows
+ /// intercepts file system operations and makes them significantly slower
+ /// in addition to possibly failing with this error code.
+ AntivirusInterference,
+} || PathNameError || Io.Cancelable || Io.UnexpectedError;
+
+/// Obtain target of a symbolic link.
+///
+/// Returns how many bytes of `buffer` are populated.
+///
+/// Asserts that the path parameter has no null bytes.
+///
+/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, `sub_path` should be encoded as valid UTF-8.
+/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
+pub fn readLink(dir: Dir, io: Io, sub_path: []const u8, buffer: []u8) ReadLinkError!usize {
+ return io.vtable.dirReadLink(io.userdata, dir, sub_path, buffer);
+}
+
+/// Same as `readLink`, except it asserts the path is absolute.
+///
+/// On Windows, `path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, `path` should be encoded as valid UTF-8.
+/// On other platforms, `path` is an opaque sequence of bytes with no particular encoding.
+pub fn readLinkAbsolute(io: Io, absolute_path: []const u8, buffer: []u8) ReadLinkError!usize {
+ assert(path.isAbsolute(absolute_path));
+ return io.vtable.dirReadLink(io.userdata, .cwd(), absolute_path, buffer);
+}
+
+pub const ReadFileAllocError = File.OpenError || File.Reader.Error || Allocator.Error || error{
+ /// File size reached or exceeded the provided limit.
+ StreamTooLong,
+};
+
+/// Reads all the bytes from the named file. On success, caller owns returned
+/// buffer.
+///
+/// If the file size is already known, a better alternative is to initialize a
+/// `File.Reader`.
+///
+/// If the file size cannot be obtained, an error is returned. If
+/// this is a realistic possibility, a better alternative is to initialize a
+/// `File.Reader` which handles this seamlessly.
+pub fn readFileAlloc(
+ dir: Dir,
+ io: Io,
+ /// On Windows, should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+ /// On WASI, should be encoded as valid UTF-8.
+ /// On other platforms, an opaque sequence of bytes with no particular encoding.
+ sub_path: []const u8,
+ /// Used to allocate the result.
+ gpa: Allocator,
+ /// If reached or exceeded, `error.StreamTooLong` is returned instead.
+ limit: Io.Limit,
+) ReadFileAllocError![]u8 {
+ return readFileAllocOptions(dir, io, sub_path, gpa, limit, .of(u8), null);
+}
+
+/// Reads all the bytes from the named file. On success, caller owns returned
+/// buffer.
+///
+/// If the file size is already known, a better alternative is to initialize a
+/// `File.Reader`.
+pub fn readFileAllocOptions(
+ dir: Dir,
+ io: Io,
+ /// On Windows, should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+ /// On WASI, should be encoded as valid UTF-8.
+ /// On other platforms, an opaque sequence of bytes with no particular encoding.
+ sub_path: []const u8,
+ /// Used to allocate the result.
+ gpa: Allocator,
+ /// If reached or exceeded, `error.StreamTooLong` is returned instead.
+ limit: Io.Limit,
+ comptime alignment: std.mem.Alignment,
+ comptime sentinel: ?u8,
+) ReadFileAllocError!(if (sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
+ var file = try dir.openFile(io, sub_path, .{
+ // We can take advantage of this on Windows since it doesn't involve any extra syscalls,
+ // so we can get error.IsDir during open rather than during the read.
+ .allow_directory = if (native_os == .windows) false else true,
+ });
+ defer file.close(io);
+ var file_reader = file.reader(io, &.{});
+ return file_reader.interface.allocRemainingAlignedSentinel(gpa, limit, alignment, sentinel) catch |err| switch (err) {
+ error.ReadFailed => return file_reader.err.?,
+ error.OutOfMemory, error.StreamTooLong => |e| return e,
+ };
+}
+
+pub const DeleteTreeError = error{
+ AccessDenied,
+ PermissionDenied,
+ FileTooBig,
+ SymLinkLoop,
+ ProcessFdQuotaExceeded,
+ SystemFdQuotaExceeded,
+ NoDevice,
+ SystemResources,
+ ReadOnlyFileSystem,
+ FileSystem,
+ FileBusy,
+ DeviceBusy,
+ /// One of the path components was not a directory.
+ /// This error is unreachable if `sub_path` does not contain a path separator.
+ NotDir,
+ /// On Windows, `\\server` or `\\server\share` was not found.
+ NetworkNotFound,
+} || PathNameError || Io.Cancelable || Io.UnexpectedError;
+
+/// Whether `sub_path` describes a symlink, file, or directory, this function
+/// removes it. If it cannot be removed because it is a non-empty directory,
+/// this function recursively removes its entries and then tries again.
+///
+/// This operation is not atomic on most file systems.
+///
+/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, `sub_path` should be encoded as valid UTF-8.
+/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
+pub fn deleteTree(dir: Dir, io: Io, sub_path: []const u8) DeleteTreeError!void {
+ var initial_iterable_dir = (try dir.deleteTreeOpenInitialSubpath(io, sub_path, .file)) orelse return;
+
+ const StackItem = struct {
+ name: []const u8,
+ parent_dir: Dir,
+ iter: Iterator,
+
+ fn closeAll(inner_io: Io, items: []@This()) void {
+ for (items) |*item| item.iter.reader.dir.close(inner_io);
+ }
+ };
+
+ var stack_buffer: [16]StackItem = undefined;
+ var stack = std.ArrayList(StackItem).initBuffer(&stack_buffer);
+ defer StackItem.closeAll(io, stack.items);
+
+ stack.appendAssumeCapacity(.{
+ .name = sub_path,
+ .parent_dir = dir,
+ .iter = initial_iterable_dir.iterateAssumeFirstIteration(),
+ });
+
+ process_stack: while (stack.items.len != 0) {
+ var top = &stack.items[stack.items.len - 1];
+ while (try top.iter.next(io)) |entry| {
+ var treat_as_dir = entry.kind == .directory;
+ handle_entry: while (true) {
+ if (treat_as_dir) {
+ if (stack.unusedCapacitySlice().len >= 1) {
+ var iterable_dir = top.iter.reader.dir.openDir(io, entry.name, .{
+ .follow_symlinks = false,
+ .iterate = true,
+ }) catch |err| switch (err) {
+ error.NotDir => {
+ treat_as_dir = false;
+ continue :handle_entry;
+ },
+ error.FileNotFound => {
+ // That's fine, we were trying to remove this directory anyway.
+ break :handle_entry;
+ },
+
+ error.AccessDenied,
+ error.PermissionDenied,
+ error.SymLinkLoop,
+ error.ProcessFdQuotaExceeded,
+ error.NameTooLong,
+ error.SystemFdQuotaExceeded,
+ error.NoDevice,
+ error.SystemResources,
+ error.Unexpected,
+ error.BadPathName,
+ error.NetworkNotFound,
+ error.DeviceBusy,
+ error.Canceled,
+ => |e| return e,
+ };
+ stack.appendAssumeCapacity(.{
+ .name = entry.name,
+ .parent_dir = top.iter.reader.dir,
+ .iter = iterable_dir.iterateAssumeFirstIteration(),
+ });
+ continue :process_stack;
+ } else {
+ try top.iter.reader.dir.deleteTreeMinStackSizeWithKindHint(io, entry.name, entry.kind);
+ break :handle_entry;
+ }
+ } else {
+ if (top.iter.reader.dir.deleteFile(io, entry.name)) {
+ break :handle_entry;
+ } else |err| switch (err) {
+ error.FileNotFound => break :handle_entry,
+
+ // Impossible because we do not pass any path separators.
+ error.NotDir => unreachable,
+
+ error.IsDir => {
+ treat_as_dir = true;
+ continue :handle_entry;
+ },
+
+ error.AccessDenied,
+ error.PermissionDenied,
+ error.SymLinkLoop,
+ error.NameTooLong,
+ error.SystemResources,
+ error.ReadOnlyFileSystem,
+ error.FileSystem,
+ error.FileBusy,
+ error.BadPathName,
+ error.NetworkNotFound,
+ error.Canceled,
+ error.Unexpected,
+ => |e| return e,
+ }
+ }
+ }
+ }
+
+ // On Windows, we can't delete until the dir's handle has been closed, so
+ // close it before we try to delete.
+ top.iter.reader.dir.close(io);
+
+ // In order to avoid double-closing the directory when cleaning up
+ // the stack in the case of an error, we save the relevant portions and
+ // pop the value from the stack.
+ const parent_dir = top.parent_dir;
+ const name = top.name;
+ stack.items.len -= 1;
+
+ var need_to_retry: bool = false;
+ parent_dir.deleteDir(io, name) catch |err| switch (err) {
+ error.FileNotFound => {},
+ error.DirNotEmpty => need_to_retry = true,
+ else => |e| return e,
+ };
+
+ if (need_to_retry) {
+ // Since we closed the handle that the previous iterator used, we
+ // need to re-open the dir and re-create the iterator.
+ var iterable_dir = iterable_dir: {
+ var treat_as_dir = true;
+ handle_entry: while (true) {
+ if (treat_as_dir) {
+ break :iterable_dir parent_dir.openDir(io, name, .{
+ .follow_symlinks = false,
+ .iterate = true,
+ }) catch |err| switch (err) {
+ error.NotDir => {
+ treat_as_dir = false;
+ continue :handle_entry;
+ },
+ error.FileNotFound => {
+ // That's fine, we were trying to remove this directory anyway.
+ continue :process_stack;
+ },
+
+ error.AccessDenied,
+ error.PermissionDenied,
+ error.SymLinkLoop,
+ error.ProcessFdQuotaExceeded,
+ error.NameTooLong,
+ error.SystemFdQuotaExceeded,
+ error.NoDevice,
+ error.SystemResources,
+ error.Unexpected,
+ error.BadPathName,
+ error.NetworkNotFound,
+ error.DeviceBusy,
+ error.Canceled,
+ => |e| return e,
+ };
+ } else {
+ if (parent_dir.deleteFile(io, name)) {
+ continue :process_stack;
+ } else |err| switch (err) {
+ error.FileNotFound => continue :process_stack,
+
+ // Impossible because we do not pass any path separators.
+ error.NotDir => unreachable,
+
+ error.IsDir => {
+ treat_as_dir = true;
+ continue :handle_entry;
+ },
+
+ error.AccessDenied,
+ error.PermissionDenied,
+ error.SymLinkLoop,
+ error.NameTooLong,
+ error.SystemResources,
+ error.ReadOnlyFileSystem,
+ error.FileSystem,
+ error.FileBusy,
+ error.BadPathName,
+ error.NetworkNotFound,
+ error.Canceled,
+ error.Unexpected,
+ => |e| return e,
+ }
+ }
+ }
+ };
+ // We know there is room on the stack since we are just re-adding
+ // the StackItem that we previously popped.
+ stack.appendAssumeCapacity(.{
+ .name = name,
+ .parent_dir = parent_dir,
+ .iter = iterable_dir.iterateAssumeFirstIteration(),
+ });
+ continue :process_stack;
+ }
+ }
+}
+
+/// Like `deleteTree`, but only keeps one `Iterator` active at a time to minimize the function's stack size.
+/// This is slower than `deleteTree` but uses less stack space.
+/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, `sub_path` should be encoded as valid UTF-8.
+/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
+pub fn deleteTreeMinStackSize(dir: Dir, io: Io, sub_path: []const u8) DeleteTreeError!void {
+ return dir.deleteTreeMinStackSizeWithKindHint(io, sub_path, .file);
+}
+
+fn deleteTreeMinStackSizeWithKindHint(parent: Dir, io: Io, sub_path: []const u8, kind_hint: File.Kind) DeleteTreeError!void {
+ start_over: while (true) {
+ var dir = (try parent.deleteTreeOpenInitialSubpath(io, sub_path, kind_hint)) orelse return;
+ var cleanup_dir_parent: ?Dir = null;
+ defer if (cleanup_dir_parent) |*d| d.close(io);
+
+ var cleanup_dir = true;
+ defer if (cleanup_dir) dir.close(io);
+
+ // Valid use of max_path_bytes because dir_name_buf will only
+ // ever store a single path component that was returned from the
+ // filesystem.
+ var dir_name_buf: [max_path_bytes]u8 = undefined;
+ var dir_name: []const u8 = sub_path;
+
+ // Here we must avoid recursion, in order to provide O(1) memory guarantee of this function.
+ // Go through each entry and if it is not a directory, delete it. If it is a directory,
+ // open it, and close the original directory. Repeat. Then start the entire operation over.
+
+ scan_dir: while (true) {
+ var dir_it = dir.iterateAssumeFirstIteration();
+ dir_it: while (try dir_it.next(io)) |entry| {
+ var treat_as_dir = entry.kind == .directory;
+ handle_entry: while (true) {
+ if (treat_as_dir) {
+ const new_dir = dir.openDir(io, entry.name, .{
+ .follow_symlinks = false,
+ .iterate = true,
+ }) catch |err| switch (err) {
+ error.NotDir => {
+ treat_as_dir = false;
+ continue :handle_entry;
+ },
+ error.FileNotFound => {
+ // That's fine, we were trying to remove this directory anyway.
+ continue :dir_it;
+ },
+
+ error.AccessDenied,
+ error.PermissionDenied,
+ error.SymLinkLoop,
+ error.ProcessFdQuotaExceeded,
+ error.NameTooLong,
+ error.SystemFdQuotaExceeded,
+ error.NoDevice,
+ error.SystemResources,
+ error.Unexpected,
+ error.BadPathName,
+ error.NetworkNotFound,
+ error.DeviceBusy,
+ error.Canceled,
+ => |e| return e,
+ };
+ if (cleanup_dir_parent) |*d| d.close(io);
+ cleanup_dir_parent = dir;
+ dir = new_dir;
+ const result = dir_name_buf[0..entry.name.len];
+ @memcpy(result, entry.name);
+ dir_name = result;
+ continue :scan_dir;
+ } else {
+ if (dir.deleteFile(io, entry.name)) {
+ continue :dir_it;
+ } else |err| switch (err) {
+ error.FileNotFound => continue :dir_it,
+
+ // Impossible because we do not pass any path separators.
+ error.NotDir => unreachable,
+
+ error.IsDir => {
+ treat_as_dir = true;
+ continue :handle_entry;
+ },
+
+ error.AccessDenied,
+ error.PermissionDenied,
+ error.SymLinkLoop,
+ error.NameTooLong,
+ error.SystemResources,
+ error.ReadOnlyFileSystem,
+ error.FileSystem,
+ error.FileBusy,
+ error.BadPathName,
+ error.NetworkNotFound,
+ error.Canceled,
+ error.Unexpected,
+ => |e| return e,
+ }
+ }
+ }
+ }
+ // Reached the end of the directory entries, which means we successfully deleted all of them.
+ // Now to remove the directory itself.
+ dir.close(io);
+ cleanup_dir = false;
+
+ if (cleanup_dir_parent) |d| {
+ d.deleteDir(io, dir_name) catch |err| switch (err) {
+ // These two things can happen due to file system race conditions.
+ error.FileNotFound, error.DirNotEmpty => continue :start_over,
+ else => |e| return e,
+ };
+ continue :start_over;
+ } else {
+ parent.deleteDir(io, sub_path) catch |err| switch (err) {
+ error.FileNotFound => return,
+ error.DirNotEmpty => continue :start_over,
+ else => |e| return e,
+ };
+ return;
+ }
+ }
+ }
+}
+
+/// On successful delete, returns null.
+fn deleteTreeOpenInitialSubpath(dir: Dir, io: Io, sub_path: []const u8, kind_hint: File.Kind) !?Dir {
+ return iterable_dir: {
+ // Treat as a file by default
+ var treat_as_dir = kind_hint == .directory;
+
+ handle_entry: while (true) {
+ if (treat_as_dir) {
+ break :iterable_dir dir.openDir(io, sub_path, .{
+ .follow_symlinks = false,
+ .iterate = true,
+ }) catch |err| switch (err) {
+ error.NotDir => {
+ treat_as_dir = false;
+ continue :handle_entry;
+ },
+ error.FileNotFound => {
+ // That's fine, we were trying to remove this directory anyway.
+ return null;
+ },
+
+ error.AccessDenied,
+ error.PermissionDenied,
+ error.SymLinkLoop,
+ error.ProcessFdQuotaExceeded,
+ error.NameTooLong,
+ error.SystemFdQuotaExceeded,
+ error.NoDevice,
+ error.SystemResources,
+ error.Unexpected,
+ error.BadPathName,
+ error.DeviceBusy,
+ error.NetworkNotFound,
+ error.Canceled,
+ => |e| return e,
+ };
+ } else {
+ if (dir.deleteFile(io, sub_path)) {
+ return null;
+ } else |err| switch (err) {
+ error.FileNotFound => return null,
+
+ error.IsDir => {
+ treat_as_dir = true;
+ continue :handle_entry;
+ },
+
+ error.AccessDenied,
+ error.PermissionDenied,
+ error.SymLinkLoop,
+ error.NameTooLong,
+ error.SystemResources,
+ error.ReadOnlyFileSystem,
+ error.NotDir,
+ error.FileSystem,
+ error.FileBusy,
+ error.BadPathName,
+ error.NetworkNotFound,
+ error.Canceled,
+ error.Unexpected,
+ => |e| return e,
+ }
+ }
+ }
+ };
+}
+
+pub const CopyFileOptions = struct {
+ /// When this is `null` the permissions are copied from the source file.
+ permissions: ?File.Permissions = null,
+};
+
+pub const CopyFileError = File.OpenError || File.StatError ||
+ File.Atomic.InitError || File.Atomic.FinishError ||
+ File.Reader.Error || File.Writer.Error || error{InvalidFileName};
+
+/// Atomically creates a new file at `dest_path` within `dest_dir` with the
+/// same contents as `source_path` within `source_dir`, overwriting any already
+/// existing file.
+///
+/// On Linux, until https://patchwork.kernel.org/patch/9636735/ is merged and
+/// readily available, there is a possibility of power loss or application
+/// termination leaving temporary files present in the same directory as
+/// dest_path.
+///
+/// On Windows, both paths should be encoded as
+/// [WTF-8](https://wtf-8.codeberg.page/). On WASI, both paths should be
+/// encoded as valid UTF-8. On other platforms, both paths are an opaque
+/// sequence of bytes with no particular encoding.
+pub fn copyFile(
+ source_dir: Dir,
+ source_path: []const u8,
+ dest_dir: Dir,
+ dest_path: []const u8,
+ io: Io,
+ options: CopyFileOptions,
+) CopyFileError!void {
+ const file = try source_dir.openFile(io, source_path, .{});
+ var file_reader: File.Reader = .init(.{ .handle = file.handle }, io, &.{});
+ defer file_reader.file.close(io);
+
+ const permissions = options.permissions orelse blk: {
+ const st = try file_reader.file.stat(io);
+ file_reader.size = st.size;
+ break :blk st.permissions;
+ };
+
+ var buffer: [1024]u8 = undefined; // Used only when direct fd-to-fd is not available.
+ var atomic_file = try dest_dir.atomicFile(io, dest_path, .{
+ .permissions = permissions,
+ .write_buffer = &buffer,
+ });
+ defer atomic_file.deinit();
+
+ _ = atomic_file.file_writer.interface.sendFileAll(&file_reader, .unlimited) catch |err| switch (err) {
+ error.ReadFailed => return file_reader.err.?,
+ error.WriteFailed => return atomic_file.file_writer.err.?,
+ };
+
+ try atomic_file.finish();
+}
+
+/// Same as `copyFile`, except asserts that both `source_path` and `dest_path`
+/// are absolute.
+///
+/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, both paths should be encoded as valid UTF-8.
+/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
+pub fn copyFileAbsolute(
+ source_path: []const u8,
+ dest_path: []const u8,
+ io: Io,
+ options: CopyFileOptions,
+) !void {
+ assert(path.isAbsolute(source_path));
+ assert(path.isAbsolute(dest_path));
+ const my_cwd = cwd();
+ return copyFile(my_cwd, source_path, my_cwd, dest_path, io, options);
+}
+
+test copyFileAbsolute {}
+
+pub const AtomicFileOptions = struct {
+ permissions: File.Permissions = .default_file,
+ make_path: bool = false,
+ write_buffer: []u8,
+};
+
+/// Directly access the `.file` field, and then call `File.Atomic.finish` to
+/// atomically replace `dest_path` with contents.
+///
+/// Always call `File.Atomic.deinit` to clean up, regardless of whether
+/// `File.Atomic.finish` succeeded. `dest_path` must remain valid until
+/// `File.Atomic.deinit` is called.
+///
+/// On Windows, `dest_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On WASI, `dest_path` should be encoded as valid UTF-8.
+/// On other platforms, `dest_path` is an opaque sequence of bytes with no particular encoding.
+pub fn atomicFile(parent: Dir, io: Io, dest_path: []const u8, options: AtomicFileOptions) !File.Atomic {
+ if (path.dirname(dest_path)) |dirname| {
+ const dir = if (options.make_path)
+ try parent.createDirPathOpen(io, dirname, .{})
+ else
+ try parent.openDir(io, dirname, .{});
+
+ return .init(io, path.basename(dest_path), options.permissions, dir, true, options.write_buffer);
+ } else {
+ return .init(io, dest_path, options.permissions, parent, false, options.write_buffer);
+ }
+}
+
+pub const SetPermissionsError = File.SetPermissionsError;
+pub const Permissions = File.Permissions;
+
+/// Also known as "chmod".
+///
+/// The process must have the correct privileges in order to do this
+/// successfully, or must have the effective user ID matching the owner
+/// of the directory. Additionally, the directory must have been opened
+/// with `OpenOptions.iterate` set to `true`.
+pub fn setPermissions(dir: Dir, io: Io, new_permissions: File.Permissions) SetPermissionsError!void {
+ return io.vtable.dirSetPermissions(io.userdata, dir, new_permissions);
+}
+
+pub const SetFilePermissionsError = PathNameError || SetPermissionsError || error{
+ ProcessFdQuotaExceeded,
+ SystemFdQuotaExceeded,
+ /// `SetFilePermissionsOptions.follow_symlinks` was set to false, which is
+ /// not allowed by the file system or operating system.
+ OperationUnsupported,
+};
+
+pub const SetFilePermissionsOptions = struct {
+ follow_symlinks: bool = true,
+};
+
+/// Also known as "fchmodat".
+pub fn setFilePermissions(
+ dir: Dir,
+ io: Io,
+ sub_path: []const u8,
+ new_permissions: File.Permissions,
+ options: SetFilePermissionsOptions,
+) SetFilePermissionsError!void {
+ return io.vtable.dirSetFilePermissions(io.userdata, dir, sub_path, new_permissions, options);
+}
+
+pub const SetOwnerError = File.SetOwnerError;
+
+/// Also known as "chown".
+///
+/// The process must have the correct privileges in order to do this
+/// successfully. The group may be changed by the owner of the directory to
+/// any group of which the owner is a member. Additionally, the directory
+/// must have been opened with `OpenOptions.iterate` set to `true`. If the
+/// owner or group is specified as `null`, the ID is not changed.
+pub fn setOwner(dir: Dir, io: Io, owner: ?File.Uid, group: ?File.Gid) SetOwnerError!void {
+ return io.vtable.dirSetOwner(io.userdata, dir, owner, group);
+}
+
+pub const SetFileOwnerError = PathNameError || SetOwnerError;
+
+pub const SetFileOwnerOptions = struct {
+ follow_symlinks: bool = true,
+};
+
+/// Also known as "fchownat".
+pub fn setFileOwner(
+ dir: Dir,
+ io: Io,
+ sub_path: []const u8,
+ owner: ?File.Uid,
+ group: ?File.Gid,
+ options: SetFileOwnerOptions,
+) SetOwnerError!void {
+ return io.vtable.dirSetFileOwner(io.userdata, dir, sub_path, owner, group, options);
+}
+
+pub const SetTimestampsError = File.SetTimestampsError || PathNameError;
+
+pub const SetTimestampsOptions = struct {
+ follow_symlinks: bool = true,
+};
+
+/// The granularity that ultimately is stored depends on the combination of
+/// operating system and file system. When a value as provided that exceeds
+/// this range, the value is clamped to the maximum.
+pub fn setTimestamps(
+ dir: Dir,
+ io: Io,
+ sub_path: []const u8,
+ last_accessed: Io.Timestamp,
+ last_modified: Io.Timestamp,
+ options: SetTimestampsOptions,
+) SetTimestampsError!void {
+ return io.vtable.dirSetTimestamps(io.userdata, dir, sub_path, last_accessed, last_modified, options);
+}
+
+/// Sets the accessed and modification timestamps of the provided path to the
+/// current wall clock time.
+///
+/// The granularity that ultimately is stored depends on the combination of
+/// operating system and file system.
+pub fn setTimestampsNow(dir: Dir, io: Io, sub_path: []const u8, options: SetTimestampsOptions) SetTimestampsError!void {
+ return io.vtable.fileSetTimestampsNow(io.userdata, dir, sub_path, options);
}
diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig
index 7c90176519..ee30103af0 100644
--- a/lib/std/Io/File.zig
+++ b/lib/std/Io/File.zig
@@ -7,12 +7,19 @@ const is_windows = native_os == .windows;
const std = @import("../std.zig");
const Io = std.Io;
const assert = std.debug.assert;
+const Dir = std.Io.Dir;
handle: Handle,
+pub const Reader = @import("File/Reader.zig");
+pub const Writer = @import("File/Writer.zig");
+pub const Atomic = @import("File/Atomic.zig");
+
pub const Handle = std.posix.fd_t;
-pub const Mode = std.posix.mode_t;
pub const INode = std.posix.ino_t;
+pub const NLink = std.posix.nlink_t;
+pub const Uid = std.posix.uid_t;
+pub const Gid = std.posix.gid_t;
pub const Kind = enum {
block_device,
@@ -41,9 +48,9 @@ pub const Stat = struct {
/// The FileIndex on Windows is similar. It is a number for a file that
/// is unique to each filesystem.
inode: INode,
+ nlink: NLink,
size: u64,
- /// This is available on POSIX systems and is always 0 otherwise.
- mode: Mode,
+ permissions: Permissions,
kind: Kind,
/// Last access time in nanoseconds, relative to UTC 1970-01-01.
atime: Io.Timestamp,
@@ -95,6 +102,26 @@ pub const Lock = enum {
pub const OpenFlags = struct {
mode: OpenMode = .read_only,
+ /// Determines the behavior when opening a path that refers to a directory.
+ ///
+ /// If set to true, directories may be opened, but `error.IsDir` is still
+ /// possible in certain scenarios, e.g. attempting to open a directory with
+ /// write permissions.
+ ///
+ /// If set to false, `error.IsDir` will always be returned when opening a directory.
+ ///
+ /// When set to false:
+ /// * On Windows, the behavior is implemented without any extra syscalls.
+ /// * On other operating systems, the behavior is implemented with an additional
+ /// `fstat` syscall.
+ allow_directory: bool = true,
+ /// Indicates intent for only some operations to be performed on this
+ /// opened file:
+ /// * `close`
+ /// * `stat`
+ /// On Linux and FreeBSD, this corresponds to `std.posix.O.PATH`.
+ path_only: bool = false,
+
/// Open the file with an advisory lock to coordinate with other processes
/// accessing it at the same time. An exclusive lock will prevent other
/// processes from acquiring a lock. A shared lock will prevent other
@@ -141,7 +168,51 @@ pub const OpenFlags = struct {
}
};
-pub const CreateFlags = std.fs.File.CreateFlags;
+pub const CreateFlags = struct {
+ /// Whether the file will be created with read access.
+ read: bool = false,
+
+ /// If the file already exists, and is a regular file, and the access
+ /// mode allows writing, it will be truncated to length 0.
+ truncate: bool = true,
+
+ /// Ensures that this open call creates the file, otherwise causes
+ /// `error.PathAlreadyExists` to be returned.
+ exclusive: bool = false,
+
+ /// Open the file with an advisory lock to coordinate with other processes
+ /// accessing it at the same time. An exclusive lock will prevent other
+ /// processes from acquiring a lock. A shared lock will prevent other
+ /// processes from acquiring a exclusive lock, but does not prevent
+ /// other process from getting their own shared locks.
+ ///
+ /// The lock is advisory, except on Linux in very specific circumstances[1].
+ /// This means that a process that does not respect the locking API can still get access
+ /// to the file, despite the lock.
+ ///
+ /// On these operating systems, the lock is acquired atomically with
+ /// opening the file:
+ /// * Darwin
+ /// * DragonFlyBSD
+ /// * FreeBSD
+ /// * Haiku
+ /// * NetBSD
+ /// * OpenBSD
+ /// On these operating systems, the lock is acquired via a separate syscall
+ /// after opening the file:
+ /// * Linux
+ /// * Windows
+ ///
+ /// [1]: https://www.kernel.org/doc/Documentation/filesystems/mandatory-locking.txt
+ lock: Lock = .none,
+
+ /// Sets whether or not to wait until the file is locked to return. If set to true,
+ /// `error.WouldBlock` will be returned. Otherwise, the file will wait until the file
+ /// is available to proceed.
+ lock_nonblocking: bool = false,
+
+ permissions: Permissions = .default_file,
+};
pub const OpenError = error{
SharingViolation,
@@ -149,7 +220,6 @@ pub const OpenError = error{
NoDevice,
/// On Windows, `\\server` or `\\server\share` was not found.
NetworkNotFound,
- ProcessNotFound,
/// On Windows, antivirus software is enabled by default. It can be
/// disabled, but Windows Update sometimes ignores the user's preference
/// and re-enables it. When enabled, antivirus software on Windows
@@ -178,7 +248,9 @@ pub const OpenError = error{
/// The file is too large to be opened. This error is unreachable
/// for 64-bit targets, as well as when opening directories.
FileTooBig,
- /// The path refers to directory but the `DIRECTORY` flag was not provided.
+ /// Either:
+ /// * The path refers to a directory and write permissions were requested.
+ /// * The path refers to a directory and `allow_directory` was set to false.
IsDir,
/// A new path cannot be created because the device has no room for the new file.
/// This error is only reachable when the `CREAT` flag is provided.
@@ -189,7 +261,7 @@ pub const OpenError = error{
/// The path already exists and the `CREAT` and `EXCL` flags were provided.
PathAlreadyExists,
DeviceBusy,
- FileLocksNotSupported,
+ FileLocksUnsupported,
/// One of these three things:
/// * pathname refers to an executable image which is currently being
/// executed and write access was requested.
@@ -204,451 +276,418 @@ pub const OpenError = error{
} || Io.Dir.PathNameError || Io.Cancelable || Io.UnexpectedError;
pub fn close(file: File, io: Io) void {
- return io.vtable.fileClose(io.userdata, file);
+ return io.vtable.fileClose(io.userdata, (&file)[0..1]);
}
-pub const OpenSelfExeError = OpenError || std.fs.SelfExePathError || std.posix.FlockError;
-
-pub fn openSelfExe(io: Io, flags: OpenFlags) OpenSelfExeError!File {
- return io.vtable.openSelfExe(io.userdata, flags);
+pub fn closeMany(io: Io, files: []const File) void {
+ return io.vtable.fileClose(io.userdata, files);
}
-pub const ReadPositionalError = Reader.Error || error{Unseekable};
+pub const SyncError = error{
+ InputOutput,
+ NoSpaceLeft,
+ DiskQuota,
+ AccessDenied,
+} || Io.Cancelable || Io.UnexpectedError;
-pub fn readPositional(file: File, io: Io, buffer: [][]u8, offset: u64) ReadPositionalError!usize {
- return io.vtable.fileReadPositional(io.userdata, file, buffer, offset);
+/// Blocks until all pending file contents and metadata modifications for the
+/// file have been synchronized with the underlying filesystem.
+///
+/// This does not ensure that metadata for the directory containing the file
+/// has also reached disk.
+pub fn sync(file: File, io: Io) SyncError!void {
+ return io.vtable.fileSync(io.userdata, file);
}
-pub const WriteStreamingError = error{} || Io.UnexpectedError || Io.Cancelable;
-
-pub fn writeStreaming(file: File, io: Io, buffer: [][]const u8) WriteStreamingError!usize {
- return file.fileWriteStreaming(io, buffer);
+/// Test whether the file refers to a terminal (similar to libc "isatty").
+///
+/// See also:
+/// * `enableAnsiEscapeCodes`
+/// * `supportsAnsiEscapeCodes`.
+pub fn isTty(file: File, io: Io) Io.Cancelable!bool {
+ return io.vtable.fileIsTty(io.userdata, file);
}
-pub const WritePositionalError = WriteStreamingError || error{Unseekable};
+pub const EnableAnsiEscapeCodesError = error{
+ NotTerminalDevice,
+} || Io.Cancelable || Io.UnexpectedError;
-pub fn writePositional(file: File, io: Io, buffer: [][]const u8, offset: u64) WritePositionalError!usize {
- return io.vtable.fileWritePositional(io.userdata, file, buffer, offset);
+pub fn enableAnsiEscapeCodes(file: File, io: Io) EnableAnsiEscapeCodesError!void {
+ return io.vtable.fileEnableAnsiEscapeCodes(io.userdata, file);
}
-pub fn openAbsolute(io: Io, absolute_path: []const u8, flags: OpenFlags) OpenError!File {
- assert(std.fs.path.isAbsolute(absolute_path));
- return Io.Dir.cwd().openFile(io, absolute_path, flags);
+/// Test whether ANSI escape codes will be treated as such without
+/// attempting to enable support for ANSI escape codes.
+pub fn supportsAnsiEscapeCodes(file: File, io: Io) Io.Cancelable!bool {
+ return io.vtable.fileSupportsAnsiEscapeCodes(io.userdata, file);
}
-/// Defaults to positional reading; falls back to streaming.
+pub const SetLengthError = error{
+ FileTooBig,
+ InputOutput,
+ FileBusy,
+ AccessDenied,
+ PermissionDenied,
+ NonResizable,
+} || Io.Cancelable || Io.UnexpectedError;
+
+/// Truncates or expands the file, populating any new data with zeroes.
///
-/// Positional is more threadsafe, since the global seek position is not
-/// affected.
-pub fn reader(file: File, io: Io, buffer: []u8) Reader {
- return .init(file, io, buffer);
+/// The file offset after this call is left unchanged.
+pub fn setLength(file: File, io: Io, new_length: u64) SetLengthError!void {
+ return io.vtable.fileSetLength(io.userdata, file, new_length);
}
-/// Positional is more threadsafe, since the global seek position is not
-/// affected, but when such syscalls are not available, preemptively
-/// initializing in streaming mode skips a failed syscall.
-pub fn readerStreaming(file: File, io: Io, buffer: []u8) Reader {
- return .initStreaming(file, io, buffer);
+pub const LengthError = StatError;
+
+/// Retrieve the ending byte index of the file.
+///
+/// Sometimes cheaper than `stat` if only the length is needed.
+pub fn length(file: File, io: Io) LengthError!u64 {
+ return io.vtable.fileLength(io.userdata, file);
}
-pub const SeekError = error{
- Unseekable,
- /// The file descriptor does not hold the required rights to seek on it.
+pub const SetPermissionsError = error{
AccessDenied,
+ PermissionDenied,
+ InputOutput,
+ SymLinkLoop,
+ FileNotFound,
+ SystemResources,
+ ReadOnlyFileSystem,
} || Io.Cancelable || Io.UnexpectedError;
-/// Memoizes key information about a file handle such as:
-/// * The size from calling stat, or the error that occurred therein.
-/// * The current seek position.
-/// * The error that occurred when trying to seek.
-/// * Whether reading should be done positionally or streaming.
-/// * Whether reading should be done via fd-to-fd syscalls (e.g. `sendfile`)
-/// versus plain variants (e.g. `read`).
+/// Also known as "chmod".
///
-/// Fulfills the `Io.Reader` interface.
-pub const Reader = struct {
- io: Io,
- file: File,
- err: ?Error = null,
- mode: Reader.Mode = .positional,
- /// Tracks the true seek position in the file. To obtain the logical
- /// position, use `logicalPos`.
- pos: u64 = 0,
- size: ?u64 = null,
- size_err: ?SizeError = null,
- seek_err: ?Reader.SeekError = null,
- interface: Io.Reader,
-
- pub const Error = error{
- InputOutput,
- SystemResources,
- IsDir,
- BrokenPipe,
- ConnectionResetByPeer,
- Timeout,
- /// In WASI, EBADF is mapped to this error because it is returned when
- /// trying to read a directory file descriptor as if it were a file.
- NotOpenForReading,
- SocketUnconnected,
- /// This error occurs when no global event loop is configured,
- /// and reading from the file descriptor would block.
- WouldBlock,
- /// In WASI, this error occurs when the file descriptor does
- /// not hold the required rights to read from it.
- AccessDenied,
- /// This error occurs in Linux if the process to be read from
- /// no longer exists.
- ProcessNotFound,
- /// Unable to read file due to lock.
- LockViolation,
- } || Io.Cancelable || Io.UnexpectedError;
-
- pub const SizeError = std.os.windows.GetFileSizeError || StatError || error{
- /// Occurs if, for example, the file handle is a network socket and therefore does not have a size.
- Streaming,
- };
-
- pub const SeekError = File.SeekError || error{
- /// Seeking fell back to reading, and reached the end before the requested seek position.
- /// `pos` remains at the end of the file.
- EndOfStream,
- /// Seeking fell back to reading, which failed.
- ReadFailed,
- };
-
- pub const Mode = enum {
- streaming,
- positional,
- /// Avoid syscalls other than `read` and `readv`.
- streaming_reading,
- /// Avoid syscalls other than `pread` and `preadv`.
- positional_reading,
- /// Indicates reading cannot continue because of a seek failure.
- failure,
-
- pub fn toStreaming(m: @This()) @This() {
- return switch (m) {
- .positional, .streaming => .streaming,
- .positional_reading, .streaming_reading => .streaming_reading,
- .failure => .failure,
- };
- }
-
- pub fn toReading(m: @This()) @This() {
- return switch (m) {
- .positional, .positional_reading => .positional_reading,
- .streaming, .streaming_reading => .streaming_reading,
- .failure => .failure,
- };
- }
- };
-
- pub fn initInterface(buffer: []u8) Io.Reader {
- return .{
- .vtable = &.{
- .stream = Reader.stream,
- .discard = Reader.discard,
- .readVec = Reader.readVec,
- },
- .buffer = buffer,
- .seek = 0,
- .end = 0,
- };
- }
+/// The process must have the correct privileges in order to do this
+/// successfully, or must have the effective user ID matching the owner of the
+/// file.
+pub fn setPermissions(file: File, io: Io, new_permissions: Permissions) SetPermissionsError!void {
+ return io.vtable.fileSetPermissions(io.userdata, file, new_permissions);
+}
- pub fn init(file: File, io: Io, buffer: []u8) Reader {
- return .{
- .io = io,
- .file = file,
- .interface = initInterface(buffer),
- };
- }
+pub const SetOwnerError = error{
+ AccessDenied,
+ PermissionDenied,
+ InputOutput,
+ SymLinkLoop,
+ FileNotFound,
+ SystemResources,
+ ReadOnlyFileSystem,
+} || Io.Cancelable || Io.UnexpectedError;
- /// Takes a legacy `std.fs.File` to help with upgrading.
- pub fn initAdapted(file: std.fs.File, io: Io, buffer: []u8) Reader {
- return .init(.{ .handle = file.handle }, io, buffer);
- }
+/// Also known as "chown".
+///
+/// The process must have the correct privileges in order to do this
+/// successfully. The group may be changed by the owner of the file to any
+/// group of which the owner is a member. If the owner or group is specified as
+/// `null`, the ID is not changed.
+pub fn setOwner(file: File, io: Io, owner: ?Uid, group: ?Gid) SetOwnerError!void {
+ return io.vtable.fileSetOwner(io.userdata, file, owner, group);
+}
- pub fn initSize(file: File, io: Io, buffer: []u8, size: ?u64) Reader {
- return .{
- .io = io,
- .file = file,
- .interface = initInterface(buffer),
- .size = size,
- };
- }
+/// Cross-platform representation of permissions on a file.
+///
+/// On POSIX systems this corresponds to "mode" and on Windows this corresponds to "attributes".
+pub const Permissions = std.Options.FilePermissions orelse if (is_windows) enum(std.os.windows.DWORD) {
+ default_file = 0,
+ _,
+
+ pub const default_dir: @This() = .default_file;
+ pub const executable_file: @This() = .default_file;
+ pub const has_executable_bit = false;
+
+ const windows = std.os.windows;
- /// Positional is more threadsafe, since the global seek position is not
- /// affected, but when such syscalls are not available, preemptively
- /// initializing in streaming mode skips a failed syscall.
- pub fn initStreaming(file: File, io: Io, buffer: []u8) Reader {
- return .{
- .io = io,
- .file = file,
- .interface = Reader.initInterface(buffer),
- .mode = .streaming,
- .seek_err = error.Unseekable,
- .size_err = error.Streaming,
- };
+ pub fn toAttributes(self: @This()) windows.FILE.ATTRIBUTE {
+ return @bitCast(@intFromEnum(self));
}
- pub fn getSize(r: *Reader) SizeError!u64 {
- return r.size orelse {
- if (r.size_err) |err| return err;
- if (stat(r.file, r.io)) |st| {
- if (st.kind == .file) {
- r.size = st.size;
- return st.size;
- } else {
- r.mode = r.mode.toStreaming();
- r.size_err = error.Streaming;
- return error.Streaming;
- }
- } else |err| {
- r.size_err = err;
- return err;
- }
- };
+ pub fn readOnly(self: @This()) bool {
+ const attributes = toAttributes(self);
+ return attributes & windows.FILE_ATTRIBUTE_READONLY != 0;
}
- pub fn seekBy(r: *Reader, offset: i64) Reader.SeekError!void {
- const io = r.io;
- switch (r.mode) {
- .positional, .positional_reading => {
- setLogicalPos(r, @intCast(@as(i64, @intCast(logicalPos(r))) + offset));
- },
- .streaming, .streaming_reading => {
- const seek_err = r.seek_err orelse e: {
- if (io.vtable.fileSeekBy(io.userdata, r.file, offset)) {
- setLogicalPos(r, @intCast(@as(i64, @intCast(logicalPos(r))) + offset));
- return;
- } else |err| {
- r.seek_err = err;
- break :e err;
- }
- };
- var remaining = std.math.cast(u64, offset) orelse return seek_err;
- while (remaining > 0) {
- remaining -= discard(&r.interface, .limited64(remaining)) catch |err| {
- r.seek_err = err;
- return err;
- };
- }
- r.interface.tossBuffered();
- },
- .failure => return r.seek_err.?,
- }
+ pub fn setReadOnly(self: @This(), read_only: bool) @This() {
+ const attributes = toAttributes(self);
+ return @enumFromInt(if (read_only)
+ attributes | windows.FILE_ATTRIBUTE_READONLY
+ else
+ attributes & ~@as(windows.DWORD, windows.FILE_ATTRIBUTE_READONLY));
+ }
+} else if (std.posix.mode_t != u0) enum(std.posix.mode_t) {
+ /// This is the default mode given to POSIX operating systems for creating
+ /// files. `0o666` is "-rw-rw-rw-" which is counter-intuitive at first,
+ /// since most people would expect "-rw-r--r--", for example, when using
+ /// the `touch` command, which would correspond to `0o644`. However, POSIX
+ /// libc implementations use `0o666` inside `fopen` and then rely on the
+ /// process-scoped "umask" setting to adjust this number for file creation.
+ default_file = 0o666,
+ default_dir = 0o755,
+ executable_file = 0o777,
+ _,
+
+ pub const has_executable_bit = native_os != .wasi;
+
+ pub fn toMode(self: @This()) std.posix.mode_t {
+ return @intFromEnum(self);
}
- /// Repositions logical read offset relative to the beginning of the file.
- pub fn seekTo(r: *Reader, offset: u64) Reader.SeekError!void {
- const io = r.io;
- switch (r.mode) {
- .positional, .positional_reading => {
- setLogicalPos(r, offset);
- },
- .streaming, .streaming_reading => {
- const logical_pos = logicalPos(r);
- if (offset >= logical_pos) return Reader.seekBy(r, @intCast(offset - logical_pos));
- if (r.seek_err) |err| return err;
- io.vtable.fileSeekTo(io.userdata, r.file, offset) catch |err| {
- r.seek_err = err;
- return err;
- };
- setLogicalPos(r, offset);
- },
- .failure => return r.seek_err.?,
- }
+ pub fn fromMode(mode: std.posix.mode_t) @This() {
+ return @enumFromInt(mode);
}
- pub fn logicalPos(r: *const Reader) u64 {
- return r.pos - r.interface.bufferedLen();
+ /// Returns `true` if and only if no class has write permissions.
+ pub fn readOnly(self: @This()) bool {
+ const mode = toMode(self);
+ return mode & 0o222 == 0;
}
- fn setLogicalPos(r: *Reader, offset: u64) void {
- const logical_pos = r.logicalPos();
- if (offset < logical_pos or offset >= r.pos) {
- r.interface.tossBuffered();
- r.pos = offset;
- } else r.interface.toss(@intCast(offset - logical_pos));
+ /// Enables write permission for all classes.
+ pub fn setReadOnly(self: @This(), read_only: bool) @This() {
+ const mode = toMode(self);
+ const o222 = @as(std.posix.mode_t, 0o222);
+ return @enumFromInt(if (read_only) mode & ~o222 else mode | o222);
}
+} else enum(u0) {
+ default_file = 0,
+ pub const default_dir: @This() = .default_file;
+ pub const executable_file: @This() = .default_file;
+ pub const has_executable_bit = false;
+};
- /// Number of slices to store on the stack, when trying to send as many byte
- /// vectors through the underlying read calls as possible.
- const max_buffers_len = 16;
+pub const SetTimestampsError = error{
+ /// times is NULL, or both nsec values are UTIME_NOW, and either:
+ /// * the effective user ID of the caller does not match the owner
+ /// of the file, the caller does not have write access to the
+ /// file, and the caller is not privileged (Linux: does not have
+ /// either the CAP_FOWNER or the CAP_DAC_OVERRIDE capability);
+ /// or,
+ /// * the file is marked immutable (see chattr(1)).
+ AccessDenied,
+ /// The caller attempted to change one or both timestamps to a value
+ /// other than the current time, or to change one of the timestamps
+ /// to the current time while leaving the other timestamp unchanged,
+ /// (i.e., times is not NULL, neither nsec field is UTIME_NOW,
+ /// and neither nsec field is UTIME_OMIT) and either:
+ /// * the caller's effective user ID does not match the owner of
+ /// file, and the caller is not privileged (Linux: does not have
+ /// the CAP_FOWNER capability); or,
+ /// * the file is marked append-only or immutable (see chattr(1)).
+ PermissionDenied,
+ ReadOnlyFileSystem,
+} || Io.Cancelable || Io.UnexpectedError;
- fn stream(io_reader: *Io.Reader, w: *Io.Writer, limit: Io.Limit) Io.Reader.StreamError!usize {
- const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
- return streamMode(r, w, limit, r.mode);
- }
+/// The granularity that ultimately is stored depends on the combination of
+/// operating system and file system. When a value as provided that exceeds
+/// this range, the value is clamped to the maximum.
+pub fn setTimestamps(
+ file: File,
+ io: Io,
+ last_accessed: Io.Timestamp,
+ last_modified: Io.Timestamp,
+) SetTimestampsError!void {
+ return io.vtable.fileSetTimestamps(io.userdata, file, last_accessed, last_modified);
+}
- pub fn streamMode(r: *Reader, w: *Io.Writer, limit: Io.Limit, mode: Reader.Mode) Io.Reader.StreamError!usize {
- switch (mode) {
- .positional, .streaming => return w.sendFile(r, limit) catch |write_err| switch (write_err) {
- error.Unimplemented => {
- r.mode = r.mode.toReading();
- return 0;
- },
- else => |e| return e,
- },
- .positional_reading => {
- const dest = limit.slice(try w.writableSliceGreedy(1));
- var data: [1][]u8 = .{dest};
- const n = try readVecPositional(r, &data);
- w.advance(n);
- return n;
- },
- .streaming_reading => {
- const dest = limit.slice(try w.writableSliceGreedy(1));
- var data: [1][]u8 = .{dest};
- const n = try readVecStreaming(r, &data);
- w.advance(n);
- return n;
- },
- .failure => return error.ReadFailed,
- }
- }
+/// Sets the accessed and modification timestamps of `file` to the current wall
+/// clock time.
+///
+/// The granularity that ultimately is stored depends on the combination of
+/// operating system and file system.
+pub fn setTimestampsNow(file: File, io: Io) SetTimestampsError!void {
+ return io.vtable.fileSetTimestampsNow(io.userdata, file);
+}
- fn readVec(io_reader: *Io.Reader, data: [][]u8) Io.Reader.Error!usize {
- const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
- switch (r.mode) {
- .positional, .positional_reading => return readVecPositional(r, data),
- .streaming, .streaming_reading => return readVecStreaming(r, data),
- .failure => return error.ReadFailed,
- }
- }
+/// Returns 0 on stream end or if `buffer` has no space available for data.
+///
+/// See also:
+/// * `reader`
+pub fn readStreaming(file: File, io: Io, buffer: []const []u8) Reader.Error!usize {
+ return io.vtable.fileReadStreaming(io.userdata, file, buffer);
+}
- fn readVecPositional(r: *Reader, data: [][]u8) Io.Reader.Error!usize {
- const io = r.io;
- var iovecs_buffer: [max_buffers_len][]u8 = undefined;
- const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, data);
- const dest = iovecs_buffer[0..dest_n];
- assert(dest[0].len > 0);
- const n = io.vtable.fileReadPositional(io.userdata, r.file, dest, r.pos) catch |err| switch (err) {
- error.Unseekable => {
- r.mode = r.mode.toStreaming();
- const pos = r.pos;
- if (pos != 0) {
- r.pos = 0;
- r.seekBy(@intCast(pos)) catch {
- r.mode = .failure;
- return error.ReadFailed;
- };
- }
- return 0;
- },
- else => |e| {
- r.err = e;
- return error.ReadFailed;
- },
- };
- if (n == 0) {
- r.size = r.pos;
- return error.EndOfStream;
- }
- r.pos += n;
- if (n > data_size) {
- r.interface.end += n - data_size;
- return data_size;
- }
- return n;
- }
+pub const ReadPositionalError = Reader.Error || error{Unseekable};
- fn readVecStreaming(r: *Reader, data: [][]u8) Io.Reader.Error!usize {
- const io = r.io;
- var iovecs_buffer: [max_buffers_len][]u8 = undefined;
- const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, data);
- const dest = iovecs_buffer[0..dest_n];
- assert(dest[0].len > 0);
- const n = io.vtable.fileReadStreaming(io.userdata, r.file, dest) catch |err| {
- r.err = err;
- return error.ReadFailed;
- };
- if (n == 0) {
- r.size = r.pos;
- return error.EndOfStream;
- }
- r.pos += n;
- if (n > data_size) {
- r.interface.end += n - data_size;
- return data_size;
- }
- return n;
- }
+/// Returns 0 on stream end or if `buffer` has no space available for data.
+///
+/// See also:
+/// * `reader`
+pub fn readPositional(file: File, io: Io, buffer: []const []u8, offset: u64) ReadPositionalError!usize {
+ return io.vtable.fileReadPositional(io.userdata, file, buffer, offset);
+}
+
+pub const WritePositionalError = Writer.Error || error{Unseekable};
+
+/// See also:
+/// * `writer`
+pub fn writePositional(file: File, io: Io, buffer: []const []const u8, offset: u64) WritePositionalError!usize {
+ return io.vtable.fileWritePositional(io.userdata, file, &.{}, buffer, 1, offset);
+}
+
+/// Equivalent to creating a positional writer, writing `bytes`, and then flushing.
+pub fn writePositionalAll(file: File, io: Io, bytes: []const u8, offset: u64) WritePositionalError!void {
+ var index: usize = 0;
+ while (index < bytes.len)
+ index += try io.vtable.fileWritePositional(io.userdata, file, &.{}, &.{bytes[index..]}, 1, offset + index);
+}
+
+pub const SeekError = error{
+ Unseekable,
+ /// The file descriptor does not hold the required rights to seek on it.
+ AccessDenied,
+} || Io.Cancelable || Io.UnexpectedError;
+
+pub const WriteFilePositionalError = Writer.WriteFileError || error{Unseekable};
+
+/// Defaults to positional reading; falls back to streaming.
+///
+/// Positional is more threadsafe, since the global seek position is not
+/// affected.
+///
+/// See also:
+/// * `readerStreaming`
+pub fn reader(file: File, io: Io, buffer: []u8) Reader {
+ return .init(file, io, buffer);
+}
- fn discard(io_reader: *Io.Reader, limit: Io.Limit) Io.Reader.Error!usize {
- const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
- const io = r.io;
- const file = r.file;
- switch (r.mode) {
- .positional, .positional_reading => {
- const size = r.getSize() catch {
- r.mode = r.mode.toStreaming();
- return 0;
- };
- const logical_pos = logicalPos(r);
- const delta = @min(@intFromEnum(limit), size - logical_pos);
- setLogicalPos(r, logical_pos + delta);
- return delta;
- },
- .streaming, .streaming_reading => {
- // Unfortunately we can't seek forward without knowing the
- // size because the seek syscalls provided to us will not
- // return the true end position if a seek would exceed the
- // end.
- fallback: {
- if (r.size_err == null and r.seek_err == null) break :fallback;
-
- const buffered_len = r.interface.bufferedLen();
- var remaining = @intFromEnum(limit);
- if (remaining <= buffered_len) {
- r.interface.seek += remaining;
- return remaining;
- }
- remaining -= buffered_len;
- r.interface.seek = 0;
- r.interface.end = 0;
-
- var trash_buffer: [128]u8 = undefined;
- var data: [1][]u8 = .{trash_buffer[0..@min(trash_buffer.len, remaining)]};
- var iovecs_buffer: [max_buffers_len][]u8 = undefined;
- const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, &data);
- const dest = iovecs_buffer[0..dest_n];
- assert(dest[0].len > 0);
- const n = io.vtable.fileReadStreaming(io.userdata, file, dest) catch |err| {
- r.err = err;
- return error.ReadFailed;
- };
- if (n == 0) {
- r.size = r.pos;
- return error.EndOfStream;
- }
- r.pos += n;
- if (n > data_size) {
- r.interface.end += n - data_size;
- remaining -= data_size;
- } else {
- remaining -= n;
- }
- return @intFromEnum(limit) - remaining;
- }
- const size = r.getSize() catch return 0;
- const n = @min(size - r.pos, std.math.maxInt(i64), @intFromEnum(limit));
- io.vtable.fileSeekBy(io.userdata, file, n) catch |err| {
- r.seek_err = err;
- return 0;
- };
- r.pos += n;
- return n;
- },
- .failure => return error.ReadFailed,
- }
+/// Equivalent to creating a positional reader and reading multiple times to fill `buffer`.
+///
+/// Returns number of bytes read into `buffer`. If less than `buffer.len`, end of file occurred.
+///
+/// See also:
+/// * `reader`
+pub fn readPositionalAll(file: File, io: Io, buffer: []u8, offset: u64) ReadPositionalError!usize {
+ var index: usize = 0;
+ while (index != buffer.len) {
+ const amt = try file.readPositional(io, &.{buffer[index..]}, offset + index);
+ if (amt == 0) break;
+ index += amt;
}
+ return index;
+}
+
+/// Positional is more threadsafe, since the global seek position is not
+/// affected, but when such syscalls are not available, preemptively
+/// initializing in streaming mode skips a failed syscall.
+///
+/// See also:
+/// * `reader`
+pub fn readerStreaming(file: File, io: Io, buffer: []u8) Reader {
+ return .initStreaming(file, io, buffer);
+}
- /// Returns whether the stream is at the logical end.
- pub fn atEnd(r: *Reader) bool {
- // Even if stat fails, size is set when end is encountered.
- const size = r.size orelse return false;
- return size - logicalPos(r) == 0;
+/// Defaults to positional reading; falls back to streaming.
+///
+/// Positional is more threadsafe, since the global seek position is not
+/// affected.
+pub fn writer(file: File, io: Io, buffer: []u8) Writer {
+ return .init(file, io, buffer);
+}
+
+/// Positional is more threadsafe, since the global seek position is not
+/// affected, but when such syscalls are not available, preemptively
+/// initializing in streaming mode will skip a failed syscall.
+pub fn writerStreaming(file: File, io: Io, buffer: []u8) Writer {
+ return .initStreaming(file, io, buffer);
+}
+
+/// Equivalent to creating a streaming writer, writing `bytes`, and then flushing.
+pub fn writeStreamingAll(file: File, io: Io, bytes: []const u8) Writer.Error!void {
+ var index: usize = 0;
+ while (index < bytes.len) {
+ index += try io.vtable.fileWriteStreaming(io.userdata, file, &.{}, &.{bytes[index..]}, 1);
}
-};
+}
+
+pub const LockError = error{
+ SystemResources,
+ FileLocksUnsupported,
+} || Io.Cancelable || Io.UnexpectedError;
+
+/// Blocks when an incompatible lock is held by another process. A process may
+/// hold only one type of lock (shared or exclusive) on a file. When a process
+/// terminates in any way, the lock is released.
+///
+/// Assumes the file is unlocked.
+pub fn lock(file: File, io: Io, l: Lock) LockError!void {
+ return io.vtable.fileLock(io.userdata, file, l);
+}
+
+/// Assumes the file is locked.
+pub fn unlock(file: File, io: Io) void {
+ return io.vtable.fileUnlock(io.userdata, file);
+}
+
+/// Attempts to obtain a lock, returning `true` if the lock is obtained, and
+/// `false` if there was an existing incompatible lock held. A process may hold
+/// only one type of lock (shared or exclusive) on a file. When a process
+/// terminates in any way, the lock is released.
+///
+/// Assumes the file is unlocked.
+pub fn tryLock(file: File, io: Io, l: Lock) LockError!bool {
+ return io.vtable.fileTryLock(io.userdata, file, l);
+}
+
+pub const DowngradeLockError = Io.Cancelable || Io.UnexpectedError;
+
+/// Assumes the file is already locked in exclusive mode.
+/// Atomically modifies the lock to be in shared mode, without releasing it.
+pub fn downgradeLock(file: File, io: Io) LockError!void {
+ return io.vtable.fileDowngradeLock(io.userdata, file);
+}
+
+pub const RealPathError = error{
+ /// This operating system, file system, or `Io` implementation does not
+ /// support realpath operations.
+ OperationUnsupported,
+ /// The full file system path could not fit into the provided buffer, or
+ /// due to its length could not be obtained via realpath functions no
+ /// matter the buffer size provided.
+ NameTooLong,
+ FileNotFound,
+ AccessDenied,
+ PermissionDenied,
+ NotDir,
+ SymLinkLoop,
+ InputOutput,
+ FileTooBig,
+ IsDir,
+ ProcessFdQuotaExceeded,
+ SystemFdQuotaExceeded,
+ NoDevice,
+ SystemResources,
+ NoSpaceLeft,
+ FileSystem,
+ DeviceBusy,
+ SharingViolation,
+ PipeBusy,
+ /// On Windows, `\\server` or `\\server\share` was not found.
+ NetworkNotFound,
+ PathAlreadyExists,
+ /// On Windows, antivirus software is enabled by default. It can be
+ /// disabled, but Windows Update sometimes ignores the user's preference
+ /// and re-enables it. When enabled, antivirus software on Windows
+ /// intercepts file system operations and makes them significantly slower
+ /// in addition to possibly failing with this error code.
+ AntivirusInterference,
+ /// On Windows, the volume does not contain a recognized file system. File
+ /// system drivers might not be loaded, or the volume may be corrupt.
+ UnrecognizedVolume,
+} || Io.Cancelable || Io.UnexpectedError;
+
+/// Obtains the canonicalized absolute path name corresponding to an open file
+/// handle.
+///
+/// This function has limited platform support, and using it can lead to
+/// unnecessary failures and race conditions. It is generally advisable to
+/// avoid this function entirely.
+pub fn realPath(file: File, io: Io, out_buffer: []u8) RealPathError!usize {
+ return io.vtable.fileRealPath(io.userdata, file, out_buffer);
+}
+
+test {
+ _ = Reader;
+ _ = Writer;
+ _ = Atomic;
+}
diff --git a/lib/std/fs/AtomicFile.zig b/lib/std/Io/File/Atomic.zig
index 96793aec72..340303ca39 100644
--- a/lib/std/fs/AtomicFile.zig
+++ b/lib/std/Io/File/Atomic.zig
@@ -1,10 +1,10 @@
-const AtomicFile = @This();
-const std = @import("../std.zig");
-const File = std.fs.File;
-const Dir = std.fs.Dir;
-const fs = std.fs;
+const Atomic = @This();
+
+const std = @import("../../std.zig");
+const Io = std.Io;
+const File = std.Io.File;
+const Dir = std.Io.Dir;
const assert = std.debug.assert;
-const posix = std.posix;
file_writer: File.Writer,
random_integer: u64,
@@ -18,21 +18,25 @@ pub const InitError = File.OpenError;
/// Note that the `Dir.atomicFile` API may be more handy than this lower-level function.
pub fn init(
+ io: Io,
dest_basename: []const u8,
- mode: File.Mode,
+ permissions: File.Permissions,
dir: Dir,
close_dir_on_deinit: bool,
write_buffer: []u8,
-) InitError!AtomicFile {
+) InitError!Atomic {
while (true) {
const random_integer = std.crypto.random.int(u64);
const tmp_sub_path = std.fmt.hex(random_integer);
- const file = dir.createFile(&tmp_sub_path, .{ .mode = mode, .exclusive = true }) catch |err| switch (err) {
+ const file = dir.createFile(io, &tmp_sub_path, .{
+ .permissions = permissions,
+ .exclusive = true,
+ }) catch |err| switch (err) {
error.PathAlreadyExists => continue,
else => |e| return e,
};
return .{
- .file_writer = file.writer(write_buffer),
+ .file_writer = file.writer(io, write_buffer),
.random_integer = random_integer,
.dest_basename = dest_basename,
.file_open = true,
@@ -44,51 +48,55 @@ pub fn init(
}
/// Always call deinit, even after a successful finish().
-pub fn deinit(af: *AtomicFile) void {
+pub fn deinit(af: *Atomic) void {
+ const io = af.file_writer.io;
+
if (af.file_open) {
- af.file_writer.file.close();
+ af.file_writer.file.close(io);
af.file_open = false;
}
if (af.file_exists) {
const tmp_sub_path = std.fmt.hex(af.random_integer);
- af.dir.deleteFile(&tmp_sub_path) catch {};
+ af.dir.deleteFile(io, &tmp_sub_path) catch {};
af.file_exists = false;
}
if (af.close_dir_on_deinit) {
- af.dir.close();
+ af.dir.close(io);
}
af.* = undefined;
}
-pub const FlushError = File.WriteError;
+pub const FlushError = File.Writer.Error;
-pub fn flush(af: *AtomicFile) FlushError!void {
+pub fn flush(af: *Atomic) FlushError!void {
af.file_writer.interface.flush() catch |err| switch (err) {
error.WriteFailed => return af.file_writer.err.?,
};
}
-pub const RenameIntoPlaceError = posix.RenameError;
+pub const RenameIntoPlaceError = Dir.RenameError;
/// On Windows, this function introduces a period of time where some file
/// system operations on the destination file will result in
/// `error.AccessDenied`, including rename operations (such as the one used in
/// this function).
-pub fn renameIntoPlace(af: *AtomicFile) RenameIntoPlaceError!void {
+pub fn renameIntoPlace(af: *Atomic) RenameIntoPlaceError!void {
+ const io = af.file_writer.io;
+
assert(af.file_exists);
if (af.file_open) {
- af.file_writer.file.close();
+ af.file_writer.file.close(io);
af.file_open = false;
}
const tmp_sub_path = std.fmt.hex(af.random_integer);
- try posix.renameat(af.dir.fd, &tmp_sub_path, af.dir.fd, af.dest_basename);
+ try af.dir.rename(&tmp_sub_path, af.dir, af.dest_basename, io);
af.file_exists = false;
}
pub const FinishError = FlushError || RenameIntoPlaceError;
/// Combination of `flush` followed by `renameIntoPlace`.
-pub fn finish(af: *AtomicFile) FinishError!void {
+pub fn finish(af: *Atomic) FinishError!void {
try af.flush();
try af.renameIntoPlace();
}
diff --git a/lib/std/Io/File/Reader.zig b/lib/std/Io/File/Reader.zig
new file mode 100644
index 0000000000..0c573c9ae1
--- /dev/null
+++ b/lib/std/Io/File/Reader.zig
@@ -0,0 +1,394 @@
+//! Memoizes key information about a file handle such as:
+//! * The size from calling stat, or the error that occurred therein.
+//! * The current seek position.
+//! * The error that occurred when trying to seek.
+//! * Whether reading should be done positionally or streaming.
+//! * Whether reading should be done via fd-to-fd syscalls (e.g. `sendfile`)
+//! versus plain variants (e.g. `read`).
+//!
+//! Fulfills the `Io.Reader` interface.
+const Reader = @This();
+
+const std = @import("../../std.zig");
+const Io = std.Io;
+const File = std.Io.File;
+const assert = std.debug.assert;
+
+io: Io,
+file: File,
+err: ?Error = null,
+mode: Mode = .positional,
+/// Tracks the true seek position in the file. To obtain the logical position,
+/// use `logicalPos`.
+pos: u64 = 0,
+size: ?u64 = null,
+size_err: ?SizeError = null,
+seek_err: ?SeekError = null,
+interface: Io.Reader,
+
+pub const Error = error{
+ InputOutput,
+ SystemResources,
+ IsDir,
+ BrokenPipe,
+ ConnectionResetByPeer,
+ Timeout,
+ /// In WASI, EBADF is mapped to this error because it is returned when
+ /// trying to read a directory file descriptor as if it were a file.
+ NotOpenForReading,
+ SocketUnconnected,
+ /// Non-blocking has been enabled, and reading from the file descriptor
+ /// would block.
+ WouldBlock,
+ /// In WASI, this error occurs when the file descriptor does
+ /// not hold the required rights to read from it.
+ AccessDenied,
+ /// Unable to read file due to lock. Depending on the `Io` implementation,
+ /// reading from a locked file may return this error, or may ignore the
+ /// lock.
+ LockViolation,
+} || Io.Cancelable || Io.UnexpectedError;
+
+pub const SizeError = std.os.windows.GetFileSizeError || File.StatError || error{
+ /// Occurs if, for example, the file handle is a network socket and therefore does not have a size.
+ Streaming,
+};
+
+pub const SeekError = File.SeekError || error{
+ /// Seeking fell back to reading, and reached the end before the requested seek position.
+ /// `pos` remains at the end of the file.
+ EndOfStream,
+ /// Seeking fell back to reading, which failed.
+ ReadFailed,
+};
+
+pub const Mode = enum {
+ streaming,
+ positional,
+ /// Avoid syscalls other than `read` and `readv`.
+ streaming_simple,
+ /// Avoid syscalls other than `pread` and `preadv`.
+ positional_simple,
+ /// Indicates reading cannot continue because of a seek failure.
+ failure,
+
+ pub fn toStreaming(m: @This()) @This() {
+ return switch (m) {
+ .positional, .streaming => .streaming,
+ .positional_simple, .streaming_simple => .streaming_simple,
+ .failure => .failure,
+ };
+ }
+
+ pub fn toSimple(m: @This()) @This() {
+ return switch (m) {
+ .positional, .positional_simple => .positional_simple,
+ .streaming, .streaming_simple => .streaming_simple,
+ .failure => .failure,
+ };
+ }
+};
+
+pub fn initInterface(buffer: []u8) Io.Reader {
+ return .{
+ .vtable = &.{
+ .stream = stream,
+ .discard = discard,
+ .readVec = readVec,
+ },
+ .buffer = buffer,
+ .seek = 0,
+ .end = 0,
+ };
+}
+
+pub fn init(file: File, io: Io, buffer: []u8) Reader {
+ return .{
+ .io = io,
+ .file = file,
+ .interface = initInterface(buffer),
+ };
+}
+
+pub fn initSize(file: File, io: Io, buffer: []u8, size: ?u64) Reader {
+ return .{
+ .io = io,
+ .file = file,
+ .interface = initInterface(buffer),
+ .size = size,
+ };
+}
+
+/// Positional is more threadsafe, since the global seek position is not
+/// affected, but when such syscalls are not available, preemptively
+/// initializing in streaming mode skips a failed syscall.
+pub fn initStreaming(file: File, io: Io, buffer: []u8) Reader {
+ return .{
+ .io = io,
+ .file = file,
+ .interface = Reader.initInterface(buffer),
+ .mode = .streaming,
+ .seek_err = error.Unseekable,
+ .size_err = error.Streaming,
+ };
+}
+
+pub fn getSize(r: *Reader) SizeError!u64 {
+ return r.size orelse {
+ if (r.size_err) |err| return err;
+ if (r.file.stat(r.io)) |st| {
+ if (st.kind == .file) {
+ r.size = st.size;
+ return st.size;
+ } else {
+ r.mode = r.mode.toStreaming();
+ r.size_err = error.Streaming;
+ return error.Streaming;
+ }
+ } else |err| {
+ r.size_err = err;
+ return err;
+ }
+ };
+}
+
+pub fn seekBy(r: *Reader, offset: i64) SeekError!void {
+ const io = r.io;
+ switch (r.mode) {
+ .positional, .positional_simple => {
+ setLogicalPos(r, @intCast(@as(i64, @intCast(logicalPos(r))) + offset));
+ },
+ .streaming, .streaming_simple => {
+ const seek_err = r.seek_err orelse e: {
+ if (io.vtable.fileSeekBy(io.userdata, r.file, offset)) |_| {
+ setLogicalPos(r, @intCast(@as(i64, @intCast(logicalPos(r))) + offset));
+ return;
+ } else |err| {
+ r.seek_err = err;
+ break :e err;
+ }
+ };
+ var remaining = std.math.cast(u64, offset) orelse return seek_err;
+ while (remaining > 0) {
+ remaining -= discard(&r.interface, .limited64(remaining)) catch |err| {
+ r.seek_err = err;
+ return err;
+ };
+ }
+ r.interface.tossBuffered();
+ },
+ .failure => return r.seek_err.?,
+ }
+}
+
+/// Repositions logical read offset relative to the beginning of the file.
+pub fn seekTo(r: *Reader, offset: u64) SeekError!void {
+ const io = r.io;
+ switch (r.mode) {
+ .positional, .positional_simple => {
+ setLogicalPos(r, offset);
+ },
+ .streaming, .streaming_simple => {
+ const logical_pos = logicalPos(r);
+ if (offset >= logical_pos) return seekBy(r, @intCast(offset - logical_pos));
+ if (r.seek_err) |err| return err;
+ io.vtable.fileSeekTo(io.userdata, r.file, offset) catch |err| {
+ r.seek_err = err;
+ return err;
+ };
+ setLogicalPos(r, offset);
+ },
+ .failure => return r.seek_err.?,
+ }
+}
+
+pub fn logicalPos(r: *const Reader) u64 {
+ return r.pos - r.interface.bufferedLen();
+}
+
+fn setLogicalPos(r: *Reader, offset: u64) void {
+ const logical_pos = r.logicalPos();
+ if (offset < logical_pos or offset >= r.pos) {
+ r.interface.tossBuffered();
+ r.pos = offset;
+ } else r.interface.toss(@intCast(offset - logical_pos));
+}
+
+/// Number of slices to store on the stack, when trying to send as many byte
+/// vectors through the underlying read calls as possible.
+const max_buffers_len = 16;
+
+fn stream(io_reader: *Io.Reader, w: *Io.Writer, limit: Io.Limit) Io.Reader.StreamError!usize {
+ const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
+ return streamMode(r, w, limit, r.mode);
+}
+
+pub fn streamMode(r: *Reader, w: *Io.Writer, limit: Io.Limit, mode: Mode) Io.Reader.StreamError!usize {
+ switch (mode) {
+ .positional, .streaming => return w.sendFile(r, limit) catch |write_err| switch (write_err) {
+ error.Unimplemented => {
+ r.mode = r.mode.toSimple();
+ return 0;
+ },
+ else => |e| return e,
+ },
+ .positional_simple => {
+ const dest = limit.slice(try w.writableSliceGreedy(1));
+ var data: [1][]u8 = .{dest};
+ const n = try readVecPositional(r, &data);
+ w.advance(n);
+ return n;
+ },
+ .streaming_simple => {
+ const dest = limit.slice(try w.writableSliceGreedy(1));
+ var data: [1][]u8 = .{dest};
+ const n = try readVecStreaming(r, &data);
+ w.advance(n);
+ return n;
+ },
+ .failure => return error.ReadFailed,
+ }
+}
+
+fn readVec(io_reader: *Io.Reader, data: [][]u8) Io.Reader.Error!usize {
+ const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
+ switch (r.mode) {
+ .positional, .positional_simple => return readVecPositional(r, data),
+ .streaming, .streaming_simple => return readVecStreaming(r, data),
+ .failure => return error.ReadFailed,
+ }
+}
+
+fn readVecPositional(r: *Reader, data: [][]u8) Io.Reader.Error!usize {
+ const io = r.io;
+ var iovecs_buffer: [max_buffers_len][]u8 = undefined;
+ const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, data);
+ const dest = iovecs_buffer[0..dest_n];
+ assert(dest[0].len > 0);
+ const n = io.vtable.fileReadPositional(io.userdata, r.file, dest, r.pos) catch |err| switch (err) {
+ error.Unseekable => {
+ r.mode = r.mode.toStreaming();
+ const pos = r.pos;
+ if (pos != 0) {
+ r.pos = 0;
+ r.seekBy(@intCast(pos)) catch {
+ r.mode = .failure;
+ return error.ReadFailed;
+ };
+ }
+ return 0;
+ },
+ else => |e| {
+ r.err = e;
+ return error.ReadFailed;
+ },
+ };
+ if (n == 0) {
+ r.size = r.pos;
+ return error.EndOfStream;
+ }
+ r.pos += n;
+ if (n > data_size) {
+ r.interface.end += n - data_size;
+ return data_size;
+ }
+ return n;
+}
+
+fn readVecStreaming(r: *Reader, data: [][]u8) Io.Reader.Error!usize {
+ const io = r.io;
+ var iovecs_buffer: [max_buffers_len][]u8 = undefined;
+ const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, data);
+ const dest = iovecs_buffer[0..dest_n];
+ assert(dest[0].len > 0);
+ const n = io.vtable.fileReadStreaming(io.userdata, r.file, dest) catch |err| {
+ r.err = err;
+ return error.ReadFailed;
+ };
+ if (n == 0) {
+ r.size = r.pos;
+ return error.EndOfStream;
+ }
+ r.pos += n;
+ if (n > data_size) {
+ r.interface.end += n - data_size;
+ return data_size;
+ }
+ return n;
+}
+
+fn discard(io_reader: *Io.Reader, limit: Io.Limit) Io.Reader.Error!usize {
+ const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
+ const io = r.io;
+ const file = r.file;
+ switch (r.mode) {
+ .positional, .positional_simple => {
+ const size = r.getSize() catch {
+ r.mode = r.mode.toStreaming();
+ return 0;
+ };
+ const logical_pos = logicalPos(r);
+ const delta = @min(@intFromEnum(limit), size - logical_pos);
+ setLogicalPos(r, logical_pos + delta);
+ return delta;
+ },
+ .streaming, .streaming_simple => {
+ // Unfortunately we can't seek forward without knowing the
+ // size because the seek syscalls provided to us will not
+ // return the true end position if a seek would exceed the
+ // end.
+ fallback: {
+ if (r.size_err == null and r.seek_err == null) break :fallback;
+
+ const buffered_len = r.interface.bufferedLen();
+ var remaining = @intFromEnum(limit);
+ if (remaining <= buffered_len) {
+ r.interface.seek += remaining;
+ return remaining;
+ }
+ remaining -= buffered_len;
+ r.interface.seek = 0;
+ r.interface.end = 0;
+
+ var trash_buffer: [128]u8 = undefined;
+ var data: [1][]u8 = .{trash_buffer[0..@min(trash_buffer.len, remaining)]};
+ var iovecs_buffer: [max_buffers_len][]u8 = undefined;
+ const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, &data);
+ const dest = iovecs_buffer[0..dest_n];
+ assert(dest[0].len > 0);
+ const n = io.vtable.fileReadStreaming(io.userdata, file, dest) catch |err| {
+ r.err = err;
+ return error.ReadFailed;
+ };
+ if (n == 0) {
+ r.size = r.pos;
+ return error.EndOfStream;
+ }
+ r.pos += n;
+ if (n > data_size) {
+ r.interface.end += n - data_size;
+ remaining -= data_size;
+ } else {
+ remaining -= n;
+ }
+ return @intFromEnum(limit) - remaining;
+ }
+ const size = r.getSize() catch return 0;
+ const n = @min(size - r.pos, std.math.maxInt(i64), @intFromEnum(limit));
+ io.vtable.fileSeekBy(io.userdata, file, n) catch |err| {
+ r.seek_err = err;
+ return 0;
+ };
+ r.pos += n;
+ return n;
+ },
+ .failure => return error.ReadFailed,
+ }
+}
+
+/// Returns whether the stream is at the logical end.
+pub fn atEnd(r: *Reader) bool {
+ // Even if stat fails, size is set when end is encountered.
+ const size = r.size orelse return false;
+ return size - logicalPos(r) == 0;
+}
diff --git a/lib/std/Io/File/Writer.zig b/lib/std/Io/File/Writer.zig
new file mode 100644
index 0000000000..bf8c0bf289
--- /dev/null
+++ b/lib/std/Io/File/Writer.zig
@@ -0,0 +1,274 @@
+const Writer = @This();
+const builtin = @import("builtin");
+const is_windows = builtin.os.tag == .windows;
+
+const std = @import("../../std.zig");
+const Io = std.Io;
+const File = std.Io.File;
+const assert = std.debug.assert;
+
+io: Io,
+file: File,
+err: ?Error = null,
+mode: Mode = .positional,
+/// Tracks the true seek position in the file. To obtain the logical position,
+/// use `logicalPos`.
+pos: u64 = 0,
+write_file_err: ?WriteFileError = null,
+seek_err: ?SeekError = null,
+interface: Io.Writer,
+
+pub const Mode = File.Reader.Mode;
+
+pub const Error = error{
+ DiskQuota,
+ FileTooBig,
+ InputOutput,
+ NoSpaceLeft,
+ DeviceBusy,
+ /// File descriptor does not hold the required rights to write to it.
+ AccessDenied,
+ PermissionDenied,
+ /// File is an unconnected socket, or closed its read end.
+ BrokenPipe,
+ /// Insufficient kernel memory to read from in_fd.
+ SystemResources,
+ NotOpenForWriting,
+ /// The process cannot access the file because another process has locked
+ /// a portion of the file. Windows-only.
+ LockViolation,
+ /// Non-blocking has been enabled and this operation would block.
+ WouldBlock,
+ /// This error occurs when a device gets disconnected before or mid-flush
+ /// while it's being written to - errno(6): No such device or address.
+ NoDevice,
+ FileBusy,
+} || Io.Cancelable || Io.UnexpectedError;
+
+pub const WriteFileError = Error || error{
+ /// Descriptor is not valid or locked, or an mmap(2)-like operation is not available for in_fd.
+ Unimplemented,
+ /// Can happen on FreeBSD when using copy_file_range.
+ CorruptedData,
+ EndOfStream,
+ ReadFailed,
+};
+
+pub const SeekError = Io.File.SeekError;
+
+pub fn init(file: File, io: Io, buffer: []u8) Writer {
+ return .{
+ .io = io,
+ .file = file,
+ .interface = initInterface(buffer),
+ .mode = .positional,
+ };
+}
+
+/// Positional is more threadsafe, since the global seek position is not
+/// affected, but when such syscalls are not available, preemptively
+/// initializing in streaming mode will skip a failed syscall.
+pub fn initStreaming(file: File, io: Io, buffer: []u8) Writer {
+ return .{
+ .io = io,
+ .file = file,
+ .interface = initInterface(buffer),
+ .mode = .streaming,
+ };
+}
+
+/// Detects if `file` is terminal and sets the mode accordingly.
+pub fn initDetect(file: File, io: Io, buffer: []u8) Io.Cancelable!Writer {
+ return .{
+ .io = io,
+ .file = file,
+ .interface = initInterface(buffer),
+ .mode = try .detect(io, file, true, .positional),
+ };
+}
+
+pub fn initInterface(buffer: []u8) Io.Writer {
+ return .{
+ .vtable = &.{
+ .drain = drain,
+ .sendFile = sendFile,
+ },
+ .buffer = buffer,
+ };
+}
+
+pub fn moveToReader(w: *Writer) File.Reader {
+ defer w.* = undefined;
+ return .{
+ .io = w.io,
+ .file = .{ .handle = w.file.handle },
+ .mode = w.mode,
+ .pos = w.pos,
+ .interface = File.Reader.initInterface(w.interface.buffer),
+ .seek_err = w.seek_err,
+ };
+}
+
+pub fn drain(io_w: *Io.Writer, data: []const []const u8, splat: usize) Io.Writer.Error!usize {
+ const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));
+ switch (w.mode) {
+ .positional, .positional_simple => return drainPositional(w, data, splat),
+ .streaming, .streaming_simple => return drainStreaming(w, data, splat),
+ .failure => return error.WriteFailed,
+ }
+}
+
+fn drainPositional(w: *Writer, data: []const []const u8, splat: usize) Io.Writer.Error!usize {
+ const io = w.io;
+ const header = w.interface.buffered();
+ const n = io.vtable.fileWritePositional(io.userdata, w.file, header, data, splat, w.pos) catch |err| switch (err) {
+ error.Unseekable => {
+ w.mode = w.mode.toStreaming();
+ const pos = w.pos;
+ if (pos != 0) {
+ w.pos = 0;
+ w.seekTo(@intCast(pos)) catch {
+ w.mode = .failure;
+ return error.WriteFailed;
+ };
+ }
+ return 0;
+ },
+ else => |e| {
+ w.err = e;
+ return error.WriteFailed;
+ },
+ };
+ w.pos += n;
+ return w.interface.consume(n);
+}
+
+fn drainStreaming(w: *Writer, data: []const []const u8, splat: usize) Io.Writer.Error!usize {
+ const io = w.io;
+ const header = w.interface.buffered();
+ const n = io.vtable.fileWriteStreaming(io.userdata, w.file, header, data, splat) catch |err| {
+ w.err = err;
+ return error.WriteFailed;
+ };
+ w.pos += n;
+ return w.interface.consume(n);
+}
+
+pub fn sendFile(io_w: *Io.Writer, file_reader: *Io.File.Reader, limit: Io.Limit) Io.Writer.FileError!usize {
+ const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));
+ switch (w.mode) {
+ .positional => return sendFilePositional(w, file_reader, limit),
+ .positional_simple => return error.Unimplemented,
+ .streaming => return sendFileStreaming(w, file_reader, limit),
+ .streaming_simple => return error.Unimplemented,
+ .failure => return error.WriteFailed,
+ }
+}
+
+fn sendFilePositional(w: *Writer, file_reader: *Io.File.Reader, limit: Io.Limit) Io.Writer.FileError!usize {
+ const io = w.io;
+ const header = w.interface.buffered();
+ const n = io.vtable.fileWriteFilePositional(io.userdata, w.file, header, file_reader, limit, w.pos) catch |err| switch (err) {
+ error.Unseekable => {
+ w.mode = w.mode.toStreaming();
+ const pos = w.pos;
+ if (pos != 0) {
+ w.pos = 0;
+ w.seekTo(@intCast(pos)) catch {
+ w.mode = .failure;
+ return error.WriteFailed;
+ };
+ }
+ return 0;
+ },
+ error.Canceled => {
+ w.err = error.Canceled;
+ return error.WriteFailed;
+ },
+ error.EndOfStream => return error.EndOfStream,
+ error.Unimplemented => return error.Unimplemented,
+ error.ReadFailed => return error.ReadFailed,
+ else => |e| {
+ w.write_file_err = e;
+ return error.WriteFailed;
+ },
+ };
+ w.pos += n;
+ return w.interface.consume(n);
+}
+
+fn sendFileStreaming(w: *Writer, file_reader: *Io.File.Reader, limit: Io.Limit) Io.Writer.FileError!usize {
+ const io = w.io;
+ const header = w.interface.buffered();
+ const n = io.vtable.fileWriteFileStreaming(io.userdata, w.file, header, file_reader, limit) catch |err| switch (err) {
+ error.Canceled => {
+ w.err = error.Canceled;
+ return error.WriteFailed;
+ },
+ error.EndOfStream => return error.EndOfStream,
+ error.Unimplemented => return error.Unimplemented,
+ error.ReadFailed => return error.ReadFailed,
+ else => |e| {
+ w.write_file_err = e;
+ return error.WriteFailed;
+ },
+ };
+ w.pos += n;
+ return w.interface.consume(n);
+}
+
+pub fn seekTo(w: *Writer, offset: u64) (SeekError || Io.Writer.Error)!void {
+ try w.interface.flush();
+ try seekToUnbuffered(w, offset);
+}
+
+pub fn logicalPos(w: *const Writer) u64 {
+ return w.pos + w.interface.end;
+}
+
+/// Asserts that no data is currently buffered.
+pub fn seekToUnbuffered(w: *Writer, offset: u64) SeekError!void {
+ assert(w.interface.buffered().len == 0);
+ const io = w.io;
+ switch (w.mode) {
+ .positional, .positional_simple => {
+ w.pos = offset;
+ },
+ .streaming, .streaming_simple => {
+ if (w.seek_err) |err| return err;
+ io.vtable.fileSeekTo(io.userdata, w.file, offset) catch |err| {
+ w.seek_err = err;
+ return err;
+ };
+ w.pos = offset;
+ },
+ .failure => return w.seek_err.?,
+ }
+}
+
+pub const EndError = File.SetLengthError || Io.Writer.Error;
+
+/// Flushes any buffered data and sets the end position of the file.
+///
+/// If not overwriting existing contents, then calling `interface.flush`
+/// directly is sufficient.
+///
+/// Flush failure is handled by setting `err` so that it can be handled
+/// along with other write failures.
+pub fn end(w: *Writer) EndError!void {
+ const io = w.io;
+ try w.interface.flush();
+ switch (w.mode) {
+ .positional,
+ .positional_simple,
+ => w.file.setLength(io, w.pos) catch |err| switch (err) {
+ error.NonResizable => return,
+ else => |e| return e,
+ },
+
+ .streaming,
+ .streaming_simple,
+ .failure,
+ => {},
+ }
+}
diff --git a/lib/std/Io/IoUring.zig b/lib/std/Io/IoUring.zig
index 5561cdebd2..81cdc24201 100644
--- a/lib/std/Io/IoUring.zig
+++ b/lib/std/Io/IoUring.zig
@@ -1093,7 +1093,7 @@ fn createFile(
.PERM => return error.PermissionDenied,
.EXIST => return error.PathAlreadyExists,
.BUSY => return error.DeviceBusy,
- .OPNOTSUPP => return error.FileLocksNotSupported,
+ .OPNOTSUPP => return error.FileLocksUnsupported,
.AGAIN => return error.WouldBlock,
.TXTBSY => return error.FileBusy,
.NXIO => return error.NoDevice,
@@ -1201,7 +1201,7 @@ fn fileOpen(
.PERM => return error.PermissionDenied,
.EXIST => return error.PathAlreadyExists,
.BUSY => return error.DeviceBusy,
- .OPNOTSUPP => return error.FileLocksNotSupported,
+ .OPNOTSUPP => return error.FileLocksUnsupported,
.AGAIN => return error.WouldBlock,
.TXTBSY => return error.FileBusy,
.NXIO => return error.NoDevice,
diff --git a/lib/std/Io/Kqueue.zig b/lib/std/Io/Kqueue.zig
index 5b4f71da08..26b8298cab 100644
--- a/lib/std/Io/Kqueue.zig
+++ b/lib/std/Io/Kqueue.zig
@@ -869,11 +869,11 @@ pub fn io(k: *Kqueue) Io {
.conditionWaitUncancelable = conditionWaitUncancelable,
.conditionWake = conditionWake,
- .dirMake = dirMake,
- .dirMakePath = dirMakePath,
- .dirMakeOpenPath = dirMakeOpenPath,
+ .dirCreateDir = dirCreateDir,
+ .dirCreateDirPath = dirCreateDirPath,
+ .dirCreateDirPathOpen = dirCreateDirPathOpen,
.dirStat = dirStat,
- .dirStatPath = dirStatPath,
+ .dirStatFile = dirStatFile,
.fileStat = fileStat,
.dirAccess = dirAccess,
@@ -888,7 +888,7 @@ pub fn io(k: *Kqueue) Io {
.fileReadPositional = fileReadPositional,
.fileSeekBy = fileSeekBy,
.fileSeekTo = fileSeekTo,
- .openSelfExe = openSelfExe,
+ .openExecutable = openExecutable,
.now = now,
.sleep = sleep,
@@ -1114,7 +1114,7 @@ fn conditionWake(userdata: ?*anyopaque, cond: *Io.Condition, wake: Io.Condition.
k.yield(waiting_fiber, .reschedule);
}
-fn dirMake(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir.Mode) Dir.MakeError!void {
+fn dirCreateDir(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir.Mode) Dir.CreateDirError!void {
const k: *Kqueue = @ptrCast(@alignCast(userdata));
_ = k;
_ = dir;
@@ -1122,7 +1122,7 @@ fn dirMake(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir.Mode
_ = mode;
@panic("TODO");
}
-fn dirMakePath(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir.Mode) Dir.MakeError!void {
+fn dirCreateDirPath(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir.Mode) Dir.CreateDirError!void {
const k: *Kqueue = @ptrCast(@alignCast(userdata));
_ = k;
_ = dir;
@@ -1130,7 +1130,7 @@ fn dirMakePath(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir.
_ = mode;
@panic("TODO");
}
-fn dirMakeOpenPath(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, options: Dir.OpenOptions) Dir.MakeOpenPathError!Dir {
+fn dirCreateDirPathOpen(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, options: Dir.OpenOptions) Dir.CreateDirPathOpenError!Dir {
const k: *Kqueue = @ptrCast(@alignCast(userdata));
_ = k;
_ = dir;
@@ -1144,7 +1144,7 @@ fn dirStat(userdata: ?*anyopaque, dir: Dir) Dir.StatError!Dir.Stat {
_ = dir;
@panic("TODO");
}
-fn dirStatPath(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, options: Dir.StatPathOptions) Dir.StatPathError!File.Stat {
+fn dirStatFile(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, options: Dir.StatPathOptions) Dir.StatFileError!File.Stat {
const k: *Kqueue = @ptrCast(@alignCast(userdata));
_ = k;
_ = dir;
@@ -1246,7 +1246,7 @@ fn fileSeekTo(userdata: ?*anyopaque, file: File, absolute_offset: u64) File.Seek
_ = absolute_offset;
@panic("TODO");
}
-fn openSelfExe(userdata: ?*anyopaque, file: File.OpenFlags) File.OpenSelfExeError!File {
+fn openExecutable(userdata: ?*anyopaque, file: File.OpenFlags) File.OpenExecutableError!File {
const k: *Kqueue = @ptrCast(@alignCast(userdata));
_ = k;
_ = file;
diff --git a/lib/std/Io/Terminal.zig b/lib/std/Io/Terminal.zig
new file mode 100644
index 0000000000..beacc4d301
--- /dev/null
+++ b/lib/std/Io/Terminal.zig
@@ -0,0 +1,138 @@
+/// Abstraction for writing to a stream that might support terminal escape
+/// codes.
+const Terminal = @This();
+
+const builtin = @import("builtin");
+const is_windows = builtin.os.tag == .windows;
+
+const std = @import("std");
+const Io = std.Io;
+const File = std.Io.File;
+
+writer: *Io.Writer,
+mode: Mode,
+
+pub const Color = enum {
+ black,
+ red,
+ green,
+ yellow,
+ blue,
+ magenta,
+ cyan,
+ white,
+ bright_black,
+ bright_red,
+ bright_green,
+ bright_yellow,
+ bright_blue,
+ bright_magenta,
+ bright_cyan,
+ bright_white,
+ dim,
+ bold,
+ reset,
+};
+
+pub const Mode = union(enum) {
+ no_color,
+ escape_codes,
+ windows_api: WindowsApi,
+
+ pub const WindowsApi = if (!is_windows) noreturn else struct {
+ handle: File.Handle,
+ reset_attributes: u16,
+ };
+
+ /// Detect suitable TTY configuration options for the given file (commonly
+ /// stdout/stderr).
+ ///
+ /// Will attempt to enable ANSI escape code support if necessary/possible.
+ ///
+ /// * `NO_COLOR` indicates whether "NO_COLOR" environment variable is
+ /// present and non-empty.
+ /// * `CLICOLOR_FORCE` indicates whether "CLICOLOR_FORCE" environment
+ /// variable is present and non-empty.
+ pub fn detect(io: Io, file: File, NO_COLOR: bool, CLICOLOR_FORCE: bool) Io.Cancelable!Mode {
+ const force_color: ?bool = if (NO_COLOR) false else if (CLICOLOR_FORCE) true else null;
+ if (force_color == false) return .no_color;
+
+ if (file.enableAnsiEscapeCodes(io)) |_| {
+ return .escape_codes;
+ } else |err| switch (err) {
+ error.Canceled => return error.Canceled,
+ error.NotTerminalDevice, error.Unexpected => {},
+ }
+
+ if (is_windows and try file.isTty(io)) {
+ const windows = std.os.windows;
+ var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
+ if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != 0) {
+ return .{ .windows_api = .{
+ .handle = file.handle,
+ .reset_attributes = info.wAttributes,
+ } };
+ }
+ }
+ return if (force_color == true) .escape_codes else .no_color;
+ }
+};
+
+pub const SetColorError = std.os.windows.SetConsoleTextAttributeError || Io.Writer.Error;
+
+pub fn setColor(t: Terminal, color: Color) SetColorError!void {
+ switch (t.mode) {
+ .no_color => return,
+ .escape_codes => {
+ const color_string = switch (color) {
+ .black => "\x1b[30m",
+ .red => "\x1b[31m",
+ .green => "\x1b[32m",
+ .yellow => "\x1b[33m",
+ .blue => "\x1b[34m",
+ .magenta => "\x1b[35m",
+ .cyan => "\x1b[36m",
+ .white => "\x1b[37m",
+ .bright_black => "\x1b[90m",
+ .bright_red => "\x1b[91m",
+ .bright_green => "\x1b[92m",
+ .bright_yellow => "\x1b[93m",
+ .bright_blue => "\x1b[94m",
+ .bright_magenta => "\x1b[95m",
+ .bright_cyan => "\x1b[96m",
+ .bright_white => "\x1b[97m",
+ .bold => "\x1b[1m",
+ .dim => "\x1b[2m",
+ .reset => "\x1b[0m",
+ };
+ try t.writer.writeAll(color_string);
+ },
+ .windows_api => |wa| {
+ const windows = std.os.windows;
+ const attributes: windows.WORD = switch (color) {
+ .black => 0,
+ .red => windows.FOREGROUND_RED,
+ .green => windows.FOREGROUND_GREEN,
+ .yellow => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN,
+ .blue => windows.FOREGROUND_BLUE,
+ .magenta => windows.FOREGROUND_RED | windows.FOREGROUND_BLUE,
+ .cyan => windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE,
+ .white => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE,
+ .bright_black => windows.FOREGROUND_INTENSITY,
+ .bright_red => windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY,
+ .bright_green => windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY,
+ .bright_yellow => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY,
+ .bright_blue => windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
+ .bright_magenta => windows.FOREGROUND_RED | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
+ .bright_cyan => windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
+ .bright_white, .bold => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
+ // "dim" is not supported using basic character attributes, but let's still make it do *something*.
+ // This matches the old behavior of TTY.Color before the bright variants were added.
+ .dim => windows.FOREGROUND_INTENSITY,
+ .reset => wa.reset_attributes,
+ };
+ try t.writer.flush();
+ try windows.SetConsoleTextAttribute(wa.handle, attributes);
+ },
+ }
+}
diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig
index 2a38fdf543..d6d44aa775 100644
--- a/lib/std/Io/Threaded.zig
+++ b/lib/std/Io/Threaded.zig
@@ -3,19 +3,22 @@ const Threaded = @This();
const builtin = @import("builtin");
const native_os = builtin.os.tag;
const is_windows = native_os == .windows;
-const windows = std.os.windows;
-const ws2_32 = std.os.windows.ws2_32;
+const is_darwin = native_os.isDarwin();
const is_debug = builtin.mode == .Debug;
const std = @import("../std.zig");
const Io = std.Io;
const net = std.Io.net;
+const File = std.Io.File;
+const Dir = std.Io.Dir;
const HostName = std.Io.net.HostName;
const IpAddress = std.Io.net.IpAddress;
const Allocator = std.mem.Allocator;
const Alignment = std.mem.Alignment;
const assert = std.debug.assert;
const posix = std.posix;
+const windows = std.os.windows;
+const ws2_32 = std.os.windows.ws2_32;
/// Thread-safe.
allocator: Allocator,
@@ -26,23 +29,7 @@ join_requested: bool = false,
stack_size: usize,
/// All threads are spawned detached; this is how we wait until they all exit.
wait_group: std.Thread.WaitGroup = .{},
-/// Maximum thread pool size (excluding main thread) when dispatching async
-/// tasks. Until this limit, calls to `Io.async` when all threads are busy will
-/// cause a new thread to be spawned and permanently added to the pool. After
-/// this limit, calls to `Io.async` when all threads are busy run the task
-/// immediately.
-///
-/// Defaults to a number equal to logical CPU cores.
-///
-/// Protected by `mutex` once the I/O instance is already in use. See
-/// `setAsyncLimit`.
async_limit: Io.Limit,
-/// Maximum thread pool size (excluding main thread) for dispatching concurrent
-/// tasks. Until this limit, calls to `Io.concurrent` will increase the thread
-/// pool size.
-///
-/// concurrent tasks. After this number, calls to `Io.concurrent` return
-/// `error.ConcurrencyUnavailable`.
concurrent_limit: Io.Limit = .unlimited,
/// Error from calling `std.Thread.getCpuCount` in `init`.
cpu_count_error: ?std.Thread.CpuCountError,
@@ -52,17 +39,7 @@ cpu_count_error: ?std.Thread.CpuCountError,
busy_count: usize = 0,
main_thread: Thread,
pid: Pid = .unknown,
-/// When a cancel request is made, blocking syscalls can be unblocked by
-/// issuing a signal. However, if the signal arrives after the check and before
-/// the syscall instruction, it is missed.
-///
-/// This option solves the race condition by retrying the signal delivery
-/// until it is acknowledged, with an exponential backoff.
-///
-/// Unfortunately, trying again until the cancellation request is acknowledged
-/// has been observed to be relatively slow, and usually strong cancellation
-/// guarantees are not needed, so this defaults to off.
-robust_cancel: RobustCancel = .disabled,
+robust_cancel: RobustCancel,
wsa: if (is_windows) Wsa else struct {} = .{},
@@ -70,6 +47,64 @@ have_signal_handler: bool,
old_sig_io: if (have_sig_io) posix.Sigaction else void,
old_sig_pipe: if (have_sig_pipe) posix.Sigaction else void,
+use_sendfile: UseSendfile = .default,
+use_copy_file_range: UseCopyFileRange = .default,
+use_fcopyfile: UseFcopyfile = .default,
+use_fchmodat2: UseFchmodat2 = .default,
+
+stderr_writer: File.Writer = .{
+ .io = undefined,
+ .interface = Io.File.Writer.initInterface(&.{}),
+ .file = if (is_windows) undefined else .stderr(),
+ .mode = .streaming,
+},
+stderr_mode: Io.Terminal.Mode = .no_color,
+stderr_writer_initialized: bool = false,
+
+argv0: Argv0,
+environ: Environ,
+
+pub const Argv0 = switch (native_os) {
+ .openbsd, .haiku => struct {
+ value: ?[*:0]const u8 = null,
+ },
+ else => struct {},
+};
+
+pub const Environ = struct {
+ /// Unmodified data directly from the OS.
+ block: Block = &.{},
+ /// Protected by `mutex`. Determines whether the other fields have been
+ /// memoized based on `block`.
+ initialized: bool = false,
+ /// Protected by `mutex`. Memoized based on `block`. Tracks whether the
+ /// environment variables are present, ignoring their value.
+ exist: Exist = .{},
+ /// Protected by `mutex`. Memoized based on `block`.
+ string: String = .{},
+ /// Protected by `mutex`. Tracks the problem, if any, that occurred when
+ /// trying to scan environment variables.
+ ///
+ /// Errors are only possible on WASI.
+ err: ?Error = null,
+
+ pub const Error = Allocator.Error || Io.UnexpectedError;
+
+ pub const Block = []const [*:0]const u8;
+
+ pub const Exist = struct {
+ NO_COLOR: bool = false,
+ CLICOLOR_FORCE: bool = false,
+ };
+
+ pub const String = switch (native_os) {
+ .openbsd, .haiku => struct {
+ PATH: ?[:0]const u8 = null,
+ },
+ else => struct {},
+ };
+};
+
pub const RobustCancel = if (std.Thread.use_pthreads or native_os == .linux) enum {
enabled,
disabled,
@@ -82,6 +117,42 @@ pub const Pid = if (native_os == .linux) enum(posix.pid_t) {
_,
} else enum(u0) { unknown = 0 };
+pub const UseSendfile = if (have_sendfile) enum {
+ enabled,
+ disabled,
+ pub const default: UseSendfile = .enabled;
+} else enum {
+ disabled,
+ pub const default: UseSendfile = .disabled;
+};
+
+pub const UseCopyFileRange = if (have_copy_file_range) enum {
+ enabled,
+ disabled,
+ pub const default: UseCopyFileRange = .enabled;
+} else enum {
+ disabled,
+ pub const default: UseCopyFileRange = .disabled;
+};
+
+pub const UseFcopyfile = if (have_fcopyfile) enum {
+ enabled,
+ disabled,
+ pub const default: UseFcopyfile = .enabled;
+} else enum {
+ disabled,
+ pub const default: UseFcopyfile = .disabled;
+};
+
+pub const UseFchmodat2 = if (have_fchmodat2 and !have_fchmodat_flags) enum {
+ enabled,
+ disabled,
+ pub const default: UseFchmodat2 = .enabled;
+} else enum {
+ disabled,
+ pub const default: UseFchmodat2 = .disabled;
+};
+
const Thread = struct {
/// The value that needs to be passed to pthread_kill or tgkill in order to
/// send a signal.
@@ -164,13 +235,6 @@ const Thread = struct {
) orelse return;
}
- fn endSyscallCanceled(thread: *Thread) Io.Cancelable {
- if (thread.current_closure) |closure| {
- @atomicStore(CancelStatus, &closure.cancel_status, .acknowledged, .release);
- }
- return error.Canceled;
- }
-
fn currentSignalId() SignaleeId {
return if (std.Thread.use_pthreads) std.c.pthread_self() else std.Thread.getCurrentId();
}
@@ -229,7 +293,7 @@ const Thread = struct {
.INTR => {}, // caller's responsibility to retry
.AGAIN => {}, // ptr.* != expect
.INVAL => {}, // possibly timeout overflow
- .TIMEDOUT => {}, // timeout
+ .TIMEDOUT => {},
.FAULT => recoverableOsBugDetected(), // ptr was invalid
else => recoverableOsBugDetected(),
}
@@ -308,18 +372,25 @@ const Thread = struct {
else => unreachable,
};
},
- else => @compileError("unimplemented: futexWait"),
+ else => if (std.Thread.use_pthreads) {
+ // TODO integrate the following function being called with robust cancelation.
+ return pthreads_futex.wait(ptr, expect, timeout_ns) catch |err| switch (err) {
+ error.Timeout => {},
+ };
+ } else {
+ @compileError("unimplemented: futexWait");
+ },
}
}
fn futexWake(ptr: *const u32, max_waiters: u32) void {
@branchHint(.cold);
+ assert(max_waiters != 0);
if (builtin.single_threaded) return; // nothing to wake up
if (builtin.cpu.arch.isWasm()) {
comptime assert(builtin.cpu.has(.wasm, .atomics));
- assert(max_waiters != 0);
const woken_count = asm volatile (
\\local.get %[ptr]
\\local.get %[waiters]
@@ -364,7 +435,6 @@ const Thread = struct {
}
},
.windows => {
- assert(max_waiters != 0);
switch (max_waiters) {
1 => windows.ntdll.RtlWakeAddressSingle(ptr),
else => windows.ntdll.RtlWakeAddressAll(ptr),
@@ -385,7 +455,11 @@ const Thread = struct {
else => unreachable, // deadlock due to operating system bug
}
},
- else => @compileError("unimplemented: futexWake"),
+ else => if (std.Thread.use_pthreads) {
+ return pthreads_futex.wake(ptr, max_waiters);
+ } else {
+ @compileError("unimplemented: futexWake");
+ },
}
}
};
@@ -505,6 +579,47 @@ const Closure = struct {
}
};
+pub const InitOptions = struct {
+ /// Affects how many bytes are memory-mapped for threads.
+ stack_size: usize = std.Thread.SpawnConfig.default_stack_size,
+ /// Maximum thread pool size (excluding main thread) when dispatching async
+ /// tasks. Until this limit, calls to `Io.async` when all threads are busy will
+ /// cause a new thread to be spawned and permanently added to the pool. After
+ /// this limit, calls to `Io.async` when all threads are busy run the task
+ /// immediately.
+ ///
+ /// Defaults to a number equal to logical CPU cores.
+ ///
+ /// Protected by `Threaded.mutex` once the I/O instance is already in use. See
+ /// `setAsyncLimit`.
+ async_limit: ?Io.Limit = null,
+ /// Maximum thread pool size (excluding main thread) for dispatching concurrent
+ /// tasks. Until this limit, calls to `Io.concurrent` will increase the thread
+ /// pool size.
+ ///
+ /// concurrent tasks. After this number, calls to `Io.concurrent` return
+ /// `error.ConcurrencyUnavailable`.
+ concurrent_limit: Io.Limit = .unlimited,
+ /// When a cancel request is made, blocking syscalls can be unblocked by
+ /// issuing a signal. However, if the signal arrives after the check and before
+ /// the syscall instruction, it is missed.
+ ///
+ /// This option solves the race condition by retrying the signal delivery
+ /// until it is acknowledged, with an exponential backoff.
+ ///
+ /// Unfortunately, trying again until the cancellation request is acknowledged
+ /// has been observed to be relatively slow, and usually strong cancellation
+ /// guarantees are not needed, so this defaults to off.
+ robust_cancel: RobustCancel = .disabled,
+ /// Affects the following operations:
+ /// * `processExecutablePath` on OpenBSD and Haiku.
+ argv0: Argv0 = .{},
+ /// Affects the following operations:
+ /// * `fileIsTty`
+ /// * `processExecutablePath` on OpenBSD and Haiku (observes "PATH").
+ environ: Environ = .{},
+};
+
/// Related:
/// * `init_single_threaded`
pub fn init(
@@ -516,6 +631,7 @@ pub fn init(
/// If these functions are avoided, then `Allocator.failing` may be passed
/// here.
gpa: Allocator,
+ options: InitOptions,
) Threaded {
if (builtin.single_threaded) return .init_single_threaded;
@@ -523,8 +639,9 @@ pub fn init(
var t: Threaded = .{
.allocator = gpa,
- .stack_size = std.Thread.SpawnConfig.default_stack_size,
- .async_limit = if (cpu_count) |n| .limited(n - 1) else |_| .nothing,
+ .stack_size = options.stack_size,
+ .async_limit = options.async_limit orelse if (cpu_count) |n| .limited(n - 1) else |_| .nothing,
+ .concurrent_limit = options.concurrent_limit,
.cpu_count_error = if (cpu_count) |_| null else |e| e,
.old_sig_io = undefined,
.old_sig_pipe = undefined,
@@ -532,8 +649,11 @@ pub fn init(
.main_thread = .{
.signal_id = Thread.currentSignalId(),
.current_closure = null,
- .cancel_protection = undefined,
+ .cancel_protection = .unblocked,
},
+ .argv0 = options.argv0,
+ .environ = options.environ,
+ .robust_cancel = options.robust_cancel,
};
if (posix.Sigaction != void) {
@@ -570,10 +690,26 @@ pub const init_single_threaded: Threaded = .{
.main_thread = .{
.signal_id = undefined,
.current_closure = null,
- .cancel_protection = undefined,
+ .cancel_protection = .unblocked,
},
+ .robust_cancel = .disabled,
+ .argv0 = .{},
+ .environ = .{},
};
+var global_single_threaded_instance: Threaded = .init_single_threaded;
+
+/// In general, the application is responsible for choosing the `Io`
+/// implementation and library code should accept an `Io` parameter rather than
+/// accessing this declaration. Most code should avoid referencing this
+/// declaration entirely.
+///
+/// However, in some cases such as debugging, it is desirable to hardcode a
+/// reference to this `Io` implementation.
+///
+/// This instance does not support concurrency or cancelation.
+pub const global_single_threaded: *Threaded = &global_single_threaded_instance;
+
pub fn setAsyncLimit(t: *Threaded, new_limit: Io.Limit) void {
t.mutex.lock();
defer t.mutex.unlock();
@@ -607,7 +743,7 @@ fn worker(t: *Threaded) void {
var thread: Thread = .{
.signal_id = Thread.currentSignalId(),
.current_closure = null,
- .cancel_protection = undefined,
+ .cancel_protection = .unblocked,
};
Thread.current = &thread;
@@ -652,25 +788,64 @@ pub fn io(t: *Threaded) Io {
.futexWaitUncancelable = futexWaitUncancelable,
.futexWake = futexWake,
- .dirMake = dirMake,
- .dirMakePath = dirMakePath,
- .dirMakeOpenPath = dirMakeOpenPath,
+ .dirCreateDir = dirCreateDir,
+ .dirCreateDirPath = dirCreateDirPath,
+ .dirCreateDirPathOpen = dirCreateDirPathOpen,
.dirStat = dirStat,
- .dirStatPath = dirStatPath,
- .fileStat = fileStat,
+ .dirStatFile = dirStatFile,
.dirAccess = dirAccess,
.dirCreateFile = dirCreateFile,
.dirOpenFile = dirOpenFile,
.dirOpenDir = dirOpenDir,
.dirClose = dirClose,
+ .dirRead = dirRead,
+ .dirRealPath = dirRealPath,
+ .dirRealPathFile = dirRealPathFile,
+ .dirDeleteFile = dirDeleteFile,
+ .dirDeleteDir = dirDeleteDir,
+ .dirRename = dirRename,
+ .dirSymLink = dirSymLink,
+ .dirReadLink = dirReadLink,
+ .dirSetOwner = dirSetOwner,
+ .dirSetFileOwner = dirSetFileOwner,
+ .dirSetPermissions = dirSetPermissions,
+ .dirSetFilePermissions = dirSetFilePermissions,
+ .dirSetTimestamps = dirSetTimestamps,
+ .dirSetTimestampsNow = dirSetTimestampsNow,
+ .dirHardLink = dirHardLink,
+
+ .fileStat = fileStat,
+ .fileLength = fileLength,
.fileClose = fileClose,
.fileWriteStreaming = fileWriteStreaming,
.fileWritePositional = fileWritePositional,
+ .fileWriteFileStreaming = fileWriteFileStreaming,
+ .fileWriteFilePositional = fileWriteFilePositional,
.fileReadStreaming = fileReadStreaming,
.fileReadPositional = fileReadPositional,
.fileSeekBy = fileSeekBy,
.fileSeekTo = fileSeekTo,
- .openSelfExe = openSelfExe,
+ .fileSync = fileSync,
+ .fileIsTty = fileIsTty,
+ .fileEnableAnsiEscapeCodes = fileEnableAnsiEscapeCodes,
+ .fileSupportsAnsiEscapeCodes = fileSupportsAnsiEscapeCodes,
+ .fileSetLength = fileSetLength,
+ .fileSetOwner = fileSetOwner,
+ .fileSetPermissions = fileSetPermissions,
+ .fileSetTimestamps = fileSetTimestamps,
+ .fileSetTimestampsNow = fileSetTimestampsNow,
+ .fileLock = fileLock,
+ .fileTryLock = fileTryLock,
+ .fileUnlock = fileUnlock,
+ .fileDowngradeLock = fileDowngradeLock,
+ .fileRealPath = fileRealPath,
+
+ .processExecutableOpen = processExecutableOpen,
+ .processExecutablePath = processExecutablePath,
+ .lockStderr = lockStderr,
+ .tryLockStderr = tryLockStderr,
+ .unlockStderr = unlockStderr,
+ .processSetCurrentDir = processSetCurrentDir,
.now = now,
.sleep = sleep,
@@ -708,6 +883,7 @@ pub fn io(t: *Threaded) Io {
.windows => netWriteWindows,
else => netWritePosix,
},
+ .netWriteFile = netWriteFile,
.netSend = switch (native_os) {
.windows => netSendWindows,
else => netSendPosix,
@@ -748,25 +924,64 @@ pub fn ioBasic(t: *Threaded) Io {
.futexWaitUncancelable = futexWaitUncancelable,
.futexWake = futexWake,
- .dirMake = dirMake,
- .dirMakePath = dirMakePath,
- .dirMakeOpenPath = dirMakeOpenPath,
+ .dirCreateDir = dirCreateDir,
+ .dirCreateDirPath = dirCreateDirPath,
+ .dirCreateDirPathOpen = dirCreateDirPathOpen,
.dirStat = dirStat,
- .dirStatPath = dirStatPath,
- .fileStat = fileStat,
+ .dirStatFile = dirStatFile,
.dirAccess = dirAccess,
.dirCreateFile = dirCreateFile,
.dirOpenFile = dirOpenFile,
.dirOpenDir = dirOpenDir,
.dirClose = dirClose,
+ .dirRead = dirRead,
+ .dirRealPath = dirRealPath,
+ .dirRealPathFile = dirRealPathFile,
+ .dirDeleteFile = dirDeleteFile,
+ .dirDeleteDir = dirDeleteDir,
+ .dirRename = dirRename,
+ .dirSymLink = dirSymLink,
+ .dirReadLink = dirReadLink,
+ .dirSetOwner = dirSetOwner,
+ .dirSetFileOwner = dirSetFileOwner,
+ .dirSetPermissions = dirSetPermissions,
+ .dirSetFilePermissions = dirSetFilePermissions,
+ .dirSetTimestamps = dirSetTimestamps,
+ .dirSetTimestampsNow = dirSetTimestampsNow,
+ .dirHardLink = dirHardLink,
+
+ .fileStat = fileStat,
+ .fileLength = fileLength,
.fileClose = fileClose,
.fileWriteStreaming = fileWriteStreaming,
.fileWritePositional = fileWritePositional,
+ .fileWriteFileStreaming = fileWriteFileStreaming,
+ .fileWriteFilePositional = fileWriteFilePositional,
.fileReadStreaming = fileReadStreaming,
.fileReadPositional = fileReadPositional,
.fileSeekBy = fileSeekBy,
.fileSeekTo = fileSeekTo,
- .openSelfExe = openSelfExe,
+ .fileSync = fileSync,
+ .fileIsTty = fileIsTty,
+ .fileEnableAnsiEscapeCodes = fileEnableAnsiEscapeCodes,
+ .fileSupportsAnsiEscapeCodes = fileSupportsAnsiEscapeCodes,
+ .fileSetLength = fileSetLength,
+ .fileSetOwner = fileSetOwner,
+ .fileSetPermissions = fileSetPermissions,
+ .fileSetTimestamps = fileSetTimestamps,
+ .fileSetTimestampsNow = fileSetTimestampsNow,
+ .fileLock = fileLock,
+ .fileTryLock = fileTryLock,
+ .fileUnlock = fileUnlock,
+ .fileDowngradeLock = fileDowngradeLock,
+ .fileRealPath = fileRealPath,
+
+ .processExecutableOpen = processExecutableOpen,
+ .processExecutablePath = processExecutablePath,
+ .lockStderr = lockStderr,
+ .tryLockStderr = tryLockStderr,
+ .unlockStderr = unlockStderr,
+ .processSetCurrentDir = processSetCurrentDir,
.now = now,
.sleep = sleep,
@@ -780,6 +995,7 @@ pub fn ioBasic(t: *Threaded) Io {
.netClose = netCloseUnavailable,
.netRead = netReadUnavailable,
.netWrite = netWriteUnavailable,
+ .netWriteFile = netWriteFileUnavailable,
.netSend = netSendUnavailable,
.netReceive = netReceiveUnavailable,
.netInterfaceNameResolve = netInterfaceNameResolveUnavailable,
@@ -789,7 +1005,7 @@ pub fn ioBasic(t: *Threaded) Io {
};
}
-pub const socket_flags_unsupported = native_os.isDarwin() or native_os == .haiku;
+pub const socket_flags_unsupported = is_darwin or native_os == .haiku;
const have_accept4 = !socket_flags_unsupported;
const have_flock_open_flags = @hasField(posix.O, "EXLOCK");
const have_networking = native_os != .wasi;
@@ -805,12 +1021,47 @@ const have_preadv = switch (native_os) {
};
const have_sig_io = posix.SIG != void and @hasField(posix.SIG, "IO");
const have_sig_pipe = posix.SIG != void and @hasField(posix.SIG, "PIPE");
+const have_sendfile = if (builtin.link_libc) @TypeOf(std.c.sendfile) != void else native_os == .linux;
+const have_copy_file_range = switch (native_os) {
+ .linux, .freebsd => true,
+ else => false,
+};
+const have_fcopyfile = is_darwin;
+const have_fchmodat2 = native_os == .linux and
+ (builtin.os.isAtLeast(.linux, .{ .major = 6, .minor = 6, .patch = 0 }) orelse true) and
+ (builtin.abi.isAndroid() or !std.c.versionCheck(.{ .major = 2, .minor = 32, .patch = 0 }));
+const have_fchmodat_flags = native_os != .linux or
+ (!builtin.abi.isAndroid() and std.c.versionCheck(.{ .major = 2, .minor = 32, .patch = 0 }));
+
+const have_fchown = switch (native_os) {
+ .wasi, .windows => false,
+ else => true,
+};
+
+const have_fchmod = switch (native_os) {
+ .windows => false,
+ .wasi => builtin.link_libc,
+ else => true,
+};
const openat_sym = if (posix.lfs64_abi) posix.system.openat64 else posix.system.openat;
const fstat_sym = if (posix.lfs64_abi) posix.system.fstat64 else posix.system.fstat;
const fstatat_sym = if (posix.lfs64_abi) posix.system.fstatat64 else posix.system.fstatat;
const lseek_sym = if (posix.lfs64_abi) posix.system.lseek64 else posix.system.lseek;
const preadv_sym = if (posix.lfs64_abi) posix.system.preadv64 else posix.system.preadv;
+const ftruncate_sym = if (posix.lfs64_abi) posix.system.ftruncate64 else posix.system.ftruncate;
+const pwritev_sym = if (posix.lfs64_abi) posix.system.pwritev64 else posix.system.pwritev;
+const sendfile_sym = if (posix.lfs64_abi) posix.system.sendfile64 else posix.system.sendfile;
+const linux_copy_file_range_use_c = std.c.versionCheck(if (builtin.abi.isAndroid()) .{
+ .major = 34,
+ .minor = 0,
+ .patch = 0,
+} else .{
+ .major = 2,
+ .minor = 27,
+ .patch = 0,
+});
+const linux_copy_file_range_sys = if (linux_copy_file_range_use_c) std.c else std.os.linux;
/// Trailing data:
/// 1. context
@@ -1018,7 +1269,6 @@ const GroupClosure = struct {
const group = gc.group;
const group_state: *std.atomic.Value(usize) = @ptrCast(&group.state);
const event: *Io.Event = @ptrCast(&group.context);
-
current_thread.current_closure = closure;
current_thread.cancel_protection = .unblocked;
@@ -1304,6 +1554,7 @@ fn cancel(
}
fn futexWait(userdata: ?*anyopaque, ptr: *const u32, expected: u32, timeout: Io.Timeout) Io.Cancelable!void {
+ if (builtin.single_threaded) unreachable; // Deadlock.
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
const t_io = ioBasic(t);
@@ -1311,37 +1562,30 @@ fn futexWait(userdata: ?*anyopaque, ptr: *const u32, expected: u32, timeout: Io.
const d = (timeout.toDurationFromNow(t_io) catch break :ns 10) orelse break :ns null;
break :ns std.math.lossyCast(u64, d.raw.toNanoseconds());
};
- switch (native_os) {
- .illumos, .netbsd, .openbsd => @panic("TODO"),
- else => try current_thread.futexWaitTimed(ptr, expected, timeout_ns),
- }
+ return Thread.futexWaitTimed(current_thread, ptr, expected, timeout_ns);
}
fn futexWaitUncancelable(userdata: ?*anyopaque, ptr: *const u32, expected: u32) void {
+ if (builtin.single_threaded) unreachable; // Deadlock.
const t: *Threaded = @ptrCast(@alignCast(userdata));
_ = t;
- switch (native_os) {
- .illumos, .netbsd, .openbsd => @panic("TODO"),
- else => Thread.futexWaitUncancelable(ptr, expected),
- }
+ Thread.futexWaitUncancelable(ptr, expected);
}
fn futexWake(userdata: ?*anyopaque, ptr: *const u32, max_waiters: u32) void {
+ if (builtin.single_threaded) unreachable; // Nothing to wake up.
const t: *Threaded = @ptrCast(@alignCast(userdata));
_ = t;
- switch (native_os) {
- .illumos, .netbsd, .openbsd => @panic("TODO"),
- else => Thread.futexWake(ptr, max_waiters),
- }
+ Thread.futexWake(ptr, max_waiters);
}
-const dirMake = switch (native_os) {
- .windows => dirMakeWindows,
- .wasi => dirMakeWasi,
- else => dirMakePosix,
+const dirCreateDir = switch (native_os) {
+ .windows => dirCreateDirWindows,
+ .wasi => dirCreateDirWasi,
+ else => dirCreateDirPosix,
};
-fn dirMakePosix(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode: Io.Dir.Mode) Io.Dir.MakeError!void {
+fn dirCreateDirPosix(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions) Dir.CreateDirError!void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
@@ -1350,7 +1594,7 @@ fn dirMakePosix(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode:
try current_thread.beginSyscall();
while (true) {
- switch (posix.errno(posix.system.mkdirat(dir.handle, sub_path_posix, mode))) {
+ switch (posix.errno(posix.system.mkdirat(dir.handle, sub_path_posix, permissions.toMode()))) {
.SUCCESS => {
current_thread.endSyscall();
return;
@@ -1359,7 +1603,6 @@ fn dirMakePosix(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode:
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -1387,8 +1630,8 @@ fn dirMakePosix(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode:
}
}
-fn dirMakeWasi(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode: Io.Dir.Mode) Io.Dir.MakeError!void {
- if (builtin.link_libc) return dirMakePosix(userdata, dir, sub_path, mode);
+fn dirCreateDirWasi(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions) Dir.CreateDirError!void {
+ if (builtin.link_libc) return dirCreateDirPosix(userdata, dir, sub_path, permissions);
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
try current_thread.beginSyscall();
@@ -1402,7 +1645,6 @@ fn dirMakeWasi(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode: I
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -1429,13 +1671,13 @@ fn dirMakeWasi(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode: I
}
}
-fn dirMakeWindows(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode: Io.Dir.Mode) Io.Dir.MakeError!void {
+fn dirCreateDirWindows(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions) Dir.CreateDirError!void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
try current_thread.checkCancel();
const sub_path_w = try windows.sliceToPrefixedFileW(dir.handle, sub_path);
- _ = mode;
+ _ = permissions; // TODO use this value
const sub_dir_handle = windows.OpenFile(sub_path_w.span(), .{
.dir = dir.handle,
.access_mask = .{
@@ -1455,62 +1697,75 @@ fn dirMakeWindows(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode
windows.CloseHandle(sub_dir_handle);
}
-const dirMakePath = switch (native_os) {
- .windows => dirMakePathWindows,
- else => dirMakePathPosix,
-};
-
-fn dirMakePathPosix(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode: Io.Dir.Mode) Io.Dir.MakeError!void {
+fn dirCreateDirPath(
+ userdata: ?*anyopaque,
+ dir: Dir,
+ sub_path: []const u8,
+ permissions: Dir.Permissions,
+) Dir.CreateDirPathError!Dir.CreatePathStatus {
const t: *Threaded = @ptrCast(@alignCast(userdata));
- _ = t;
- _ = dir;
- _ = sub_path;
- _ = mode;
- @panic("TODO implement dirMakePathPosix");
-}
-fn dirMakePathWindows(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode: Io.Dir.Mode) Io.Dir.MakeError!void {
- const t: *Threaded = @ptrCast(@alignCast(userdata));
- _ = t;
- _ = dir;
- _ = sub_path;
- _ = mode;
- @panic("TODO implement dirMakePathWindows");
+ var it = std.fs.path.componentIterator(sub_path);
+ var status: Dir.CreatePathStatus = .existed;
+ var component = it.last() orelse return error.BadPathName;
+ while (true) {
+ if (dirCreateDir(t, dir, component.path, permissions)) |_| {
+ status = .created;
+ } else |err| switch (err) {
+ error.PathAlreadyExists => {
+ // stat the file and return an error if it's not a directory
+ // this is important because otherwise a dangling symlink
+ // could cause an infinite loop
+ const fstat = try dirStatFile(t, dir, component.path, .{});
+ if (fstat.kind != .directory) return error.NotDir;
+ },
+ error.FileNotFound => |e| {
+ component = it.previous() orelse return e;
+ continue;
+ },
+ else => |e| return e,
+ }
+ component = it.next() orelse return status;
+ }
}
-const dirMakeOpenPath = switch (native_os) {
- .windows => dirMakeOpenPathWindows,
- .wasi => dirMakeOpenPathWasi,
- else => dirMakeOpenPathPosix,
+const dirCreateDirPathOpen = switch (native_os) {
+ .windows => dirCreateDirPathOpenWindows,
+ .wasi => dirCreateDirPathOpenWasi,
+ else => dirCreateDirPathOpenPosix,
};
-fn dirMakeOpenPathPosix(
+fn dirCreateDirPathOpenPosix(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.OpenOptions,
-) Io.Dir.MakeOpenPathError!Io.Dir {
+ permissions: Dir.Permissions,
+ options: Dir.OpenOptions,
+) Dir.CreateDirPathOpenError!Dir {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const t_io = ioBasic(t);
return dirOpenDirPosix(t, dir, sub_path, options) catch |err| switch (err) {
error.FileNotFound => {
- try dir.makePath(t_io, sub_path);
+ _ = try dir.createDirPathStatus(t_io, sub_path, permissions);
return dirOpenDirPosix(t, dir, sub_path, options);
},
else => |e| return e,
};
}
-fn dirMakeOpenPathWindows(
+fn dirCreateDirPathOpenWindows(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.OpenOptions,
-) Io.Dir.MakeOpenPathError!Io.Dir {
+ permissions: Dir.Permissions,
+ options: Dir.OpenOptions,
+) Dir.CreateDirPathOpenError!Dir {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
const w = windows;
+ _ = permissions; // TODO apply these permissions
+
var it = std.fs.path.componentIterator(sub_path);
// If there are no components in the path, then create a dummy component with the full path.
var component: std.fs.path.NativeComponentIterator.Component = it.last() orelse .{
@@ -1526,7 +1781,7 @@ fn dirMakeOpenPathWindows(
const is_last = it.peekNext() == null;
const create_disposition: w.FILE.CREATE_DISPOSITION = if (is_last) .OPEN_IF else .CREATE;
- var result: Io.Dir = .{ .handle = undefined };
+ var result: Dir = .{ .handle = undefined };
const path_len_bytes: u16 = @intCast(sub_path_w.len * 2);
var nt_name: w.UNICODE_STRING = .{
@@ -1584,16 +1839,10 @@ fn dirMakeOpenPathWindows(
// stat the file and return an error if it's not a directory
// this is important because otherwise a dangling symlink
// could cause an infinite loop
- check_dir: {
- // workaround for windows, see https://github.com/ziglang/zig/issues/16738
- const fstat = dirStatPathWindows(t, dir, component.path, .{
- .follow_symlinks = options.follow_symlinks,
- }) catch |stat_err| switch (stat_err) {
- error.IsDir => break :check_dir,
- else => |e| return e,
- };
- if (fstat.kind != .directory) return error.NotDir;
- }
+ const fstat = try dirStatFileWindows(t, dir, component.path, .{
+ .follow_symlinks = options.follow_symlinks,
+ });
+ if (fstat.kind != .directory) return error.NotDir;
component = it.next().?;
continue;
@@ -1616,46 +1865,51 @@ fn dirMakeOpenPathWindows(
}
}
-fn dirMakeOpenPathWasi(
+fn dirCreateDirPathOpenWasi(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.OpenOptions,
-) Io.Dir.MakeOpenPathError!Io.Dir {
+ permissions: Dir.Permissions,
+ options: Dir.OpenOptions,
+) Dir.CreateDirPathOpenError!Dir {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const t_io = ioBasic(t);
return dirOpenDirWasi(t, dir, sub_path, options) catch |err| switch (err) {
error.FileNotFound => {
- try dir.makePath(t_io, sub_path);
+ _ = try dir.createDirPathStatus(t_io, sub_path, permissions);
return dirOpenDirWasi(t, dir, sub_path, options);
},
else => |e| return e,
};
}
-fn dirStat(userdata: ?*anyopaque, dir: Io.Dir) Io.Dir.StatError!Io.Dir.Stat {
+fn dirStat(userdata: ?*anyopaque, dir: Dir) Dir.StatError!Dir.Stat {
const t: *Threaded = @ptrCast(@alignCast(userdata));
- _ = t;
- _ = dir;
- @panic("TODO implement dirStat");
+ const file: File = .{ .handle = dir.handle };
+ return fileStat(t, file);
}
-const dirStatPath = switch (native_os) {
- .linux => dirStatPathLinux,
- .windows => dirStatPathWindows,
- .wasi => dirStatPathWasi,
- else => dirStatPathPosix,
+const dirStatFile = switch (native_os) {
+ .linux => dirStatFileLinux,
+ .windows => dirStatFileWindows,
+ .wasi => dirStatFileWasi,
+ else => dirStatFilePosix,
};
-fn dirStatPathLinux(
+fn dirStatFileLinux(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.StatPathOptions,
-) Io.Dir.StatPathError!Io.File.Stat {
+ options: Dir.StatFileOptions,
+) Dir.StatFileError!File.Stat {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
const linux = std.os.linux;
+ const use_c = std.c.versionCheck(if (builtin.abi.isAndroid())
+ .{ .major = 30, .minor = 0, .patch = 0 }
+ else
+ .{ .major = 2, .minor = 28, .patch = 0 });
+ const sys = if (use_c) std.c else std.os.linux;
var path_buffer: [posix.PATH_MAX]u8 = undefined;
const sub_path_posix = try pathToPosix(sub_path, &path_buffer);
@@ -1666,30 +1920,15 @@ fn dirStatPathLinux(
try current_thread.beginSyscall();
while (true) {
var statx = std.mem.zeroes(linux.Statx);
- const rc = linux.statx(
- dir.handle,
- sub_path_posix,
- flags,
- .{ .TYPE = true, .MODE = true, .ATIME = true, .MTIME = true, .CTIME = true, .INO = true, .SIZE = true },
- &statx,
- );
- switch (linux.errno(rc)) {
+ switch (sys.errno(sys.statx(dir.handle, sub_path_posix, flags, linux_statx_mask, &statx))) {
.SUCCESS => {
current_thread.endSyscall();
- assert(statx.mask.TYPE);
- assert(statx.mask.MODE);
- assert(statx.mask.ATIME);
- assert(statx.mask.MTIME);
- assert(statx.mask.CTIME);
- assert(statx.mask.INO);
- assert(statx.mask.SIZE);
return statFromLinux(&statx);
},
.INTR => {
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -1709,12 +1948,12 @@ fn dirStatPathLinux(
}
}
-fn dirStatPathPosix(
+fn dirStatFilePosix(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.StatPathOptions,
-) Io.Dir.StatPathError!Io.File.Stat {
+ options: Dir.StatFileOptions,
+) Dir.StatFileError!File.Stat {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
@@ -1723,10 +1962,14 @@ fn dirStatPathPosix(
const flags: u32 = if (!options.follow_symlinks) posix.AT.SYMLINK_NOFOLLOW else 0;
+ return posixStatFile(current_thread, dir.handle, sub_path_posix, flags);
+}
+
+fn posixStatFile(current_thread: *Thread, dir_fd: posix.fd_t, sub_path: [:0]const u8, flags: u32) Dir.StatFileError!File.Stat {
try current_thread.beginSyscall();
while (true) {
var stat = std.mem.zeroes(posix.Stat);
- switch (posix.errno(fstatat_sym(dir.handle, sub_path_posix, &stat, flags))) {
+ switch (posix.errno(fstatat_sym(dir_fd, sub_path, &stat, flags))) {
.SUCCESS => {
current_thread.endSyscall();
return statFromPosix(&stat);
@@ -1735,7 +1978,6 @@ fn dirStatPathPosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -1757,12 +1999,12 @@ fn dirStatPathPosix(
}
}
-fn dirStatPathWindows(
+fn dirStatFileWindows(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.StatPathOptions,
-) Io.Dir.StatPathError!Io.File.Stat {
+ options: Dir.StatFileOptions,
+) Dir.StatFileError!File.Stat {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const file = try dirOpenFileWindows(t, dir, sub_path, .{
.follow_symlinks = options.follow_symlinks,
@@ -1771,13 +2013,13 @@ fn dirStatPathWindows(
return fileStatWindows(t, file);
}
-fn dirStatPathWasi(
+fn dirStatFileWasi(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.StatPathOptions,
-) Io.Dir.StatPathError!Io.File.Stat {
- if (builtin.link_libc) return dirStatPathPosix(userdata, dir, sub_path, options);
+ options: Dir.StatFileOptions,
+) Dir.StatFileError!File.Stat {
+ if (builtin.link_libc) return dirStatFilePosix(userdata, dir, sub_path, options);
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
const wasi = std.os.wasi;
@@ -1796,7 +2038,6 @@ fn dirStatPathWasi(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -1817,6 +2058,51 @@ fn dirStatPathWasi(
}
}
+fn fileLength(userdata: ?*anyopaque, file: File) File.LengthError!u64 {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+
+ if (native_os == .linux) {
+ const current_thread = Thread.getCurrent(t);
+ const linux = std.os.linux;
+
+ try current_thread.beginSyscall();
+ while (true) {
+ var statx = std.mem.zeroes(linux.Statx);
+ switch (linux.errno(linux.statx(file.handle, "", linux.AT.EMPTY_PATH, .{ .SIZE = true }, &statx))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ if (!statx.mask.SIZE) return error.Unexpected;
+ return statx.size;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => |err| return errnoBug(err),
+ .BADF => |err| return errnoBug(err), // File descriptor used after closed.
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .LOOP => |err| return errnoBug(err),
+ .NAMETOOLONG => |err| return errnoBug(err),
+ .NOENT => |err| return errnoBug(err),
+ .NOMEM => return error.SystemResources,
+ .NOTDIR => |err| return errnoBug(err),
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ } else if (is_windows) {
+ // TODO call NtQueryInformationFile and ask for only the size instead of "all"
+ }
+
+ const stat = try fileStat(t, file);
+ return stat.size;
+}
+
const fileStat = switch (native_os) {
.linux => fileStatLinux,
.windows => fileStatWindows,
@@ -1824,7 +2110,7 @@ const fileStat = switch (native_os) {
else => fileStatPosix,
};
-fn fileStatPosix(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.File.Stat {
+fn fileStatPosix(userdata: ?*anyopaque, file: File) File.StatError!File.Stat {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
@@ -1842,7 +2128,6 @@ fn fileStatPosix(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.File
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -1857,38 +2142,28 @@ fn fileStatPosix(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.File
}
}
-fn fileStatLinux(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.File.Stat {
+fn fileStatLinux(userdata: ?*anyopaque, file: File) File.StatError!File.Stat {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
const linux = std.os.linux;
+ const use_c = std.c.versionCheck(if (builtin.abi.isAndroid())
+ .{ .major = 30, .minor = 0, .patch = 0 }
+ else
+ .{ .major = 2, .minor = 28, .patch = 0 });
+ const sys = if (use_c) std.c else std.os.linux;
try current_thread.beginSyscall();
while (true) {
var statx = std.mem.zeroes(linux.Statx);
- const rc = linux.statx(
- file.handle,
- "",
- linux.AT.EMPTY_PATH,
- .{ .TYPE = true, .MODE = true, .ATIME = true, .MTIME = true, .CTIME = true, .INO = true, .SIZE = true },
- &statx,
- );
- switch (linux.errno(rc)) {
+ switch (sys.errno(sys.statx(file.handle, "", linux.AT.EMPTY_PATH, linux_statx_mask, &statx))) {
.SUCCESS => {
current_thread.endSyscall();
- assert(statx.mask.TYPE);
- assert(statx.mask.MODE);
- assert(statx.mask.ATIME);
- assert(statx.mask.MTIME);
- assert(statx.mask.CTIME);
- assert(statx.mask.INO);
- assert(statx.mask.SIZE);
return statFromLinux(&statx);
},
.INTR => {
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -1908,7 +2183,7 @@ fn fileStatLinux(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.File
}
}
-fn fileStatWindows(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.File.Stat {
+fn fileStatWindows(userdata: ?*anyopaque, file: File) File.StatError!File.Stat {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
try current_thread.checkCancel();
@@ -1922,14 +2197,14 @@ fn fileStatWindows(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.Fi
// size provided. This is treated as success because the type of variable-length information that this would be relevant for
// (name, volume name, etc) we don't care about.
.BUFFER_OVERFLOW => {},
- .INVALID_PARAMETER => unreachable,
+ .INVALID_PARAMETER => |err| return windows.statusBug(err),
.ACCESS_DENIED => return error.AccessDenied,
else => return windows.unexpectedStatus(rc),
}
return .{
.inode = info.InternalInformation.IndexNumber,
.size = @as(u64, @bitCast(info.StandardInformation.EndOfFile)),
- .mode = 0,
+ .permissions = .default_file,
.kind = if (info.BasicInformation.FileAttributes.REPARSE_POINT) reparse_point: {
var tag_info: windows.FILE.ATTRIBUTE_TAG_INFO = undefined;
const tag_rc = windows.ntdll.NtQueryInformationFile(file.handle, &io_status_block, &tag_info, @sizeOf(windows.FILE.ATTRIBUTE_TAG_INFO), .AttributeTag);
@@ -1937,7 +2212,7 @@ fn fileStatWindows(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.Fi
.SUCCESS => {},
// INFO_LENGTH_MISMATCH and ACCESS_DENIED are the only documented possible errors
// https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-fscc/d295752f-ce89-4b98-8553-266d37c84f0e
- .INFO_LENGTH_MISMATCH => unreachable,
+ .INFO_LENGTH_MISMATCH => |err| return windows.statusBug(err),
.ACCESS_DENIED => return error.AccessDenied,
else => return windows.unexpectedStatus(rc),
}
@@ -1951,10 +2226,11 @@ fn fileStatWindows(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.Fi
.atime = windows.fromSysTime(info.BasicInformation.LastAccessTime),
.mtime = windows.fromSysTime(info.BasicInformation.LastWriteTime),
.ctime = windows.fromSysTime(info.BasicInformation.ChangeTime),
+ .nlink = 0,
};
}
-fn fileStatWasi(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.File.Stat {
+fn fileStatWasi(userdata: ?*anyopaque, file: File) File.StatError!File.Stat {
if (builtin.link_libc) return fileStatPosix(userdata, file);
const t: *Threaded = @ptrCast(@alignCast(userdata));
@@ -1972,7 +2248,6 @@ fn fileStatWasi(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.File.
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -1996,10 +2271,10 @@ const dirAccess = switch (native_os) {
fn dirAccessPosix(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.AccessOptions,
-) Io.Dir.AccessError!void {
+ options: Dir.AccessOptions,
+) Dir.AccessError!void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
@@ -2024,7 +2299,6 @@ fn dirAccessPosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -2050,10 +2324,10 @@ fn dirAccessPosix(
fn dirAccessWasi(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.AccessOptions,
-) Io.Dir.AccessError!void {
+ options: Dir.AccessOptions,
+) Dir.AccessError!void {
if (builtin.link_libc) return dirAccessPosix(userdata, dir, sub_path, options);
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
@@ -2074,7 +2348,6 @@ fn dirAccessWasi(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -2123,10 +2396,10 @@ fn dirAccessWasi(
fn dirAccessWindows(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.AccessOptions,
-) Io.Dir.AccessError!void {
+ options: Dir.AccessOptions,
+) Dir.AccessError!void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
try current_thread.checkCancel();
@@ -2175,10 +2448,10 @@ const dirCreateFile = switch (native_os) {
fn dirCreateFilePosix(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- flags: Io.File.CreateFlags,
-) Io.File.OpenError!Io.File {
+ flags: File.CreateFlags,
+) File.OpenError!File {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
@@ -2211,7 +2484,7 @@ fn dirCreateFilePosix(
try current_thread.beginSyscall();
const fd: posix.fd_t = while (true) {
- const rc = openat_sym(dir.handle, sub_path_posix, os_flags, flags.mode);
+ const rc = openat_sym(dir.handle, sub_path_posix, os_flags, flags.permissions.toMode());
switch (posix.errno(rc)) {
.SUCCESS => {
current_thread.endSyscall();
@@ -2221,7 +2494,6 @@ fn dirCreateFilePosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -2238,14 +2510,14 @@ fn dirCreateFilePosix(
.NFILE => return error.SystemFdQuotaExceeded,
.NODEV => return error.NoDevice,
.NOENT => return error.FileNotFound,
- .SRCH => return error.ProcessNotFound,
+ .SRCH => return error.FileNotFound, // Linux when accessing procfs.
.NOMEM => return error.SystemResources,
.NOSPC => return error.NoSpaceLeft,
.NOTDIR => return error.NotDir,
.PERM => return error.PermissionDenied,
.EXIST => return error.PathAlreadyExists,
.BUSY => return error.DeviceBusy,
- .OPNOTSUPP => return error.FileLocksNotSupported,
+ .OPNOTSUPP => return error.FileLocksUnsupported,
.AGAIN => return error.WouldBlock,
.TXTBSY => return error.FileBusy,
.NXIO => return error.NoDevice,
@@ -2276,7 +2548,6 @@ fn dirCreateFilePosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -2284,7 +2555,7 @@ fn dirCreateFilePosix(
.INVAL => |err| return errnoBug(err), // invalid parameters
.NOLCK => return error.SystemResources,
.AGAIN => return error.WouldBlock,
- .OPNOTSUPP => return error.FileLocksNotSupported,
+ .OPNOTSUPP => return error.FileLocksUnsupported,
else => |err| return posix.unexpectedErrno(err),
}
},
@@ -2338,10 +2609,10 @@ fn dirCreateFilePosix(
fn dirCreateFileWindows(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- flags: Io.File.CreateFlags,
-) Io.File.OpenError!Io.File {
+ flags: File.CreateFlags,
+) File.OpenError!File {
const w = windows;
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
@@ -2367,35 +2638,42 @@ fn dirCreateFileWindows(
.OPEN_IF,
});
errdefer w.CloseHandle(handle);
+
var io_status_block: w.IO_STATUS_BLOCK = undefined;
- const range_off: w.LARGE_INTEGER = 0;
- const range_len: w.LARGE_INTEGER = 1;
const exclusive = switch (flags.lock) {
.none => return .{ .handle = handle },
.shared => false,
.exclusive => true,
};
- try w.LockFile(
+ const status = w.ntdll.NtLockFile(
handle,
null,
null,
null,
&io_status_block,
- &range_off,
- &range_len,
+ &windows_lock_range_off,
+ &windows_lock_range_len,
null,
@intFromBool(flags.lock_nonblocking),
@intFromBool(exclusive),
);
+ switch (status) {
+ .SUCCESS => {},
+ .INSUFFICIENT_RESOURCES => return error.SystemResources,
+ .LOCK_NOT_GRANTED => return error.WouldBlock,
+ .ACCESS_VIOLATION => |err| return windows.statusBug(err), // bad io_status_block pointer
+ else => return windows.unexpectedStatus(status),
+ }
+
return .{ .handle = handle };
}
fn dirCreateFileWasi(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- flags: Io.File.CreateFlags,
-) Io.File.OpenError!Io.File {
+ flags: File.CreateFlags,
+) File.OpenError!File {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
const wasi = std.os.wasi;
@@ -2436,7 +2714,6 @@ fn dirCreateFileWasi(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -2476,10 +2753,10 @@ const dirOpenFile = switch (native_os) {
fn dirOpenFilePosix(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- flags: Io.File.OpenFlags,
-) Io.File.OpenError!Io.File {
+ flags: File.OpenFlags,
+) File.OpenError!File {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
@@ -2490,6 +2767,7 @@ fn dirOpenFilePosix(
.wasi => .{
.read = flags.mode != .write_only,
.write = flags.mode != .read_only,
+ .NOFOLLOW = !flags.follow_symlinks,
},
else => .{
.ACCMODE = switch (flags.mode) {
@@ -2497,11 +2775,13 @@ fn dirOpenFilePosix(
.write_only => .WRONLY,
.read_write => .RDWR,
},
+ .NOFOLLOW = !flags.follow_symlinks,
},
};
if (@hasField(posix.O, "CLOEXEC")) os_flags.CLOEXEC = true;
if (@hasField(posix.O, "LARGEFILE")) os_flags.LARGEFILE = true;
if (@hasField(posix.O, "NOCTTY")) os_flags.NOCTTY = !flags.allow_ctty;
+ if (@hasField(posix.O, "PATH") and flags.path_only) os_flags.PATH = true;
// Use the O locking flags if the os supports them to acquire the lock
// atomically. Note that the NONBLOCK flag is removed after the openat()
@@ -2530,7 +2810,6 @@ fn dirOpenFilePosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -2547,14 +2826,14 @@ fn dirOpenFilePosix(
.NFILE => return error.SystemFdQuotaExceeded,
.NODEV => return error.NoDevice,
.NOENT => return error.FileNotFound,
- .SRCH => return error.ProcessNotFound,
+ .SRCH => return error.FileNotFound, // Linux when opening procfs files.
.NOMEM => return error.SystemResources,
.NOSPC => return error.NoSpaceLeft,
.NOTDIR => return error.NotDir,
.PERM => return error.PermissionDenied,
.EXIST => return error.PathAlreadyExists,
.BUSY => return error.DeviceBusy,
- .OPNOTSUPP => return error.FileLocksNotSupported,
+ .OPNOTSUPP => return error.FileLocksUnsupported,
.AGAIN => return error.WouldBlock,
.TXTBSY => return error.FileBusy,
.NXIO => return error.NoDevice,
@@ -2566,6 +2845,18 @@ fn dirOpenFilePosix(
};
errdefer posix.close(fd);
+ if (!flags.allow_directory) {
+ const is_dir = is_dir: {
+ const stat = fileStat(t, .{ .handle = fd }) catch |err| switch (err) {
+ // The directory-ness is either unknown or unknowable
+ error.Streaming => break :is_dir false,
+ else => |e| return e,
+ };
+ break :is_dir stat.kind == .directory;
+ };
+ if (is_dir) return error.IsDir;
+ }
+
if (have_flock and !have_flock_open_flags and flags.lock != .none) {
const lock_nonblocking: i32 = if (flags.lock_nonblocking) posix.LOCK.NB else 0;
const lock_flags = switch (flags.lock) {
@@ -2584,7 +2875,6 @@ fn dirOpenFilePosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -2592,7 +2882,7 @@ fn dirOpenFilePosix(
.INVAL => |err| return errnoBug(err), // invalid parameters
.NOLCK => return error.SystemResources,
.AGAIN => return error.WouldBlock,
- .OPNOTSUPP => return error.FileLocksNotSupported,
+ .OPNOTSUPP => return error.FileLocksUnsupported,
else => |err| return posix.unexpectedErrno(err),
}
},
@@ -2613,7 +2903,6 @@ fn dirOpenFilePosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |err| {
current_thread.endSyscall();
return posix.unexpectedErrno(err);
@@ -2634,7 +2923,6 @@ fn dirOpenFilePosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |err| {
current_thread.endSyscall();
return posix.unexpectedErrno(err);
@@ -2648,10 +2936,10 @@ fn dirOpenFilePosix(
fn dirOpenFileWindows(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- flags: Io.File.OpenFlags,
-) Io.File.OpenError!Io.File {
+ flags: File.OpenFlags,
+) File.OpenError!File {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const sub_path_w_array = try windows.sliceToPrefixedFileW(dir.handle, sub_path);
const sub_path_w = sub_path_w_array.span();
@@ -2663,10 +2951,11 @@ pub fn dirOpenFileWtf16(
t: *Threaded,
dir_handle: ?windows.HANDLE,
sub_path_w: [:0]const u16,
- flags: Io.File.OpenFlags,
-) Io.File.OpenError!Io.File {
- if (std.mem.eql(u16, sub_path_w, &.{'.'})) return error.IsDir;
- if (std.mem.eql(u16, sub_path_w, &.{ '.', '.' })) return error.IsDir;
+ flags: File.OpenFlags,
+) File.OpenError!File {
+ const allow_directory = flags.allow_directory and !flags.isWrite();
+ if (!allow_directory and std.mem.eql(u16, sub_path_w, &.{'.'})) return error.IsDir;
+ if (!allow_directory and std.mem.eql(u16, sub_path_w, &.{ '.', '.' })) return error.IsDir;
const path_len_bytes = std.math.cast(u16, sub_path_w.len * 2) orelse return error.NameTooLong;
const current_thread = Thread.getCurrent(t);
const w = windows;
@@ -2711,7 +3000,7 @@ pub fn dirOpenFileWtf16(
.OPEN,
.{
.IO = if (flags.follow_symlinks) .SYNCHRONOUS_NONALERT else .ASYNCHRONOUS,
- .NON_DIRECTORY_FILE = true,
+ .NON_DIRECTORY_FILE = !allow_directory,
.OPEN_REPARSE_POINT = !flags.follow_symlinks,
},
null,
@@ -2763,34 +3052,39 @@ pub fn dirOpenFileWtf16(
};
errdefer w.CloseHandle(handle);
- const range_off: w.LARGE_INTEGER = 0;
- const range_len: w.LARGE_INTEGER = 1;
const exclusive = switch (flags.lock) {
.none => return .{ .handle = handle },
.shared => false,
.exclusive => true,
};
- try w.LockFile(
+ const status = w.ntdll.NtLockFile(
handle,
null,
null,
null,
&io_status_block,
- &range_off,
- &range_len,
+ &windows_lock_range_off,
+ &windows_lock_range_len,
null,
@intFromBool(flags.lock_nonblocking),
@intFromBool(exclusive),
);
+ switch (status) {
+ .SUCCESS => {},
+ .INSUFFICIENT_RESOURCES => return error.SystemResources,
+ .LOCK_NOT_GRANTED => return error.WouldBlock,
+ .ACCESS_VIOLATION => |err| return windows.statusBug(err), // bad io_status_block pointer
+ else => return windows.unexpectedStatus(status),
+ }
return .{ .handle = handle };
}
fn dirOpenFileWasi(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- flags: Io.File.OpenFlags,
-) Io.File.OpenError!Io.File {
+ flags: File.OpenFlags,
+) File.OpenError!File {
if (builtin.link_libc) return dirOpenFilePosix(userdata, dir, sub_path, flags);
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
@@ -2827,15 +3121,13 @@ fn dirOpenFileWasi(
while (true) {
switch (wasi.path_open(dir.handle, lookup_flags, sub_path.ptr, sub_path.len, oflags, base, inheriting, fdflags, &fd)) {
.SUCCESS => {
- errdefer posix.close(fd);
current_thread.endSyscall();
- return .{ .handle = fd };
+ break;
},
.INTR => {
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -2863,6 +3155,21 @@ fn dirOpenFileWasi(
},
}
}
+ errdefer posix.close(fd);
+
+ if (!flags.allow_directory) {
+ const is_dir = is_dir: {
+ const stat = fileStat(t, .{ .handle = fd }) catch |err| switch (err) {
+ // The directory-ness is either unknown or unknowable
+ error.Streaming => break :is_dir false,
+ else => |e| return e,
+ };
+ break :is_dir stat.kind == .directory;
+ };
+ if (is_dir) return error.IsDir;
+ }
+
+ return .{ .handle = fd };
}
const dirOpenDir = switch (native_os) {
@@ -2874,10 +3181,10 @@ const dirOpenDir = switch (native_os) {
/// This function is also used for WASI when libc is linked.
fn dirOpenDirPosix(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.OpenOptions,
-) Io.Dir.OpenError!Io.Dir {
+ options: Dir.OpenOptions,
+) Dir.OpenError!Dir {
const t: *Threaded = @ptrCast(@alignCast(userdata));
if (is_windows) {
@@ -2919,7 +3226,6 @@ fn dirOpenDirPosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -2948,10 +3254,10 @@ fn dirOpenDirPosix(
fn dirOpenDirHaiku(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.OpenOptions,
-) Io.Dir.OpenError!Io.Dir {
+ options: Dir.OpenOptions,
+) Dir.OpenError!Dir {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
@@ -2972,7 +3278,6 @@ fn dirOpenDirHaiku(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -2999,10 +3304,10 @@ fn dirOpenDirHaiku(
pub fn dirOpenDirWindows(
t: *Io.Threaded,
- dir: Io.Dir,
+ dir: Dir,
sub_path_w: [:0]const u16,
- options: Io.Dir.OpenOptions,
-) Io.Dir.OpenError!Io.Dir {
+ options: Dir.OpenOptions,
+) Dir.OpenError!Dir {
const current_thread = Thread.getCurrent(t);
const w = windows;
@@ -3013,7 +3318,7 @@ pub fn dirOpenDirWindows(
.Buffer = @constCast(sub_path_w.ptr),
};
var io_status_block: w.IO_STATUS_BLOCK = undefined;
- var result: Io.Dir = .{ .handle = undefined };
+ var result: Dir = .{ .handle = undefined };
try current_thread.checkCancel();
const rc = w.ntdll.NtCreateFile(
&result.handle,
@@ -3068,23 +3373,3133 @@ pub fn dirOpenDirWindows(
}
}
-const MakeOpenDirAccessMaskWOptions = struct {
- no_follow: bool,
- create_disposition: u32,
+fn dirClose(userdata: ?*anyopaque, dirs: []const Dir) void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ _ = t;
+ for (dirs) |dir| posix.close(dir.handle);
+}
+
+const dirRead = switch (native_os) {
+ .linux => dirReadLinux,
+ .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => dirReadDarwin,
+ .freebsd, .netbsd, .dragonfly, .openbsd => dirReadBsd,
+ .illumos => dirReadIllumos,
+ .haiku => dirReadHaiku,
+ .windows => dirReadWindows,
+ .wasi => dirReadWasi,
+ else => dirReadUnimplemented,
};
-fn dirClose(userdata: ?*anyopaque, dir: Io.Dir) void {
+fn dirReadLinux(userdata: ?*anyopaque, dr: *Dir.Reader, buffer: []Dir.Entry) Dir.Reader.Error!usize {
+ const linux = std.os.linux;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ var buffer_index: usize = 0;
+ while (buffer.len - buffer_index != 0) {
+ if (dr.end - dr.index == 0) {
+ // Refill the buffer, unless we've already created references to
+ // buffered data.
+ if (buffer_index != 0) break;
+ if (dr.state == .reset) {
+ posixSeekTo(current_thread, dr.dir.handle, 0) catch |err| switch (err) {
+ error.Unseekable => return error.Unexpected,
+ else => |e| return e,
+ };
+ dr.state = .reading;
+ }
+ try current_thread.beginSyscall();
+ const n = while (true) {
+ const rc = linux.getdents64(dr.dir.handle, dr.buffer.ptr, dr.buffer.len);
+ switch (linux.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break rc;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err), // Dir is invalid or was opened without iteration ability.
+ .FAULT => |err| return errnoBug(err),
+ .NOTDIR => |err| return errnoBug(err),
+ // To be consistent across platforms, iteration
+ // ends if the directory being iterated is deleted
+ // during iteration. This matches the behavior of
+ // non-Linux, non-WASI UNIX platforms.
+ .NOENT => {
+ dr.state = .finished;
+ return 0;
+ },
+ // This can occur when reading /proc/$PID/net, or
+ // if the provided buffer is too small. Neither
+ // scenario is intended to be handled by this API.
+ .INVAL => return error.Unexpected,
+ .ACCES => return error.AccessDenied, // Lacking permission to iterate this directory.
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ };
+ if (n == 0) {
+ dr.state = .finished;
+ return 0;
+ }
+ dr.index = 0;
+ dr.end = n;
+ }
+ // Linux aligns the header by padding after the null byte of the name
+ // to align the next entry. This means we can find the end of the name
+ // by looking at only the 8 bytes before the next record. However since
+ // file names are usually short it's better to keep the machine code
+ // simpler.
+ //
+ // Furthermore, I observed qemu user mode to not align this struct, so
+ // this code makes the conservative choice to not assume alignment.
+ const linux_entry: *align(1) linux.dirent64 = @ptrCast(&dr.buffer[dr.index]);
+ const next_index = dr.index + linux_entry.reclen;
+ dr.index = next_index;
+ const name_ptr: [*]u8 = &linux_entry.name;
+ const padded_name = name_ptr[0 .. linux_entry.reclen - @offsetOf(linux.dirent64, "name")];
+ const name_len = std.mem.findScalar(u8, padded_name, 0).?;
+ const name = name_ptr[0..name_len :0];
+
+ if (std.mem.eql(u8, name, ".") or std.mem.eql(u8, name, "..")) continue;
+
+ const entry_kind: File.Kind = switch (linux_entry.type) {
+ linux.DT.BLK => .block_device,
+ linux.DT.CHR => .character_device,
+ linux.DT.DIR => .directory,
+ linux.DT.FIFO => .named_pipe,
+ linux.DT.LNK => .sym_link,
+ linux.DT.REG => .file,
+ linux.DT.SOCK => .unix_domain_socket,
+ else => .unknown,
+ };
+ buffer[buffer_index] = .{
+ .name = name,
+ .kind = entry_kind,
+ .inode = linux_entry.ino,
+ };
+ buffer_index += 1;
+ }
+ return buffer_index;
+}
+
+fn dirReadDarwin(userdata: ?*anyopaque, dr: *Dir.Reader, buffer: []Dir.Entry) Dir.Reader.Error!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ const Header = extern struct {
+ seek: i64,
+ };
+ const header: *Header = @ptrCast(dr.buffer.ptr);
+ const header_end: usize = @sizeOf(Header);
+ if (dr.index < header_end) {
+ // Initialize header.
+ dr.index = header_end;
+ dr.end = header_end;
+ header.* = .{ .seek = 0 };
+ }
+ var buffer_index: usize = 0;
+ while (buffer.len - buffer_index != 0) {
+ if (dr.end - dr.index == 0) {
+ // Refill the buffer, unless we've already created references to
+ // buffered data.
+ if (buffer_index != 0) break;
+ if (dr.state == .reset) {
+ posixSeekTo(current_thread, dr.dir.handle, 0) catch |err| switch (err) {
+ error.Unseekable => return error.Unexpected,
+ else => |e| return e,
+ };
+ dr.state = .reading;
+ }
+ const dents_buffer = dr.buffer[header_end..];
+ try current_thread.beginSyscall();
+ const n: usize = while (true) {
+ const rc = posix.system.getdirentries(dr.dir.handle, dents_buffer.ptr, dents_buffer.len, &header.seek);
+ switch (posix.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break @intCast(rc);
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err), // Dir is invalid or was opened without iteration ability.
+ .FAULT => |err| return errnoBug(err),
+ .NOTDIR => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ };
+ if (n == 0) {
+ dr.state = .finished;
+ return 0;
+ }
+ dr.index = header_end;
+ dr.end = header_end + n;
+ }
+ const darwin_entry = @as(*align(1) posix.system.dirent, @ptrCast(&dr.buffer[dr.index]));
+ const next_index = dr.index + darwin_entry.reclen;
+ dr.index = next_index;
+
+ const name = @as([*]u8, @ptrCast(&darwin_entry.name))[0..darwin_entry.namlen];
+ if (std.mem.eql(u8, name, ".") or std.mem.eql(u8, name, "..") or (darwin_entry.ino == 0))
+ continue;
+
+ const entry_kind: File.Kind = switch (darwin_entry.type) {
+ posix.DT.BLK => .block_device,
+ posix.DT.CHR => .character_device,
+ posix.DT.DIR => .directory,
+ posix.DT.FIFO => .named_pipe,
+ posix.DT.LNK => .sym_link,
+ posix.DT.REG => .file,
+ posix.DT.SOCK => .unix_domain_socket,
+ posix.DT.WHT => .whiteout,
+ else => .unknown,
+ };
+ buffer[buffer_index] = .{
+ .name = name,
+ .kind = entry_kind,
+ .inode = darwin_entry.ino,
+ };
+ buffer_index += 1;
+ }
+ return buffer_index;
+}
+
+fn dirReadBsd(userdata: ?*anyopaque, dr: *Dir.Reader, buffer: []Dir.Entry) Dir.Reader.Error!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ var buffer_index: usize = 0;
+ while (buffer.len - buffer_index != 0) {
+ if (dr.end - dr.index == 0) {
+ // Refill the buffer, unless we've already created references to
+ // buffered data.
+ if (buffer_index != 0) break;
+ if (dr.state == .reset) {
+ posixSeekTo(current_thread, dr.dir.handle, 0) catch |err| switch (err) {
+ error.Unseekable => return error.Unexpected,
+ else => |e| return e,
+ };
+ dr.state = .reading;
+ }
+ try current_thread.beginSyscall();
+ const n: usize = while (true) {
+ const rc = posix.system.getdents(dr.dir.handle, dr.buffer.ptr, dr.buffer.len);
+ switch (posix.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break @intCast(rc);
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err), // Dir is invalid or was opened without iteration ability
+ .FAULT => |err| return errnoBug(err),
+ .NOTDIR => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ // Introduced in freebsd 13.2: directory unlinked
+ // but still open. To be consistent, iteration ends
+ // if the directory being iterated is deleted
+ // during iteration.
+ .NOENT => {
+ dr.state = .finished;
+ return 0;
+ },
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ };
+ if (n == 0) {
+ dr.state = .finished;
+ return 0;
+ }
+ dr.index = 0;
+ dr.end = n;
+ }
+ const bsd_entry = @as(*align(1) posix.system.dirent, @ptrCast(&dr.buffer[dr.index]));
+ const next_index = dr.index +
+ if (@hasField(posix.system.dirent, "reclen")) bsd_entry.reclen else bsd_entry.reclen();
+ dr.index = next_index;
+
+ const name = @as([*]u8, @ptrCast(&bsd_entry.name))[0..bsd_entry.namlen];
+
+ const skip_zero_fileno = switch (native_os) {
+ // fileno=0 is used to mark invalid entries or deleted files.
+ .openbsd, .netbsd => true,
+ else => false,
+ };
+ if (std.mem.eql(u8, name, ".") or std.mem.eql(u8, name, "..") or
+ (skip_zero_fileno and bsd_entry.fileno == 0))
+ {
+ continue;
+ }
+
+ const entry_kind: File.Kind = switch (bsd_entry.type) {
+ posix.DT.BLK => .block_device,
+ posix.DT.CHR => .character_device,
+ posix.DT.DIR => .directory,
+ posix.DT.FIFO => .named_pipe,
+ posix.DT.LNK => .sym_link,
+ posix.DT.REG => .file,
+ posix.DT.SOCK => .unix_domain_socket,
+ posix.DT.WHT => .whiteout,
+ else => .unknown,
+ };
+ buffer[buffer_index] = .{
+ .name = name,
+ .kind = entry_kind,
+ .inode = bsd_entry.fileno,
+ };
+ buffer_index += 1;
+ }
+ return buffer_index;
+}
+
+fn dirReadIllumos(userdata: ?*anyopaque, dr: *Dir.Reader, buffer: []Dir.Entry) Dir.Reader.Error!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ var buffer_index: usize = 0;
+ while (buffer.len - buffer_index != 0) {
+ if (dr.end - dr.index == 0) {
+ // Refill the buffer, unless we've already created references to
+ // buffered data.
+ if (buffer_index != 0) break;
+ if (dr.state == .reset) {
+ posixSeekTo(current_thread, dr.dir.handle, 0) catch |err| switch (err) {
+ error.Unseekable => return error.Unexpected,
+ else => |e| return e,
+ };
+ dr.state = .reading;
+ }
+ try current_thread.beginSyscall();
+ const n: usize = while (true) {
+ const rc = posix.system.getdents(dr.dir.handle, dr.buffer.ptr, dr.buffer.len);
+ switch (posix.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break rc;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err), // Dir is invalid or was opened without iteration ability
+ .FAULT => |err| return errnoBug(err),
+ .NOTDIR => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ };
+ if (n == 0) {
+ dr.state = .finished;
+ return 0;
+ }
+ dr.index = 0;
+ dr.end = n;
+ }
+ const entry = @as(*align(1) posix.system.dirent, @ptrCast(&dr.buffer[dr.index]));
+ const next_index = dr.index + entry.reclen;
+ dr.index = next_index;
+
+ const name = std.mem.sliceTo(@as([*:0]u8, @ptrCast(&entry.name)), 0);
+ if (std.mem.eql(u8, name, ".") or std.mem.eql(u8, name, "..")) continue;
+
+ // illumos dirent doesn't expose type, so we have to call stat to get it.
+ const stat = try posixStatFile(current_thread, dr.dir.handle, name, posix.AT.SYMLINK_NOFOLLOW);
+
+ buffer[buffer_index] = .{
+ .name = name,
+ .kind = stat.kind,
+ .inode = entry.ino,
+ };
+ buffer_index += 1;
+ }
+ return buffer_index;
+}
+
+fn dirReadHaiku(userdata: ?*anyopaque, dr: *Dir.Reader, buffer: []Dir.Entry) Dir.Reader.Error!usize {
+ _ = userdata;
+ _ = dr;
+ _ = buffer;
+ @panic("TODO implement dirReadHaiku");
+}
+
+fn dirReadWindows(userdata: ?*anyopaque, dr: *Dir.Reader, buffer: []Dir.Entry) Dir.Reader.Error!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ const w = windows;
+
+ // We want to be able to use the `dr.buffer` for both the NtQueryDirectoryFile call (which
+ // returns WTF-16 names) *and* as a buffer for storing those WTF-16 names as WTF-8 to be able
+ // to return them in `Dir.Entry.name`. However, the problem that needs to be overcome in order to do
+ // that is that each WTF-16 code unit can be encoded as a maximum of 3 WTF-8 bytes, which means
+ // that it's not guaranteed that the memory used for the WTF-16 name will be sufficient
+ // for the WTF-8 encoding of the same name (for example, € is encoded as one WTF-16 code unit,
+ // [2 bytes] but encoded in WTF-8 as 3 bytes).
+ //
+ // The approach taken here is to "reserve" enough space in the `dr.buffer` to ensure that
+ // at least one entry with the maximum possible WTF-8 name length can be stored without clobbering
+ // any entries that follow it. That is, we determine how much space is needed to allow that,
+ // and then only provide the remaining portion of `dr.buffer` to the NtQueryDirectoryFile
+ // call. The WTF-16 names can then be safely converted using the full `dr.buffer` slice, making
+ // sure that each name can only potentially overwrite the data of its own entry.
+ //
+ // The worst case, where an entry's name is both the maximum length of a component and
+ // made up entirely of code points that are encoded as one WTF-16 code unit/three WTF-8 bytes,
+ // would therefore look like the diagram below, and only one entry would be able to be returned:
+ //
+ // | reserved | remaining unreserved buffer |
+ // | entry 1 | entry 2 | ... |
+ // | wtf-8 name of entry 1 |
+ //
+ // However, in the average case we will be able to store more than one WTF-8 name at a time in the
+ // available buffer and therefore we will be able to populate more than one `Dir.Entry` at a time.
+ // That might look something like this (where name 1, name 2, etc are the converted WTF-8 names):
+ //
+ // | reserved | remaining unreserved buffer |
+ // | entry 1 | entry 2 | ... |
+ // | name 1 | name 2 | name 3 | name 4 | ... |
+ //
+ // Note: More than the minimum amount of space could be reserved to make the "worst case"
+ // less likely, but since the worst-case also requires a maximum length component to matter,
+ // it's unlikely for it to become a problem in normal scenarios even if all names on the filesystem
+ // are made up of non-ASCII characters that have the "one WTF-16 code unit <-> three WTF-8 bytes"
+ // property (e.g. code points >= U+0800 and <= U+FFFF), as it's unlikely for a significant
+ // number of components to be maximum length.
+
+ // We need `3 * NAME_MAX` bytes to store a max-length component as WTF-8 safely.
+ // Because needing to store a max-length component depends on a `FileName` *with* the maximum
+ // component length, we know that the corresponding populated `FILE_BOTH_DIR_INFORMATION` will
+ // be of size `@sizeOf(w.FILE_BOTH_DIR_INFORMATION) + 2 * NAME_MAX` bytes, so we only need to
+ // reserve enough to get us to up to having `3 * NAME_MAX` bytes available when taking into account
+ // that we have the ability to write over top of the reserved memory + the full footprint of that
+ // particular `FILE_BOTH_DIR_INFORMATION`.
+ const max_info_len = @sizeOf(w.FILE_BOTH_DIR_INFORMATION) + w.NAME_MAX * 2;
+ const info_align = @alignOf(w.FILE_BOTH_DIR_INFORMATION);
+ const reserve_needed = std.mem.alignForward(usize, Dir.max_name_bytes, info_align) - max_info_len;
+ const unreserved_start = std.mem.alignForward(usize, reserve_needed, info_align);
+ const unreserved_buffer = dr.buffer[unreserved_start..];
+ // This is enforced by `Dir.Reader`
+ assert(unreserved_buffer.len >= max_info_len);
+
+ var name_index: usize = 0;
+ var buffer_index: usize = 0;
+ while (buffer.len - buffer_index != 0) {
+ if (dr.end - dr.index == 0) {
+ // Refill the buffer, unless we've already created references to
+ // buffered data.
+ if (buffer_index != 0) break;
+
+ try current_thread.checkCancel();
+ var io_status_block: w.IO_STATUS_BLOCK = undefined;
+ const rc = w.ntdll.NtQueryDirectoryFile(
+ dr.dir.handle,
+ null,
+ null,
+ null,
+ &io_status_block,
+ unreserved_buffer.ptr,
+ std.math.lossyCast(w.ULONG, unreserved_buffer.len),
+ .BothDirectory,
+ w.FALSE,
+ null,
+ @intFromBool(dr.state == .reset),
+ );
+ dr.state = .reading;
+ if (io_status_block.Information == 0) {
+ dr.state = .finished;
+ return 0;
+ }
+ dr.index = 0;
+ dr.end = io_status_block.Information;
+ switch (rc) {
+ .SUCCESS => {},
+ .ACCESS_DENIED => return error.AccessDenied, // Double-check that the Dir was opened with iteration ability
+ else => return w.unexpectedStatus(rc),
+ }
+ }
+
+ // While the official API docs guarantee FILE_BOTH_DIR_INFORMATION to be aligned properly
+ // this may not always be the case (e.g. due to faulty VM/sandboxing tools)
+ const dir_info: *align(2) w.FILE_BOTH_DIR_INFORMATION = @ptrCast(@alignCast(&unreserved_buffer[dr.index]));
+ const backtrack_index = dr.index;
+ if (dir_info.NextEntryOffset != 0) {
+ dr.index += dir_info.NextEntryOffset;
+ } else {
+ dr.index = dr.end;
+ }
+
+ const name_wtf16le = @as([*]u16, @ptrCast(&dir_info.FileName))[0 .. dir_info.FileNameLength / 2];
+
+ if (std.mem.eql(u16, name_wtf16le, &[_]u16{'.'}) or std.mem.eql(u16, name_wtf16le, &[_]u16{ '.', '.' })) {
+ continue;
+ }
+
+ // Read any relevant information from the `dir_info` now since it's possible the WTF-8
+ // name will overwrite it.
+ const kind: File.Kind = blk: {
+ const attrs = dir_info.FileAttributes;
+ if (attrs.REPARSE_POINT) break :blk .sym_link;
+ if (attrs.DIRECTORY) break :blk .directory;
+ break :blk .file;
+ };
+ const inode: File.INode = dir_info.FileIndex;
+
+ // If there's no more space for WTF-8 names without bleeding over into
+ // the remaining unprocessed entries, then backtrack and return what we have so far.
+ if (name_index + std.unicode.calcWtf8Len(name_wtf16le) > unreserved_start + dr.index) {
+ // We should always be able to fit at least one entry into the buffer no matter what
+ assert(buffer_index != 0);
+ dr.index = backtrack_index;
+ break;
+ }
+
+ const name_buf = dr.buffer[name_index..];
+ const name_wtf8_len = std.unicode.wtf16LeToWtf8(name_buf, name_wtf16le);
+ const name_wtf8 = name_buf[0..name_wtf8_len];
+ name_index += name_wtf8_len;
+
+ buffer[buffer_index] = .{
+ .name = name_wtf8,
+ .kind = kind,
+ .inode = inode,
+ };
+ buffer_index += 1;
+ }
+
+ return buffer_index;
+}
+
+fn dirReadWasi(userdata: ?*anyopaque, dr: *Dir.Reader, buffer: []Dir.Entry) Dir.Reader.Error!usize {
+ // We intentinally use fd_readdir even when linked with libc, since its
+ // implementation is exactly the same as below, and we avoid the code
+ // complexity here.
+ const wasi = std.os.wasi;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ const Header = extern struct {
+ cookie: u64,
+ };
+ const header: *align(@alignOf(usize)) Header = @ptrCast(dr.buffer.ptr);
+ const header_end: usize = @sizeOf(Header);
+ if (dr.index < header_end) {
+ // Initialize header.
+ dr.index = header_end;
+ dr.end = header_end;
+ header.* = .{ .cookie = wasi.DIRCOOKIE_START };
+ }
+ var buffer_index: usize = 0;
+ while (buffer.len - buffer_index != 0) {
+ // According to the WASI spec, the last entry might be truncated, so we
+ // need to check if the remaining buffer contains the whole dirent.
+ if (dr.end - dr.index < @sizeOf(wasi.dirent_t)) {
+ // Refill the buffer, unless we've already created references to
+ // buffered data.
+ if (buffer_index != 0) break;
+ if (dr.state == .reset) {
+ header.* = .{ .cookie = wasi.DIRCOOKIE_START };
+ dr.state = .reading;
+ }
+ const dents_buffer = dr.buffer[header_end..];
+ var n: usize = undefined;
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (wasi.fd_readdir(dr.dir.handle, dents_buffer.ptr, dents_buffer.len, header.cookie, &n)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err), // Dir is invalid or was opened without iteration ability.
+ .FAULT => |err| return errnoBug(err),
+ .NOTDIR => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ // To be consistent across platforms, iteration
+ // ends if the directory being iterated is deleted
+ // during iteration. This matches the behavior of
+ // non-Linux, non-WASI UNIX platforms.
+ .NOENT => {
+ dr.state = .finished;
+ return 0;
+ },
+ .NOTCAPABLE => return error.AccessDenied,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ if (n == 0) {
+ dr.state = .finished;
+ return 0;
+ }
+ dr.index = header_end;
+ dr.end = header_end + n;
+ }
+ const entry: *align(1) wasi.dirent_t = @ptrCast(&dr.buffer[dr.index]);
+ const entry_size = @sizeOf(wasi.dirent_t);
+ const name_index = dr.index + entry_size;
+ if (name_index + entry.namlen > dr.end) {
+ // This case, the name is truncated, so we need to call readdir to store the entire name.
+ dr.end = dr.index; // Force fd_readdir in the next loop.
+ continue;
+ }
+ const name = dr.buffer[name_index..][0..entry.namlen];
+ const next_index = name_index + entry.namlen;
+ dr.index = next_index;
+ header.cookie = entry.next;
+
+ if (std.mem.eql(u8, name, ".") or std.mem.eql(u8, name, ".."))
+ continue;
+
+ const entry_kind: File.Kind = switch (entry.type) {
+ .BLOCK_DEVICE => .block_device,
+ .CHARACTER_DEVICE => .character_device,
+ .DIRECTORY => .directory,
+ .SYMBOLIC_LINK => .sym_link,
+ .REGULAR_FILE => .file,
+ .SOCKET_STREAM, .SOCKET_DGRAM => .unix_domain_socket,
+ else => .unknown,
+ };
+ buffer[buffer_index] = .{
+ .name = name,
+ .kind = entry_kind,
+ .inode = entry.ino,
+ };
+ buffer_index += 1;
+ }
+ return buffer_index;
+}
+
+fn dirReadUnimplemented(userdata: ?*anyopaque, dir_reader: *Dir.Reader, buffer: []Dir.Entry) Dir.Reader.Error!usize {
+ _ = userdata;
+ _ = dir_reader;
+ _ = buffer;
+ return error.Unimplemented;
+}
+
+const dirRealPathFile = switch (native_os) {
+ .windows => dirRealPathFileWindows,
+ else => dirRealPathFilePosix,
+};
+
+fn dirRealPathFileWindows(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, out_buffer: []u8) Dir.RealPathFileError!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ try current_thread.checkCancel();
+
+ var path_name_w = try windows.sliceToPrefixedFileW(dir.handle, sub_path);
+
+ const h_file = blk: {
+ const res = windows.OpenFile(path_name_w.span(), .{
+ .dir = dir.handle,
+ .access_mask = .{
+ .GENERIC = .{ .READ = true },
+ .STANDARD = .{ .SYNCHRONIZE = true },
+ },
+ .creation = .OPEN,
+ .filter = .any,
+ }) catch |err| switch (err) {
+ error.WouldBlock => unreachable,
+ else => |e| return e,
+ };
+ break :blk res;
+ };
+ defer windows.CloseHandle(h_file);
+ return realPathWindows(current_thread, h_file, out_buffer);
+}
+
+fn realPathWindows(current_thread: *Thread, h_file: windows.HANDLE, out_buffer: []u8) File.RealPathError!usize {
+ _ = current_thread; // TODO move GetFinalPathNameByHandle logic into std.Io.Threaded and add cancel checks
+ var wide_buf: [windows.PATH_MAX_WIDE]u16 = undefined;
+ const wide_slice = try windows.GetFinalPathNameByHandle(h_file, .{}, &wide_buf);
+
+ const len = std.unicode.calcWtf8Len(wide_slice);
+ if (len > out_buffer.len)
+ return error.NameTooLong;
+
+ return std.unicode.wtf16LeToWtf8(out_buffer, wide_slice);
+}
+
+fn dirRealPathFilePosix(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, out_buffer: []u8) Dir.RealPathFileError!usize {
+ if (native_os == .wasi) return error.OperationUnsupported;
+
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ var path_buffer: [posix.PATH_MAX]u8 = undefined;
+ const sub_path_posix = try pathToPosix(sub_path, &path_buffer);
+
+ if (builtin.link_libc and dir.handle == posix.AT.FDCWD) {
+ if (out_buffer.len < posix.PATH_MAX) return error.NameTooLong;
+ try current_thread.beginSyscall();
+ while (true) {
+ if (std.c.realpath(sub_path_posix, out_buffer.ptr)) |redundant_pointer| {
+ current_thread.endSyscall();
+ assert(redundant_pointer == out_buffer.ptr);
+ return std.mem.indexOfScalar(u8, out_buffer, 0) orelse out_buffer.len;
+ }
+ const err: posix.E = @enumFromInt(std.c._errno().*);
+ if (err == .INTR) {
+ try current_thread.checkCancel();
+ continue;
+ }
+ current_thread.endSyscall();
+ switch (err) {
+ .INVAL => return errnoBug(err),
+ .BADF => return errnoBug(err),
+ .FAULT => return errnoBug(err),
+ .ACCES => return error.AccessDenied,
+ .NOENT => return error.FileNotFound,
+ .OPNOTSUPP => return error.OperationUnsupported,
+ .NOTDIR => return error.NotDir,
+ .NAMETOOLONG => return error.NameTooLong,
+ .LOOP => return error.SymLinkLoop,
+ .IO => return error.InputOutput,
+ else => return posix.unexpectedErrno(err),
+ }
+ }
+ }
+
+ var flags: posix.O = .{};
+ if (@hasField(posix.O, "NONBLOCK")) flags.NONBLOCK = true;
+ if (@hasField(posix.O, "CLOEXEC")) flags.CLOEXEC = true;
+ if (@hasField(posix.O, "PATH")) flags.PATH = true;
+
+ const mode: posix.mode_t = 0;
+
+ try current_thread.beginSyscall();
+ const fd: posix.fd_t = while (true) {
+ const rc = openat_sym(dir.handle, sub_path_posix, flags, mode);
+ switch (posix.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break @intCast(rc);
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => return error.BadPathName,
+ .BADF => |err| return errnoBug(err), // File descriptor used after closed.
+ .ACCES => return error.AccessDenied,
+ .FBIG => return error.FileTooBig,
+ .OVERFLOW => return error.FileTooBig,
+ .ISDIR => return error.IsDir,
+ .LOOP => return error.SymLinkLoop,
+ .MFILE => return error.ProcessFdQuotaExceeded,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NFILE => return error.SystemFdQuotaExceeded,
+ .NODEV => return error.NoDevice,
+ .NOENT => return error.FileNotFound,
+ .SRCH => return error.FileNotFound, // Linux when accessing procfs.
+ .NOMEM => return error.SystemResources,
+ .NOSPC => return error.NoSpaceLeft,
+ .NOTDIR => return error.NotDir,
+ .PERM => return error.PermissionDenied,
+ .EXIST => return error.PathAlreadyExists,
+ .BUSY => return error.DeviceBusy,
+ .NXIO => return error.NoDevice,
+ .ILSEQ => return error.BadPathName,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ };
+ defer posix.close(fd);
+ return realPathPosix(current_thread, fd, out_buffer);
+}
+
+const dirRealPath = switch (native_os) {
+ .windows => dirRealPathWindows,
+ else => dirRealPathPosix,
+};
+
+fn dirRealPathPosix(userdata: ?*anyopaque, dir: Dir, out_buffer: []u8) Dir.RealPathError!usize {
+ if (native_os == .wasi) return error.OperationUnsupported;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ return realPathPosix(current_thread, dir.handle, out_buffer);
+}
+
+fn dirRealPathWindows(userdata: ?*anyopaque, dir: Dir, out_buffer: []u8) Dir.RealPathError!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ return realPathWindows(current_thread, dir.handle, out_buffer);
+}
+
+const fileRealPath = switch (native_os) {
+ .windows => fileRealPathWindows,
+ else => fileRealPathPosix,
+};
+
+fn fileRealPathWindows(userdata: ?*anyopaque, file: File, out_buffer: []u8) File.RealPathError!usize {
+ if (native_os == .wasi) return error.OperationUnsupported;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ return realPathWindows(current_thread, file.handle, out_buffer);
+}
+
+fn fileRealPathPosix(userdata: ?*anyopaque, file: File, out_buffer: []u8) File.RealPathError!usize {
+ if (native_os == .wasi) return error.OperationUnsupported;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ return realPathPosix(current_thread, file.handle, out_buffer);
+}
+
+fn realPathPosix(current_thread: *Thread, fd: posix.fd_t, out_buffer: []u8) File.RealPathError!usize {
+ switch (native_os) {
+ .netbsd, .dragonfly, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => {
+ var sufficient_buffer: [posix.PATH_MAX]u8 = undefined;
+ @memset(&sufficient_buffer, 0);
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.fcntl(fd, posix.F.GETPATH, &sufficient_buffer))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .BADF => return error.FileNotFound,
+ .NOENT => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ .NOSPC => return error.NameTooLong,
+ .RANGE => return error.NameTooLong,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ const n = std.mem.indexOfScalar(u8, &sufficient_buffer, 0) orelse sufficient_buffer.len;
+ if (n > out_buffer.len) return error.NameTooLong;
+ @memcpy(out_buffer[0..n], sufficient_buffer[0..n]);
+ return n;
+ },
+ .linux, .serenity, .illumos => {
+ var procfs_buf: ["/proc/self/path/-2147483648\x00".len]u8 = undefined;
+ const template = if (native_os == .illumos) "/proc/self/path/{d}" else "/proc/self/fd/{d}";
+ const proc_path = std.fmt.bufPrintSentinel(&procfs_buf, template, .{fd}, 0) catch unreachable;
+ try current_thread.beginSyscall();
+ while (true) {
+ const rc = posix.system.readlink(proc_path, out_buffer.ptr, out_buffer.len);
+ switch (posix.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ const len: usize = @bitCast(rc);
+ return len;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .FAULT => |err| return errnoBug(err),
+ .IO => return error.FileSystem,
+ .LOOP => return error.SymLinkLoop,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ .NOTDIR => return error.NotDir,
+ .ILSEQ => |err| return errnoBug(err),
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ },
+ .freebsd => {
+ var k_file: std.c.kinfo_file = undefined;
+ k_file.structsize = std.c.KINFO_FILE_SIZE;
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(std.c.fcntl(fd, std.c.F.KINFO, @intFromPtr(&k_file)))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ .BADF => {
+ current_thread.endSyscall();
+ return error.FileNotFound;
+ },
+ else => |err| {
+ current_thread.endSyscall();
+ return posix.unexpectedErrno(err);
+ },
+ }
+ }
+ const len = std.mem.findScalar(u8, &k_file.path, 0) orelse k_file.path.len;
+ if (len == 0) return error.NameTooLong;
+ @memcpy(out_buffer[0..len], k_file.path[0..len]);
+ return len;
+ },
+ else => return error.OperationUnsupported,
+ }
+ comptime unreachable;
+}
+
+const dirDeleteFile = switch (native_os) {
+ .windows => dirDeleteFileWindows,
+ .wasi => dirDeleteFileWasi,
+ else => dirDeleteFilePosix,
+};
+
+fn dirDeleteFileWindows(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8) Dir.DeleteFileError!void {
+ return dirDeleteWindows(userdata, dir, sub_path, false) catch |err| switch (err) {
+ error.DirNotEmpty => unreachable,
+ else => |e| return e,
+ };
+}
+
+fn dirDeleteFileWasi(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8) Dir.DeleteFileError!void {
+ if (builtin.link_libc) return dirDeleteFilePosix(userdata, dir, sub_path);
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ try current_thread.beginSyscall();
+ while (true) {
+ const res = std.os.wasi.path_unlink_file(dir.handle, sub_path.ptr, sub_path.len);
+ switch (res) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .BUSY => return error.FileBusy,
+ .FAULT => |err| return errnoBug(err),
+ .IO => return error.FileSystem,
+ .ISDIR => return error.IsDir,
+ .LOOP => return error.SymLinkLoop,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOTDIR => return error.NotDir,
+ .NOMEM => return error.SystemResources,
+ .ROFS => return error.ReadOnlyFileSystem,
+ .NOTCAPABLE => return error.AccessDenied,
+ .ILSEQ => return error.BadPathName,
+ .INVAL => |err| return errnoBug(err), // invalid flags, or pathname has . as last component
+ .BADF => |err| return errnoBug(err), // File descriptor used after closed.
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn dirDeleteFilePosix(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8) Dir.DeleteFileError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ var path_buffer: [posix.PATH_MAX]u8 = undefined;
+ const sub_path_posix = try pathToPosix(sub_path, &path_buffer);
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.unlinkat(dir.handle, sub_path_posix, 0))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ // Some systems return permission errors when trying to delete a
+ // directory, so we need to handle that case specifically and
+ // translate the error.
+ .PERM => switch (native_os) {
+ .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd, .illumos => {
+
+ // Don't follow symlinks to match unlinkat (which acts on symlinks rather than follows them).
+ var st = std.mem.zeroes(posix.Stat);
+ while (true) {
+ try current_thread.checkCancel();
+ switch (posix.errno(fstatat_sym(dir.handle, sub_path_posix, &st, posix.AT.SYMLINK_NOFOLLOW))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break;
+ },
+ .INTR => continue,
+ else => {
+ current_thread.endSyscall();
+ return error.PermissionDenied;
+ },
+ }
+ }
+ const is_dir = st.mode & posix.S.IFMT == posix.S.IFDIR;
+ if (is_dir)
+ return error.IsDir
+ else
+ return error.PermissionDenied;
+ },
+ else => {
+ current_thread.endSyscall();
+ return error.PermissionDenied;
+ },
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .BUSY => return error.FileBusy,
+ .FAULT => |err| return errnoBug(err),
+ .IO => return error.FileSystem,
+ .ISDIR => return error.IsDir,
+ .LOOP => return error.SymLinkLoop,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOTDIR => return error.NotDir,
+ .NOMEM => return error.SystemResources,
+ .ROFS => return error.ReadOnlyFileSystem,
+ .EXIST => |err| return errnoBug(err),
+ .NOTEMPTY => |err| return errnoBug(err), // Not passing AT.REMOVEDIR
+ .ILSEQ => return error.BadPathName,
+ .INVAL => |err| return errnoBug(err), // invalid flags, or pathname has . as last component
+ .BADF => |err| return errnoBug(err), // File descriptor used after closed.
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+const dirDeleteDir = switch (native_os) {
+ .windows => dirDeleteDirWindows,
+ .wasi => dirDeleteDirWasi,
+ else => dirDeleteDirPosix,
+};
+
+fn dirDeleteDirWindows(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8) Dir.DeleteDirError!void {
+ return dirDeleteWindows(userdata, dir, sub_path, true) catch |err| switch (err) {
+ error.IsDir => unreachable,
+ else => |e| return e,
+ };
+}
+
+fn dirDeleteWindows(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, remove_dir: bool) (Dir.DeleteDirError || Dir.DeleteFileError)!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ const w = windows;
+
+ try current_thread.checkCancel();
+
+ const sub_path_w_buf = try w.sliceToPrefixedFileW(dir.handle, sub_path);
+ const sub_path_w = sub_path_w_buf.span();
+
+ const path_len_bytes = @as(u16, @intCast(sub_path_w.len * 2));
+ var nt_name: w.UNICODE_STRING = .{
+ .Length = path_len_bytes,
+ .MaximumLength = path_len_bytes,
+ // The Windows API makes this mutable, but it will not mutate here.
+ .Buffer = @constCast(sub_path_w.ptr),
+ };
+
+ if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
+ // Windows does not recognize this, but it does work with empty string.
+ nt_name.Length = 0;
+ }
+ if (sub_path_w[0] == '.' and sub_path_w[1] == '.' and sub_path_w[2] == 0) {
+ // Can't remove the parent directory with an open handle.
+ return error.FileBusy;
+ }
+
+ var io_status_block: w.IO_STATUS_BLOCK = undefined;
+ var tmp_handle: w.HANDLE = undefined;
+ var rc = w.ntdll.NtCreateFile(
+ &tmp_handle,
+ .{ .STANDARD = .{
+ .RIGHTS = .{ .DELETE = true },
+ .SYNCHRONIZE = true,
+ } },
+ &.{
+ .Length = @sizeOf(w.OBJECT_ATTRIBUTES),
+ .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(sub_path_w)) null else dir.handle,
+ .Attributes = .{},
+ .ObjectName = &nt_name,
+ .SecurityDescriptor = null,
+ .SecurityQualityOfService = null,
+ },
+ &io_status_block,
+ null,
+ .{},
+ .VALID_FLAGS,
+ .OPEN,
+ .{
+ .DIRECTORY_FILE = remove_dir,
+ .NON_DIRECTORY_FILE = !remove_dir,
+ .OPEN_REPARSE_POINT = true, // would we ever want to delete the target instead?
+ },
+ null,
+ 0,
+ );
+ switch (rc) {
+ .SUCCESS => {},
+ .OBJECT_NAME_INVALID => |err| return w.statusBug(err),
+ .OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
+ .OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
+ .BAD_NETWORK_PATH => return error.NetworkNotFound, // \\server was not found
+ .BAD_NETWORK_NAME => return error.NetworkNotFound, // \\server was found but \\server\share wasn't
+ .INVALID_PARAMETER => |err| return w.statusBug(err),
+ .FILE_IS_A_DIRECTORY => return error.IsDir,
+ .NOT_A_DIRECTORY => return error.NotDir,
+ .SHARING_VIOLATION => return error.FileBusy,
+ .ACCESS_DENIED => return error.AccessDenied,
+ .DELETE_PENDING => return,
+ else => return w.unexpectedStatus(rc),
+ }
+ defer w.CloseHandle(tmp_handle);
+
+ // FileDispositionInformationEx has varying levels of support:
+ // - FILE_DISPOSITION_INFORMATION_EX requires >= win10_rs1
+ // (INVALID_INFO_CLASS is returned if not supported)
+ // - Requires the NTFS filesystem
+ // (on filesystems like FAT32, INVALID_PARAMETER is returned)
+ // - FILE_DISPOSITION_POSIX_SEMANTICS requires >= win10_rs1
+ // - FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE requires >= win10_rs5
+ // (NOT_SUPPORTED is returned if a flag is unsupported)
+ //
+ // The strategy here is just to try using FileDispositionInformationEx and fall back to
+ // FileDispositionInformation if the return value lets us know that some aspect of it is not supported.
+ const need_fallback = need_fallback: {
+ try current_thread.checkCancel();
+
+ // Deletion with posix semantics if the filesystem supports it.
+ const info: w.FILE.DISPOSITION.INFORMATION.EX = .{ .Flags = .{
+ .DELETE = true,
+ .POSIX_SEMANTICS = true,
+ .IGNORE_READONLY_ATTRIBUTE = true,
+ } };
+
+ rc = w.ntdll.NtSetInformationFile(
+ tmp_handle,
+ &io_status_block,
+ &info,
+ @sizeOf(w.FILE.DISPOSITION.INFORMATION.EX),
+ .DispositionEx,
+ );
+ switch (rc) {
+ .SUCCESS => return,
+ // The filesystem does not support FileDispositionInformationEx
+ .INVALID_PARAMETER,
+ // The operating system does not support FileDispositionInformationEx
+ .INVALID_INFO_CLASS,
+ // The operating system does not support one of the flags
+ .NOT_SUPPORTED,
+ => break :need_fallback true,
+ // For all other statuses, fall down to the switch below to handle them.
+ else => break :need_fallback false,
+ }
+ };
+
+ if (need_fallback) {
+ try current_thread.checkCancel();
+
+ // Deletion with file pending semantics, which requires waiting or moving
+ // files to get them removed (from here).
+ const file_dispo: w.FILE.DISPOSITION.INFORMATION = .{
+ .DeleteFile = w.TRUE,
+ };
+
+ rc = w.ntdll.NtSetInformationFile(
+ tmp_handle,
+ &io_status_block,
+ &file_dispo,
+ @sizeOf(w.FILE.DISPOSITION.INFORMATION),
+ .Disposition,
+ );
+ }
+ switch (rc) {
+ .SUCCESS => {},
+ .DIRECTORY_NOT_EMPTY => return error.DirNotEmpty,
+ .INVALID_PARAMETER => |err| return w.statusBug(err),
+ .CANNOT_DELETE => return error.AccessDenied,
+ .MEDIA_WRITE_PROTECTED => return error.AccessDenied,
+ .ACCESS_DENIED => return error.AccessDenied,
+ else => return w.unexpectedStatus(rc),
+ }
+}
+
+fn dirDeleteDirWasi(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8) Dir.DeleteDirError!void {
+ if (builtin.link_libc) return dirDeleteDirPosix(userdata, dir, sub_path);
+
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ try current_thread.beginSyscall();
+ while (true) {
+ const res = std.os.wasi.path_remove_directory(dir.handle, sub_path.ptr, sub_path.len);
+ switch (res) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .BUSY => return error.FileBusy,
+ .FAULT => |err| return errnoBug(err),
+ .IO => return error.FileSystem,
+ .LOOP => return error.SymLinkLoop,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOTDIR => return error.NotDir,
+ .NOMEM => return error.SystemResources,
+ .ROFS => return error.ReadOnlyFileSystem,
+ .NOTEMPTY => return error.DirNotEmpty,
+ .NOTCAPABLE => return error.AccessDenied,
+ .ILSEQ => return error.BadPathName,
+ .INVAL => |err| return errnoBug(err), // invalid flags, or pathname has . as last component
+ .BADF => |err| return errnoBug(err), // File descriptor used after closed.
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn dirDeleteDirPosix(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8) Dir.DeleteDirError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ var path_buffer: [posix.PATH_MAX]u8 = undefined;
+ const sub_path_posix = try pathToPosix(sub_path, &path_buffer);
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.unlinkat(dir.handle, sub_path_posix, posix.AT.REMOVEDIR))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .BUSY => return error.FileBusy,
+ .FAULT => |err| return errnoBug(err),
+ .IO => return error.FileSystem,
+ .ISDIR => |err| return errnoBug(err),
+ .LOOP => return error.SymLinkLoop,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOTDIR => return error.NotDir,
+ .NOMEM => return error.SystemResources,
+ .ROFS => return error.ReadOnlyFileSystem,
+ .EXIST => |err| return errnoBug(err),
+ .NOTEMPTY => return error.DirNotEmpty,
+ .ILSEQ => return error.BadPathName,
+ .INVAL => |err| return errnoBug(err), // invalid flags, or pathname has . as last component
+ .BADF => |err| return errnoBug(err), // File descriptor used after closed.
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+const dirRename = switch (native_os) {
+ .windows => dirRenameWindows,
+ .wasi => dirRenameWasi,
+ else => dirRenamePosix,
+};
+
+fn dirRenameWindows(
+ userdata: ?*anyopaque,
+ old_dir: Dir,
+ old_sub_path: []const u8,
+ new_dir: Dir,
+ new_sub_path: []const u8,
+) Dir.RenameError!void {
+ const w = windows;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ const old_path_w_buf = try windows.sliceToPrefixedFileW(old_dir.handle, old_sub_path);
+ const old_path_w = old_path_w_buf.span();
+ const new_path_w_buf = try windows.sliceToPrefixedFileW(new_dir.handle, new_sub_path);
+ const new_path_w = new_path_w_buf.span();
+ const replace_if_exists = true;
+
+ try current_thread.checkCancel();
+
+ const src_fd = w.OpenFile(old_path_w, .{
+ .dir = old_dir.handle,
+ .access_mask = .{
+ .GENERIC = .{ .WRITE = true },
+ .STANDARD = .{
+ .RIGHTS = .{ .DELETE = true },
+ .SYNCHRONIZE = true,
+ },
+ },
+ .creation = .OPEN,
+ .filter = .any, // This function is supposed to rename both files and directories.
+ .follow_symlinks = false,
+ }) catch |err| switch (err) {
+ error.WouldBlock => unreachable, // Not possible without `.share_access_nonblocking = true`.
+ else => |e| return e,
+ };
+ defer w.CloseHandle(src_fd);
+
+ var rc: w.NTSTATUS = undefined;
+ // FileRenameInformationEx has varying levels of support:
+ // - FILE_RENAME_INFORMATION_EX requires >= win10_rs1
+ // (INVALID_INFO_CLASS is returned if not supported)
+ // - Requires the NTFS filesystem
+ // (on filesystems like FAT32, INVALID_PARAMETER is returned)
+ // - FILE_RENAME_POSIX_SEMANTICS requires >= win10_rs1
+ // - FILE_RENAME_IGNORE_READONLY_ATTRIBUTE requires >= win10_rs5
+ // (NOT_SUPPORTED is returned if a flag is unsupported)
+ //
+ // The strategy here is just to try using FileRenameInformationEx and fall back to
+ // FileRenameInformation if the return value lets us know that some aspect of it is not supported.
+ const need_fallback = need_fallback: {
+ const rename_info: w.FILE.RENAME_INFORMATION = .init(.{
+ .Flags = .{
+ .REPLACE_IF_EXISTS = replace_if_exists,
+ .POSIX_SEMANTICS = true,
+ .IGNORE_READONLY_ATTRIBUTE = true,
+ },
+ .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(new_path_w)) null else new_dir.handle,
+ .FileName = new_path_w,
+ });
+ var io_status_block: w.IO_STATUS_BLOCK = undefined;
+ const rename_info_buf = rename_info.toBuffer();
+ rc = w.ntdll.NtSetInformationFile(
+ src_fd,
+ &io_status_block,
+ rename_info_buf.ptr,
+ @intCast(rename_info_buf.len),
+ .RenameEx,
+ );
+ switch (rc) {
+ .SUCCESS => return,
+ // The filesystem does not support FileDispositionInformationEx
+ .INVALID_PARAMETER,
+ // The operating system does not support FileDispositionInformationEx
+ .INVALID_INFO_CLASS,
+ // The operating system does not support one of the flags
+ .NOT_SUPPORTED,
+ => break :need_fallback true,
+ // For all other statuses, fall down to the switch below to handle them.
+ else => break :need_fallback false,
+ }
+ };
+
+ if (need_fallback) {
+ const rename_info: w.FILE.RENAME_INFORMATION = .init(.{
+ .Flags = .{ .REPLACE_IF_EXISTS = replace_if_exists },
+ .RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(new_path_w)) null else new_dir.handle,
+ .FileName = new_path_w,
+ });
+ var io_status_block: w.IO_STATUS_BLOCK = undefined;
+ const rename_info_buf = rename_info.toBuffer();
+ rc = w.ntdll.NtSetInformationFile(
+ src_fd,
+ &io_status_block,
+ rename_info_buf.ptr,
+ @intCast(rename_info_buf.len),
+ .Rename,
+ );
+ }
+
+ switch (rc) {
+ .SUCCESS => {},
+ .INVALID_HANDLE => |err| return w.statusBug(err),
+ .INVALID_PARAMETER => |err| return w.statusBug(err),
+ .OBJECT_PATH_SYNTAX_BAD => |err| return w.statusBug(err),
+ .ACCESS_DENIED => return error.AccessDenied,
+ .OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
+ .OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
+ .NOT_SAME_DEVICE => return error.RenameAcrossMountPoints,
+ .OBJECT_NAME_COLLISION => return error.PathAlreadyExists,
+ .DIRECTORY_NOT_EMPTY => return error.PathAlreadyExists,
+ .FILE_IS_A_DIRECTORY => return error.IsDir,
+ .NOT_A_DIRECTORY => return error.NotDir,
+ else => return w.unexpectedStatus(rc),
+ }
+}
+
+fn dirRenameWasi(
+ userdata: ?*anyopaque,
+ old_dir: Dir,
+ old_sub_path: []const u8,
+ new_dir: Dir,
+ new_sub_path: []const u8,
+) Dir.RenameError!void {
+ if (builtin.link_libc) return dirRenamePosix(userdata, old_dir, old_sub_path, new_dir, new_sub_path);
+
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (std.os.wasi.path_rename(old_dir.handle, old_sub_path.ptr, old_sub_path.len, new_dir.handle, new_sub_path.ptr, new_sub_path.len)) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .BUSY => return error.FileBusy,
+ .DQUOT => return error.DiskQuota,
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ISDIR => return error.IsDir,
+ .LOOP => return error.SymLinkLoop,
+ .MLINK => return error.LinkQuotaExceeded,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOTDIR => return error.NotDir,
+ .NOMEM => return error.SystemResources,
+ .NOSPC => return error.NoSpaceLeft,
+ .EXIST => return error.PathAlreadyExists,
+ .NOTEMPTY => return error.PathAlreadyExists,
+ .ROFS => return error.ReadOnlyFileSystem,
+ .XDEV => return error.RenameAcrossMountPoints,
+ .NOTCAPABLE => return error.AccessDenied,
+ .ILSEQ => return error.BadPathName,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn dirRenamePosix(
+ userdata: ?*anyopaque,
+ old_dir: Dir,
+ old_sub_path: []const u8,
+ new_dir: Dir,
+ new_sub_path: []const u8,
+) Dir.RenameError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ var old_path_buffer: [posix.PATH_MAX]u8 = undefined;
+ var new_path_buffer: [posix.PATH_MAX]u8 = undefined;
+
+ const old_sub_path_posix = try pathToPosix(old_sub_path, &old_path_buffer);
+ const new_sub_path_posix = try pathToPosix(new_sub_path, &new_path_buffer);
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.renameat(old_dir.handle, old_sub_path_posix, new_dir.handle, new_sub_path_posix))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .BUSY => return error.FileBusy,
+ .DQUOT => return error.DiskQuota,
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ISDIR => return error.IsDir,
+ .LOOP => return error.SymLinkLoop,
+ .MLINK => return error.LinkQuotaExceeded,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOTDIR => return error.NotDir,
+ .NOMEM => return error.SystemResources,
+ .NOSPC => return error.NoSpaceLeft,
+ .EXIST => return error.PathAlreadyExists,
+ .NOTEMPTY => return error.PathAlreadyExists,
+ .ROFS => return error.ReadOnlyFileSystem,
+ .XDEV => return error.RenameAcrossMountPoints,
+ .ILSEQ => return error.BadPathName,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+const dirSymLink = switch (native_os) {
+ .windows => dirSymLinkWindows,
+ .wasi => dirSymLinkWasi,
+ else => dirSymLinkPosix,
+};
+
+fn dirSymLinkWindows(
+ userdata: ?*anyopaque,
+ dir: Dir,
+ target_path: []const u8,
+ sym_link_path: []const u8,
+ flags: Dir.SymLinkFlags,
+) Dir.SymLinkError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ const w = windows;
+
+ try current_thread.checkCancel();
+
+ // Target path does not use sliceToPrefixedFileW because certain paths
+ // are handled differently when creating a symlink than they would be
+ // when converting to an NT namespaced path. CreateSymbolicLink in
+ // symLinkW will handle the necessary conversion.
+ var target_path_w: w.PathSpace = undefined;
+ target_path_w.len = try w.wtf8ToWtf16Le(&target_path_w.data, target_path);
+ target_path_w.data[target_path_w.len] = 0;
+ // However, we need to canonicalize any path separators to `\`, since if
+ // the target path is relative, then it must use `\` as the path separator.
+ std.mem.replaceScalar(
+ u16,
+ target_path_w.data[0..target_path_w.len],
+ std.mem.nativeToLittle(u16, '/'),
+ std.mem.nativeToLittle(u16, '\\'),
+ );
+
+ const sym_link_path_w = try w.sliceToPrefixedFileW(dir.handle, sym_link_path);
+
+ const SYMLINK_DATA = extern struct {
+ ReparseTag: w.IO_REPARSE_TAG,
+ ReparseDataLength: w.USHORT,
+ Reserved: w.USHORT,
+ SubstituteNameOffset: w.USHORT,
+ SubstituteNameLength: w.USHORT,
+ PrintNameOffset: w.USHORT,
+ PrintNameLength: w.USHORT,
+ Flags: w.ULONG,
+ };
+
+ const symlink_handle = w.OpenFile(sym_link_path_w.span(), .{
+ .access_mask = .{
+ .GENERIC = .{ .READ = true, .WRITE = true },
+ .STANDARD = .{ .SYNCHRONIZE = true },
+ },
+ .dir = dir.handle,
+ .creation = .CREATE,
+ .filter = if (flags.is_directory) .dir_only else .non_directory_only,
+ }) catch |err| switch (err) {
+ error.IsDir => return error.PathAlreadyExists,
+ error.NotDir => return error.Unexpected,
+ error.WouldBlock => return error.Unexpected,
+ error.PipeBusy => return error.Unexpected,
+ error.NoDevice => return error.Unexpected,
+ error.AntivirusInterference => return error.Unexpected,
+ else => |e| return e,
+ };
+ defer w.CloseHandle(symlink_handle);
+
+ // Relevant portions of the documentation:
+ // > Relative links are specified using the following conventions:
+ // > - Root relative—for example, "\Windows\System32" resolves to "current drive:\Windows\System32".
+ // > - Current working directory–relative—for example, if the current working directory is
+ // > C:\Windows\System32, "C:File.txt" resolves to "C:\Windows\System32\File.txt".
+ // > Note: If you specify a current working directory–relative link, it is created as an absolute
+ // > link, due to the way the current working directory is processed based on the user and the thread.
+ // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createsymboliclinkw
+ var is_target_absolute = false;
+ const final_target_path = target_path: {
+ if (w.hasCommonNtPrefix(u16, target_path_w.span())) {
+ // Already an NT path, no need to do anything to it
+ break :target_path target_path_w.span();
+ } else {
+ switch (w.getWin32PathType(u16, target_path_w.span())) {
+ // Rooted paths need to avoid getting put through wToPrefixedFileW
+ // (and they are treated as relative in this context)
+ // Note: It seems that rooted paths in symbolic links are relative to
+ // the drive that the symbolic exists on, not to the CWD's drive.
+ // So, if the symlink is on C:\ and the CWD is on D:\,
+ // it will still resolve the path relative to the root of
+ // the C:\ drive.
+ .rooted => break :target_path target_path_w.span(),
+ // Keep relative paths relative, but anything else needs to get NT-prefixed.
+ else => if (!std.fs.path.isAbsoluteWindowsWtf16(target_path_w.span()))
+ break :target_path target_path_w.span(),
+ }
+ }
+ var prefixed_target_path = try w.wToPrefixedFileW(dir.handle, target_path_w.span());
+ // We do this after prefixing to ensure that drive-relative paths are treated as absolute
+ is_target_absolute = std.fs.path.isAbsoluteWindowsWtf16(prefixed_target_path.span());
+ break :target_path prefixed_target_path.span();
+ };
+
+ // prepare reparse data buffer
+ var buffer: [w.MAXIMUM_REPARSE_DATA_BUFFER_SIZE]u8 = undefined;
+ const buf_len = @sizeOf(SYMLINK_DATA) + final_target_path.len * 4;
+ const header_len = @sizeOf(w.ULONG) + @sizeOf(w.USHORT) * 2;
+ const target_is_absolute = std.fs.path.isAbsoluteWindowsWtf16(final_target_path);
+ const symlink_data = SYMLINK_DATA{
+ .ReparseTag = .SYMLINK,
+ .ReparseDataLength = @intCast(buf_len - header_len),
+ .Reserved = 0,
+ .SubstituteNameOffset = @intCast(final_target_path.len * 2),
+ .SubstituteNameLength = @intCast(final_target_path.len * 2),
+ .PrintNameOffset = 0,
+ .PrintNameLength = @intCast(final_target_path.len * 2),
+ .Flags = if (!target_is_absolute) w.SYMLINK_FLAG_RELATIVE else 0,
+ };
+
+ @memcpy(buffer[0..@sizeOf(SYMLINK_DATA)], std.mem.asBytes(&symlink_data));
+ @memcpy(buffer[@sizeOf(SYMLINK_DATA)..][0 .. final_target_path.len * 2], @as([*]const u8, @ptrCast(final_target_path)));
+ const paths_start = @sizeOf(SYMLINK_DATA) + final_target_path.len * 2;
+ @memcpy(buffer[paths_start..][0 .. final_target_path.len * 2], @as([*]const u8, @ptrCast(final_target_path)));
+ const rc = w.DeviceIoControl(symlink_handle, w.FSCTL.SET_REPARSE_POINT, .{ .in = buffer[0..buf_len] });
+ switch (rc) {
+ .SUCCESS => {},
+ .PRIVILEGE_NOT_HELD => return error.PermissionDenied,
+ .ACCESS_DENIED => return error.AccessDenied,
+ .INVALID_DEVICE_REQUEST => return error.FileSystem,
+ else => return windows.unexpectedStatus(rc),
+ }
+}
+
+fn dirSymLinkWasi(
+ userdata: ?*anyopaque,
+ dir: Dir,
+ target_path: []const u8,
+ sym_link_path: []const u8,
+ flags: Dir.SymLinkFlags,
+) Dir.SymLinkError!void {
+ if (builtin.link_libc) return dirSymLinkPosix(userdata, dir, target_path, sym_link_path, flags);
+
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (std.os.wasi.path_symlink(target_path.ptr, target_path.len, dir.handle, sym_link_path.ptr, sym_link_path.len)) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .BADF => |err| return errnoBug(err),
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .DQUOT => return error.DiskQuota,
+ .EXIST => return error.PathAlreadyExists,
+ .IO => return error.FileSystem,
+ .LOOP => return error.SymLinkLoop,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOTDIR => return error.NotDir,
+ .NOMEM => return error.SystemResources,
+ .NOSPC => return error.NoSpaceLeft,
+ .ROFS => return error.ReadOnlyFileSystem,
+ .NOTCAPABLE => return error.AccessDenied,
+ .ILSEQ => return error.BadPathName,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn dirSymLinkPosix(
+ userdata: ?*anyopaque,
+ dir: Dir,
+ target_path: []const u8,
+ sym_link_path: []const u8,
+ flags: Dir.SymLinkFlags,
+) Dir.SymLinkError!void {
+ _ = flags;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ var target_path_buffer: [posix.PATH_MAX]u8 = undefined;
+ var sym_link_path_buffer: [posix.PATH_MAX]u8 = undefined;
+
+ const target_path_posix = try pathToPosix(target_path, &target_path_buffer);
+ const sym_link_path_posix = try pathToPosix(sym_link_path, &sym_link_path_buffer);
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.symlinkat(target_path_posix, dir.handle, sym_link_path_posix))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .DQUOT => return error.DiskQuota,
+ .EXIST => return error.PathAlreadyExists,
+ .IO => return error.FileSystem,
+ .LOOP => return error.SymLinkLoop,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOTDIR => return error.NotDir,
+ .NOMEM => return error.SystemResources,
+ .NOSPC => return error.NoSpaceLeft,
+ .ROFS => return error.ReadOnlyFileSystem,
+ .ILSEQ => return error.BadPathName,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+const dirReadLink = switch (native_os) {
+ .windows => dirReadLinkWindows,
+ .wasi => dirReadLinkWasi,
+ else => dirReadLinkPosix,
+};
+
+fn dirReadLinkWindows(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, buffer: []u8) Dir.ReadLinkError!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ const w = windows;
+
+ try current_thread.checkCancel();
+
+ var sub_path_w_buf = try windows.sliceToPrefixedFileW(dir.handle, sub_path);
+
+ const result_w = try w.ReadLink(dir.handle, sub_path_w_buf.span(), &sub_path_w_buf.data);
+
+ const len = std.unicode.calcWtf8Len(result_w);
+ if (len > buffer.len) return error.NameTooLong;
+
+ return std.unicode.wtf16LeToWtf8(buffer, result_w);
+}
+
+fn dirReadLinkWasi(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, buffer: []u8) Dir.ReadLinkError!usize {
+ if (builtin.link_libc) return dirReadLinkPosix(userdata, dir, sub_path, buffer);
+
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ var n: usize = undefined;
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (std.os.wasi.path_readlink(dir.handle, sub_path.ptr, sub_path.len, buffer.ptr, buffer.len, &n)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return n;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => return error.NotLink,
+ .IO => return error.FileSystem,
+ .LOOP => return error.SymLinkLoop,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ .NOTDIR => return error.NotDir,
+ .NOTCAPABLE => return error.AccessDenied,
+ .ILSEQ => return error.BadPathName,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn dirReadLinkPosix(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, buffer: []u8) Dir.ReadLinkError!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ var sub_path_buffer: [posix.PATH_MAX]u8 = undefined;
+ const sub_path_posix = try pathToPosix(sub_path, &sub_path_buffer);
+
+ try current_thread.beginSyscall();
+ while (true) {
+ const rc = posix.system.readlinkat(dir.handle, sub_path_posix, buffer.ptr, buffer.len);
+ switch (posix.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ const len: usize = @bitCast(rc);
+ return len;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => return error.NotLink,
+ .IO => return error.FileSystem,
+ .LOOP => return error.SymLinkLoop,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ .NOTDIR => return error.NotDir,
+ .ILSEQ => return error.BadPathName,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+const dirSetPermissions = switch (native_os) {
+ .windows => dirSetPermissionsWindows,
+ else => dirSetPermissionsPosix,
+};
+
+fn dirSetPermissionsWindows(userdata: ?*anyopaque, dir: Dir, permissions: Dir.Permissions) Dir.SetPermissionsError!void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
_ = t;
- posix.close(dir.handle);
+ _ = dir;
+ _ = permissions;
+ @panic("TODO implement dirSetPermissionsWindows");
+}
+
+fn dirSetPermissionsPosix(userdata: ?*anyopaque, dir: Dir, permissions: Dir.Permissions) Dir.SetPermissionsError!void {
+ if (@sizeOf(Dir.Permissions) == 0) return;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ return setPermissionsPosix(current_thread, dir.handle, permissions.toMode());
+}
+
+fn dirSetFilePermissions(
+ userdata: ?*anyopaque,
+ dir: Dir,
+ sub_path: []const u8,
+ permissions: Dir.Permissions,
+ options: Dir.SetFilePermissionsOptions,
+) Dir.SetFilePermissionsError!void {
+ if (@sizeOf(Dir.Permissions) == 0) return;
+ if (is_windows) @panic("TODO implement dirSetFilePermissions windows");
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ var path_buffer: [posix.PATH_MAX]u8 = undefined;
+ const sub_path_posix = try pathToPosix(sub_path, &path_buffer);
+
+ const mode = permissions.toMode();
+ const flags: u32 = if (!options.follow_symlinks) posix.AT.SYMLINK_NOFOLLOW else 0;
+
+ return posixFchmodat(t, current_thread, dir.handle, sub_path_posix, mode, flags);
+}
+
+fn posixFchmodat(
+ t: *Threaded,
+ current_thread: *Thread,
+ dir_fd: posix.fd_t,
+ path: [*:0]const u8,
+ mode: posix.mode_t,
+ flags: u32,
+) Dir.SetFilePermissionsError!void {
+ // No special handling for linux is needed if we can use the libc fallback
+ // or `flags` is empty. Glibc only added the fallback in 2.32.
+ if (have_fchmodat_flags or flags == 0) {
+ try current_thread.beginSyscall();
+ while (true) {
+ const rc = if (have_fchmodat_flags or builtin.link_libc)
+ posix.system.fchmodat(dir_fd, path, mode, flags)
+ else
+ posix.system.fchmodat(dir_fd, path, mode);
+ switch (posix.errno(rc)) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err),
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ACCES => return error.AccessDenied,
+ .IO => return error.InputOutput,
+ .LOOP => return error.SymLinkLoop,
+ .MFILE => return error.ProcessFdQuotaExceeded,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NFILE => return error.SystemFdQuotaExceeded,
+ .NOENT => return error.FileNotFound,
+ .NOTDIR => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ .OPNOTSUPP => return error.OperationUnsupported,
+ .PERM => return error.PermissionDenied,
+ .ROFS => return error.ReadOnlyFileSystem,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ }
+
+ if (@atomicLoad(UseFchmodat2, &t.use_fchmodat2, .monotonic) == .disabled)
+ return fchmodatFallback(current_thread, dir_fd, path, mode);
+
+ comptime assert(native_os == .linux);
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (std.os.linux.errno(std.os.linux.fchmodat2(dir_fd, path, mode, flags))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err),
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ACCES => return error.AccessDenied,
+ .IO => return error.InputOutput,
+ .LOOP => return error.SymLinkLoop,
+ .NOENT => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ .NOTDIR => return error.FileNotFound,
+ .OPNOTSUPP => return error.OperationUnsupported,
+ .PERM => return error.PermissionDenied,
+ .ROFS => return error.ReadOnlyFileSystem,
+ .NOSYS => {
+ @atomicStore(UseFchmodat2, &t.use_fchmodat2, .disabled, .monotonic);
+ return fchmodatFallback(current_thread, dir_fd, path, mode);
+ },
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn fchmodatFallback(
+ current_thread: *Thread,
+ dir_fd: posix.fd_t,
+ path: [*:0]const u8,
+ mode: posix.mode_t,
+) Dir.SetFilePermissionsError!void {
+ comptime assert(native_os == .linux);
+ const use_c = std.c.versionCheck(if (builtin.abi.isAndroid())
+ .{ .major = 30, .minor = 0, .patch = 0 }
+ else
+ .{ .major = 2, .minor = 28, .patch = 0 });
+ const sys = if (use_c) std.c else std.os.linux;
+
+ // Fallback to changing permissions using procfs:
+ //
+ // 1. Open `path` as a `PATH` descriptor.
+ // 2. Stat the fd and check if it isn't a symbolic link.
+ // 3. Generate the procfs reference to the fd via `/proc/self/fd/{fd}`.
+ // 4. Pass the procfs path to `chmod` with the `mode`.
+ try current_thread.beginSyscall();
+ const path_fd: posix.fd_t = while (true) {
+ const rc = posix.system.openat(dir_fd, path, .{
+ .PATH = true,
+ .NOFOLLOW = true,
+ .CLOEXEC = true,
+ }, @as(posix.mode_t, 0));
+ switch (posix.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break @intCast(rc);
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .LOOP => return error.SymLinkLoop,
+ .MFILE => return error.ProcessFdQuotaExceeded,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NFILE => return error.SystemFdQuotaExceeded,
+ .NOENT => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ };
+ defer posix.close(path_fd);
+
+ try current_thread.beginSyscall();
+ const path_mode = while (true) {
+ var statx = std.mem.zeroes(std.os.linux.Statx);
+ switch (sys.errno(sys.statx(path_fd, "", posix.AT.EMPTY_PATH, .{ .TYPE = true }, &statx))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ if (!statx.mask.TYPE) return error.Unexpected;
+ break statx.mode;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .LOOP => return error.SymLinkLoop,
+ .NOMEM => return error.SystemResources,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ };
+
+ // Even though we only wanted TYPE, the kernel can still fill in the additional bits.
+ if ((path_mode & posix.S.IFMT) == posix.S.IFLNK)
+ return error.OperationUnsupported;
+
+ var procfs_buf: ["/proc/self/fd/-2147483648\x00".len]u8 = undefined;
+ const proc_path = std.fmt.bufPrintSentinel(&procfs_buf, "/proc/self/fd/{d}", .{path_fd}, 0) catch unreachable;
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.chmod(proc_path, mode))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .NOENT => return error.OperationUnsupported, // procfs not mounted.
+ .BADF => |err| return errnoBug(err),
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ACCES => return error.AccessDenied,
+ .IO => return error.InputOutput,
+ .LOOP => return error.SymLinkLoop,
+ .NOMEM => return error.SystemResources,
+ .NOTDIR => return error.FileNotFound,
+ .PERM => return error.PermissionDenied,
+ .ROFS => return error.ReadOnlyFileSystem,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+const dirSetOwner = switch (native_os) {
+ .windows => dirSetOwnerUnsupported,
+ else => dirSetOwnerPosix,
+};
+
+fn dirSetOwnerUnsupported(userdata: ?*anyopaque, dir: Dir, owner: ?File.Uid, group: ?File.Gid) Dir.SetOwnerError!void {
+ _ = userdata;
+ _ = dir;
+ _ = owner;
+ _ = group;
+ return error.Unexpected;
+}
+
+fn dirSetOwnerPosix(userdata: ?*anyopaque, dir: Dir, owner: ?File.Uid, group: ?File.Gid) Dir.SetOwnerError!void {
+ if (!have_fchown) return error.Unexpected; // Unsupported OS, don't call this function.
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ const uid = owner orelse ~@as(posix.uid_t, 0);
+ const gid = group orelse ~@as(posix.gid_t, 0);
+ return posixFchown(current_thread, dir.handle, uid, gid);
+}
+
+fn posixFchown(current_thread: *Thread, fd: posix.fd_t, uid: posix.uid_t, gid: posix.gid_t) File.SetOwnerError!void {
+ comptime assert(have_fchown);
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.fchown(fd, uid, gid))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err), // likely fd refers to directory opened without `Dir.OpenOptions.iterate`
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ACCES => return error.AccessDenied,
+ .IO => return error.InputOutput,
+ .LOOP => return error.SymLinkLoop,
+ .NOENT => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ .NOTDIR => return error.FileNotFound,
+ .PERM => return error.PermissionDenied,
+ .ROFS => return error.ReadOnlyFileSystem,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn dirSetFileOwner(
+ userdata: ?*anyopaque,
+ dir: Dir,
+ sub_path: []const u8,
+ owner: ?File.Uid,
+ group: ?File.Gid,
+ options: Dir.SetFileOwnerOptions,
+) Dir.SetFileOwnerError!void {
+ if (!have_fchown) return error.Unexpected; // Unsupported OS, don't call this function.
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ var path_buffer: [posix.PATH_MAX]u8 = undefined;
+ const sub_path_posix = try pathToPosix(sub_path, &path_buffer);
+
+ _ = current_thread;
+ _ = dir;
+ _ = sub_path_posix;
+ _ = owner;
+ _ = group;
+ _ = options;
+ @panic("TODO implement dirSetFileOwner");
+}
+
+const fileSync = switch (native_os) {
+ .windows => fileSyncWindows,
+ .wasi => fileSyncWasi,
+ else => fileSyncPosix,
+};
+
+fn fileSyncWindows(userdata: ?*anyopaque, file: File) File.SyncError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ try current_thread.checkCancel();
+
+ if (windows.kernel32.FlushFileBuffers(file.handle) != 0)
+ return;
+
+ switch (windows.GetLastError()) {
+ .SUCCESS => return,
+ .INVALID_HANDLE => unreachable,
+ .ACCESS_DENIED => return error.AccessDenied, // a sync was performed but the system couldn't update the access time
+ .UNEXP_NET_ERR => return error.InputOutput,
+ else => |err| return windows.unexpectedError(err),
+ }
+}
+
+fn fileSyncPosix(userdata: ?*anyopaque, file: File) File.SyncError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.fsync(file.handle))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ROFS => |err| return errnoBug(err),
+ .IO => return error.InputOutput,
+ .NOSPC => return error.NoSpaceLeft,
+ .DQUOT => return error.DiskQuota,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn fileSyncWasi(userdata: ?*anyopaque, file: File) File.SyncError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (std.os.wasi.fd_sync(file.handle)) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ROFS => |err| return errnoBug(err),
+ .IO => return error.InputOutput,
+ .NOSPC => return error.NoSpaceLeft,
+ .DQUOT => return error.DiskQuota,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn fileIsTty(userdata: ?*anyopaque, file: File) Io.Cancelable!bool {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ return isTty(current_thread, file);
+}
+
+fn isTty(current_thread: *Thread, file: File) Io.Cancelable!bool {
+ if (is_windows) {
+ if (try isCygwinPty(current_thread, file)) return true;
+ try current_thread.checkCancel();
+ var out: windows.DWORD = undefined;
+ return windows.kernel32.GetConsoleMode(file.handle, &out) != 0;
+ }
+
+ if (builtin.link_libc) {
+ try current_thread.beginSyscall();
+ while (true) {
+ const rc = posix.system.isatty(file.handle);
+ switch (posix.errno(rc - 1)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return true;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => {
+ current_thread.endSyscall();
+ return false;
+ },
+ }
+ }
+ }
+
+ if (native_os == .wasi) {
+ var statbuf: std.os.wasi.fdstat_t = undefined;
+ const err = std.os.wasi.fd_fdstat_get(file.handle, &statbuf);
+ if (err != .SUCCESS)
+ return false;
+
+ // A tty is a character device that we can't seek or tell on.
+ if (statbuf.fs_filetype != .CHARACTER_DEVICE)
+ return false;
+ if (statbuf.fs_rights_base.FD_SEEK or statbuf.fs_rights_base.FD_TELL)
+ return false;
+
+ return true;
+ }
+
+ if (native_os == .linux) {
+ const linux = std.os.linux;
+ try current_thread.beginSyscall();
+ while (true) {
+ var wsz: posix.winsize = undefined;
+ const fd: usize = @bitCast(@as(isize, file.handle));
+ const rc = linux.syscall3(.ioctl, fd, linux.T.IOCGWINSZ, @intFromPtr(&wsz));
+ switch (linux.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return true;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => {
+ current_thread.endSyscall();
+ return false;
+ },
+ }
+ }
+ }
+
+ @compileError("unimplemented");
+}
+
+fn fileEnableAnsiEscapeCodes(userdata: ?*anyopaque, file: File) File.EnableAnsiEscapeCodesError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ if (is_windows) {
+ try current_thread.checkCancel();
+
+ // For Windows Terminal, VT Sequences processing is enabled by default.
+ var original_console_mode: windows.DWORD = 0;
+ if (windows.kernel32.GetConsoleMode(file.handle, &original_console_mode) != 0) {
+ if (original_console_mode & windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING != 0) return;
+
+ // For Windows Console, VT Sequences processing support was added in Windows 10 build 14361, but disabled by default.
+ // https://devblogs.microsoft.com/commandline/tmux-support-arrives-for-bash-on-ubuntu-on-windows/
+ //
+ // Note: In Microsoft's example for enabling virtual terminal processing, it
+ // shows attempting to enable `DISABLE_NEWLINE_AUTO_RETURN` as well:
+ // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#example-of-enabling-virtual-terminal-processing
+ // This is avoided because in the old Windows Console, that flag causes \n (as opposed to \r\n)
+ // to behave unexpectedly (the cursor moves down 1 row but remains on the same column).
+ // Additionally, the default console mode in Windows Terminal does not have
+ // `DISABLE_NEWLINE_AUTO_RETURN` set, so by only enabling `ENABLE_VIRTUAL_TERMINAL_PROCESSING`
+ // we end up matching the mode of Windows Terminal.
+ const requested_console_modes = windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING;
+ const console_mode = original_console_mode | requested_console_modes;
+ try current_thread.checkCancel();
+ if (windows.kernel32.SetConsoleMode(file.handle, console_mode) != 0) return;
+ }
+ if (try isCygwinPty(current_thread, file)) return;
+ } else {
+ if (try supportsAnsiEscapeCodes(current_thread, file)) return;
+ }
+ return error.NotTerminalDevice;
+}
+
+fn fileSupportsAnsiEscapeCodes(userdata: ?*anyopaque, file: File) Io.Cancelable!bool {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ return supportsAnsiEscapeCodes(current_thread, file);
+}
+
+fn supportsAnsiEscapeCodes(current_thread: *Thread, file: File) Io.Cancelable!bool {
+ if (is_windows) {
+ try current_thread.checkCancel();
+ var console_mode: windows.DWORD = 0;
+ if (windows.kernel32.GetConsoleMode(file.handle, &console_mode) != 0) {
+ if (console_mode & windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING != 0) return true;
+ }
+ return isCygwinPty(current_thread, file);
+ }
+
+ if (native_os == .wasi) {
+ // WASI sanitizes stdout when fd is a tty so ANSI escape codes will not
+ // be interpreted as actual cursor commands, and stderr is always
+ // sanitized.
+ return false;
+ }
+
+ if (try isTty(current_thread, file)) return true;
+
+ return false;
+}
+
+fn isCygwinPty(current_thread: *Thread, file: File) Io.Cancelable!bool {
+ if (!is_windows) return false;
+
+ const handle = file.handle;
+
+ // If this is a MSYS2/cygwin pty, then it will be a named pipe with a name in one of these formats:
+ // msys-[...]-ptyN-[...]
+ // cygwin-[...]-ptyN-[...]
+ //
+ // Example: msys-1888ae32e00d56aa-pty0-to-master
+
+ // First, just check that the handle is a named pipe.
+ // This allows us to avoid the more costly NtQueryInformationFile call
+ // for handles that aren't named pipes.
+ {
+ try current_thread.checkCancel();
+ var io_status: windows.IO_STATUS_BLOCK = undefined;
+ var device_info: windows.FILE.FS_DEVICE_INFORMATION = undefined;
+ const rc = windows.ntdll.NtQueryVolumeInformationFile(
+ handle,
+ &io_status,
+ &device_info,
+ @sizeOf(windows.FILE.FS_DEVICE_INFORMATION),
+ .Device,
+ );
+ switch (rc) {
+ .SUCCESS => {},
+ else => return false,
+ }
+ if (device_info.DeviceType.FileDevice != .NAMED_PIPE) return false;
+ }
+
+ const name_bytes_offset = @offsetOf(windows.FILE.NAME_INFORMATION, "FileName");
+ // `NAME_MAX` UTF-16 code units (2 bytes each)
+ // This buffer may not be long enough to handle *all* possible paths
+ // (PATH_MAX_WIDE would be necessary for that), but because we only care
+ // about certain paths and we know they must be within a reasonable length,
+ // we can use this smaller buffer and just return false on any error from
+ // NtQueryInformationFile.
+ const num_name_bytes = windows.MAX_PATH * 2;
+ var name_info_bytes align(@alignOf(windows.FILE.NAME_INFORMATION)) = [_]u8{0} ** (name_bytes_offset + num_name_bytes);
+
+ var io_status_block: windows.IO_STATUS_BLOCK = undefined;
+ try current_thread.checkCancel();
+ const rc = windows.ntdll.NtQueryInformationFile(
+ handle,
+ &io_status_block,
+ &name_info_bytes,
+ @intCast(name_info_bytes.len),
+ .Name,
+ );
+ switch (rc) {
+ .SUCCESS => {},
+ .INVALID_PARAMETER => unreachable,
+ else => return false,
+ }
+
+ const name_info: *const windows.FILE_NAME_INFO = @ptrCast(&name_info_bytes);
+ const name_bytes = name_info_bytes[name_bytes_offset .. name_bytes_offset + name_info.FileNameLength];
+ const name_wide = std.mem.bytesAsSlice(u16, name_bytes);
+ // The name we get from NtQueryInformationFile will be prefixed with a '\', e.g. \msys-1888ae32e00d56aa-pty0-to-master
+ return (std.mem.startsWith(u16, name_wide, &[_]u16{ '\\', 'm', 's', 'y', 's', '-' }) or
+ std.mem.startsWith(u16, name_wide, &[_]u16{ '\\', 'c', 'y', 'g', 'w', 'i', 'n', '-' })) and
+ std.mem.indexOf(u16, name_wide, &[_]u16{ '-', 'p', 't', 'y' }) != null;
+}
+
+fn fileSetLength(userdata: ?*anyopaque, file: File, length: u64) File.SetLengthError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ const signed_len: i64 = @bitCast(length);
+ if (signed_len < 0) return error.FileTooBig; // Avoid ambiguous EINVAL errors.
+
+ if (is_windows) {
+ try current_thread.checkCancel();
+
+ var io_status_block: windows.IO_STATUS_BLOCK = undefined;
+ const eof_info: windows.FILE.END_OF_FILE_INFORMATION = .{
+ .EndOfFile = signed_len,
+ };
+
+ const status = windows.ntdll.NtSetInformationFile(
+ file.handle,
+ &io_status_block,
+ &eof_info,
+ @sizeOf(windows.FILE.END_OF_FILE_INFORMATION),
+ .EndOfFile,
+ );
+ switch (status) {
+ .SUCCESS => return,
+ .INVALID_HANDLE => |err| return windows.statusBug(err), // Handle not open for writing.
+ .ACCESS_DENIED => return error.AccessDenied,
+ .USER_MAPPED_FILE => return error.AccessDenied,
+ .INVALID_PARAMETER => return error.FileTooBig,
+ else => return windows.unexpectedStatus(status),
+ }
+ }
+
+ if (native_os == .wasi and !builtin.link_libc) {
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (std.os.wasi.fd_filestat_set_size(file.handle, length)) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .FBIG => return error.FileTooBig,
+ .IO => return error.InputOutput,
+ .PERM => return error.PermissionDenied,
+ .TXTBSY => return error.FileBusy,
+ .BADF => |err| return errnoBug(err), // Handle not open for writing
+ .INVAL => return error.NonResizable,
+ .NOTCAPABLE => return error.AccessDenied,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ }
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(ftruncate_sym(file.handle, signed_len))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .FBIG => return error.FileTooBig,
+ .IO => return error.InputOutput,
+ .PERM => return error.PermissionDenied,
+ .TXTBSY => return error.FileBusy,
+ .BADF => |err| return errnoBug(err), // Handle not open for writing.
+ .INVAL => return error.NonResizable, // This is returned for /dev/null for example.
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn fileSetOwner(userdata: ?*anyopaque, file: File, owner: ?File.Uid, group: ?File.Gid) File.SetOwnerError!void {
+ if (!have_fchown) return error.Unexpected; // Unsupported OS, don't call this function.
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ const uid = owner orelse ~@as(posix.uid_t, 0);
+ const gid = group orelse ~@as(posix.gid_t, 0);
+ return posixFchown(current_thread, file.handle, uid, gid);
+}
+
+fn fileSetPermissions(userdata: ?*anyopaque, file: File, permissions: File.Permissions) File.SetPermissionsError!void {
+ if (@sizeOf(File.Permissions) == 0) return;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ switch (native_os) {
+ .windows => {
+ try current_thread.checkCancel();
+ var io_status_block: windows.IO_STATUS_BLOCK = undefined;
+ const info: windows.FILE.BASIC_INFORMATION = .{
+ .CreationTime = 0,
+ .LastAccessTime = 0,
+ .LastWriteTime = 0,
+ .ChangeTime = 0,
+ .FileAttributes = permissions.toAttributes(),
+ };
+ const status = windows.ntdll.NtSetInformationFile(
+ file.handle,
+ &io_status_block,
+ &info,
+ @sizeOf(windows.FILE.BASIC_INFORMATION),
+ .Basic,
+ );
+ switch (status) {
+ .SUCCESS => return,
+ .INVALID_HANDLE => |err| return windows.statusBug(err),
+ .ACCESS_DENIED => return error.AccessDenied,
+ else => return windows.unexpectedStatus(status),
+ }
+ },
+ .wasi => return error.Unexpected, // Unsupported OS.
+ else => return setPermissionsPosix(current_thread, file.handle, permissions.toMode()),
+ }
+}
+
+fn setPermissionsPosix(current_thread: *Thread, fd: posix.fd_t, mode: posix.mode_t) File.SetPermissionsError!void {
+ comptime assert(have_fchmod);
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.fchmod(fd, mode))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err),
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ACCES => return error.AccessDenied,
+ .IO => return error.InputOutput,
+ .LOOP => return error.SymLinkLoop,
+ .NOENT => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ .NOTDIR => return error.FileNotFound,
+ .PERM => return error.PermissionDenied,
+ .ROFS => return error.ReadOnlyFileSystem,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn dirSetTimestamps(
+ userdata: ?*anyopaque,
+ dir: Dir,
+ sub_path: []const u8,
+ last_accessed: Io.Timestamp,
+ last_modified: Io.Timestamp,
+ options: Dir.SetTimestampsOptions,
+) Dir.SetTimestampsError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ if (is_windows) {
+ @panic("TODO implement dirSetTimestamps windows");
+ }
+
+ if (native_os == .wasi and !builtin.link_libc) {
+ @panic("TODO implement dirSetTimestamps wasi");
+ }
+
+ const times: [2]posix.timespec = .{
+ timestampToPosix(last_accessed.nanoseconds),
+ timestampToPosix(last_modified.nanoseconds),
+ };
+
+ const flags: u32 = if (!options.follow_symlinks) posix.AT.SYMLINK_NOFOLLOW else 0;
+
+ var path_buffer: [posix.PATH_MAX]u8 = undefined;
+ const sub_path_posix = try pathToPosix(sub_path, &path_buffer);
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.utimensat(dir.handle, sub_path_posix, &times, flags))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .BADF => |err| return errnoBug(err), // always a race condition
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ROFS => return error.ReadOnlyFileSystem,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn dirSetTimestampsNow(
+ userdata: ?*anyopaque,
+ dir: Dir,
+ sub_path: []const u8,
+ options: Dir.SetTimestampsOptions,
+) Dir.SetTimestampsError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ if (is_windows) {
+ @panic("TODO implement dirSetTimestampsNow windows");
+ }
+
+ if (native_os == .wasi and !builtin.link_libc) {
+ @panic("TODO implement dirSetTimestampsNow wasi");
+ }
+
+ const flags: u32 = if (!options.follow_symlinks) posix.AT.SYMLINK_NOFOLLOW else 0;
+
+ var path_buffer: [posix.PATH_MAX]u8 = undefined;
+ const sub_path_posix = try pathToPosix(sub_path, &path_buffer);
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.utimensat(dir.handle, sub_path_posix, null, flags))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .BADF => |err| return errnoBug(err), // always a race condition
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ROFS => return error.ReadOnlyFileSystem,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn fileSetTimestamps(
+ userdata: ?*anyopaque,
+ file: File,
+ last_accessed: Io.Timestamp,
+ last_modified: Io.Timestamp,
+) File.SetTimestampsError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ if (is_windows) {
+ try current_thread.checkCancel();
+
+ const atime_ft = windows.nanoSecondsToFileTime(last_accessed);
+ const mtime_ft = windows.nanoSecondsToFileTime(last_modified);
+
+ // https://github.com/ziglang/zig/issues/1840
+ const rc = windows.kernel32.SetFileTime(file.handle, null, &atime_ft, &mtime_ft);
+ if (rc == 0) {
+ switch (windows.GetLastError()) {
+ else => |err| return windows.unexpectedError(err),
+ }
+ }
+ return;
+ }
+
+ const times: [2]posix.timespec = .{
+ timestampToPosix(last_accessed.nanoseconds),
+ timestampToPosix(last_modified.nanoseconds),
+ };
+
+ if (native_os == .wasi and !builtin.link_libc) {
+ const atim = times[0].toTimestamp();
+ const mtim = times[1].toTimestamp();
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (std.os.wasi.fd_filestat_set_times(file.handle, atim, mtim, .{
+ .ATIM = true,
+ .MTIM = true,
+ })) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .BADF => |err| return errnoBug(err), // File descriptor use-after-free.
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ROFS => return error.ReadOnlyFileSystem,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ }
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.futimens(file.handle, &times))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .BADF => |err| return errnoBug(err), // always a race condition
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ROFS => return error.ReadOnlyFileSystem,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn fileSetTimestampsNow(userdata: ?*anyopaque, file: File) File.SetTimestampsError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ if (is_windows) {
+ @panic("TODO implement fileSetTimestampsNow windows");
+ }
+
+ if (native_os == .wasi and !builtin.link_libc) {
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (std.os.wasi.fd_filestat_set_times(file.handle, 0, 0, .{
+ .ATIM_NOW = true,
+ .MTIM_NOW = true,
+ })) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .BADF => |err| return errnoBug(err), // always a race condition
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ROFS => return error.ReadOnlyFileSystem,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ }
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.futimens(file.handle, null))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .PERM => return error.PermissionDenied,
+ .BADF => |err| return errnoBug(err), // always a race condition
+ .FAULT => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err),
+ .ROFS => return error.ReadOnlyFileSystem,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+const windows_lock_range_off: windows.LARGE_INTEGER = 0;
+const windows_lock_range_len: windows.LARGE_INTEGER = 1;
+
+fn fileLock(userdata: ?*anyopaque, file: File, lock: File.Lock) File.LockError!void {
+ if (native_os == .wasi) return error.FileLocksUnsupported;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ if (is_windows) {
+ const exclusive = switch (lock) {
+ .none => {
+ // To match the non-Windows behavior, unlock
+ var io_status_block: windows.IO_STATUS_BLOCK = undefined;
+ const status = windows.ntdll.NtUnlockFile(
+ file.handle,
+ &io_status_block,
+ &windows_lock_range_off,
+ &windows_lock_range_len,
+ 0,
+ );
+ switch (status) {
+ .SUCCESS => {},
+ .RANGE_NOT_LOCKED => {},
+ .ACCESS_VIOLATION => |err| return windows.statusBug(err), // bad io_status_block pointer
+ else => return windows.unexpectedStatus(status),
+ }
+ return;
+ },
+ .shared => false,
+ .exclusive => true,
+ };
+ try current_thread.checkCancel();
+ var io_status_block: windows.IO_STATUS_BLOCK = undefined;
+ const status = windows.ntdll.NtLockFile(
+ file.handle,
+ null,
+ null,
+ null,
+ &io_status_block,
+ &windows_lock_range_off,
+ &windows_lock_range_len,
+ null,
+ windows.FALSE,
+ @intFromBool(exclusive),
+ );
+ switch (status) {
+ .SUCCESS => return,
+ .INSUFFICIENT_RESOURCES => return error.SystemResources,
+ .LOCK_NOT_GRANTED => |err| return windows.statusBug(err), // passed FailImmediately=false
+ .ACCESS_VIOLATION => |err| return windows.statusBug(err), // bad io_status_block pointer
+ else => return windows.unexpectedStatus(status),
+ }
+ }
+
+ const operation: i32 = switch (lock) {
+ .none => posix.LOCK.UN,
+ .shared => posix.LOCK.SH,
+ .exclusive => posix.LOCK.EX,
+ };
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.flock(file.handle, operation))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err), // invalid parameters
+ .NOLCK => return error.SystemResources,
+ .AGAIN => |err| return errnoBug(err),
+ .OPNOTSUPP => return error.FileLocksUnsupported,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn fileTryLock(userdata: ?*anyopaque, file: File, lock: File.Lock) File.LockError!bool {
+ if (native_os == .wasi) return error.FileLocksUnsupported;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ if (is_windows) {
+ const exclusive = switch (lock) {
+ .none => {
+ // To match the non-Windows behavior, unlock
+ var io_status_block: windows.IO_STATUS_BLOCK = undefined;
+ const status = windows.ntdll.NtUnlockFile(
+ file.handle,
+ &io_status_block,
+ &windows_lock_range_off,
+ &windows_lock_range_len,
+ 0,
+ );
+ switch (status) {
+ .SUCCESS => return true,
+ .RANGE_NOT_LOCKED => return false,
+ .ACCESS_VIOLATION => |err| return windows.statusBug(err), // bad io_status_block pointer
+ else => return windows.unexpectedStatus(status),
+ }
+ },
+ .shared => false,
+ .exclusive => true,
+ };
+ try current_thread.checkCancel();
+ var io_status_block: windows.IO_STATUS_BLOCK = undefined;
+ const status = windows.ntdll.NtLockFile(
+ file.handle,
+ null,
+ null,
+ null,
+ &io_status_block,
+ &windows_lock_range_off,
+ &windows_lock_range_len,
+ null,
+ windows.TRUE,
+ @intFromBool(exclusive),
+ );
+ switch (status) {
+ .SUCCESS => return true,
+ .INSUFFICIENT_RESOURCES => return error.SystemResources,
+ .LOCK_NOT_GRANTED => return false,
+ .ACCESS_VIOLATION => |err| return windows.statusBug(err), // bad io_status_block pointer
+ else => return windows.unexpectedStatus(status),
+ }
+ }
+
+ const operation: i32 = switch (lock) {
+ .none => posix.LOCK.UN,
+ .shared => posix.LOCK.SH | posix.LOCK.NB,
+ .exclusive => posix.LOCK.EX | posix.LOCK.NB,
+ };
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.flock(file.handle, operation))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return true;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ .AGAIN => {
+ current_thread.endSyscall();
+ return false;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err), // invalid parameters
+ .NOLCK => return error.SystemResources,
+ .OPNOTSUPP => return error.FileLocksUnsupported,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn fileUnlock(userdata: ?*anyopaque, file: File) void {
+ if (native_os == .wasi) return;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ _ = t;
+
+ if (is_windows) {
+ var io_status_block: windows.IO_STATUS_BLOCK = undefined;
+ const status = windows.ntdll.NtUnlockFile(
+ file.handle,
+ &io_status_block,
+ &windows_lock_range_off,
+ &windows_lock_range_len,
+ 0,
+ );
+ if (is_debug) switch (status) {
+ .SUCCESS => {},
+ .RANGE_NOT_LOCKED => unreachable, // Function asserts unlocked.
+ .ACCESS_VIOLATION => unreachable, // bad io_status_block pointer
+ else => unreachable, // Resource deallocation must succeed.
+ };
+ return;
+ }
+
+ while (true) {
+ switch (posix.errno(posix.system.flock(file.handle, posix.LOCK.UN))) {
+ .SUCCESS => return,
+ .CANCELED, .INTR => continue,
+ .AGAIN => return assert(!is_debug), // unlocking can't block
+ .BADF => return assert(!is_debug), // File descriptor used after closed.
+ .INVAL => return assert(!is_debug), // invalid parameters
+ .NOLCK => return assert(!is_debug), // Resource deallocation.
+ .OPNOTSUPP => return assert(!is_debug), // We already got the lock.
+ else => return assert(!is_debug), // Resource deallocation must succeed.
+ }
+ }
+}
+
+fn fileDowngradeLock(userdata: ?*anyopaque, file: File) File.DowngradeLockError!void {
+ if (native_os == .wasi) return;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ if (is_windows) {
+ try current_thread.checkCancel();
+ // On Windows it works like a semaphore + exclusivity flag. To
+ // implement this function, we first obtain another lock in shared
+ // mode. This changes the exclusivity flag, but increments the
+ // semaphore to 2. So we follow up with an NtUnlockFile which
+ // decrements the semaphore but does not modify the exclusivity flag.
+ var io_status_block: windows.IO_STATUS_BLOCK = undefined;
+ switch (windows.ntdll.NtLockFile(
+ file.handle,
+ null,
+ null,
+ null,
+ &io_status_block,
+ &windows_lock_range_off,
+ &windows_lock_range_len,
+ null,
+ windows.TRUE,
+ windows.FALSE,
+ )) {
+ .SUCCESS => {},
+ .INSUFFICIENT_RESOURCES => |err| return windows.statusBug(err),
+ .LOCK_NOT_GRANTED => |err| return windows.statusBug(err), // File was not locked in exclusive mode.
+ .ACCESS_VIOLATION => |err| return windows.statusBug(err), // bad io_status_block pointer
+ else => |status| return windows.unexpectedStatus(status),
+ }
+ const status = windows.ntdll.NtUnlockFile(
+ file.handle,
+ &io_status_block,
+ &windows_lock_range_off,
+ &windows_lock_range_len,
+ 0,
+ );
+ if (is_debug) switch (status) {
+ .SUCCESS => {},
+ .RANGE_NOT_LOCKED => unreachable, // File was not locked.
+ .ACCESS_VIOLATION => unreachable, // bad io_status_block pointer
+ else => unreachable, // Resource deallocation must succeed.
+ };
+ return;
+ }
+
+ const operation = posix.LOCK.SH | posix.LOCK.NB;
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.flock(file.handle, operation))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .AGAIN => |err| return errnoBug(err), // File was not locked in exclusive mode.
+ .BADF => |err| return errnoBug(err),
+ .INVAL => |err| return errnoBug(err), // invalid parameters
+ .NOLCK => |err| return errnoBug(err), // Lock already obtained.
+ .OPNOTSUPP => |err| return errnoBug(err), // Lock already obtained.
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
}
fn dirOpenDirWasi(
userdata: ?*anyopaque,
- dir: Io.Dir,
+ dir: Dir,
sub_path: []const u8,
- options: Io.Dir.OpenOptions,
-) Io.Dir.OpenError!Io.Dir {
+ options: Dir.OpenOptions,
+) Dir.OpenError!Dir {
if (builtin.link_libc) return dirOpenDirPosix(userdata, dir, sub_path, options);
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
@@ -3128,7 +6543,6 @@ fn dirOpenDirWasi(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -3155,10 +6569,118 @@ fn dirOpenDirWasi(
}
}
-fn fileClose(userdata: ?*anyopaque, file: Io.File) void {
+fn dirHardLink(
+ userdata: ?*anyopaque,
+ old_dir: Dir,
+ old_sub_path: []const u8,
+ new_dir: Dir,
+ new_sub_path: []const u8,
+ options: Dir.HardLinkOptions,
+) Dir.HardLinkError!void {
+ if (is_windows) return error.OperationUnsupported;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ if (native_os == .wasi and !builtin.link_libc) {
+ const flags: std.os.wasi.lookupflags_t = .{
+ .SYMLINK_FOLLOW = options.follow_symlinks,
+ };
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (std.os.wasi.path_link(
+ old_dir.handle,
+ flags,
+ old_sub_path.ptr,
+ old_sub_path.len,
+ new_dir.handle,
+ new_sub_path.ptr,
+ new_sub_path.len,
+ )) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .DQUOT => return error.DiskQuota,
+ .EXIST => return error.PathAlreadyExists,
+ .FAULT => |err| return errnoBug(err),
+ .IO => return error.HardwareFailure,
+ .LOOP => return error.SymLinkLoop,
+ .MLINK => return error.LinkQuotaExceeded,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ .NOSPC => return error.NoSpaceLeft,
+ .NOTDIR => return error.NotDir,
+ .PERM => return error.PermissionDenied,
+ .ROFS => return error.ReadOnlyFileSystem,
+ .XDEV => return error.NotSameFileSystem,
+ .INVAL => |err| return errnoBug(err),
+ .ILSEQ => return error.BadPathName,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ }
+
+ var old_path_buffer: [posix.PATH_MAX]u8 = undefined;
+ var new_path_buffer: [posix.PATH_MAX]u8 = undefined;
+
+ const old_sub_path_posix = try pathToPosix(old_sub_path, &old_path_buffer);
+ const new_sub_path_posix = try pathToPosix(new_sub_path, &new_path_buffer);
+
+ const flags: u32 = if (!options.follow_symlinks) posix.AT.SYMLINK_NOFOLLOW else 0;
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.linkat(
+ old_dir.handle,
+ old_sub_path_posix,
+ new_dir.handle,
+ new_sub_path_posix,
+ flags,
+ ))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .DQUOT => return error.DiskQuota,
+ .EXIST => return error.PathAlreadyExists,
+ .FAULT => |err| return errnoBug(err),
+ .IO => return error.HardwareFailure,
+ .LOOP => return error.SymLinkLoop,
+ .MLINK => return error.LinkQuotaExceeded,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ .NOSPC => return error.NoSpaceLeft,
+ .NOTDIR => return error.NotDir,
+ .PERM => return error.PermissionDenied,
+ .ROFS => return error.ReadOnlyFileSystem,
+ .XDEV => return error.NotSameFileSystem,
+ .INVAL => |err| return errnoBug(err),
+ .ILSEQ => return error.BadPathName,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn fileClose(userdata: ?*anyopaque, files: []const File) void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
_ = t;
- posix.close(file.handle);
+ for (files) |file| posix.close(file.handle);
}
const fileReadStreaming = switch (native_os) {
@@ -3166,7 +6688,7 @@ const fileReadStreaming = switch (native_os) {
else => fileReadStreamingPosix,
};
-fn fileReadStreamingPosix(userdata: ?*anyopaque, file: Io.File, data: [][]u8) Io.File.Reader.Error!usize {
+fn fileReadStreamingPosix(userdata: ?*anyopaque, file: File, data: []const []u8) File.Reader.Error!usize {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
@@ -3179,6 +6701,7 @@ fn fileReadStreamingPosix(userdata: ?*anyopaque, file: Io.File, data: [][]u8) Io
i += 1;
}
}
+ if (i == 0) return 0;
const dest = iovecs_buffer[0..i];
assert(dest[0].len > 0);
@@ -3195,7 +6718,6 @@ fn fileReadStreamingPosix(userdata: ?*anyopaque, file: Io.File, data: [][]u8) Io
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -3229,13 +6751,11 @@ fn fileReadStreamingPosix(userdata: ?*anyopaque, file: Io.File, data: [][]u8) Io
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
.INVAL => |err| return errnoBug(err),
.FAULT => |err| return errnoBug(err),
- .SRCH => return error.ProcessNotFound,
.AGAIN => return error.WouldBlock,
.BADF => |err| {
if (native_os == .wasi) return error.NotOpenForReading; // File operation on directory.
@@ -3255,13 +6775,14 @@ fn fileReadStreamingPosix(userdata: ?*anyopaque, file: Io.File, data: [][]u8) Io
}
}
-fn fileReadStreamingWindows(userdata: ?*anyopaque, file: Io.File, data: [][]u8) Io.File.Reader.Error!usize {
+fn fileReadStreamingWindows(userdata: ?*anyopaque, file: File, data: []const []u8) File.Reader.Error!usize {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
const DWORD = windows.DWORD;
var index: usize = 0;
- while (data[index].len == 0) index += 1;
+ while (index < data.len and data[index].len == 0) index += 1;
+ if (index == data.len) return 0;
const buffer = data[index];
const want_read_count: DWORD = @min(std.math.maxInt(DWORD), buffer.len);
@@ -3284,11 +6805,11 @@ fn fileReadStreamingWindows(userdata: ?*anyopaque, file: Io.File, data: [][]u8)
}
}
-fn fileReadPositionalPosix(userdata: ?*anyopaque, file: Io.File, data: [][]u8, offset: u64) Io.File.ReadPositionalError!usize {
+fn fileReadPositionalPosix(userdata: ?*anyopaque, file: File, data: []const []u8, offset: u64) File.ReadPositionalError!usize {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
- if (!have_preadv) @compileError("TODO");
+ if (!have_preadv) @compileError("TODO implement fileReadPositionalPosix for cursed operating systems that don't support preadv (it's only Haiku)");
var iovecs_buffer: [max_iovecs_len]posix.iovec = undefined;
var i: usize = 0;
@@ -3299,6 +6820,7 @@ fn fileReadPositionalPosix(userdata: ?*anyopaque, file: Io.File, data: [][]u8, o
i += 1;
}
}
+ if (i == 0) return 0;
const dest = iovecs_buffer[0..i];
assert(dest[0].len > 0);
@@ -3315,7 +6837,6 @@ fn fileReadPositionalPosix(userdata: ?*anyopaque, file: Io.File, data: [][]u8, o
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -3353,13 +6874,11 @@ fn fileReadPositionalPosix(userdata: ?*anyopaque, file: Io.File, data: [][]u8, o
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
.INVAL => |err| return errnoBug(err),
.FAULT => |err| return errnoBug(err),
- .SRCH => return error.ProcessNotFound,
.AGAIN => return error.WouldBlock,
.BADF => |err| {
if (native_os == .wasi) return error.NotOpenForReading; // File operation on directory.
@@ -3387,14 +6906,15 @@ const fileReadPositional = switch (native_os) {
else => fileReadPositionalPosix,
};
-fn fileReadPositionalWindows(userdata: ?*anyopaque, file: Io.File, data: [][]u8, offset: u64) Io.File.ReadPositionalError!usize {
+fn fileReadPositionalWindows(userdata: ?*anyopaque, file: File, data: []const []u8, offset: u64) File.ReadPositionalError!usize {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
const DWORD = windows.DWORD;
var index: usize = 0;
- while (data[index].len == 0) index += 1;
+ while (index < data.len and data[index].len == 0) index += 1;
+ if (index == data.len) return 0;
const buffer = data[index];
const want_read_count: DWORD = @min(std.math.maxInt(DWORD), buffer.len);
@@ -3429,24 +6949,16 @@ fn fileReadPositionalWindows(userdata: ?*anyopaque, file: Io.File, data: [][]u8,
}
}
-fn fileSeekBy(userdata: ?*anyopaque, file: Io.File, offset: i64) Io.File.SeekError!void {
- const t: *Threaded = @ptrCast(@alignCast(userdata));
- _ = t;
- _ = file;
- _ = offset;
- @panic("TODO implement fileSeekBy");
-}
-
-fn fileSeekTo(userdata: ?*anyopaque, file: Io.File, offset: u64) Io.File.SeekError!void {
+fn fileSeekBy(userdata: ?*anyopaque, file: File, offset: i64) File.SeekError!void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const current_thread = Thread.getCurrent(t);
const fd = file.handle;
if (native_os == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
+ var result: u64 = undefined;
try current_thread.beginSyscall();
while (true) {
- var result: u64 = undefined;
- switch (posix.errno(posix.system.llseek(fd, offset, &result, posix.SEEK.SET))) {
+ switch (posix.errno(posix.system.llseek(fd, @bitCast(offset), &result, posix.SEEK.CUR))) {
.SUCCESS => {
current_thread.endSyscall();
return;
@@ -3455,7 +6967,6 @@ fn fileSeekTo(userdata: ?*anyopaque, file: Io.File, offset: u64) Io.File.SeekErr
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -3473,13 +6984,43 @@ fn fileSeekTo(userdata: ?*anyopaque, file: Io.File, offset: u64) Io.File.SeekErr
if (native_os == .windows) {
try current_thread.checkCancel();
- return windows.SetFilePointerEx_BEGIN(fd, offset);
+ return windows.SetFilePointerEx_CURRENT(fd, offset);
}
- if (native_os == .wasi and !builtin.link_libc) while (true) {
+ if (native_os == .wasi and !builtin.link_libc) {
var new_offset: std.os.wasi.filesize_t = undefined;
try current_thread.beginSyscall();
- switch (std.os.wasi.fd_seek(fd, @bitCast(offset), .SET, &new_offset)) {
+ while (true) {
+ switch (std.os.wasi.fd_seek(fd, offset, .CUR, &new_offset)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err), // File descriptor used after closed.
+ .INVAL => return error.Unseekable,
+ .OVERFLOW => return error.Unseekable,
+ .SPIPE => return error.Unseekable,
+ .NXIO => return error.Unseekable,
+ .NOTCAPABLE => return error.AccessDenied,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ }
+
+ if (posix.SEEK == void) return error.Unseekable;
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(lseek_sym(fd, offset, posix.SEEK.CUR))) {
.SUCCESS => {
current_thread.endSyscall();
return;
@@ -3488,7 +7029,6 @@ fn fileSeekTo(userdata: ?*anyopaque, file: Io.File, offset: u64) Io.File.SeekErr
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -3497,15 +7037,86 @@ fn fileSeekTo(userdata: ?*anyopaque, file: Io.File, offset: u64) Io.File.SeekErr
.OVERFLOW => return error.Unseekable,
.SPIPE => return error.Unseekable,
.NXIO => return error.Unseekable,
- .NOTCAPABLE => return error.AccessDenied,
else => |err| return posix.unexpectedErrno(err),
}
},
}
- };
+ }
+}
+
+fn fileSeekTo(userdata: ?*anyopaque, file: File, offset: u64) File.SeekError!void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+ const fd = file.handle;
+
+ if (native_os == .windows) {
+ try current_thread.checkCancel();
+ return windows.SetFilePointerEx_BEGIN(fd, offset);
+ }
+
+ if (native_os == .wasi and !builtin.link_libc) {
+ try current_thread.beginSyscall();
+ while (true) {
+ var new_offset: std.os.wasi.filesize_t = undefined;
+ switch (std.os.wasi.fd_seek(fd, @bitCast(offset), .SET, &new_offset)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err), // File descriptor used after closed.
+ .INVAL => return error.Unseekable,
+ .OVERFLOW => return error.Unseekable,
+ .SPIPE => return error.Unseekable,
+ .NXIO => return error.Unseekable,
+ .NOTCAPABLE => return error.AccessDenied,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ }
if (posix.SEEK == void) return error.Unseekable;
+ return posixSeekTo(current_thread, fd, offset);
+}
+
+fn posixSeekTo(current_thread: *Thread, fd: posix.fd_t, offset: u64) File.SeekError!void {
+ if (native_os == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
+ try current_thread.beginSyscall();
+ while (true) {
+ var result: u64 = undefined;
+ switch (posix.errno(posix.system.llseek(fd, offset, &result, posix.SEEK.SET))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .BADF => |err| return errnoBug(err), // File descriptor used after closed.
+ .INVAL => return error.Unseekable,
+ .OVERFLOW => return error.Unseekable,
+ .SPIPE => return error.Unseekable,
+ .NXIO => return error.Unseekable,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ }
+
try current_thread.beginSyscall();
while (true) {
switch (posix.errno(lseek_sym(fd, @bitCast(offset), posix.SEEK.SET))) {
@@ -3517,7 +7128,6 @@ fn fileSeekTo(userdata: ?*anyopaque, file: Io.File, offset: u64) Io.File.SeekErr
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -3533,9 +7143,10 @@ fn fileSeekTo(userdata: ?*anyopaque, file: Io.File, offset: u64) Io.File.SeekErr
}
}
-fn openSelfExe(userdata: ?*anyopaque, flags: Io.File.OpenFlags) Io.File.OpenSelfExeError!Io.File {
+fn processExecutableOpen(userdata: ?*anyopaque, flags: File.OpenFlags) std.process.OpenExecutableError!File {
const t: *Threaded = @ptrCast(@alignCast(userdata));
switch (native_os) {
+ .wasi => return error.OperationUnsupported,
.linux, .serenity => return dirOpenFilePosix(t, .{ .handle = posix.AT.FDCWD }, "/proc/self/exe", flags),
.windows => {
// If ImagePathName is a symlink, then it will contain the path of the symlink,
@@ -3546,34 +7157,1204 @@ fn openSelfExe(userdata: ?*anyopaque, flags: Io.File.OpenFlags) Io.File.OpenSelf
const prefixed_path_w = try windows.wToPrefixedFileW(null, image_path_name);
return dirOpenFileWtf16(t, null, prefixed_path_w.span(), flags);
},
- else => @panic("TODO implement openSelfExe"),
+ .driverkit,
+ .ios,
+ .maccatalyst,
+ .macos,
+ .tvos,
+ .visionos,
+ .watchos,
+ => {
+ // _NSGetExecutablePath() returns a path that might be a symlink to
+ // the executable. Here it does not matter since we open it.
+ var symlink_path_buf: [posix.PATH_MAX + 1]u8 = undefined;
+ var n: u32 = symlink_path_buf.len;
+ const rc = std.c._NSGetExecutablePath(&symlink_path_buf, &n);
+ if (rc != 0) return error.NameTooLong;
+ const symlink_path = std.mem.sliceTo(&symlink_path_buf, 0);
+ return dirOpenFilePosix(t, .cwd(), symlink_path, flags);
+ },
+ else => {
+ var buffer: [Dir.max_path_bytes]u8 = undefined;
+ const n = try processExecutablePath(t, &buffer);
+ buffer[n] = 0;
+ const executable_path = buffer[0..n :0];
+ return dirOpenFilePosix(t, .cwd(), executable_path, flags);
+ },
+ }
+}
+
+fn processExecutablePath(userdata: ?*anyopaque, out_buffer: []u8) std.process.ExecutablePathError!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+
+ switch (native_os) {
+ .driverkit,
+ .ios,
+ .maccatalyst,
+ .macos,
+ .tvos,
+ .visionos,
+ .watchos,
+ => {
+ // _NSGetExecutablePath() returns a path that might be a symlink to
+ // the executable.
+ var symlink_path_buf: [posix.PATH_MAX + 1]u8 = undefined;
+ var n: u32 = symlink_path_buf.len;
+ const rc = std.c._NSGetExecutablePath(&symlink_path_buf, &n);
+ if (rc != 0) return error.NameTooLong;
+ const symlink_path = std.mem.sliceTo(&symlink_path_buf, 0);
+ return Io.Dir.realPathFileAbsolute(ioBasic(t), symlink_path, out_buffer) catch |err| switch (err) {
+ error.NetworkNotFound => unreachable, // Windows-only
+ else => |e| return e,
+ };
+ },
+ .linux, .serenity => return Io.Dir.readLinkAbsolute(ioBasic(t), "/proc/self/exe", out_buffer) catch |err| switch (err) {
+ error.UnsupportedReparsePointType => unreachable, // Windows-only
+ error.NetworkNotFound => unreachable, // Windows-only
+ else => |e| return e,
+ },
+ .illumos => return Io.Dir.readLinkAbsolute(ioBasic(t), "/proc/self/path/a.out", out_buffer) catch |err| switch (err) {
+ error.UnsupportedReparsePointType => unreachable, // Windows-only
+ error.NetworkNotFound => unreachable, // Windows-only
+ else => |e| return e,
+ },
+ .freebsd, .dragonfly => {
+ const current_thread = Thread.getCurrent(t);
+ var mib: [4]c_int = .{ posix.CTL.KERN, posix.KERN.PROC, posix.KERN.PROC_PATHNAME, -1 };
+ var out_len: usize = out_buffer.len;
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.sysctl(&mib, mib.len, out_buffer.ptr, &out_len, null, 0))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return out_len - 1; // discard terminating NUL
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .FAULT => |err| return errnoBug(err),
+ .PERM => return error.PermissionDenied,
+ .NOMEM => return error.SystemResources,
+ .NOENT => |err| return errnoBug(err),
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ },
+ .netbsd => {
+ const current_thread = Thread.getCurrent(t);
+ var mib = [4]c_int{ posix.CTL.KERN, posix.KERN.PROC_ARGS, -1, posix.KERN.PROC_PATHNAME };
+ var out_len: usize = out_buffer.len;
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.sysctl(&mib, mib.len, out_buffer.ptr, &out_len, null, 0))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return out_len - 1; // discard terminating NUL
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .FAULT => |err| return errnoBug(err),
+ .PERM => return error.PermissionDenied,
+ .NOMEM => return error.SystemResources,
+ .NOENT => |err| return errnoBug(err),
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ },
+ .openbsd, .haiku => {
+ // The best we can do on these operating systems is check based on
+ // the first process argument.
+ const argv0 = t.argv0.value orelse return error.OperationUnsupported;
+ if (std.mem.findScalar(u8, argv0, '/') != null) {
+ // argv[0] is a path (relative or absolute): use realpath(3) directly
+ const current_thread = Thread.getCurrent(t);
+ var resolved_buf: [std.c.PATH_MAX]u8 = undefined;
+ try current_thread.beginSyscall();
+ while (true) {
+ if (std.c.realpath(argv0, &resolved_buf)) |p| {
+ assert(p == &resolved_buf);
+ break current_thread.endSyscall();
+ } else switch (@as(std.c.E, @enumFromInt(std.c._errno().*))) {
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .ACCES => return error.AccessDenied,
+ .INVAL => |err| return errnoBug(err), // the pathname argument is a null pointer
+ .IO => return error.InputOutput,
+ .LOOP => return error.SymLinkLoop,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOTDIR => return error.NotDir,
+ .NOMEM => |err| return errnoBug(err), // sufficient storage space is unavailable for allocation
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ const resolved = std.mem.sliceTo(&resolved_buf, 0);
+ if (resolved.len > out_buffer.len)
+ return error.NameTooLong;
+ @memcpy(out_buffer[0..resolved.len], resolved);
+ return resolved.len;
+ } else if (argv0.len != 0) {
+ // argv[0] is not empty (and not a path): search PATH
+ t.scanEnviron();
+ const PATH = t.environ.string.PATH orelse return error.FileNotFound;
+ const current_thread = Thread.getCurrent(t);
+ var it = std.mem.tokenizeScalar(u8, PATH, ':');
+ it: while (it.next()) |dir| {
+ var resolved_path_buf: [std.c.PATH_MAX]u8 = undefined;
+ const resolved_path = std.fmt.bufPrintSentinel(&resolved_path_buf, "{s}/{s}", .{
+ dir, argv0,
+ }, 0) catch continue;
+
+ var resolved_buf: [std.c.PATH_MAX]u8 = undefined;
+ try current_thread.beginSyscall();
+ while (true) {
+ if (std.c.realpath(resolved_path, &resolved_buf)) |p| {
+ assert(p == &resolved_buf);
+ break current_thread.endSyscall();
+ } else switch (@as(std.c.E, @enumFromInt(std.c._errno().*))) {
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ .NAMETOOLONG => {
+ current_thread.endSyscall();
+ return error.NameTooLong;
+ },
+ .NOMEM => {
+ current_thread.endSyscall();
+ return error.SystemResources;
+ },
+ .IO => {
+ current_thread.endSyscall();
+ return error.InputOutput;
+ },
+ .ACCES, .LOOP, .NOENT, .NOTDIR => {
+ current_thread.endSyscall();
+ continue :it;
+ },
+ else => |err| {
+ current_thread.endSyscall();
+ return posix.unexpectedErrno(err);
+ },
+ }
+ }
+ const resolved = std.mem.sliceTo(&resolved_buf, 0);
+ if (resolved.len > out_buffer.len)
+ return error.NameTooLong;
+ @memcpy(out_buffer[0..resolved.len], resolved);
+ return resolved.len;
+ }
+ }
+ return error.FileNotFound;
+ },
+ .windows => {
+ const current_thread = Thread.getCurrent(t);
+ try current_thread.checkCancel();
+ const w = windows;
+ const image_path_unicode_string = &w.peb().ProcessParameters.ImagePathName;
+ const image_path_name = image_path_unicode_string.Buffer.?[0 .. image_path_unicode_string.Length / 2 :0];
+
+ // If ImagePathName is a symlink, then it will contain the path of the
+ // symlink, not the path that the symlink points to. We want the path
+ // that the symlink points to, though, so we need to get the realpath.
+ var path_name_w_buf = try w.wToPrefixedFileW(null, image_path_name);
+
+ const h_file = blk: {
+ const res = w.OpenFile(path_name_w_buf.span(), .{
+ .dir = null,
+ .access_mask = .{
+ .GENERIC = .{ .READ = true },
+ .STANDARD = .{ .SYNCHRONIZE = true },
+ },
+ .creation = .OPEN,
+ .filter = .any,
+ }) catch |err| switch (err) {
+ error.WouldBlock => unreachable,
+ else => |e| return e,
+ };
+ break :blk res;
+ };
+ defer w.CloseHandle(h_file);
+
+ // TODO move GetFinalPathNameByHandle logic into std.Io.Threaded and add cancel checks
+ const wide_slice = try w.GetFinalPathNameByHandle(h_file, .{}, &path_name_w_buf.data);
+
+ const len = std.unicode.calcWtf8Len(wide_slice);
+ if (len > out_buffer.len)
+ return error.NameTooLong;
+
+ const end_index = std.unicode.wtf16LeToWtf8(out_buffer, wide_slice);
+ return end_index;
+ },
+ else => return error.OperationUnsupported,
}
}
fn fileWritePositional(
userdata: ?*anyopaque,
- file: Io.File,
- buffer: [][]const u8,
+ file: File,
+ header: []const u8,
+ data: []const []const u8,
+ splat: usize,
offset: u64,
-) Io.File.WritePositionalError!usize {
+) File.WritePositionalError!usize {
const t: *Threaded = @ptrCast(@alignCast(userdata));
- _ = t;
+ const current_thread = Thread.getCurrent(t);
+
+ if (is_windows) {
+ if (header.len != 0) {
+ return writeFilePositionalWindows(current_thread, file.handle, header, offset);
+ }
+ for (data[0 .. data.len - 1]) |buf| {
+ if (buf.len == 0) continue;
+ return writeFilePositionalWindows(current_thread, file.handle, buf, offset);
+ }
+ const pattern = data[data.len - 1];
+ if (pattern.len == 0 or splat == 0) return 0;
+ return writeFilePositionalWindows(current_thread, file.handle, pattern, offset);
+ }
+
+ var iovecs: [max_iovecs_len]posix.iovec_const = undefined;
+ var iovlen: iovlen_t = 0;
+ addBuf(&iovecs, &iovlen, header);
+ for (data[0 .. data.len - 1]) |bytes| addBuf(&iovecs, &iovlen, bytes);
+ const pattern = data[data.len - 1];
+ if (iovecs.len - iovlen != 0) switch (splat) {
+ 0 => {},
+ 1 => addBuf(&iovecs, &iovlen, pattern),
+ else => switch (pattern.len) {
+ 0 => {},
+ 1 => {
+ var backup_buffer: [splat_buffer_size]u8 = undefined;
+ const splat_buffer = &backup_buffer;
+ const memset_len = @min(splat_buffer.len, splat);
+ const buf = splat_buffer[0..memset_len];
+ @memset(buf, pattern[0]);
+ addBuf(&iovecs, &iovlen, buf);
+ var remaining_splat = splat - buf.len;
+ while (remaining_splat > splat_buffer.len and iovecs.len - iovlen != 0) {
+ assert(buf.len == splat_buffer.len);
+ addBuf(&iovecs, &iovlen, splat_buffer);
+ remaining_splat -= splat_buffer.len;
+ }
+ addBuf(&iovecs, &iovlen, splat_buffer[0..@min(remaining_splat, splat_buffer.len)]);
+ },
+ else => for (0..@min(splat, iovecs.len - iovlen)) |_| {
+ addBuf(&iovecs, &iovlen, pattern);
+ },
+ },
+ };
+
+ if (iovlen == 0) return 0;
+
+ if (native_os == .wasi and !builtin.link_libc) {
+ var n_written: usize = undefined;
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (std.os.wasi.fd_pwrite(file.handle, &iovecs, iovlen, offset, &n_written)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return n_written;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .INVAL => |err| return errnoBug(err),
+ .FAULT => |err| return errnoBug(err),
+ .AGAIN => |err| return errnoBug(err),
+ .BADF => return error.NotOpenForWriting, // can be a race condition.
+ .DESTADDRREQ => |err| return errnoBug(err), // `connect` was never called.
+ .DQUOT => return error.DiskQuota,
+ .FBIG => return error.FileTooBig,
+ .IO => return error.InputOutput,
+ .NOSPC => return error.NoSpaceLeft,
+ .PERM => return error.PermissionDenied,
+ .PIPE => return error.BrokenPipe,
+ .NOTCAPABLE => return error.AccessDenied,
+ .NXIO => return error.Unseekable,
+ .SPIPE => return error.Unseekable,
+ .OVERFLOW => return error.Unseekable,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ }
+
+ try current_thread.beginSyscall();
while (true) {
- _ = file;
- _ = buffer;
- _ = offset;
- @panic("TODO implement fileWritePositional");
+ const rc = pwritev_sym(file.handle, &iovecs, @intCast(iovlen), @bitCast(offset));
+ switch (posix.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return @intCast(rc);
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .INVAL => |err| return errnoBug(err),
+ .FAULT => |err| return errnoBug(err),
+ .AGAIN => return error.WouldBlock,
+ .BADF => return error.NotOpenForWriting, // Usually a race condition.
+ .DESTADDRREQ => |err| return errnoBug(err), // `connect` was never called.
+ .DQUOT => return error.DiskQuota,
+ .FBIG => return error.FileTooBig,
+ .IO => return error.InputOutput,
+ .NOSPC => return error.NoSpaceLeft,
+ .PERM => return error.PermissionDenied,
+ .PIPE => return error.BrokenPipe,
+ .CONNRESET => |err| return errnoBug(err), // Not a socket handle.
+ .BUSY => return error.DeviceBusy,
+ .TXTBSY => return error.FileBusy,
+ .NXIO => return error.Unseekable,
+ .SPIPE => return error.Unseekable,
+ .OVERFLOW => return error.Unseekable,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
}
}
-fn fileWriteStreaming(userdata: ?*anyopaque, file: Io.File, buffer: [][]const u8) Io.File.WriteStreamingError!usize {
+fn writeFilePositionalWindows(
+ current_thread: *Thread,
+ handle: windows.HANDLE,
+ bytes: []const u8,
+ offset: u64,
+) File.WritePositionalError!usize {
+ try current_thread.checkCancel();
+
+ var bytes_written: windows.DWORD = undefined;
+ var overlapped: windows.OVERLAPPED = .{
+ .Internal = 0,
+ .InternalHigh = 0,
+ .DUMMYUNIONNAME = .{
+ .DUMMYSTRUCTNAME = .{
+ .Offset = @truncate(offset),
+ .OffsetHigh = @truncate(offset >> 32),
+ },
+ },
+ .hEvent = null,
+ };
+ const adjusted_len = std.math.lossyCast(u32, bytes.len);
+ if (windows.kernel32.WriteFile(handle, bytes.ptr, adjusted_len, &bytes_written, &overlapped) == 0) {
+ switch (windows.GetLastError()) {
+ .INVALID_USER_BUFFER => return error.SystemResources,
+ .NOT_ENOUGH_MEMORY => return error.SystemResources,
+ .OPERATION_ABORTED => return error.Canceled,
+ .NOT_ENOUGH_QUOTA => return error.SystemResources,
+ .NO_DATA => return error.BrokenPipe,
+ .INVALID_HANDLE => return error.NotOpenForWriting,
+ .LOCK_VIOLATION => return error.LockViolation,
+ .ACCESS_DENIED => return error.AccessDenied,
+ .WORKING_SET_QUOTA => return error.SystemResources,
+ else => |err| return windows.unexpectedError(err),
+ }
+ }
+ return bytes_written;
+}
+
+fn fileWriteStreaming(
+ userdata: ?*anyopaque,
+ file: File,
+ header: []const u8,
+ data: []const []const u8,
+ splat: usize,
+) File.Writer.Error!usize {
const t: *Threaded = @ptrCast(@alignCast(userdata));
- _ = t;
+ const current_thread = Thread.getCurrent(t);
+
+ if (is_windows) {
+ if (header.len != 0) {
+ return writeFileStreamingWindows(current_thread, file.handle, header);
+ }
+ for (data[0 .. data.len - 1]) |buf| {
+ if (buf.len == 0) continue;
+ return writeFileStreamingWindows(current_thread, file.handle, buf);
+ }
+ const pattern = data[data.len - 1];
+ if (pattern.len == 0 or splat == 0) return 0;
+ return writeFileStreamingWindows(current_thread, file.handle, pattern);
+ }
+
+ var iovecs: [max_iovecs_len]posix.iovec_const = undefined;
+ var iovlen: iovlen_t = 0;
+ addBuf(&iovecs, &iovlen, header);
+ for (data[0 .. data.len - 1]) |bytes| addBuf(&iovecs, &iovlen, bytes);
+ const pattern = data[data.len - 1];
+ if (iovecs.len - iovlen != 0) switch (splat) {
+ 0 => {},
+ 1 => addBuf(&iovecs, &iovlen, pattern),
+ else => switch (pattern.len) {
+ 0 => {},
+ 1 => {
+ var backup_buffer: [splat_buffer_size]u8 = undefined;
+ const splat_buffer = &backup_buffer;
+ const memset_len = @min(splat_buffer.len, splat);
+ const buf = splat_buffer[0..memset_len];
+ @memset(buf, pattern[0]);
+ addBuf(&iovecs, &iovlen, buf);
+ var remaining_splat = splat - buf.len;
+ while (remaining_splat > splat_buffer.len and iovecs.len - iovlen != 0) {
+ assert(buf.len == splat_buffer.len);
+ addBuf(&iovecs, &iovlen, splat_buffer);
+ remaining_splat -= splat_buffer.len;
+ }
+ addBuf(&iovecs, &iovlen, splat_buffer[0..@min(remaining_splat, splat_buffer.len)]);
+ },
+ else => for (0..@min(splat, iovecs.len - iovlen)) |_| {
+ addBuf(&iovecs, &iovlen, pattern);
+ },
+ },
+ };
+
+ if (iovlen == 0) return 0;
+
+ if (native_os == .wasi and !builtin.link_libc) {
+ var n_written: usize = undefined;
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (std.os.wasi.fd_write(file.handle, &iovecs, iovlen, &n_written)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return n_written;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .INVAL => |err| return errnoBug(err),
+ .FAULT => |err| return errnoBug(err),
+ .AGAIN => |err| return errnoBug(err),
+ .BADF => return error.NotOpenForWriting, // can be a race condition.
+ .DESTADDRREQ => |err| return errnoBug(err), // `connect` was never called.
+ .DQUOT => return error.DiskQuota,
+ .FBIG => return error.FileTooBig,
+ .IO => return error.InputOutput,
+ .NOSPC => return error.NoSpaceLeft,
+ .PERM => return error.PermissionDenied,
+ .PIPE => return error.BrokenPipe,
+ .NOTCAPABLE => return error.AccessDenied,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+ }
+
+ try current_thread.beginSyscall();
while (true) {
- _ = file;
- _ = buffer;
- @panic("TODO implement fileWriteStreaming");
+ const rc = posix.system.writev(file.handle, &iovecs, @intCast(iovlen));
+ switch (posix.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ return @intCast(rc);
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ switch (e) {
+ .INVAL => |err| return errnoBug(err),
+ .FAULT => |err| return errnoBug(err),
+ .AGAIN => return error.WouldBlock,
+ .BADF => return error.NotOpenForWriting, // Can be a race condition.
+ .DESTADDRREQ => |err| return errnoBug(err), // `connect` was never called.
+ .DQUOT => return error.DiskQuota,
+ .FBIG => return error.FileTooBig,
+ .IO => return error.InputOutput,
+ .NOSPC => return error.NoSpaceLeft,
+ .PERM => return error.PermissionDenied,
+ .PIPE => return error.BrokenPipe,
+ .CONNRESET => |err| return errnoBug(err), // Not a socket handle.
+ .BUSY => return error.DeviceBusy,
+ else => |err| return posix.unexpectedErrno(err),
+ }
+ },
+ }
+ }
+}
+
+fn writeFileStreamingWindows(
+ current_thread: *Thread,
+ handle: windows.HANDLE,
+ bytes: []const u8,
+) File.Writer.Error!usize {
+ try current_thread.checkCancel();
+
+ var bytes_written: windows.DWORD = undefined;
+ const adjusted_len = std.math.lossyCast(u32, bytes.len);
+ if (windows.kernel32.WriteFile(handle, bytes.ptr, adjusted_len, &bytes_written, null) == 0) {
+ switch (windows.GetLastError()) {
+ .INVALID_USER_BUFFER => return error.SystemResources,
+ .NOT_ENOUGH_MEMORY => return error.SystemResources,
+ .OPERATION_ABORTED => return error.Canceled,
+ .NOT_ENOUGH_QUOTA => return error.SystemResources,
+ .NO_DATA => return error.BrokenPipe,
+ .INVALID_HANDLE => return error.NotOpenForWriting,
+ .LOCK_VIOLATION => return error.LockViolation,
+ .ACCESS_DENIED => return error.AccessDenied,
+ .WORKING_SET_QUOTA => return error.SystemResources,
+ else => |err| return windows.unexpectedError(err),
+ }
+ }
+ return bytes_written;
+}
+
+fn fileWriteFileStreaming(
+ userdata: ?*anyopaque,
+ file: File,
+ header: []const u8,
+ file_reader: *File.Reader,
+ limit: Io.Limit,
+) File.Writer.WriteFileError!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const reader_buffered = file_reader.interface.buffered();
+ if (reader_buffered.len >= @intFromEnum(limit)) {
+ const n = try fileWriteStreaming(t, file, header, &.{limit.slice(reader_buffered)}, 1);
+ file_reader.interface.toss(n -| header.len);
+ return n;
+ }
+ const file_limit = @intFromEnum(limit) - reader_buffered.len;
+ const out_fd = file.handle;
+ const in_fd = file_reader.file.handle;
+
+ if (file_reader.size) |size| {
+ if (size - file_reader.pos == 0) {
+ if (reader_buffered.len != 0) {
+ const n = try fileWriteStreaming(t, file, header, &.{limit.slice(reader_buffered)}, 1);
+ file_reader.interface.toss(n -| header.len);
+ return n;
+ } else {
+ return error.EndOfStream;
+ }
+ }
+ }
+
+ if (native_os == .freebsd) sf: {
+ // Try using sendfile on FreeBSD.
+ if (@atomicLoad(UseSendfile, &t.use_sendfile, .monotonic) == .disabled) break :sf;
+ const offset = std.math.cast(std.c.off_t, file_reader.pos) orelse break :sf;
+ var hdtr_data: std.c.sf_hdtr = undefined;
+ var headers: [2]posix.iovec_const = undefined;
+ var headers_i: u8 = 0;
+ if (header.len != 0) {
+ headers[headers_i] = .{ .base = header.ptr, .len = header.len };
+ headers_i += 1;
+ }
+ if (reader_buffered.len != 0) {
+ headers[headers_i] = .{ .base = reader_buffered.ptr, .len = reader_buffered.len };
+ headers_i += 1;
+ }
+ const hdtr: ?*std.c.sf_hdtr = if (headers_i == 0) null else b: {
+ hdtr_data = .{
+ .headers = &headers,
+ .hdr_cnt = headers_i,
+ .trailers = null,
+ .trl_cnt = 0,
+ };
+ break :b &hdtr_data;
+ };
+ var sbytes: std.c.off_t = 0;
+ const nbytes: usize = @min(file_limit, std.math.maxInt(usize));
+ const flags = 0;
+
+ const current_thread = Thread.getCurrent(t);
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(std.c.sendfile(in_fd, out_fd, offset, nbytes, hdtr, &sbytes, flags))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break;
+ },
+ .INVAL, .OPNOTSUPP, .NOTSOCK, .NOSYS => {
+ // Give calling code chance to observe before trying
+ // something else.
+ current_thread.endSyscall();
+ @atomicStore(UseSendfile, &t.use_sendfile, .disabled, .monotonic);
+ return 0;
+ },
+ .INTR, .BUSY => {
+ if (sbytes == 0) {
+ try current_thread.checkCancel();
+ continue;
+ } else {
+ // Even if we are being canceled, there have been side
+ // effects, so it is better to report those side
+ // effects to the caller.
+ current_thread.endSyscall();
+ break;
+ }
+ },
+ .AGAIN => {
+ current_thread.endSyscall();
+ if (sbytes == 0) return error.WouldBlock;
+ break;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ assert(error.Unexpected == switch (e) {
+ .NOTCONN => return error.BrokenPipe,
+ .IO => return error.InputOutput,
+ .PIPE => return error.BrokenPipe,
+ .NOBUFS => return error.SystemResources,
+ .BADF => |err| errnoBug(err),
+ .FAULT => |err| errnoBug(err),
+ else => |err| posix.unexpectedErrno(err),
+ });
+ // Give calling code chance to observe the error before trying
+ // something else.
+ @atomicStore(UseSendfile, &t.use_sendfile, .disabled, .monotonic);
+ return 0;
+ },
+ }
+ }
+ if (sbytes == 0) {
+ file_reader.size = file_reader.pos;
+ return error.EndOfStream;
+ }
+ const ubytes: usize = @intCast(sbytes);
+ file_reader.interface.toss(ubytes -| header.len);
+ return ubytes;
+ }
+
+ if (is_darwin) sf: {
+ // Try using sendfile on macOS.
+ if (@atomicLoad(UseSendfile, &t.use_sendfile, .monotonic) == .disabled) break :sf;
+ const offset = std.math.cast(std.c.off_t, file_reader.pos) orelse break :sf;
+ var hdtr_data: std.c.sf_hdtr = undefined;
+ var headers: [2]posix.iovec_const = undefined;
+ var headers_i: u8 = 0;
+ if (header.len != 0) {
+ headers[headers_i] = .{ .base = header.ptr, .len = header.len };
+ headers_i += 1;
+ }
+ if (reader_buffered.len != 0) {
+ headers[headers_i] = .{ .base = reader_buffered.ptr, .len = reader_buffered.len };
+ headers_i += 1;
+ }
+ const hdtr: ?*std.c.sf_hdtr = if (headers_i == 0) null else b: {
+ hdtr_data = .{
+ .headers = &headers,
+ .hdr_cnt = headers_i,
+ .trailers = null,
+ .trl_cnt = 0,
+ };
+ break :b &hdtr_data;
+ };
+ const max_count = std.math.maxInt(i32); // Avoid EINVAL.
+ var len: std.c.off_t = @min(file_limit, max_count);
+ const flags = 0;
+ const current_thread = Thread.getCurrent(t);
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(std.c.sendfile(in_fd, out_fd, offset, &len, hdtr, flags))) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break;
+ },
+ .OPNOTSUPP, .NOTSOCK, .NOSYS => {
+ // Give calling code chance to observe before trying
+ // something else.
+ current_thread.endSyscall();
+ @atomicStore(UseSendfile, &t.use_sendfile, .disabled, .monotonic);
+ return 0;
+ },
+ .INTR => {
+ if (len == 0) {
+ try current_thread.checkCancel();
+ continue;
+ } else {
+ // Even if we are being canceled, there have been side
+ // effects, so it is better to report those side
+ // effects to the caller.
+ current_thread.endSyscall();
+ break;
+ }
+ },
+ .AGAIN => {
+ current_thread.endSyscall();
+ if (len == 0) return error.WouldBlock;
+ break;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ assert(error.Unexpected == switch (e) {
+ .NOTCONN => return error.BrokenPipe,
+ .IO => return error.InputOutput,
+ .PIPE => return error.BrokenPipe,
+ .BADF => |err| errnoBug(err),
+ .FAULT => |err| errnoBug(err),
+ .INVAL => |err| errnoBug(err),
+ else => |err| posix.unexpectedErrno(err),
+ });
+ // Give calling code chance to observe the error before trying
+ // something else.
+ @atomicStore(UseSendfile, &t.use_sendfile, .disabled, .monotonic);
+ return 0;
+ },
+ }
+ }
+ if (len == 0) {
+ file_reader.size = file_reader.pos;
+ return error.EndOfStream;
+ }
+ const u_len: usize = @bitCast(len);
+ file_reader.interface.toss(u_len -| header.len);
+ return u_len;
+ }
+
+ if (native_os == .linux) sf: {
+ // Try using sendfile on Linux.
+ if (@atomicLoad(UseSendfile, &t.use_sendfile, .monotonic) == .disabled) break :sf;
+ // Linux sendfile does not support headers.
+ if (header.len != 0 or reader_buffered.len != 0) {
+ const n = try fileWriteStreaming(t, file, header, &.{limit.slice(reader_buffered)}, 1);
+ file_reader.interface.toss(n -| header.len);
+ return n;
+ }
+ const max_count = 0x7ffff000; // Avoid EINVAL.
+ var off: std.os.linux.off_t = undefined;
+ const off_ptr: ?*std.os.linux.off_t, const count: usize = switch (file_reader.mode) {
+ .positional => o: {
+ const size = file_reader.getSize() catch return 0;
+ off = std.math.cast(std.os.linux.off_t, file_reader.pos) orelse return error.ReadFailed;
+ break :o .{ &off, @min(@intFromEnum(limit), size - file_reader.pos, max_count) };
+ },
+ .streaming => .{ null, limit.minInt(max_count) },
+ .streaming_simple, .positional_simple => break :sf,
+ .failure => return error.ReadFailed,
+ };
+ const current_thread = Thread.getCurrent(t);
+ try current_thread.beginSyscall();
+ const n: usize = while (true) {
+ const rc = sendfile_sym(out_fd, in_fd, off_ptr, count);
+ switch (posix.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break @intCast(rc);
+ },
+ .NOSYS, .INVAL => {
+ // Give calling code chance to observe before trying
+ // something else.
+ current_thread.endSyscall();
+ @atomicStore(UseSendfile, &t.use_sendfile, .disabled, .monotonic);
+ return 0;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ assert(error.Unexpected == switch (e) {
+ .NOTCONN => return error.BrokenPipe, // `out_fd` is an unconnected socket
+ .AGAIN => return error.WouldBlock,
+ .IO => return error.InputOutput,
+ .PIPE => return error.BrokenPipe,
+ .NOMEM => return error.SystemResources,
+ .NXIO, .SPIPE => {
+ file_reader.mode = file_reader.mode.toStreaming();
+ const pos = file_reader.pos;
+ if (pos != 0) {
+ file_reader.pos = 0;
+ file_reader.seekBy(@intCast(pos)) catch {
+ file_reader.mode = .failure;
+ return error.ReadFailed;
+ };
+ }
+ return 0;
+ },
+ .BADF => |err| errnoBug(err), // Always a race condition.
+ .FAULT => |err| errnoBug(err), // Segmentation fault.
+ .OVERFLOW => |err| errnoBug(err), // We avoid passing too large of a `count`.
+ else => |err| posix.unexpectedErrno(err),
+ });
+ // Give calling code chance to observe the error before trying
+ // something else.
+ @atomicStore(UseSendfile, &t.use_sendfile, .disabled, .monotonic);
+ return 0;
+ },
+ }
+ };
+ if (n == 0) {
+ file_reader.size = file_reader.pos;
+ return error.EndOfStream;
+ }
+ file_reader.pos += n;
+ return n;
+ }
+
+ if (have_copy_file_range) cfr: {
+ if (@atomicLoad(UseCopyFileRange, &t.use_copy_file_range, .monotonic) == .disabled) break :cfr;
+ if (header.len != 0 or reader_buffered.len != 0) {
+ const n = try fileWriteStreaming(t, file, header, &.{limit.slice(reader_buffered)}, 1);
+ file_reader.interface.toss(n -| header.len);
+ return n;
+ }
+ var off_in: i64 = undefined;
+ const off_in_ptr: ?*i64 = switch (file_reader.mode) {
+ .positional_simple, .streaming_simple => return error.Unimplemented,
+ .positional => p: {
+ off_in = @intCast(file_reader.pos);
+ break :p &off_in;
+ },
+ .streaming => null,
+ .failure => return error.ReadFailed,
+ };
+ const current_thread = Thread.getCurrent(t);
+ const n: usize = switch (native_os) {
+ .linux => n: {
+ try current_thread.beginSyscall();
+ while (true) {
+ const rc = linux_copy_file_range_sys.copy_file_range(in_fd, off_in_ptr, out_fd, null, @intFromEnum(limit), 0);
+ switch (linux_copy_file_range_sys.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break :n @intCast(rc);
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ .OPNOTSUPP, .INVAL, .NOSYS => {
+ // Give calling code chance to observe before trying
+ // something else.
+ current_thread.endSyscall();
+ @atomicStore(UseCopyFileRange, &t.use_copy_file_range, .disabled, .monotonic);
+ return 0;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ assert(error.Unexpected == switch (e) {
+ .FBIG => return error.FileTooBig,
+ .IO => return error.InputOutput,
+ .NOMEM => return error.SystemResources,
+ .NOSPC => return error.NoSpaceLeft,
+ .OVERFLOW => |err| errnoBug(err), // We avoid passing too large a count.
+ .PERM => return error.PermissionDenied,
+ .BUSY => return error.DeviceBusy,
+ .TXTBSY => return error.FileBusy,
+ // copy_file_range can still work but not on
+ // this pair of file descriptors.
+ .XDEV => return error.Unimplemented,
+ .ISDIR => |err| errnoBug(err),
+ .BADF => |err| errnoBug(err),
+ else => |err| posix.unexpectedErrno(err),
+ });
+ @atomicStore(UseCopyFileRange, &t.use_copy_file_range, .disabled, .monotonic);
+ return 0;
+ },
+ }
+ }
+ },
+ .freebsd => n: {
+ try current_thread.beginSyscall();
+ while (true) {
+ const rc = std.c.copy_file_range(in_fd, off_in_ptr, out_fd, null, @intFromEnum(limit), 0);
+ switch (std.c.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break :n @intCast(rc);
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ .OPNOTSUPP, .INVAL, .NOSYS => {
+ // Give calling code chance to observe before trying
+ // something else.
+ current_thread.endSyscall();
+ @atomicStore(UseCopyFileRange, &t.use_copy_file_range, .disabled, .monotonic);
+ return 0;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ assert(error.Unexpected == switch (e) {
+ .FBIG => return error.FileTooBig,
+ .IO => return error.InputOutput,
+ .INTEGRITY => return error.CorruptedData,
+ .NOSPC => return error.NoSpaceLeft,
+ .ISDIR => |err| errnoBug(err),
+ .BADF => |err| errnoBug(err),
+ else => |err| posix.unexpectedErrno(err),
+ });
+ @atomicStore(UseCopyFileRange, &t.use_copy_file_range, .disabled, .monotonic);
+ return 0;
+ },
+ }
+ }
+ },
+ else => comptime unreachable,
+ };
+ if (n == 0) {
+ file_reader.size = file_reader.pos;
+ return error.EndOfStream;
+ }
+ file_reader.pos += n;
+ return n;
+ }
+
+ return error.Unimplemented;
+}
+
+fn netWriteFile(
+ userdata: ?*anyopaque,
+ socket_handle: net.Socket.Handle,
+ header: []const u8,
+ file_reader: *File.Reader,
+ limit: Io.Limit,
+) net.Stream.Writer.WriteFileError!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ _ = t;
+ _ = socket_handle;
+ _ = header;
+ _ = file_reader;
+ _ = limit;
+ @panic("TODO implement netWriteFile");
+}
+
+fn netWriteFileUnavailable(
+ userdata: ?*anyopaque,
+ socket_handle: net.Socket.Handle,
+ header: []const u8,
+ file_reader: *File.Reader,
+ limit: Io.Limit,
+) net.Stream.Writer.WriteFileError!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ _ = t;
+ _ = socket_handle;
+ _ = header;
+ _ = file_reader;
+ _ = limit;
+ return error.NetworkDown;
+}
+
+fn fileWriteFilePositional(
+ userdata: ?*anyopaque,
+ file: File,
+ header: []const u8,
+ file_reader: *File.Reader,
+ limit: Io.Limit,
+ offset: u64,
+) File.WriteFilePositionalError!usize {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const reader_buffered = file_reader.interface.buffered();
+ if (reader_buffered.len >= @intFromEnum(limit)) {
+ const n = try fileWritePositional(t, file, header, &.{limit.slice(reader_buffered)}, 1, offset);
+ file_reader.interface.toss(n -| header.len);
+ return n;
+ }
+ const out_fd = file.handle;
+ const in_fd = file_reader.file.handle;
+
+ if (file_reader.size) |size| {
+ if (size - file_reader.pos == 0) {
+ if (reader_buffered.len != 0) {
+ const n = try fileWritePositional(t, file, header, &.{limit.slice(reader_buffered)}, 1, offset);
+ file_reader.interface.toss(n -| header.len);
+ return n;
+ } else {
+ return error.EndOfStream;
+ }
+ }
+ }
+
+ if (have_copy_file_range) cfr: {
+ if (@atomicLoad(UseCopyFileRange, &t.use_copy_file_range, .monotonic) == .disabled) break :cfr;
+ if (header.len != 0 or reader_buffered.len != 0) {
+ const n = try fileWritePositional(t, file, header, &.{limit.slice(reader_buffered)}, 1, offset);
+ file_reader.interface.toss(n -| header.len);
+ return n;
+ }
+ var off_in: i64 = undefined;
+ const off_in_ptr: ?*i64 = switch (file_reader.mode) {
+ .positional_simple, .streaming_simple => return error.Unimplemented,
+ .positional => p: {
+ off_in = @intCast(file_reader.pos);
+ break :p &off_in;
+ },
+ .streaming => null,
+ .failure => return error.ReadFailed,
+ };
+ var off_out: i64 = @intCast(offset);
+ const current_thread = Thread.getCurrent(t);
+ const n: usize = switch (native_os) {
+ .linux => n: {
+ try current_thread.beginSyscall();
+ while (true) {
+ const rc = linux_copy_file_range_sys.copy_file_range(in_fd, off_in_ptr, out_fd, &off_out, @intFromEnum(limit), 0);
+ switch (linux_copy_file_range_sys.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break :n @intCast(rc);
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ .OPNOTSUPP, .INVAL, .NOSYS => {
+ // Give calling code chance to observe before trying
+ // something else.
+ current_thread.endSyscall();
+ @atomicStore(UseCopyFileRange, &t.use_copy_file_range, .disabled, .monotonic);
+ return 0;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ assert(error.Unexpected == switch (e) {
+ .FBIG => return error.FileTooBig,
+ .IO => return error.InputOutput,
+ .NOMEM => return error.SystemResources,
+ .NOSPC => return error.NoSpaceLeft,
+ .OVERFLOW => return error.Unseekable,
+ .NXIO => return error.Unseekable,
+ .SPIPE => return error.Unseekable,
+ .PERM => return error.PermissionDenied,
+ .TXTBSY => return error.FileBusy,
+ // copy_file_range can still work but not on
+ // this pair of file descriptors.
+ .XDEV => return error.Unimplemented,
+ .ISDIR => |err| errnoBug(err),
+ .BADF => |err| errnoBug(err),
+ else => |err| posix.unexpectedErrno(err),
+ });
+ @atomicStore(UseCopyFileRange, &t.use_copy_file_range, .disabled, .monotonic);
+ return 0;
+ },
+ }
+ }
+ },
+ .freebsd => n: {
+ try current_thread.beginSyscall();
+ while (true) {
+ const rc = std.c.copy_file_range(in_fd, off_in_ptr, out_fd, &off_out, @intFromEnum(limit), 0);
+ switch (std.c.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break :n @intCast(rc);
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ .OPNOTSUPP, .INVAL, .NOSYS => {
+ // Give calling code chance to observe before trying
+ // something else.
+ current_thread.endSyscall();
+ @atomicStore(UseCopyFileRange, &t.use_copy_file_range, .disabled, .monotonic);
+ return 0;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ assert(error.Unexpected == switch (e) {
+ .FBIG => return error.FileTooBig,
+ .IO => return error.InputOutput,
+ .INTEGRITY => return error.CorruptedData,
+ .NOSPC => return error.NoSpaceLeft,
+ .OVERFLOW => return error.Unseekable,
+ .NXIO => return error.Unseekable,
+ .SPIPE => return error.Unseekable,
+ .ISDIR => |err| errnoBug(err),
+ .BADF => |err| errnoBug(err),
+ else => |err| posix.unexpectedErrno(err),
+ });
+ @atomicStore(UseCopyFileRange, &t.use_copy_file_range, .disabled, .monotonic);
+ return 0;
+ },
+ }
+ }
+ },
+ else => comptime unreachable,
+ };
+ if (n == 0) {
+ file_reader.size = file_reader.pos;
+ return error.EndOfStream;
+ }
+ file_reader.pos += n;
+ return n;
+ }
+
+ if (is_darwin) fcf: {
+ if (@atomicLoad(UseFcopyfile, &t.use_fcopyfile, .monotonic) == .disabled) break :fcf;
+ if (file_reader.pos != 0) break :fcf;
+ if (offset != 0) break :fcf;
+ if (limit != .unlimited) break :fcf;
+ const size = file_reader.getSize() catch break :fcf;
+ if (header.len != 0 or reader_buffered.len != 0) {
+ const n = try fileWritePositional(t, file, header, &.{limit.slice(reader_buffered)}, 1, offset);
+ file_reader.interface.toss(n -| header.len);
+ return n;
+ }
+ const current_thread = Thread.getCurrent(t);
+ try current_thread.beginSyscall();
+ while (true) {
+ const rc = std.c.fcopyfile(in_fd, out_fd, null, .{ .DATA = true });
+ switch (posix.errno(rc)) {
+ .SUCCESS => {
+ current_thread.endSyscall();
+ break;
+ },
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ .OPNOTSUPP => {
+ // Give calling code chance to observe before trying
+ // something else.
+ current_thread.endSyscall();
+ @atomicStore(UseFcopyfile, &t.use_fcopyfile, .disabled, .monotonic);
+ return 0;
+ },
+ else => |e| {
+ current_thread.endSyscall();
+ assert(error.Unexpected == switch (e) {
+ .NOMEM => return error.SystemResources,
+ .INVAL => |err| errnoBug(err),
+ else => |err| posix.unexpectedErrno(err),
+ });
+ return 0;
+ },
+ }
+ }
+ file_reader.pos = size;
+ return size;
}
+
+ return error.Unimplemented;
}
fn nowPosix(userdata: ?*anyopaque, clock: Io.Clock) Io.Clock.Error!Io.Timestamp {
@@ -3673,7 +8454,6 @@ fn sleepLinux(userdata: ?*anyopaque, timeout: Io.Timeout) Io.SleepError!void {
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -3751,7 +8531,6 @@ fn sleepPosix(userdata: ?*anyopaque, timeout: Io.Timeout) Io.SleepError!void {
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
// This prong handles success as well as unexpected errors.
else => return current_thread.endSyscall(),
}
@@ -3825,7 +8604,6 @@ fn netListenIpPosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -3989,7 +8767,6 @@ fn netListenUnixPosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -4113,7 +8890,6 @@ fn posixBindUnix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -4158,7 +8934,6 @@ fn posixBind(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -4194,7 +8969,6 @@ fn posixConnect(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -4241,7 +9015,6 @@ fn posixConnectUnix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -4286,7 +9059,6 @@ fn posixGetSockName(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -4354,7 +9126,6 @@ fn setSocketOption(current_thread: *Thread, fd: posix.fd_t, level: i32, opt_name
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -4684,7 +9455,6 @@ fn openSocketPosix(
switch (posix.errno(posix.system.fcntl(fd, posix.F.SETFD, @as(usize, posix.FD_CLOEXEC)))) {
.SUCCESS => break,
.INTR => continue,
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |err| {
current_thread.endSyscall();
return posix.unexpectedErrno(err);
@@ -4698,7 +9468,6 @@ fn openSocketPosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -4800,7 +9569,6 @@ fn netAcceptPosix(userdata: ?*anyopaque, listen_fd: net.Socket.Handle) net.Serve
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -4909,7 +9677,6 @@ fn netReadPosix(userdata: ?*anyopaque, fd: net.Socket.Handle, data: [][]u8) net.
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -4942,7 +9709,6 @@ fn netReadPosix(userdata: ?*anyopaque, fd: net.Socket.Handle, data: [][]u8) net.
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -5178,7 +9944,6 @@ fn netSendOne(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -5255,7 +10020,6 @@ fn netSendMany(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -5388,7 +10152,6 @@ fn netReceivePosix(
continue :recv;
},
.INTR => continue,
- .CANCELED => return .{ current_thread.endSyscallCanceled(), message_i },
.FAULT => |err| return .{ errnoBug(err), message_i },
.INVAL => |err| return .{ errnoBug(err), message_i },
@@ -5397,7 +10160,6 @@ fn netReceivePosix(
}
},
.INTR => continue,
- .CANCELED => return .{ current_thread.endSyscallCanceled(), message_i },
.BADF => |err| return .{ errnoBug(err), message_i },
.NFILE => return .{ error.SystemFdQuotaExceeded, message_i },
@@ -5496,7 +10258,7 @@ fn netWritePosix(
addBuf(&iovecs, &msg.iovlen, splat_buffer);
remaining_splat -= splat_buffer.len;
}
- addBuf(&iovecs, &msg.iovlen, splat_buffer[0..remaining_splat]);
+ addBuf(&iovecs, &msg.iovlen, splat_buffer[0..@min(remaining_splat, splat_buffer.len)]);
},
else => for (0..@min(splat, iovecs.len - msg.iovlen)) |_| {
addBuf(&iovecs, &msg.iovlen, pattern);
@@ -5504,6 +10266,7 @@ fn netWritePosix(
},
};
const flags = posix.MSG.NOSIGNAL;
+
try current_thread.beginSyscall();
while (true) {
const rc = posix.system.sendmsg(fd, &msg, flags);
@@ -5516,7 +10279,6 @@ fn netWritePosix(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -5580,7 +10342,7 @@ fn netWriteWindows(
addWsaBuf(&iovecs, &len, splat_buffer);
remaining_splat -= splat_buffer.len;
}
- addWsaBuf(&iovecs, &len, splat_buffer[0..remaining_splat]);
+ addWsaBuf(&iovecs, &len, splat_buffer[0..@min(remaining_splat, splat_buffer.len)]);
},
else => for (0..@min(splat, iovecs.len - len)) |_| {
addWsaBuf(&iovecs, &len, pattern);
@@ -5614,8 +10376,7 @@ fn netWriteWindows(
else => |err| err,
};
switch (wsa_error) {
- .EINTR => continue,
- .ECANCELLED, .E_CANCELLED, .OPERATION_ABORTED => return current_thread.endSyscallCanceled(),
+ .EINTR, .ECANCELLED, .E_CANCELLED, .OPERATION_ABORTED => continue,
.NOTINITIALISED => {
try initializeWsa(t);
continue;
@@ -5667,7 +10428,14 @@ fn netWriteUnavailable(
return error.NetworkDown;
}
-fn addBuf(v: []posix.iovec_const, i: *@FieldType(posix.msghdr_const, "iovlen"), bytes: []const u8) void {
+/// This is either usize or u32. Since, either is fine, let's use the same
+/// `addBuf` function for both writing to a file and sending network messages.
+const iovlen_t = switch (native_os) {
+ .wasi => u32,
+ else => @FieldType(posix.msghdr_const, "iovlen"),
+};
+
+fn addBuf(v: []posix.iovec_const, i: *iovlen_t, bytes: []const u8) void {
// OS checks ptr addr before length so zero length vectors must be omitted.
if (bytes.len == 0) return;
if (v.len - i.* == 0) return;
@@ -5675,18 +10443,18 @@ fn addBuf(v: []posix.iovec_const, i: *@FieldType(posix.msghdr_const, "iovlen"),
i.* += 1;
}
-fn netClose(userdata: ?*anyopaque, handle: net.Socket.Handle) void {
+fn netClose(userdata: ?*anyopaque, handles: []const net.Socket.Handle) void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
_ = t;
switch (native_os) {
- .windows => closeSocketWindows(handle),
- else => posix.close(handle),
+ .windows => for (handles) |handle| closeSocketWindows(handle),
+ else => for (handles) |handle| posix.close(handle),
}
}
-fn netCloseUnavailable(userdata: ?*anyopaque, handle: net.Socket.Handle) void {
+fn netCloseUnavailable(userdata: ?*anyopaque, handles: []const net.Socket.Handle) void {
_ = userdata;
- _ = handle;
+ _ = handles;
unreachable; // How you gonna close something that was impossible to open?
}
@@ -5727,7 +10495,6 @@ fn netInterfaceNameResolve(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
switch (e) {
@@ -5993,7 +10760,7 @@ fn netLookupFallible(
// TODO use dnsres_getaddrinfo
}
- if (native_os.isDarwin()) {
+ if (is_darwin) {
// TODO use CFHostStartInfoResolution / CFHostCancelInfoResolution
}
@@ -6031,7 +10798,6 @@ fn netLookupFallible(
try current_thread.checkCancel();
continue;
},
- .CANCELED => return current_thread.endSyscallCanceled(),
else => |e| {
current_thread.endSyscall();
return posix.unexpectedErrno(e);
@@ -6078,6 +10844,140 @@ fn netLookupFallible(
return error.OptionUnsupported;
}
+fn lockStderr(
+ userdata: ?*anyopaque,
+ buffer: []u8,
+ terminal_mode: ?Io.Terminal.Mode,
+) Io.Cancelable!Io.LockedStderr {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ // Only global mutex since this is Threaded.
+ std.process.stderr_thread_mutex.lock();
+ return initLockedStderr(t, buffer, terminal_mode);
+}
+
+fn tryLockStderr(
+ userdata: ?*anyopaque,
+ buffer: []u8,
+ terminal_mode: ?Io.Terminal.Mode,
+) Io.Cancelable!?Io.LockedStderr {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ // Only global mutex since this is Threaded.
+ if (!std.process.stderr_thread_mutex.tryLock()) return null;
+ return try initLockedStderr(t, buffer, terminal_mode);
+}
+
+fn initLockedStderr(
+ t: *Threaded,
+ buffer: []u8,
+ terminal_mode: ?Io.Terminal.Mode,
+) Io.Cancelable!Io.LockedStderr {
+ if (!t.stderr_writer_initialized) {
+ const io_t = ioBasic(t);
+ if (is_windows) t.stderr_writer.file = .stderr();
+ t.stderr_writer.io = io_t;
+ t.stderr_writer_initialized = true;
+ t.scanEnviron();
+ const NO_COLOR = t.environ.exist.NO_COLOR;
+ const CLICOLOR_FORCE = t.environ.exist.CLICOLOR_FORCE;
+ t.stderr_mode = terminal_mode orelse try .detect(io_t, t.stderr_writer.file, NO_COLOR, CLICOLOR_FORCE);
+ }
+ std.Progress.clearWrittenWithEscapeCodes(&t.stderr_writer) catch |err| switch (err) {
+ error.WriteFailed => switch (t.stderr_writer.err.?) {
+ error.Canceled => |e| return e,
+ else => {},
+ },
+ };
+ t.stderr_writer.interface.flush() catch |err| switch (err) {
+ error.WriteFailed => switch (t.stderr_writer.err.?) {
+ error.Canceled => |e| return e,
+ else => {},
+ },
+ };
+ t.stderr_writer.interface.buffer = buffer;
+ return .{
+ .file_writer = &t.stderr_writer,
+ .terminal_mode = terminal_mode orelse t.stderr_mode,
+ };
+}
+
+fn unlockStderr(userdata: ?*anyopaque) void {
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ t.stderr_writer.interface.flush() catch |err| switch (err) {
+ error.WriteFailed => switch (t.stderr_writer.err.?) {
+ error.Canceled => recancel(t),
+ else => {},
+ },
+ };
+ t.stderr_writer.interface.end = 0;
+ t.stderr_writer.interface.buffer = &.{};
+ std.process.stderr_thread_mutex.unlock();
+}
+
+fn processSetCurrentDir(userdata: ?*anyopaque, dir: Dir) std.process.SetCurrentDirError!void {
+ if (native_os == .wasi) return error.OperationUnsupported;
+ const t: *Threaded = @ptrCast(@alignCast(userdata));
+ const current_thread = Thread.getCurrent(t);
+
+ if (is_windows) {
+ try current_thread.checkCancel();
+ var dir_path_buffer: [windows.PATH_MAX_WIDE]u16 = undefined;
+ // TODO move GetFinalPathNameByHandle logic into std.Io.Threaded and add cancel checks
+ const dir_path = try windows.GetFinalPathNameByHandle(dir.handle, .{}, &dir_path_buffer);
+ const path_len_bytes = std.math.cast(u16, dir_path.len * 2) orelse return error.NameTooLong;
+ try current_thread.checkCancel();
+ var nt_name: windows.UNICODE_STRING = .{
+ .Length = path_len_bytes,
+ .MaximumLength = path_len_bytes,
+ .Buffer = @constCast(dir_path.ptr),
+ };
+ switch (windows.ntdll.RtlSetCurrentDirectory_U(&nt_name)) {
+ .SUCCESS => return,
+ .OBJECT_NAME_INVALID => return error.BadPathName,
+ .OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
+ .OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
+ .NO_MEDIA_IN_DEVICE => return error.NoDevice,
+ .INVALID_PARAMETER => |err| return windows.statusBug(err),
+ .ACCESS_DENIED => return error.AccessDenied,
+ .OBJECT_PATH_SYNTAX_BAD => |err| return windows.statusBug(err),
+ .NOT_A_DIRECTORY => return error.NotDir,
+ else => |status| return windows.unexpectedStatus(status),
+ }
+ }
+
+ if (dir.handle == posix.AT.FDCWD) return;
+
+ try current_thread.beginSyscall();
+ while (true) {
+ switch (posix.errno(posix.system.fchdir(dir.handle))) {
+ .SUCCESS => return current_thread.endSyscall(),
+ .INTR => {
+ try current_thread.checkCancel();
+ continue;
+ },
+ .ACCES => {
+ current_thread.endSyscall();
+ return error.AccessDenied;
+ },
+ .BADF => |err| {
+ current_thread.endSyscall();
+ return errnoBug(err);
+ },
+ .NOTDIR => {
+ current_thread.endSyscall();
+ return error.NotDir;
+ },
+ .IO => {
+ current_thread.endSyscall();
+ return error.FileSystem;
+ },
+ else => |err| {
+ current_thread.endSyscall();
+ return posix.unexpectedErrno(err);
+ },
+ }
+ }
+}
+
pub const PosixAddress = extern union {
any: posix.sockaddr,
in: posix.sockaddr.in,
@@ -6269,14 +11169,30 @@ fn clockToWasi(clock: Io.Clock) std.os.wasi.clockid_t {
};
}
-fn statFromLinux(stx: *const std.os.linux.Statx) Io.File.Stat {
+const linux_statx_mask: std.os.linux.STATX = .{
+ .TYPE = true,
+ .MODE = true,
+ .ATIME = true,
+ .MTIME = true,
+ .CTIME = true,
+ .INO = true,
+ .SIZE = true,
+ .NLINK = true,
+};
+
+fn statFromLinux(stx: *const std.os.linux.Statx) Io.UnexpectedError!File.Stat {
+ const actual_mask_int: u32 = @bitCast(stx.mask);
+ const wanted_mask_int: u32 = @bitCast(linux_statx_mask);
+ if ((actual_mask_int | wanted_mask_int) != actual_mask_int) return error.Unexpected;
+
const atime = stx.atime;
const mtime = stx.mtime;
const ctime = stx.ctime;
return .{
.inode = stx.ino,
+ .nlink = stx.nlink,
.size = stx.size,
- .mode = stx.mode,
+ .permissions = .fromMode(stx.mode),
.kind = switch (stx.mode & std.os.linux.S.IFMT) {
std.os.linux.S.IFDIR => .directory,
std.os.linux.S.IFCHR => .character_device,
@@ -6293,14 +11209,15 @@ fn statFromLinux(stx: *const std.os.linux.Statx) Io.File.Stat {
};
}
-fn statFromPosix(st: *const posix.Stat) Io.File.Stat {
+fn statFromPosix(st: *const posix.Stat) File.Stat {
const atime = st.atime();
const mtime = st.mtime();
const ctime = st.ctime();
return .{
.inode = st.ino,
+ .nlink = st.nlink,
.size = @bitCast(st.size),
- .mode = st.mode,
+ .permissions = .fromMode(st.mode),
.kind = k: {
const m = st.mode & posix.S.IFMT;
switch (m) {
@@ -6327,11 +11244,12 @@ fn statFromPosix(st: *const posix.Stat) Io.File.Stat {
};
}
-fn statFromWasi(st: *const std.os.wasi.filestat_t) Io.File.Stat {
+fn statFromWasi(st: *const std.os.wasi.filestat_t) File.Stat {
return .{
.inode = st.ino,
+ .nlink = st.nlink,
.size = @bitCast(st.size),
- .mode = 0,
+ .permissions = .default_file,
.kind = switch (st.filetype) {
.BLOCK_DEVICE => .block_device,
.CHARACTER_DEVICE => .character_device,
@@ -6352,13 +11270,20 @@ fn timestampFromPosix(timespec: *const posix.timespec) Io.Timestamp {
}
fn timestampToPosix(nanoseconds: i96) posix.timespec {
+ if (builtin.zig_backend == .stage2_wasm) {
+ // Workaround for https://codeberg.org/ziglang/zig/issues/30575
+ return .{
+ .sec = @intCast(@divTrunc(nanoseconds, std.time.ns_per_s)),
+ .nsec = @intCast(@rem(nanoseconds, std.time.ns_per_s)),
+ };
+ }
return .{
.sec = @intCast(@divFloor(nanoseconds, std.time.ns_per_s)),
.nsec = @intCast(@mod(nanoseconds, std.time.ns_per_s)),
};
}
-fn pathToPosix(file_path: []const u8, buffer: *[posix.PATH_MAX]u8) Io.Dir.PathNameError![:0]u8 {
+fn pathToPosix(file_path: []const u8, buffer: *[posix.PATH_MAX]u8) Dir.PathNameError![:0]u8 {
if (std.mem.containsAtLeastScalar2(u8, file_path, 0, 1)) return error.BadPathName;
// >= rather than > to make room for the null byte
if (file_path.len >= buffer.len) return error.NameTooLong;
@@ -6605,7 +11530,7 @@ fn lookupHosts(
options: HostName.LookupOptions,
) !void {
const t_io = io(t);
- const file = Io.File.openAbsolute(t_io, "/etc/hosts", .{}) catch |err| switch (err) {
+ const file = Dir.openFileAbsolute(t_io, "/etc/hosts", .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
error.AccessDenied,
@@ -6809,6 +11734,506 @@ fn initializeWsa(t: *Threaded) error{ NetworkDown, Canceled }!void {
fn doNothingSignalHandler(_: posix.SIG) callconv(.c) void {}
+const pthreads_futex = struct {
+ const c = std.c;
+ const atomic = std.atomic;
+
+ const Event = struct {
+ cond: c.pthread_cond_t,
+ mutex: c.pthread_mutex_t,
+ state: enum { empty, waiting, notified },
+
+ fn init(self: *Event) void {
+ // Use static init instead of pthread_cond/mutex_init() since this is generally faster.
+ self.cond = .{};
+ self.mutex = .{};
+ self.state = .empty;
+ }
+
+ fn deinit(self: *Event) void {
+ // Some platforms reportedly give EINVAL for statically initialized pthread types.
+ const rc = c.pthread_cond_destroy(&self.cond);
+ assert(rc == .SUCCESS or rc == .INVAL);
+
+ const rm = c.pthread_mutex_destroy(&self.mutex);
+ assert(rm == .SUCCESS or rm == .INVAL);
+
+ self.* = undefined;
+ }
+
+ fn wait(self: *Event, timeout: ?u64) error{Timeout}!void {
+ assert(c.pthread_mutex_lock(&self.mutex) == .SUCCESS);
+ defer assert(c.pthread_mutex_unlock(&self.mutex) == .SUCCESS);
+
+ // Early return if the event was already set.
+ if (self.state == .notified) {
+ return;
+ }
+
+ // Compute the absolute timeout if one was specified.
+ // POSIX requires that REALTIME is used by default for the pthread timedwait functions.
+ // This can be changed with pthread_condattr_setclock, but it's an extension and may not be available everywhere.
+ var ts: c.timespec = undefined;
+ if (timeout) |timeout_ns| {
+ ts = std.posix.clock_gettime(c.CLOCK.REALTIME) catch return error.Timeout;
+ ts.sec +|= @as(@TypeOf(ts.sec), @intCast(timeout_ns / std.time.ns_per_s));
+ ts.nsec += @as(@TypeOf(ts.nsec), @intCast(timeout_ns % std.time.ns_per_s));
+
+ if (ts.nsec >= std.time.ns_per_s) {
+ ts.sec +|= 1;
+ ts.nsec -= std.time.ns_per_s;
+ }
+ }
+
+ // Start waiting on the event - there can be only one thread waiting.
+ assert(self.state == .empty);
+ self.state = .waiting;
+
+ while (true) {
+ // Block using either pthread_cond_wait or pthread_cond_timewait if there's an absolute timeout.
+ const rc = blk: {
+ if (timeout == null) break :blk c.pthread_cond_wait(&self.cond, &self.mutex);
+ break :blk c.pthread_cond_timedwait(&self.cond, &self.mutex, &ts);
+ };
+
+ // After waking up, check if the event was set.
+ if (self.state == .notified) {
+ return;
+ }
+
+ assert(self.state == .waiting);
+ switch (rc) {
+ .SUCCESS => {},
+ .TIMEDOUT => {
+ // If timed out, reset the event to avoid the set() thread doing an unnecessary signal().
+ self.state = .empty;
+ return error.Timeout;
+ },
+ .INVAL => recoverableOsBugDetected(), // cond, mutex, and potentially ts should all be valid
+ .PERM => recoverableOsBugDetected(), // mutex is locked when cond_*wait() functions are called
+ else => recoverableOsBugDetected(),
+ }
+ }
+ }
+
+ fn set(self: *Event) void {
+ assert(c.pthread_mutex_lock(&self.mutex) == .SUCCESS);
+ defer assert(c.pthread_mutex_unlock(&self.mutex) == .SUCCESS);
+
+ // Make sure that multiple calls to set() were not done on the same Event.
+ const old_state = self.state;
+ assert(old_state != .notified);
+
+ // Mark the event as set and wake up the waiting thread if there was one.
+ // This must be done while the mutex as the wait() thread could deallocate
+ // the condition variable once it observes the new state, potentially causing a UAF if done unlocked.
+ self.state = .notified;
+ if (old_state == .waiting) {
+ assert(c.pthread_cond_signal(&self.cond) == .SUCCESS);
+ }
+ }
+ };
+
+ const Treap = std.Treap(usize, std.math.order);
+ const Waiter = struct {
+ node: Treap.Node,
+ prev: ?*Waiter,
+ next: ?*Waiter,
+ tail: ?*Waiter,
+ is_queued: bool,
+ event: Event,
+ };
+
+ // An unordered set of Waiters
+ const WaitList = struct {
+ top: ?*Waiter = null,
+ len: usize = 0,
+
+ fn push(self: *WaitList, waiter: *Waiter) void {
+ waiter.next = self.top;
+ self.top = waiter;
+ self.len += 1;
+ }
+
+ fn pop(self: *WaitList) ?*Waiter {
+ const waiter = self.top orelse return null;
+ self.top = waiter.next;
+ self.len -= 1;
+ return waiter;
+ }
+ };
+
+ const WaitQueue = struct {
+ fn insert(treap: *Treap, address: usize, waiter: *Waiter) void {
+ // prepare the waiter to be inserted.
+ waiter.next = null;
+ waiter.is_queued = true;
+
+ // Find the wait queue entry associated with the address.
+ // If there isn't a wait queue on the address, this waiter creates the queue.
+ var entry = treap.getEntryFor(address);
+ const entry_node = entry.node orelse {
+ waiter.prev = null;
+ waiter.tail = waiter;
+ entry.set(&waiter.node);
+ return;
+ };
+
+ // There's a wait queue on the address; get the queue head and tail.
+ const head: *Waiter = @fieldParentPtr("node", entry_node);
+ const tail = head.tail orelse unreachable;
+
+ // Push the waiter to the tail by replacing it and linking to the previous tail.
+ head.tail = waiter;
+ tail.next = waiter;
+ waiter.prev = tail;
+ }
+
+ fn remove(treap: *Treap, address: usize, max_waiters: usize) WaitList {
+ // Find the wait queue associated with this address and get the head/tail if any.
+ var entry = treap.getEntryFor(address);
+ var queue_head: ?*Waiter = if (entry.node) |node| @fieldParentPtr("node", node) else null;
+ const queue_tail = if (queue_head) |head| head.tail else null;
+
+ // Once we're done updating the head, fix it's tail pointer and update the treap's queue head as well.
+ defer entry.set(blk: {
+ const new_head = queue_head orelse break :blk null;
+ new_head.tail = queue_tail;
+ break :blk &new_head.node;
+ });
+
+ var removed = WaitList{};
+ while (removed.len < max_waiters) {
+ // dequeue and collect waiters from their wait queue.
+ const waiter = queue_head orelse break;
+ queue_head = waiter.next;
+ removed.push(waiter);
+
+ // When dequeueing, we must mark is_queued as false.
+ // This ensures that a waiter which calls tryRemove() returns false.
+ assert(waiter.is_queued);
+ waiter.is_queued = false;
+ }
+
+ return removed;
+ }
+
+ fn tryRemove(treap: *Treap, address: usize, waiter: *Waiter) bool {
+ if (!waiter.is_queued) {
+ return false;
+ }
+
+ queue_remove: {
+ // Find the wait queue associated with the address.
+ var entry = blk: {
+ // A waiter without a previous link means it's the queue head that's in the treap so we can avoid lookup.
+ if (waiter.prev == null) {
+ assert(waiter.node.key == address);
+ break :blk treap.getEntryForExisting(&waiter.node);
+ }
+ break :blk treap.getEntryFor(address);
+ };
+
+ // The queue head and tail must exist if we're removing a queued waiter.
+ const head: *Waiter = @fieldParentPtr("node", entry.node orelse unreachable);
+ const tail = head.tail orelse unreachable;
+
+ // A waiter with a previous link is never the head of the queue.
+ if (waiter.prev) |prev| {
+ assert(waiter != head);
+ prev.next = waiter.next;
+
+ // A waiter with both a previous and next link is in the middle.
+ // We only need to update the surrounding waiter's links to remove it.
+ if (waiter.next) |next| {
+ assert(waiter != tail);
+ next.prev = waiter.prev;
+ break :queue_remove;
+ }
+
+ // A waiter with a previous but no next link means it's the tail of the queue.
+ // In that case, we need to update the head's tail reference.
+ assert(waiter == tail);
+ head.tail = waiter.prev;
+ break :queue_remove;
+ }
+
+ // A waiter with no previous link means it's the queue head of queue.
+ // We must replace (or remove) the head waiter reference in the treap.
+ assert(waiter == head);
+ entry.set(blk: {
+ const new_head = waiter.next orelse break :blk null;
+ new_head.tail = head.tail;
+ break :blk &new_head.node;
+ });
+ }
+
+ // Mark the waiter as successfully removed.
+ waiter.is_queued = false;
+ return true;
+ }
+ };
+
+ const Bucket = struct {
+ mutex: c.pthread_mutex_t align(atomic.cache_line) = .{},
+ pending: atomic.Value(usize) = atomic.Value(usize).init(0),
+ treap: Treap = .{},
+
+ // Global array of buckets that addresses map to.
+ // Bucket array size is pretty much arbitrary here, but it must be a power of two for fibonacci hashing.
+ var buckets = [_]Bucket{.{}} ** @bitSizeOf(usize);
+
+ // https://github.com/Amanieu/parking_lot/blob/1cf12744d097233316afa6c8b7d37389e4211756/core/src/parking_lot.rs#L343-L353
+ fn from(address: usize) *Bucket {
+ // The upper `@bitSizeOf(usize)` bits of the fibonacci golden ratio.
+ // Hashing this via (h * k) >> (64 - b) where k=golden-ration and b=bitsize-of-array
+ // evenly lays out h=hash values over the bit range even when the hash has poor entropy (identity-hash for pointers).
+ const max_multiplier_bits = @bitSizeOf(usize);
+ const fibonacci_multiplier = 0x9E3779B97F4A7C15 >> (64 - max_multiplier_bits);
+
+ const max_bucket_bits = @ctz(buckets.len);
+ comptime assert(std.math.isPowerOfTwo(buckets.len));
+
+ const index = (address *% fibonacci_multiplier) >> (max_multiplier_bits - max_bucket_bits);
+ return &buckets[index];
+ }
+ };
+
+ const Address = struct {
+ fn from(ptr: *const u32) usize {
+ // Get the alignment of the pointer.
+ const alignment = @alignOf(atomic.Value(u32));
+ comptime assert(std.math.isPowerOfTwo(alignment));
+
+ // Make sure the pointer is aligned,
+ // then cut off the zero bits from the alignment to get the unique address.
+ const addr = @intFromPtr(ptr);
+ assert(addr & (alignment - 1) == 0);
+ return addr >> @ctz(@as(usize, alignment));
+ }
+ };
+
+ fn wait(ptr: *const u32, expect: u32, timeout: ?u64) error{Timeout}!void {
+ const address = Address.from(ptr);
+ const bucket = Bucket.from(address);
+
+ // Announce that there's a waiter in the bucket before checking the ptr/expect condition.
+ // If the announcement is reordered after the ptr check, the waiter could deadlock:
+ //
+ // - T1: checks ptr == expect which is true
+ // - T2: updates ptr to != expect
+ // - T2: does Futex.wake(), sees no pending waiters, exits
+ // - T1: bumps pending waiters (was reordered after the ptr == expect check)
+ // - T1: goes to sleep and misses both the ptr change and T2's wake up
+ //
+ // acquire barrier to ensure the announcement happens before the ptr check below.
+ var pending = bucket.pending.fetchAdd(1, .acquire);
+ assert(pending < std.math.maxInt(usize));
+
+ // If the wait gets canceled, remove the pending count we previously added.
+ // This is done outside the mutex lock to keep the critical section short in case of contention.
+ var canceled = false;
+ defer if (canceled) {
+ pending = bucket.pending.fetchSub(1, .monotonic);
+ assert(pending > 0);
+ };
+
+ var waiter: Waiter = undefined;
+ {
+ assert(c.pthread_mutex_lock(&bucket.mutex) == .SUCCESS);
+ defer assert(c.pthread_mutex_unlock(&bucket.mutex) == .SUCCESS);
+
+ canceled = @atomicLoad(u32, ptr, .monotonic) != expect;
+ if (canceled) {
+ return;
+ }
+
+ waiter.event.init();
+ WaitQueue.insert(&bucket.treap, address, &waiter);
+ }
+
+ defer {
+ assert(!waiter.is_queued);
+ waiter.event.deinit();
+ }
+
+ waiter.event.wait(timeout) catch {
+ // If we fail to cancel after a timeout, it means a wake() thread
+ // dequeued us and will wake us up. We must wait until the event is
+ // set as that's a signal that the wake() thread won't access the
+ // waiter memory anymore. If we return early without waiting, the
+ // waiter on the stack would be invalidated and the wake() thread
+ // risks a UAF.
+ defer if (!canceled) waiter.event.wait(null) catch unreachable;
+
+ assert(c.pthread_mutex_lock(&bucket.mutex) == .SUCCESS);
+ defer assert(c.pthread_mutex_unlock(&bucket.mutex) == .SUCCESS);
+
+ canceled = WaitQueue.tryRemove(&bucket.treap, address, &waiter);
+ if (canceled) {
+ return error.Timeout;
+ }
+ };
+ }
+
+ fn wake(ptr: *const u32, max_waiters: u32) void {
+ const address = Address.from(ptr);
+ const bucket = Bucket.from(address);
+
+ // Quick check if there's even anything to wake up.
+ // The change to the ptr's value must happen before we check for pending waiters.
+ // If not, the wake() thread could miss a sleeping waiter and have it deadlock:
+ //
+ // - T2: p = has pending waiters (reordered before the ptr update)
+ // - T1: bump pending waiters
+ // - T1: if ptr == expected: sleep()
+ // - T2: update ptr != expected
+ // - T2: p is false from earlier so doesn't wake (T1 missed ptr update and T2 missed T1 sleeping)
+ //
+ // What we really want here is a Release load, but that doesn't exist under the C11 memory model.
+ // We could instead do `bucket.pending.fetchAdd(0, Release) == 0` which achieves effectively the same thing,
+ // LLVM lowers the fetchAdd(0, .release) into an mfence+load which avoids gaining ownership of the cache-line.
+ if (bucket.pending.fetchAdd(0, .release) == 0) {
+ return;
+ }
+
+ // Keep a list of all the waiters notified and wake then up outside the mutex critical section.
+ var notified = WaitList{};
+ defer if (notified.len > 0) {
+ const pending = bucket.pending.fetchSub(notified.len, .monotonic);
+ assert(pending >= notified.len);
+
+ while (notified.pop()) |waiter| {
+ assert(!waiter.is_queued);
+ waiter.event.set();
+ }
+ };
+
+ assert(c.pthread_mutex_lock(&bucket.mutex) == .SUCCESS);
+ defer assert(c.pthread_mutex_unlock(&bucket.mutex) == .SUCCESS);
+
+ // Another pending check again to avoid the WaitQueue lookup if not necessary.
+ if (bucket.pending.load(.monotonic) > 0) {
+ notified = WaitQueue.remove(&bucket.treap, address, max_waiters);
+ }
+ }
+};
+
+fn scanEnviron(t: *Threaded) void {
+ t.mutex.lock();
+ defer t.mutex.unlock();
+
+ if (t.environ.initialized) return;
+ t.environ.initialized = true;
+
+ if (is_windows) {
+ const ptr = windows.peb().ProcessParameters.Environment;
+
+ var i: usize = 0;
+ while (ptr[i] != 0) {
+ const key_start = i;
+
+ // There are some special environment variables that start with =,
+ // so we need a special case to not treat = as a key/value separator
+ // if it's the first character.
+ // https://devblogs.microsoft.com/oldnewthing/20100506-00/?p=14133
+ if (ptr[key_start] == '=') i += 1;
+
+ while (ptr[i] != 0 and ptr[i] != '=') : (i += 1) {}
+ const key_w = ptr[key_start..i];
+ if (std.mem.eql(u16, key_w, &.{ 'N', 'O', '_', 'C', 'O', 'L', 'O', 'R' })) {
+ t.environ.exist.NO_COLOR = true;
+ } else if (std.mem.eql(u16, key_w, &.{ 'C', 'L', 'I', 'C', 'O', 'L', 'O', 'R', '_', 'F', 'O', 'R', 'C', 'E' })) {
+ t.environ.exist.CLICOLOR_FORCE = true;
+ }
+ comptime assert(@sizeOf(Environ.String) == 0);
+
+ while (ptr[i] != 0) : (i += 1) {} // skip over '=' and value
+ i += 1; // skip over null byte
+ }
+ } else if (native_os == .wasi and !builtin.link_libc) {
+ var environ_count: usize = undefined;
+ var environ_buf_size: usize = undefined;
+
+ switch (std.os.wasi.environ_sizes_get(&environ_count, &environ_buf_size)) {
+ .SUCCESS => {},
+ else => |err| {
+ t.environ.err = posix.unexpectedErrno(err);
+ return;
+ },
+ }
+ if (environ_count == 0) return;
+
+ const environ = t.allocator.alloc([*:0]u8, environ_count) catch |err| {
+ t.environ.err = err;
+ return;
+ };
+ defer t.allocator.free(environ);
+ const environ_buf = t.allocator.alloc(u8, environ_buf_size) catch |err| {
+ t.environ.err = err;
+ return;
+ };
+ defer t.allocator.free(environ_buf);
+
+ switch (std.os.wasi.environ_get(environ.ptr, environ_buf.ptr)) {
+ .SUCCESS => {},
+ else => |err| {
+ t.environ.err = posix.unexpectedErrno(err);
+ return;
+ },
+ }
+
+ for (environ) |env| {
+ const pair = std.mem.sliceTo(env, 0);
+ var parts = std.mem.splitScalar(u8, pair, '=');
+ const key = parts.first();
+ if (std.mem.eql(u8, key, "NO_COLOR")) {
+ t.environ.exist.NO_COLOR = true;
+ } else if (std.mem.eql(u8, key, "CLICOLOR_FORCE")) {
+ t.environ.exist.CLICOLOR_FORCE = true;
+ }
+ comptime assert(@sizeOf(Environ.String) == 0);
+ }
+ } else if (builtin.link_libc) {
+ var ptr = std.c.environ;
+ while (ptr[0]) |line| : (ptr += 1) {
+ var line_i: usize = 0;
+ while (line[line_i] != 0 and line[line_i] != '=') : (line_i += 1) {}
+ const key = line[0..line_i];
+
+ var end_i: usize = line_i;
+ while (line[end_i] != 0) : (end_i += 1) {}
+ const value = line[line_i + 1 .. end_i];
+
+ if (std.mem.eql(u8, key, "NO_COLOR")) {
+ t.environ.exist.NO_COLOR = true;
+ } else if (std.mem.eql(u8, key, "CLICOLOR_FORCE")) {
+ t.environ.exist.CLICOLOR_FORCE = true;
+ } else if (@hasField(Environ.String, "PATH") and std.mem.eql(u8, key, "PATH")) {
+ t.environ.string.PATH = value;
+ }
+ }
+ } else {
+ for (t.environ.block) |line| {
+ var line_i: usize = 0;
+ while (line[line_i] != 0 and line[line_i] != '=') : (line_i += 1) {}
+ const key = line[0..line_i];
+
+ var end_i: usize = line_i;
+ while (line[end_i] != 0) : (end_i += 1) {}
+ const value = line[line_i + 1 .. end_i];
+
+ if (std.mem.eql(u8, key, "NO_COLOR")) {
+ t.environ.exist.NO_COLOR = true;
+ } else if (std.mem.eql(u8, key, "CLICOLOR_FORCE")) {
+ t.environ.exist.CLICOLOR_FORCE = true;
+ } else if (@hasField(Environ.String, "PATH") and std.mem.eql(u8, key, "PATH")) {
+ t.environ.string.PATH = value;
+ }
+ }
+ }
+}
+
test {
_ = @import("Threaded/test.zig");
}
diff --git a/lib/std/Io/Threaded/test.zig b/lib/std/Io/Threaded/test.zig
index 9c54c3af1f..8169f6bb37 100644
--- a/lib/std/Io/Threaded/test.zig
+++ b/lib/std/Io/Threaded/test.zig
@@ -1,3 +1,5 @@
+//! Tests belong here if they access internal state of std.Io.Threaded or
+//! otherwise assume details of that particular implementation.
const builtin = @import("builtin");
const std = @import("std");
@@ -11,7 +13,7 @@ test "concurrent vs main prevents deadlock via oversubscription" {
return error.SkipZigTest;
}
- var threaded: Io.Threaded = .init(std.testing.allocator);
+ var threaded: Io.Threaded = .init(std.testing.allocator, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -44,7 +46,7 @@ test "concurrent vs concurrent prevents deadlock via oversubscription" {
return error.SkipZigTest;
}
- var threaded: Io.Threaded = .init(std.testing.allocator);
+ var threaded: Io.Threaded = .init(std.testing.allocator, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -78,7 +80,7 @@ test "async/concurrent context and result alignment" {
var buffer: [2048]u8 align(@alignOf(ByteArray512)) = undefined;
var fba: std.heap.FixedBufferAllocator = .init(&buffer);
- var threaded: std.Io.Threaded = .init(fba.allocator());
+ var threaded: std.Io.Threaded = .init(fba.allocator(), .{});
defer threaded.deinit();
const io = threaded.io();
@@ -111,7 +113,7 @@ test "Group.async context alignment" {
var buffer: [2048]u8 align(@alignOf(ByteArray512)) = undefined;
var fba: std.heap.FixedBufferAllocator = .init(&buffer);
- var threaded: std.Io.Threaded = .init(fba.allocator());
+ var threaded: std.Io.Threaded = .init(fba.allocator(), .{});
defer threaded.deinit();
const io = threaded.io();
@@ -131,7 +133,7 @@ fn returnArray() [32]u8 {
}
test "async with array return type" {
- var threaded: std.Io.Threaded = .init(std.testing.allocator);
+ var threaded: std.Io.Threaded = .init(std.testing.allocator, .{});
defer threaded.deinit();
const io = threaded.io();
diff --git a/lib/std/Io/Writer.zig b/lib/std/Io/Writer.zig
index f49ef8eb67..49ca6056d4 100644
--- a/lib/std/Io/Writer.zig
+++ b/lib/std/Io/Writer.zig
@@ -1,7 +1,8 @@
+const Writer = @This();
+
const builtin = @import("builtin");
const native_endian = builtin.target.cpu.arch.endian();
-const Writer = @This();
const std = @import("../std.zig");
const assert = std.debug.assert;
const Limit = std.Io.Limit;
@@ -960,7 +961,7 @@ pub fn sendFileAll(w: *Writer, file_reader: *File.Reader, limit: Limit) FileAllE
const n = sendFile(w, file_reader, .limited(remaining)) catch |err| switch (err) {
error.EndOfStream => break,
error.Unimplemented => {
- file_reader.mode = file_reader.mode.toReading();
+ file_reader.mode = file_reader.mode.toSimple();
remaining -= try w.sendFileReadingAll(file_reader, .limited(remaining));
break;
},
@@ -2834,14 +2835,14 @@ test "discarding sendFile" {
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
- const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
- defer file.close();
+ const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true });
+ defer file.close(io);
var r_buffer: [256]u8 = undefined;
- var file_writer: std.fs.File.Writer = .init(file, &r_buffer);
+ var file_writer: File.Writer = .init(file, io, &r_buffer);
try file_writer.interface.writeByte('h');
try file_writer.interface.flush();
- var file_reader = file_writer.moveToReader(io);
+ var file_reader = file_writer.moveToReader();
try file_reader.seekTo(0);
var w_buffer: [256]u8 = undefined;
@@ -2856,14 +2857,14 @@ test "allocating sendFile" {
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
- const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
- defer file.close();
+ const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true });
+ defer file.close(io);
var r_buffer: [2]u8 = undefined;
- var file_writer: std.fs.File.Writer = .init(file, &r_buffer);
+ var file_writer: File.Writer = .init(file, io, &r_buffer);
try file_writer.interface.writeAll("abcd");
try file_writer.interface.flush();
- var file_reader = file_writer.moveToReader(io);
+ var file_reader = file_writer.moveToReader();
try file_reader.seekTo(0);
try file_reader.interface.fill(2);
@@ -2880,14 +2881,14 @@ test sendFileReading {
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
- const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
- defer file.close();
+ const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true });
+ defer file.close(io);
var r_buffer: [2]u8 = undefined;
- var file_writer: std.fs.File.Writer = .init(file, &r_buffer);
+ var file_writer: File.Writer = .init(file, io, &r_buffer);
try file_writer.interface.writeAll("abcd");
try file_writer.interface.flush();
- var file_reader = file_writer.moveToReader(io);
+ var file_reader = file_writer.moveToReader();
try file_reader.seekTo(0);
try file_reader.interface.fill(2);
diff --git a/lib/std/Io/net.zig b/lib/std/Io/net.zig
index 65d2dfd5e4..8b1523fbd3 100644
--- a/lib/std/Io/net.zig
+++ b/lib/std/Io/net.zig
@@ -1043,7 +1043,11 @@ pub const Socket = struct {
/// Leaves `address` in a valid state.
pub fn close(s: *const Socket, io: Io) void {
- io.vtable.netClose(io.userdata, s.handle);
+ io.vtable.netClose(io.userdata, (&s.handle)[0..1]);
+ }
+
+ pub fn closeMany(io: Io, sockets: []const Socket) void {
+ io.vtable.netClose(io.userdata, sockets);
}
pub const SendError = error{
@@ -1184,7 +1188,7 @@ pub const Stream = struct {
const max_iovecs_len = 8;
pub fn close(s: *const Stream, io: Io) void {
- io.vtable.netClose(io.userdata, s.socket.handle);
+ io.vtable.netClose(io.userdata, (&s.socket.handle)[0..1]);
}
pub const Reader = struct {
@@ -1256,6 +1260,7 @@ pub const Stream = struct {
interface: Io.Writer,
stream: Stream,
err: ?Error = null,
+ write_file_err: ?WriteFileError = null,
pub const Error = error{
/// Another TCP Fast Open is already in progress.
@@ -1285,12 +1290,19 @@ pub const Stream = struct {
SocketNotBound,
} || Io.UnexpectedError || Io.Cancelable;
+ pub const WriteFileError = error{
+ NetworkDown,
+ } || Io.Cancelable || Io.UnexpectedError;
+
pub fn init(stream: Stream, io: Io, buffer: []u8) Writer {
return .{
.io = io,
.stream = stream,
.interface = .{
- .vtable = &.{ .drain = drain },
+ .vtable = &.{
+ .drain = drain,
+ .sendFile = sendFile,
+ },
.buffer = buffer,
},
};
@@ -1307,6 +1319,13 @@ pub const Stream = struct {
};
return io_w.consume(n);
}
+
+ fn sendFile(io_w: *Io.Writer, file_reader: *Io.File.Reader, limit: Io.Limit) Io.Writer.FileError!usize {
+ _ = io_w;
+ _ = file_reader;
+ _ = limit;
+ return error.Unimplemented; // TODO
+ }
};
pub fn reader(stream: Stream, io: Io, buffer: []u8) Reader {
diff --git a/lib/std/Io/net/HostName.zig b/lib/std/Io/net/HostName.zig
index 628a97d1f8..84484b9dc1 100644
--- a/lib/std/Io/net/HostName.zig
+++ b/lib/std/Io/net/HostName.zig
@@ -343,7 +343,7 @@ pub const ResolvConf = struct {
.attempts = 2,
};
- const file = Io.File.openAbsolute(io, "/etc/resolv.conf", .{}) catch |err| switch (err) {
+ const file = Io.Dir.openFileAbsolute(io, "/etc/resolv.conf", .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
error.AccessDenied,
diff --git a/lib/std/Io/net/test.zig b/lib/std/Io/net/test.zig
index e234a9edde..6ef8c15f4f 100644
--- a/lib/std/Io/net/test.zig
+++ b/lib/std/Io/net/test.zig
@@ -232,8 +232,10 @@ test "listen on an in use port" {
fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
+ const io = testing.io;
+
const connection = try net.tcpConnectToHost(allocator, name, port);
- defer connection.close();
+ defer connection.close(io);
var buf: [100]u8 = undefined;
const len = try connection.read(&buf);
@@ -244,8 +246,10 @@ fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyer
fn testClient(addr: net.IpAddress) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
+ const io = testing.io;
+
const socket_file = try net.tcpConnectToAddress(addr);
- defer socket_file.close();
+ defer socket_file.close(io);
var buf: [100]u8 = undefined;
const len = try socket_file.read(&buf);
@@ -267,6 +271,7 @@ test "listen on a unix socket, send bytes, receive bytes" {
if (builtin.single_threaded) return error.SkipZigTest;
if (!net.has_unix_sockets) return error.SkipZigTest;
if (builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/25983
+ if (builtin.cpu.arch == .mipsel) return error.SkipZigTest; // TODO
const io = testing.io;
@@ -274,7 +279,7 @@ test "listen on a unix socket, send bytes, receive bytes" {
defer testing.allocator.free(socket_path);
const socket_addr = try net.UnixAddress.init(socket_path);
- defer std.fs.cwd().deleteFile(socket_path) catch {};
+ defer Io.Dir.cwd().deleteFile(io, socket_path) catch {};
var server = try socket_addr.listen(io, .{});
defer server.socket.close(io);
@@ -330,7 +335,7 @@ test "non-blocking tcp server" {
try testing.expectError(error.WouldBlock, accept_err);
const socket_file = try net.tcpConnectToAddress(server.socket.address);
- defer socket_file.close();
+ defer socket_file.close(io);
var stream = try server.accept(io);
defer stream.close(io);
diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig
index f7965ed14e..796800f000 100644
--- a/lib/std/Io/test.zig
+++ b/lib/std/Io/test.zig
@@ -3,16 +3,17 @@ const native_endian = builtin.cpu.arch.endian();
const std = @import("std");
const Io = std.Io;
-const testing = std.testing;
-const expect = std.testing.expect;
-const expectEqual = std.testing.expectEqual;
-const expectError = std.testing.expectError;
const DefaultPrng = std.Random.DefaultPrng;
const mem = std.mem;
const fs = std.fs;
-const File = std.fs.File;
+const File = std.Io.File;
const assert = std.debug.assert;
+const testing = std.testing;
+const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
+const expectError = std.testing.expectError;
+const expectEqualStrings = std.testing.expectEqualStrings;
const tmpDir = std.testing.tmpDir;
test "write a file, read it, then delete it" {
@@ -27,10 +28,10 @@ test "write a file, read it, then delete it" {
random.bytes(data[0..]);
const tmp_file_name = "temp_test_file.txt";
{
- var file = try tmp.dir.createFile(tmp_file_name, .{});
- defer file.close();
+ var file = try tmp.dir.createFile(io, tmp_file_name, .{});
+ defer file.close(io);
- var file_writer = file.writer(&.{});
+ var file_writer = file.writer(io, &.{});
const st = &file_writer.interface;
try st.print("begin", .{});
try st.writeAll(&data);
@@ -40,14 +41,14 @@ test "write a file, read it, then delete it" {
{
// Make sure the exclusive flag is honored.
- try expectError(File.OpenError.PathAlreadyExists, tmp.dir.createFile(tmp_file_name, .{ .exclusive = true }));
+ try expectError(File.OpenError.PathAlreadyExists, tmp.dir.createFile(io, tmp_file_name, .{ .exclusive = true }));
}
{
- var file = try tmp.dir.openFile(tmp_file_name, .{});
- defer file.close();
+ var file = try tmp.dir.openFile(io, tmp_file_name, .{});
+ defer file.close(io);
- const file_size = try file.getEndPos();
+ const file_size = try file.length(io);
const expected_file_size: u64 = "begin".len + data.len + "end".len;
try expectEqual(expected_file_size, file_size);
@@ -60,71 +61,126 @@ test "write a file, read it, then delete it" {
try expect(mem.eql(u8, contents["begin".len .. contents.len - "end".len], &data));
try expect(mem.eql(u8, contents[contents.len - "end".len ..], "end"));
}
- try tmp.dir.deleteFile(tmp_file_name);
+ try tmp.dir.deleteFile(io, tmp_file_name);
}
-test "File seek ops" {
+test "File.Writer.seekTo" {
var tmp = tmpDir(.{});
defer tmp.cleanup();
+ const io = testing.io;
+
+ var data: [8192]u8 = undefined;
+ @memset(&data, 0x55);
+
const tmp_file_name = "temp_test_file.txt";
- var file = try tmp.dir.createFile(tmp_file_name, .{});
- defer file.close();
-
- try file.writeAll(&([_]u8{0x55} ** 8192));
-
- // Seek to the end
- try file.seekFromEnd(0);
- try expect((try file.getPos()) == try file.getEndPos());
- // Negative delta
- try file.seekBy(-4096);
- try expect((try file.getPos()) == 4096);
- // Positive delta
- try file.seekBy(10);
- try expect((try file.getPos()) == 4106);
- // Absolute position
- try file.seekTo(1234);
- try expect((try file.getPos()) == 1234);
+ var file = try tmp.dir.createFile(io, tmp_file_name, .{ .read = true });
+ defer file.close(io);
+
+ var fw = file.writerStreaming(io, &.{});
+
+ try fw.interface.writeAll(&data);
+ try expect(fw.logicalPos() == try file.length(io));
+ try fw.seekTo(1234);
+ try expect(fw.logicalPos() == 1234);
}
-test "setEndPos" {
+test "File.setLength" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
const tmp_file_name = "temp_test_file.txt";
- var file = try tmp.dir.createFile(tmp_file_name, .{});
- defer file.close();
+ var file = try tmp.dir.createFile(io, tmp_file_name, .{ .read = true });
+ defer file.close(io);
+
+ var fw = file.writerStreaming(io, &.{});
// Verify that the file size changes and the file offset is not moved
- try expect((try file.getEndPos()) == 0);
- try expect((try file.getPos()) == 0);
- try file.setEndPos(8192);
- try expect((try file.getEndPos()) == 8192);
- try expect((try file.getPos()) == 0);
- try file.seekTo(100);
- try file.setEndPos(4096);
- try expect((try file.getEndPos()) == 4096);
- try expect((try file.getPos()) == 100);
- try file.setEndPos(0);
- try expect((try file.getEndPos()) == 0);
- try expect((try file.getPos()) == 100);
+ try expect((try file.length(io)) == 0);
+ try expect(fw.logicalPos() == 0);
+ try file.setLength(io, 8192);
+ try expect((try file.length(io)) == 8192);
+ try expect(fw.logicalPos() == 0);
+ try fw.seekTo(100);
+ try file.setLength(io, 4096);
+ try expect((try file.length(io)) == 4096);
+ try expect(fw.logicalPos() == 100);
+ try file.setLength(io, 0);
+ try expect((try file.length(io)) == 0);
+ try expect(fw.logicalPos() == 100);
}
-test "updateTimes" {
+test "legacy setLength" {
+ // https://github.com/ziglang/zig/issues/20747 (open fd does not have write permission)
+ if (builtin.os.tag == .wasi and builtin.link_libc) return error.SkipZigTest;
+ if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23806
+
+ const io = testing.io;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ const file_name = "afile.txt";
+ try tmp.dir.writeFile(io, .{ .sub_path = file_name, .data = "ninebytes" });
+ const f = try tmp.dir.openFile(io, file_name, .{ .mode = .read_write });
+ defer f.close(io);
+
+ const initial_size = try f.length(io);
+ var buffer: [32]u8 = undefined;
+ var reader = f.reader(io, &.{});
+
+ {
+ try f.setLength(io, initial_size);
+ try expectEqual(initial_size, try f.length(io));
+ try reader.seekTo(0);
+ try expectEqual(initial_size, try reader.interface.readSliceShort(&buffer));
+ try expectEqualStrings("ninebytes", buffer[0..@intCast(initial_size)]);
+ }
+
+ {
+ const larger = initial_size + 4;
+ try f.setLength(io, larger);
+ try expectEqual(larger, try f.length(io));
+ try reader.seekTo(0);
+ try expectEqual(larger, try reader.interface.readSliceShort(&buffer));
+ try expectEqualStrings("ninebytes\x00\x00\x00\x00", buffer[0..@intCast(larger)]);
+ }
+
+ {
+ const smaller = initial_size - 5;
+ try f.setLength(io, smaller);
+ try expectEqual(smaller, try f.length(io));
+ try reader.seekTo(0);
+ try expectEqual(smaller, try reader.interface.readSliceShort(&buffer));
+ try expectEqualStrings("nine", buffer[0..@intCast(smaller)]);
+ }
+
+ try f.setLength(io, 0);
+ try expectEqual(0, try f.length(io));
+ try reader.seekTo(0);
+ try expectEqual(0, try reader.interface.readSliceShort(&buffer));
+}
+
+test "setTimestamps" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
const tmp_file_name = "just_a_temporary_file.txt";
- var file = try tmp.dir.createFile(tmp_file_name, .{ .read = true });
- defer file.close();
+ var file = try tmp.dir.createFile(io, tmp_file_name, .{ .read = true });
+ defer file.close(io);
- const stat_old = try file.stat();
+ const stat_old = try file.stat(io);
// Set atime and mtime to 5s before
- try file.updateTimes(
+ try file.setTimestamps(
+ io,
stat_old.atime.subDuration(.fromSeconds(5)),
stat_old.mtime.subDuration(.fromSeconds(5)),
);
- const stat_new = try file.stat();
+ const stat_new = try file.stat(io);
try expect(stat_new.atime.nanoseconds < stat_old.atime.nanoseconds);
try expect(stat_new.mtime.nanoseconds < stat_old.mtime.nanoseconds);
}
diff --git a/lib/std/Io/tty.zig b/lib/std/Io/tty.zig
deleted file mode 100644
index 3e2bb0969d..0000000000
--- a/lib/std/Io/tty.zig
+++ /dev/null
@@ -1,131 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const File = std.fs.File;
-const process = std.process;
-const windows = std.os.windows;
-const native_os = builtin.os.tag;
-
-pub const Color = enum {
- black,
- red,
- green,
- yellow,
- blue,
- magenta,
- cyan,
- white,
- bright_black,
- bright_red,
- bright_green,
- bright_yellow,
- bright_blue,
- bright_magenta,
- bright_cyan,
- bright_white,
- dim,
- bold,
- reset,
-};
-
-/// Provides simple functionality for manipulating the terminal in some way,
-/// such as coloring text, etc.
-pub const Config = union(enum) {
- no_color,
- escape_codes,
- windows_api: if (native_os == .windows) WindowsContext else noreturn,
-
- /// Detect suitable TTY configuration options for the given file (commonly stdout/stderr).
- /// This includes feature checks for ANSI escape codes and the Windows console API, as well as
- /// respecting the `NO_COLOR` and `CLICOLOR_FORCE` environment variables to override the default.
- /// Will attempt to enable ANSI escape code support if necessary/possible.
- pub fn detect(file: File) Config {
- const force_color: ?bool = if (builtin.os.tag == .wasi)
- null // wasi does not support environment variables
- else if (process.hasNonEmptyEnvVarConstant("NO_COLOR"))
- false
- else if (process.hasNonEmptyEnvVarConstant("CLICOLOR_FORCE"))
- true
- else
- null;
-
- if (force_color == false) return .no_color;
-
- if (file.getOrEnableAnsiEscapeSupport()) return .escape_codes;
-
- if (native_os == .windows and file.isTty()) {
- var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
- if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) == windows.FALSE) {
- return if (force_color == true) .escape_codes else .no_color;
- }
- return .{ .windows_api = .{
- .handle = file.handle,
- .reset_attributes = info.wAttributes,
- } };
- }
-
- return if (force_color == true) .escape_codes else .no_color;
- }
-
- pub const WindowsContext = struct {
- handle: File.Handle,
- reset_attributes: u16,
- };
-
- pub const SetColorError = std.os.windows.SetConsoleTextAttributeError || std.Io.Writer.Error;
-
- pub fn setColor(conf: Config, w: *std.Io.Writer, color: Color) SetColorError!void {
- nosuspend switch (conf) {
- .no_color => return,
- .escape_codes => {
- const color_string = switch (color) {
- .black => "\x1b[30m",
- .red => "\x1b[31m",
- .green => "\x1b[32m",
- .yellow => "\x1b[33m",
- .blue => "\x1b[34m",
- .magenta => "\x1b[35m",
- .cyan => "\x1b[36m",
- .white => "\x1b[37m",
- .bright_black => "\x1b[90m",
- .bright_red => "\x1b[91m",
- .bright_green => "\x1b[92m",
- .bright_yellow => "\x1b[93m",
- .bright_blue => "\x1b[94m",
- .bright_magenta => "\x1b[95m",
- .bright_cyan => "\x1b[96m",
- .bright_white => "\x1b[97m",
- .bold => "\x1b[1m",
- .dim => "\x1b[2m",
- .reset => "\x1b[0m",
- };
- try w.writeAll(color_string);
- },
- .windows_api => |ctx| {
- const attributes = switch (color) {
- .black => 0,
- .red => windows.FOREGROUND_RED,
- .green => windows.FOREGROUND_GREEN,
- .yellow => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN,
- .blue => windows.FOREGROUND_BLUE,
- .magenta => windows.FOREGROUND_RED | windows.FOREGROUND_BLUE,
- .cyan => windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE,
- .white => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE,
- .bright_black => windows.FOREGROUND_INTENSITY,
- .bright_red => windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY,
- .bright_green => windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY,
- .bright_yellow => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY,
- .bright_blue => windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
- .bright_magenta => windows.FOREGROUND_RED | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
- .bright_cyan => windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
- .bright_white, .bold => windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY,
- // "dim" is not supported using basic character attributes, but let's still make it do *something*.
- // This matches the old behavior of TTY.Color before the bright variants were added.
- .dim => windows.FOREGROUND_INTENSITY,
- .reset => ctx.reset_attributes,
- };
- try w.flush();
- try windows.SetConsoleTextAttribute(ctx.handle, attributes);
- },
- };
- }
-};
diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig
index d1ab503661..6baa24d246 100644
--- a/lib/std/Progress.zig
+++ b/lib/std/Progress.zig
@@ -1,26 +1,30 @@
//! This API is non-allocating, non-fallible, thread-safe, and lock-free.
+const Progress = @This();
-const std = @import("std");
const builtin = @import("builtin");
+const is_big_endian = builtin.cpu.arch.endian() == .big;
+const is_windows = builtin.os.tag == .windows;
+
+const std = @import("std");
+const Io = std.Io;
const windows = std.os.windows;
const testing = std.testing;
const assert = std.debug.assert;
-const Progress = @This();
const posix = std.posix;
-const is_big_endian = builtin.cpu.arch.endian() == .big;
-const is_windows = builtin.os.tag == .windows;
const Writer = std.Io.Writer;
-/// `null` if the current node (and its children) should
-/// not print on update()
-terminal: std.fs.File,
+/// Currently this API only supports this value being set to stderr, which
+/// happens automatically inside `start`.
+terminal: Io.File,
+
+io: Io,
terminal_mode: TerminalMode,
-update_thread: ?std.Thread,
+update_worker: ?Io.Future(void),
/// Atomically set by SIGWINCH as well as the root done() function.
-redraw_event: std.Thread.ResetEvent,
+redraw_event: Io.Event,
/// Indicates a request to shut down and reset global state.
/// Accessed atomically.
done: bool,
@@ -48,6 +52,8 @@ node_freelist: Freelist,
/// value may at times temporarily exceed the node count.
node_end_index: u32,
+start_failure: StartFailure,
+
pub const Status = enum {
/// Indicates the application is progressing towards completion of a task.
/// Unless the application is interactive, this is the only status the
@@ -93,9 +99,9 @@ pub const Options = struct {
/// Must be at least 200 bytes.
draw_buffer: []u8 = &default_draw_buffer,
/// How many nanoseconds between writing updates to the terminal.
- refresh_rate_ns: u64 = 80 * std.time.ns_per_ms,
+ refresh_rate_ns: Io.Duration = .fromMilliseconds(80),
/// How many nanoseconds to keep the output hidden
- initial_delay_ns: u64 = 200 * std.time.ns_per_ms,
+ initial_delay_ns: Io.Duration = .fromMilliseconds(200),
/// If provided, causes the progress item to have a denominator.
/// 0 means unknown.
estimated_total_items: usize = 0,
@@ -121,20 +127,20 @@ pub const Node = struct {
name: [max_name_len]u8 align(@alignOf(usize)),
/// Not thread-safe.
- fn getIpcFd(s: Storage) ?posix.fd_t {
- return if (s.estimated_total_count == std.math.maxInt(u32)) switch (@typeInfo(posix.fd_t)) {
+ fn getIpcFd(s: Storage) ?Io.File.Handle {
+ return if (s.estimated_total_count == std.math.maxInt(u32)) switch (@typeInfo(Io.File.Handle)) {
.int => @bitCast(s.completed_count),
.pointer => @ptrFromInt(s.completed_count),
- else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
+ else => @compileError("unsupported fd_t of " ++ @typeName(Io.File.Handle)),
} else null;
}
/// Thread-safe.
- fn setIpcFd(s: *Storage, fd: posix.fd_t) void {
- const integer: u32 = switch (@typeInfo(posix.fd_t)) {
+ fn setIpcFd(s: *Storage, fd: Io.File.Handle) void {
+ const integer: u32 = switch (@typeInfo(Io.File.Handle)) {
.int => @bitCast(fd),
.pointer => @intFromPtr(fd),
- else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
+ else => @compileError("unsupported fd_t of " ++ @typeName(Io.File.Handle)),
};
// `estimated_total_count` max int indicates the special state that
// causes `completed_count` to be treated as a file descriptor, so
@@ -327,13 +333,14 @@ pub const Node = struct {
}
} else {
@atomicStore(bool, &global_progress.done, true, .monotonic);
- global_progress.redraw_event.set();
- if (global_progress.update_thread) |thread| thread.join();
+ const io = global_progress.io;
+ global_progress.redraw_event.set(io);
+ if (global_progress.update_worker) |*worker| worker.await(io);
}
}
/// Posix-only. Used by `std.process.Child`. Thread-safe.
- pub fn setIpcFd(node: Node, fd: posix.fd_t) void {
+ pub fn setIpcFd(node: Node, fd: Io.File.Handle) void {
const index = node.index.unwrap() orelse return;
assert(fd >= 0);
assert(fd != posix.STDOUT_FILENO);
@@ -344,14 +351,14 @@ pub const Node = struct {
/// Posix-only. Thread-safe. Assumes the node is storing an IPC file
/// descriptor.
- pub fn getIpcFd(node: Node) ?posix.fd_t {
+ pub fn getIpcFd(node: Node) ?Io.File.Handle {
const index = node.index.unwrap() orelse return null;
const storage = storageByIndex(index);
const int = @atomicLoad(u32, &storage.completed_count, .monotonic);
- return switch (@typeInfo(posix.fd_t)) {
+ return switch (@typeInfo(Io.File.Handle)) {
.int => @bitCast(int),
.pointer => @ptrFromInt(int),
- else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
+ else => @compileError("unsupported fd_t of " ++ @typeName(Io.File.Handle)),
};
}
@@ -389,9 +396,10 @@ pub const Node = struct {
};
var global_progress: Progress = .{
+ .io = undefined,
.terminal = undefined,
.terminal_mode = .off,
- .update_thread = null,
+ .update_worker = null,
.redraw_event = .unset,
.refresh_rate_ns = undefined,
.initial_delay_ns = undefined,
@@ -401,6 +409,7 @@ var global_progress: Progress = .{
.done = false,
.need_clear = false,
.status = .working,
+ .start_failure = .unstarted,
.node_parents = &node_parents_buffer,
.node_storage = &node_storage_buffer,
@@ -409,6 +418,13 @@ var global_progress: Progress = .{
.node_end_index = 0,
};
+pub const StartFailure = union(enum) {
+ unstarted,
+ spawn_ipc_worker: error{ConcurrencyUnavailable},
+ spawn_update_worker: error{ConcurrencyUnavailable},
+ parse_env_var: error{ InvalidCharacter, Overflow },
+};
+
const node_storage_buffer_len = 83;
var node_parents_buffer: [node_storage_buffer_len]Node.Parent = undefined;
var node_storage_buffer: [node_storage_buffer_len]Node.Storage = undefined;
@@ -435,7 +451,9 @@ const noop_impl = builtin.single_threaded or switch (builtin.os.tag) {
/// Asserts there is only one global Progress instance.
///
/// Call `Node.end` when done.
-pub fn start(options: Options) Node {
+///
+/// If an error occurs, `start_failure` will be populated.
+pub fn start(io: Io, options: Options) Node {
// Ensure there is only 1 global Progress object.
if (global_progress.node_end_index != 0) {
debug_start_trace.dump();
@@ -450,21 +468,24 @@ pub fn start(options: Options) Node {
assert(options.draw_buffer.len >= 200);
global_progress.draw_buffer = options.draw_buffer;
- global_progress.refresh_rate_ns = options.refresh_rate_ns;
- global_progress.initial_delay_ns = options.initial_delay_ns;
+ global_progress.refresh_rate_ns = @intCast(options.refresh_rate_ns.toNanoseconds());
+ global_progress.initial_delay_ns = @intCast(options.initial_delay_ns.toNanoseconds());
if (noop_impl)
return Node.none;
+ global_progress.io = io;
+
if (std.process.parseEnvVarInt("ZIG_PROGRESS", u31, 10)) |ipc_fd| {
- global_progress.update_thread = std.Thread.spawn(.{}, ipcThreadRun, .{
- @as(posix.fd_t, switch (@typeInfo(posix.fd_t)) {
+ global_progress.update_worker = io.concurrent(ipcThreadRun, .{
+ io,
+ @as(Io.File, .{ .handle = switch (@typeInfo(Io.File.Handle)) {
.int => ipc_fd,
.pointer => @ptrFromInt(ipc_fd),
- else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
- }),
+ else => @compileError("unsupported fd_t of " ++ @typeName(Io.File.Handle)),
+ } }),
}) catch |err| {
- std.log.warn("failed to spawn IPC thread for communicating progress to parent: {s}", .{@errorName(err)});
+ global_progress.start_failure = .{ .spawn_ipc_worker = err };
return Node.none;
};
} else |env_err| switch (env_err) {
@@ -472,14 +493,21 @@ pub fn start(options: Options) Node {
if (options.disable_printing) {
return Node.none;
}
- const stderr: std.fs.File = .stderr();
+ const stderr: Io.File = .stderr();
global_progress.terminal = stderr;
- if (stderr.getOrEnableAnsiEscapeSupport()) {
+ if (stderr.enableAnsiEscapeCodes(io)) |_| {
global_progress.terminal_mode = .ansi_escape_codes;
- } else if (is_windows and stderr.isTty()) {
- global_progress.terminal_mode = TerminalMode{ .windows_api = .{
- .code_page = windows.kernel32.GetConsoleOutputCP(),
- } };
+ } else |_| if (is_windows) {
+ if (stderr.isTty(io)) |is_tty| {
+ if (is_tty) global_progress.terminal_mode = TerminalMode{ .windows_api = .{
+ .code_page = windows.kernel32.GetConsoleOutputCP(),
+ } };
+ } else |err| switch (err) {
+ error.Canceled => {
+ io.recancel();
+ return Node.none;
+ },
+ }
}
if (global_progress.terminal_mode == .off) {
@@ -497,17 +525,17 @@ pub fn start(options: Options) Node {
if (switch (global_progress.terminal_mode) {
.off => unreachable, // handled a few lines above
- .ansi_escape_codes => std.Thread.spawn(.{}, updateThreadRun, .{}),
- .windows_api => if (is_windows) std.Thread.spawn(.{}, windowsApiUpdateThreadRun, .{}) else unreachable,
- }) |thread| {
- global_progress.update_thread = thread;
+ .ansi_escape_codes => io.concurrent(updateThreadRun, .{io}),
+ .windows_api => if (is_windows) io.concurrent(windowsApiUpdateThreadRun, .{io}) else unreachable,
+ }) |future| {
+ global_progress.update_worker = future;
} else |err| {
- std.log.warn("unable to spawn thread for printing progress to terminal: {s}", .{@errorName(err)});
+ global_progress.start_failure = .{ .spawn_update_worker = err };
return Node.none;
}
},
else => |e| {
- std.log.warn("invalid ZIG_PROGRESS file descriptor integer: {s}", .{@errorName(e)});
+ global_progress.start_failure = .{ .parse_env_var = e };
return Node.none;
},
}
@@ -521,48 +549,52 @@ pub fn setStatus(new_status: Status) void {
}
/// Returns whether a resize is needed to learn the terminal size.
-fn wait(timeout_ns: u64) bool {
- const resize_flag = if (global_progress.redraw_event.timedWait(timeout_ns)) |_| true else |err| switch (err) {
- error.Timeout => false,
+fn wait(io: Io, timeout_ns: u64) bool {
+ const timeout: Io.Timeout = .{ .duration = .{
+ .clock = .awake,
+ .raw = .fromNanoseconds(timeout_ns),
+ } };
+ const resize_flag = if (global_progress.redraw_event.waitTimeout(io, timeout)) |_| true else |err| switch (err) {
+ error.Timeout, error.Canceled => false,
};
global_progress.redraw_event.reset();
return resize_flag or (global_progress.cols == 0);
}
-fn updateThreadRun() void {
+fn updateThreadRun(io: Io) void {
// Store this data in the thread so that it does not need to be part of the
// linker data of the main executable.
var serialized_buffer: Serialized.Buffer = undefined;
{
- const resize_flag = wait(global_progress.initial_delay_ns);
+ const resize_flag = wait(io, global_progress.initial_delay_ns);
if (@atomicLoad(bool, &global_progress.done, .monotonic)) return;
maybeUpdateSize(resize_flag);
const buffer, _ = computeRedraw(&serialized_buffer);
- if (stderr_mutex.tryLock()) {
- defer stderr_mutex.unlock();
- write(buffer) catch return;
+ if (io.tryLockStderr(&.{}, null) catch return) |locked_stderr| {
+ defer io.unlockStderr();
global_progress.need_clear = true;
+ locked_stderr.file_writer.interface.writeAll(buffer) catch return;
}
}
while (true) {
- const resize_flag = wait(global_progress.refresh_rate_ns);
+ const resize_flag = wait(io, global_progress.refresh_rate_ns);
if (@atomicLoad(bool, &global_progress.done, .monotonic)) {
- stderr_mutex.lock();
- defer stderr_mutex.unlock();
- return clearWrittenWithEscapeCodes() catch {};
+ const stderr = io.lockStderr(&.{}, null) catch return;
+ defer io.unlockStderr();
+ return clearWrittenWithEscapeCodes(stderr.file_writer) catch {};
}
maybeUpdateSize(resize_flag);
const buffer, _ = computeRedraw(&serialized_buffer);
- if (stderr_mutex.tryLock()) {
- defer stderr_mutex.unlock();
- write(buffer) catch return;
+ if (io.tryLockStderr(&.{}, null) catch return) |locked_stderr| {
+ defer io.unlockStderr();
global_progress.need_clear = true;
+ locked_stderr.file_writer.interface.writeAll(buffer) catch return;
}
}
}
@@ -575,117 +607,72 @@ fn windowsApiWriteMarker() void {
_ = windows.kernel32.WriteConsoleW(handle, &[_]u16{windows_api_start_marker}, 1, &num_chars_written, null);
}
-fn windowsApiUpdateThreadRun() void {
+fn windowsApiUpdateThreadRun(io: Io) void {
var serialized_buffer: Serialized.Buffer = undefined;
{
- const resize_flag = wait(global_progress.initial_delay_ns);
+ const resize_flag = wait(io, global_progress.initial_delay_ns);
if (@atomicLoad(bool, &global_progress.done, .monotonic)) return;
maybeUpdateSize(resize_flag);
const buffer, const nl_n = computeRedraw(&serialized_buffer);
- if (stderr_mutex.tryLock()) {
- defer stderr_mutex.unlock();
+ if (io.tryLockStderr(&.{}, null) catch return) |locked_stderr| {
+ defer io.unlockStderr();
windowsApiWriteMarker();
- write(buffer) catch return;
global_progress.need_clear = true;
+ locked_stderr.file_writer.interface.writeAll(buffer) catch return;
windowsApiMoveToMarker(nl_n) catch return;
}
}
while (true) {
- const resize_flag = wait(global_progress.refresh_rate_ns);
+ const resize_flag = wait(io, global_progress.refresh_rate_ns);
if (@atomicLoad(bool, &global_progress.done, .monotonic)) {
- stderr_mutex.lock();
- defer stderr_mutex.unlock();
+ _ = io.lockStderr(&.{}, null) catch return;
+ defer io.unlockStderr();
return clearWrittenWindowsApi() catch {};
}
maybeUpdateSize(resize_flag);
const buffer, const nl_n = computeRedraw(&serialized_buffer);
- if (stderr_mutex.tryLock()) {
- defer stderr_mutex.unlock();
+ if (io.tryLockStderr(&.{}, null) catch return) |locked_stderr| {
+ defer io.unlockStderr();
clearWrittenWindowsApi() catch return;
windowsApiWriteMarker();
- write(buffer) catch return;
global_progress.need_clear = true;
+ locked_stderr.file_writer.interface.writeAll(buffer) catch return;
windowsApiMoveToMarker(nl_n) catch return;
}
}
}
-/// Allows the caller to freely write to stderr until `unlockStdErr` is called.
-///
-/// During the lock, any `std.Progress` information is cleared from the terminal.
-///
-/// The lock is recursive; the same thread may hold the lock multiple times.
-pub fn lockStdErr() void {
- stderr_mutex.lock();
- clearWrittenWithEscapeCodes() catch {};
-}
-
-pub fn unlockStdErr() void {
- stderr_mutex.unlock();
-}
-
-/// Protected by `stderr_mutex`.
-const stderr_writer: *Writer = &stderr_file_writer.interface;
-/// Protected by `stderr_mutex`.
-var stderr_file_writer: std.fs.File.Writer = .{
- .interface = std.fs.File.Writer.initInterface(&.{}),
- .file = if (is_windows) undefined else .stderr(),
- .mode = .streaming,
-};
-
-/// Allows the caller to freely write to the returned `Writer`,
-/// initialized with `buffer`, until `unlockStderrWriter` is called.
-///
-/// During the lock, any `std.Progress` information is cleared from the terminal.
-///
-/// The lock is recursive; the same thread may hold the lock multiple times.
-pub fn lockStderrWriter(buffer: []u8) *Writer {
- stderr_mutex.lock();
- clearWrittenWithEscapeCodes() catch {};
- if (is_windows) stderr_file_writer.file = .stderr();
- stderr_writer.flush() catch {};
- stderr_writer.buffer = buffer;
- return stderr_writer;
-}
-
-pub fn unlockStderrWriter() void {
- stderr_writer.flush() catch {};
- stderr_writer.end = 0;
- stderr_writer.buffer = &.{};
- stderr_mutex.unlock();
-}
-
-fn ipcThreadRun(fd: posix.fd_t) anyerror!void {
+fn ipcThreadRun(io: Io, file: Io.File) void {
// Store this data in the thread so that it does not need to be part of the
// linker data of the main executable.
var serialized_buffer: Serialized.Buffer = undefined;
{
- _ = wait(global_progress.initial_delay_ns);
+ _ = wait(io, global_progress.initial_delay_ns);
if (@atomicLoad(bool, &global_progress.done, .monotonic))
return;
const serialized = serialize(&serialized_buffer);
- writeIpc(fd, serialized) catch |err| switch (err) {
+ writeIpc(io, file, serialized) catch |err| switch (err) {
error.BrokenPipe => return,
};
}
while (true) {
- _ = wait(global_progress.refresh_rate_ns);
+ _ = wait(io, global_progress.refresh_rate_ns);
if (@atomicLoad(bool, &global_progress.done, .monotonic))
return;
const serialized = serialize(&serialized_buffer);
- writeIpc(fd, serialized) catch |err| switch (err) {
+ writeIpc(io, file, serialized) catch |err| switch (err) {
error.BrokenPipe => return,
};
}
@@ -784,11 +771,10 @@ fn appendTreeSymbol(symbol: TreeSymbol, buf: []u8, start_i: usize) usize {
}
}
-fn clearWrittenWithEscapeCodes() anyerror!void {
+pub fn clearWrittenWithEscapeCodes(file_writer: *Io.File.Writer) Io.Writer.Error!void {
if (noop_impl or !global_progress.need_clear) return;
-
+ try file_writer.interface.writeAll(clear ++ progress_remove);
global_progress.need_clear = false;
- try write(clear ++ progress_remove);
}
/// U+25BA or ►
@@ -948,11 +934,11 @@ const SavedMetadata = struct {
const Fd = enum(i32) {
_,
- fn init(fd: posix.fd_t) Fd {
+ fn init(fd: Io.File.Handle) Fd {
return @enumFromInt(if (is_windows) @as(isize, @bitCast(@intFromPtr(fd))) else fd);
}
- fn get(fd: Fd) posix.fd_t {
+ fn get(fd: Fd) Io.File.Handle {
return if (is_windows)
@ptrFromInt(@as(usize, @bitCast(@as(isize, @intFromEnum(fd)))))
else
@@ -963,6 +949,7 @@ const Fd = enum(i32) {
var ipc_metadata_len: u8 = 0;
fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buffer) usize {
+ const io = global_progress.io;
const ipc_metadata_fds_copy = &serialized_buffer.ipc_metadata_fds_copy;
const ipc_metadata_copy = &serialized_buffer.ipc_metadata_copy;
const ipc_metadata_fds = &serialized_buffer.ipc_metadata_fds;
@@ -981,14 +968,14 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff
0..,
) |main_parent, *main_storage, main_index| {
if (main_parent == .unused) continue;
- const fd = main_storage.getIpcFd() orelse continue;
- const opt_saved_metadata = findOld(fd, old_ipc_metadata_fds, old_ipc_metadata);
+ const file: Io.File = .{ .handle = main_storage.getIpcFd() orelse continue };
+ const opt_saved_metadata = findOld(file.handle, old_ipc_metadata_fds, old_ipc_metadata);
var bytes_read: usize = 0;
while (true) {
- const n = posix.read(fd, pipe_buf[bytes_read..]) catch |err| switch (err) {
+ const n = file.readStreaming(io, &.{pipe_buf[bytes_read..]}) catch |err| switch (err) {
error.WouldBlock => break,
else => |e| {
- std.log.debug("failed to read child progress data: {s}", .{@errorName(e)});
+ std.log.debug("failed to read child progress data: {t}", .{e});
main_storage.completed_count = 0;
main_storage.estimated_total_count = 0;
continue :main_loop;
@@ -1014,7 +1001,7 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff
// Ignore all but the last message on the pipe.
var input: []u8 = pipe_buf[0..bytes_read];
if (input.len == 0) {
- serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, opt_saved_metadata, 0, fd);
+ serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, opt_saved_metadata, 0, file.handle);
continue;
}
@@ -1024,7 +1011,7 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff
if (input.len < expected_bytes) {
// Ignore short reads. We'll handle the next full message when it comes instead.
const remaining_read_trash_bytes: u16 = @intCast(expected_bytes - input.len);
- serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, opt_saved_metadata, remaining_read_trash_bytes, fd);
+ serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, opt_saved_metadata, remaining_read_trash_bytes, file.handle);
continue :main_loop;
}
if (input.len > expected_bytes) {
@@ -1042,7 +1029,7 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff
const nodes_len: u8 = @intCast(@min(parents.len - 1, serialized_buffer.storage.len - serialized_len));
// Remember in case the pipe is empty on next update.
- ipc_metadata_fds[ipc_metadata_len] = Fd.init(fd);
+ ipc_metadata_fds[ipc_metadata_len] = Fd.init(file.handle);
ipc_metadata[ipc_metadata_len] = .{
.remaining_read_trash_bytes = 0,
.start_index = @intCast(serialized_len),
@@ -1100,7 +1087,7 @@ fn copyRoot(dest: *Node.Storage, src: *align(1) Node.Storage) void {
}
fn findOld(
- ipc_fd: posix.fd_t,
+ ipc_fd: Io.File.Handle,
old_metadata_fds: []Fd,
old_metadata: []SavedMetadata,
) ?*SavedMetadata {
@@ -1118,7 +1105,7 @@ fn useSavedIpcData(
main_index: usize,
opt_saved_metadata: ?*SavedMetadata,
remaining_read_trash_bytes: u16,
- fd: posix.fd_t,
+ fd: Io.File.Handle,
) usize {
const parents_copy = &serialized_buffer.parents_copy;
const storage_copy = &serialized_buffer.storage_copy;
@@ -1415,13 +1402,9 @@ fn withinRowLimit(p: *Progress, nl_n: usize) bool {
return nl_n + 2 < p.rows;
}
-fn write(buf: []const u8) anyerror!void {
- try global_progress.terminal.writeAll(buf);
-}
-
var remaining_write_trash_bytes: usize = 0;
-fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void {
+fn writeIpc(io: Io, file: Io.File, serialized: Serialized) error{BrokenPipe}!void {
// Byteswap if necessary to ensure little endian over the pipe. This is
// needed because the parent or child process might be running in qemu.
if (is_big_endian) for (serialized.storage) |*s| s.byteSwap();
@@ -1432,11 +1415,7 @@ fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void {
const storage = std.mem.sliceAsBytes(serialized.storage);
const parents = std.mem.sliceAsBytes(serialized.parents);
- var vecs: [3]posix.iovec_const = .{
- .{ .base = header.ptr, .len = header.len },
- .{ .base = storage.ptr, .len = storage.len },
- .{ .base = parents.ptr, .len = parents.len },
- };
+ var vecs: [3][]const u8 = .{ header, storage, parents };
// Ensures the packet can fit in the pipe buffer.
const upper_bound_msg_len = 1 + node_storage_buffer_len * @sizeOf(Node.Storage) +
@@ -1447,14 +1426,14 @@ fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void {
// We do this in a separate write call to give a better chance for the
// writev below to be in a single packet.
const n = @min(parents.len, remaining_write_trash_bytes);
- if (posix.write(fd, parents[0..n])) |written| {
+ if (io.vtable.fileWriteStreaming(io.userdata, file, &.{}, &.{parents[0..n]}, 1)) |written| {
remaining_write_trash_bytes -= written;
continue;
} else |err| switch (err) {
error.WouldBlock => return,
error.BrokenPipe => return error.BrokenPipe,
else => |e| {
- std.log.debug("failed to send progress to parent process: {s}", .{@errorName(e)});
+ std.log.debug("failed to send progress to parent process: {t}", .{e});
return error.BrokenPipe;
},
}
@@ -1462,7 +1441,7 @@ fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void {
// If this write would block we do not want to keep trying, but we need to
// know if a partial message was written.
- if (writevNonblock(fd, &vecs)) |written| {
+ if (writevNonblock(io, file, &vecs)) |written| {
const total = header.len + storage.len + parents.len;
if (written < total) {
remaining_write_trash_bytes = total - written;
@@ -1471,13 +1450,13 @@ fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void {
error.WouldBlock => {},
error.BrokenPipe => return error.BrokenPipe,
else => |e| {
- std.log.debug("failed to send progress to parent process: {s}", .{@errorName(e)});
+ std.log.debug("failed to send progress to parent process: {t}", .{e});
return error.BrokenPipe;
},
}
}
-fn writevNonblock(fd: posix.fd_t, iov: []posix.iovec_const) posix.WriteError!usize {
+fn writevNonblock(io: Io, file: Io.File, iov: [][]const u8) Io.File.Writer.Error!usize {
var iov_index: usize = 0;
var written: usize = 0;
var total_written: usize = 0;
@@ -1486,9 +1465,9 @@ fn writevNonblock(fd: posix.fd_t, iov: []posix.iovec_const) posix.WriteError!usi
written >= iov[iov_index].len
else
return total_written) : (iov_index += 1) written -= iov[iov_index].len;
- iov[iov_index].base += written;
+ iov[iov_index].ptr += written;
iov[iov_index].len -= written;
- written = try posix.writev(fd, iov[iov_index..]);
+ written = try io.vtable.fileWriteStreaming(io.userdata, file, &.{}, iov, 1);
if (written == 0) return total_written;
total_written += written;
}
@@ -1538,7 +1517,7 @@ fn handleSigWinch(sig: posix.SIG, info: *const posix.siginfo_t, ctx_ptr: ?*anyop
_ = info;
_ = ctx_ptr;
assert(sig == .WINCH);
- global_progress.redraw_event.set();
+ global_progress.redraw_event.set(global_progress.io);
}
const have_sigwinch = switch (builtin.os.tag) {
@@ -1563,11 +1542,6 @@ const have_sigwinch = switch (builtin.os.tag) {
else => false,
};
-/// The primary motivation for recursive mutex here is so that a panic while
-/// stderr mutex is held still dumps the stack trace and other debug
-/// information.
-var stderr_mutex = std.Thread.Mutex.Recursive.init;
-
fn copyAtomicStore(dest: []align(@alignOf(usize)) u8, src: []const u8) void {
assert(dest.len == src.len);
const chunked_len = dest.len / @sizeOf(usize);
diff --git a/lib/std/Random/benchmark.zig b/lib/std/Random/benchmark.zig
index 57dc69051e..97afe23b95 100644
--- a/lib/std/Random/benchmark.zig
+++ b/lib/std/Random/benchmark.zig
@@ -1,7 +1,9 @@
// zig run -O ReleaseFast --zig-lib-dir ../.. benchmark.zig
-const std = @import("std");
const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
const time = std.time;
const Timer = time.Timer;
const Random = std.Random;
@@ -123,7 +125,7 @@ fn mode(comptime x: comptime_int) comptime_int {
pub fn main() !void {
var stdout_buffer: [0x100]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(&stdout_buffer);
const stdout = &stdout_writer.interface;
var buffer: [1024]u8 = undefined;
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index 35b268b349..312cc25a99 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -7,6 +7,7 @@ const target = builtin.target;
const native_os = builtin.os.tag;
const std = @import("std.zig");
+const Io = std.Io;
const math = std.math;
const assert = std.debug.assert;
const posix = std.posix;
@@ -174,9 +175,9 @@ pub const SetNameError = error{
Unsupported,
Unexpected,
InvalidWtf8,
-} || posix.PrctlError || posix.WriteError || std.fs.File.OpenError || std.fmt.BufPrintError;
+} || posix.PrctlError || posix.WriteError || Io.File.OpenError || std.fmt.BufPrintError;
-pub fn setName(self: Thread, name: []const u8) SetNameError!void {
+pub fn setName(self: Thread, io: Io, name: []const u8) SetNameError!void {
if (name.len > max_name_len) return error.NameTooLong;
const name_with_terminator = blk: {
@@ -207,10 +208,10 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
var buf: [32]u8 = undefined;
const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()});
- const file = try std.fs.cwd().openFile(path, .{ .mode = .write_only });
- defer file.close();
+ const file = try Io.Dir.cwd().openFile(io, path, .{ .mode = .write_only });
+ defer file.close(io);
- try file.writeAll(name);
+ try file.writeStreamingAll(io, name);
return;
},
.windows => {
@@ -292,7 +293,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
pub const GetNameError = error{
Unsupported,
Unexpected,
-} || posix.PrctlError || posix.ReadError || std.fs.File.OpenError || std.fmt.BufPrintError;
+} || posix.PrctlError || posix.ReadError || Io.File.OpenError || std.fmt.BufPrintError;
/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On other platforms, the result is an opaque sequence of bytes with no particular encoding.
@@ -321,11 +322,10 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
var buf: [32]u8 = undefined;
const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()});
- var threaded: std.Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
+ const io = std.Options.debug_io;
- const file = try std.fs.cwd().openFile(path, .{});
- defer file.close();
+ const file = try Io.Dir.cwd().openFile(io, path, .{});
+ defer file.close(io);
var file_reader = file.readerStreaming(io, &.{});
const data_len = file_reader.interface.readSliceShort(buffer_ptr[0 .. max_name_len + 1]) catch |err| switch (err) {
@@ -1675,14 +1675,14 @@ const LinuxThreadImpl = struct {
}
};
-fn testThreadName(thread: *Thread) !void {
+fn testThreadName(io: Io, thread: *Thread) !void {
const testCases = &[_][]const u8{
"mythread",
"b" ** max_name_len,
};
inline for (testCases) |tc| {
- try thread.setName(tc);
+ try thread.setName(io, tc);
var name_buffer: [max_name_len:0]u8 = undefined;
@@ -1697,6 +1697,8 @@ fn testThreadName(thread: *Thread) !void {
test "setName, getName" {
if (builtin.single_threaded) return error.SkipZigTest;
+ const io = testing.io;
+
const Context = struct {
start_wait_event: ResetEvent = .unset,
test_done_event: ResetEvent = .unset,
@@ -1710,11 +1712,11 @@ test "setName, getName" {
ctx.start_wait_event.wait();
switch (native_os) {
- .windows => testThreadName(&ctx.thread) catch |err| switch (err) {
+ .windows => testThreadName(io, &ctx.thread) catch |err| switch (err) {
error.Unsupported => return error.SkipZigTest,
else => return err,
},
- else => try testThreadName(&ctx.thread),
+ else => try testThreadName(io, &ctx.thread),
}
// Signal our test is done
@@ -1734,14 +1736,14 @@ test "setName, getName" {
switch (native_os) {
.driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => {
- const res = thread.setName("foobar");
+ const res = thread.setName(io, "foobar");
try std.testing.expectError(error.Unsupported, res);
},
- .windows => testThreadName(&thread) catch |err| switch (err) {
+ .windows => testThreadName(io, &thread) catch |err| switch (err) {
error.Unsupported => return error.SkipZigTest,
else => return err,
},
- else => try testThreadName(&thread),
+ else => try testThreadName(io, &thread),
}
context.thread_done_event.set();
diff --git a/lib/std/c.zig b/lib/std/c.zig
index 656bdc35bb..fdd544cba9 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -148,9 +148,10 @@ pub const dev_t = switch (native_os) {
pub const mode_t = switch (native_os) {
.linux => linux.mode_t,
.emscripten => emscripten.mode_t,
- .openbsd, .haiku, .netbsd, .illumos, .wasi, .windows => u32,
+ .openbsd, .haiku, .netbsd, .illumos, .windows => u32,
// https://github.com/SerenityOS/serenity/blob/b98f537f117b341788023ab82e0c11ca9ae29a57/Kernel/API/POSIX/sys/types.h#L44
.freebsd, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .dragonfly, .serenity => u16,
+ .wasi => if (builtin.link_libc) u32 else u0, // WASI libc emulates mode.
else => u0,
};
@@ -160,9 +161,10 @@ pub const nlink_t = switch (native_os) {
.wasi => c_ulonglong,
// https://github.com/SerenityOS/serenity/blob/b98f537f117b341788023ab82e0c11ca9ae29a57/Kernel/API/POSIX/sys/types.h#L45
.freebsd, .serenity => u64,
- .openbsd, .netbsd, .illumos => u32,
+ .openbsd, .netbsd, .dragonfly, .illumos => u32,
.haiku => i32,
- else => void,
+ .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => u16,
+ else => u0,
};
pub const uid_t = switch (native_os) {
@@ -10608,7 +10610,7 @@ pub extern "c" fn munmap(addr: *align(page_size) const anyopaque, len: usize) c_
pub extern "c" fn mremap(addr: ?*align(page_size) const anyopaque, old_len: usize, new_len: usize, flags: MREMAP, ...) *anyopaque;
pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: c_uint) c_int;
pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8) c_int;
-pub extern "c" fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: c_int) c_int;
+pub extern "c" fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: c_uint) c_int;
pub extern "c" fn unlink(path: [*:0]const u8) c_int;
pub extern "c" fn unlinkat(dirfd: fd_t, path: [*:0]const u8, flags: c_uint) c_int;
pub extern "c" fn getcwd(buf: [*]u8, size: usize) ?[*]u8;
@@ -10740,7 +10742,7 @@ pub extern "c" fn free(?*anyopaque) void;
pub extern "c" fn futimes(fd: fd_t, times: ?*[2]timeval) c_int;
pub extern "c" fn utimes(path: [*:0]const u8, times: ?*[2]timeval) c_int;
-pub extern "c" fn utimensat(dirfd: fd_t, pathname: [*:0]const u8, times: ?*[2]timespec, flags: u32) c_int;
+pub extern "c" fn utimensat(dirfd: fd_t, pathname: [*:0]const u8, times: ?*const [2]timespec, flags: u32) c_int;
pub extern "c" fn futimens(fd: fd_t, times: ?*const [2]timespec) c_int;
pub extern "c" fn pthread_create(
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index f0c4f4c278..d4a7dfd5db 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -349,7 +349,7 @@ pub const VM = struct {
pub const exception_type_t = c_int;
pub extern "c" fn NSVersionOfRunTimeLibrary(library_name: [*:0]const u8) u32;
-pub extern "c" fn _NSGetExecutablePath(buf: [*:0]u8, bufsize: *u32) c_int;
+pub extern "c" fn _NSGetExecutablePath(buf: [*]u8, bufsize: *u32) c_int;
pub extern "c" fn _dyld_image_count() u32;
pub extern "c" fn _dyld_get_image_header(image_index: u32) ?*mach_header;
pub extern "c" fn _dyld_get_image_vmaddr_slide(image_index: u32) usize;
diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig
index 45ad812ed1..ad67b23a14 100644
--- a/lib/std/c/freebsd.zig
+++ b/lib/std/c/freebsd.zig
@@ -250,7 +250,7 @@ pub const kinfo_file = extern struct {
/// Reserved for future cap_rights
_cap_spare: u64,
/// Path to file, if any.
- path: [PATH_MAX - 1:0]u8,
+ path: [PATH_MAX]u8,
comptime {
assert(@sizeOf(@This()) == KINFO_FILE_SIZE);
diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig
index 53fb638250..385ef23c9c 100644
--- a/lib/std/crypto/Certificate/Bundle.zig
+++ b/lib/std/crypto/Certificate/Bundle.zig
@@ -9,8 +9,8 @@ const builtin = @import("builtin");
const std = @import("../../std.zig");
const Io = std.Io;
+const Dir = std.Io.Dir;
const assert = std.debug.assert;
-const fs = std.fs;
const mem = std.mem;
const crypto = std.crypto;
const Allocator = std.mem.Allocator;
@@ -171,17 +171,17 @@ fn rescanWindows(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanW
cb.bytes.shrinkAndFree(gpa, cb.bytes.items.len);
}
-pub const AddCertsFromDirPathError = fs.File.OpenError || AddCertsFromDirError;
+pub const AddCertsFromDirPathError = Io.File.OpenError || AddCertsFromDirError;
pub fn addCertsFromDirPath(
cb: *Bundle,
gpa: Allocator,
io: Io,
- dir: fs.Dir,
+ dir: Io.Dir,
sub_dir_path: []const u8,
) AddCertsFromDirPathError!void {
- var iterable_dir = try dir.openDir(sub_dir_path, .{ .iterate = true });
- defer iterable_dir.close();
+ var iterable_dir = try dir.openDir(io, sub_dir_path, .{ .iterate = true });
+ defer iterable_dir.close(io);
return addCertsFromDir(cb, gpa, io, iterable_dir);
}
@@ -192,27 +192,27 @@ pub fn addCertsFromDirPathAbsolute(
now: Io.Timestamp,
abs_dir_path: []const u8,
) AddCertsFromDirPathError!void {
- assert(fs.path.isAbsolute(abs_dir_path));
- var iterable_dir = try fs.openDirAbsolute(abs_dir_path, .{ .iterate = true });
- defer iterable_dir.close();
+ assert(Dir.path.isAbsolute(abs_dir_path));
+ var iterable_dir = try Dir.openDirAbsolute(io, abs_dir_path, .{ .iterate = true });
+ defer iterable_dir.close(io);
return addCertsFromDir(cb, gpa, io, now, iterable_dir);
}
pub const AddCertsFromDirError = AddCertsFromFilePathError;
-pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp, iterable_dir: fs.Dir) AddCertsFromDirError!void {
+pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp, iterable_dir: Io.Dir) AddCertsFromDirError!void {
var it = iterable_dir.iterate();
- while (try it.next()) |entry| {
+ while (try it.next(io)) |entry| {
switch (entry.kind) {
.file, .sym_link => {},
else => continue,
}
- try addCertsFromFilePath(cb, gpa, io, now, iterable_dir.adaptToNewApi(), entry.name);
+ try addCertsFromFilePath(cb, gpa, io, now, iterable_dir, entry.name);
}
}
-pub const AddCertsFromFilePathError = fs.File.OpenError || AddCertsFromFileError || Io.Clock.Error;
+pub const AddCertsFromFilePathError = Io.File.OpenError || AddCertsFromFileError || Io.Clock.Error;
pub fn addCertsFromFilePathAbsolute(
cb: *Bundle,
@@ -221,8 +221,8 @@ pub fn addCertsFromFilePathAbsolute(
now: Io.Timestamp,
abs_file_path: []const u8,
) AddCertsFromFilePathError!void {
- var file = try fs.openFileAbsolute(abs_file_path, .{});
- defer file.close();
+ var file = try Io.Dir.openFileAbsolute(io, abs_file_path, .{});
+ defer file.close(io);
var file_reader = file.reader(io, &.{});
return addCertsFromFile(cb, gpa, &file_reader, now.toSeconds());
}
@@ -242,8 +242,8 @@ pub fn addCertsFromFilePath(
}
pub const AddCertsFromFileError = Allocator.Error ||
- fs.File.GetSeekPosError ||
- fs.File.ReadError ||
+ Io.File.Reader.Error ||
+ Io.File.Reader.SizeError ||
ParseCertError ||
std.base64.Error ||
error{ CertificateAuthorityBundleTooBig, MissingEndCertificateMarker, Streaming };
diff --git a/lib/std/crypto/Certificate/Bundle/macos.zig b/lib/std/crypto/Certificate/Bundle/macos.zig
index d32f1be8e0..ea8a91702d 100644
--- a/lib/std/crypto/Certificate/Bundle/macos.zig
+++ b/lib/std/crypto/Certificate/Bundle/macos.zig
@@ -6,7 +6,7 @@ const mem = std.mem;
const Allocator = std.mem.Allocator;
const Bundle = @import("../Bundle.zig");
-pub const RescanMacError = Allocator.Error || fs.File.OpenError || fs.File.ReadError || fs.File.SeekError || Bundle.ParseCertError || error{EndOfStream};
+pub const RescanMacError = Allocator.Error || Io.File.OpenError || Io.File.Reader.Error || Io.File.SeekError || Bundle.ParseCertError || error{EndOfStream};
pub fn rescanMac(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanMacError!void {
cb.bytes.clearRetainingCapacity();
@@ -17,9 +17,8 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanM
"/Library/Keychains/System.keychain",
};
- _ = io; // TODO migrate file system to use std.Io
for (keychain_paths) |keychain_path| {
- const bytes = std.fs.cwd().readFileAlloc(keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) {
+ const bytes = Io.Dir.cwd().readFileAlloc(io, keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) {
error.StreamTooLong => return error.FileTooBig,
else => |e| return e,
};
diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig
index 54024f070e..23154324bc 100644
--- a/lib/std/crypto/benchmark.zig
+++ b/lib/std/crypto/benchmark.zig
@@ -1,10 +1,12 @@
// zig run -O ReleaseFast --zig-lib-dir ../.. benchmark.zig
-const std = @import("std");
const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
const time = std.time;
-const Timer = time.Timer;
+const Timer = std.time.Timer;
const crypto = std.crypto;
const KiB = 1024;
@@ -504,7 +506,7 @@ fn mode(comptime x: comptime_int) comptime_int {
pub fn main() !void {
// Size of buffer is about size of printed message.
var stdout_buffer: [0x100]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(&stdout_buffer);
const stdout = &stdout_writer.interface;
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
@@ -554,7 +556,7 @@ pub fn main() !void {
}
}
- var io_threaded = std.Io.Threaded.init(arena_allocator);
+ var io_threaded = std.Io.Threaded.init(arena_allocator, .{});
defer io_threaded.deinit();
const io = io_threaded.io();
diff --git a/lib/std/crypto/codecs/asn1/test.zig b/lib/std/crypto/codecs/asn1/test.zig
index fe12cba819..bdfb47e2df 100644
--- a/lib/std/crypto/codecs/asn1/test.zig
+++ b/lib/std/crypto/codecs/asn1/test.zig
@@ -73,8 +73,8 @@ test AllTypes {
try std.testing.expectEqualSlices(u8, encoded, buf);
// Use this to update test file.
- // const dir = try std.fs.cwd().openDir("lib/std/crypto/asn1", .{});
- // var file = try dir.createFile(path, .{});
- // defer file.close();
+ // const dir = try Io.Dir.cwd().openDir(io, "lib/std/crypto/asn1", .{});
+ // var file = try dir.createFile(io, path, .{});
+ // defer file.close(io);
// try file.writeAll(buf);
}
diff --git a/lib/std/crypto/tls.zig b/lib/std/crypto/tls.zig
index c24283469c..252defcb2d 100644
--- a/lib/std/crypto/tls.zig
+++ b/lib/std/crypto/tls.zig
@@ -32,7 +32,6 @@
const std = @import("../std.zig");
const Tls = @This();
-const net = std.net;
const mem = std.mem;
const crypto = std.crypto;
const assert = std.debug.assert;
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index feea5f9a41..d000bda62e 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -1,14 +1,13 @@
const std = @import("std.zig");
const Io = std.Io;
const Writer = std.Io.Writer;
-const tty = std.Io.tty;
const math = std.math;
const mem = std.mem;
const posix = std.posix;
const fs = std.fs;
const testing = std.testing;
const Allocator = mem.Allocator;
-const File = std.fs.File;
+const File = std.Io.File;
const windows = std.os.windows;
const builtin = @import("builtin");
@@ -60,7 +59,7 @@ pub const cpu_context = @import("debug/cpu_context.zig");
/// };
/// /// Only required if `can_unwind == true`. Unwinds a single stack frame, returning the frame's
/// /// return address, or 0 if the end of the stack has been reached.
-/// pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) SelfInfoError!usize;
+/// pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, io: Io, context: *UnwindContext) SelfInfoError!usize;
/// ```
pub const SelfInfo = if (@hasDecl(root, "debug") and @hasDecl(root.debug, "SelfInfo"))
root.debug.SelfInfo
@@ -262,53 +261,54 @@ pub const sys_can_stack_trace = switch (builtin.cpu.arch) {
else => true,
};
-/// Allows the caller to freely write to stderr until `unlockStdErr` is called.
+/// Allows the caller to freely write to stderr until `unlockStderr` is called.
///
/// During the lock, any `std.Progress` information is cleared from the terminal.
-pub fn lockStdErr() void {
- std.Progress.lockStdErr();
-}
-
-pub fn unlockStdErr() void {
- std.Progress.unlockStdErr();
-}
-
-/// Allows the caller to freely write to stderr until `unlockStderrWriter` is called.
///
-/// During the lock, any `std.Progress` information is cleared from the terminal.
+/// The lock is recursive, so it is valid for the same thread to call
+/// `lockStderr` multiple times, allowing the panic handler to safely
+/// dump the stack trace and panic message even if the mutex was held at the
+/// panic site.
///
-/// The lock is recursive, so it is valid for the same thread to call `lockStderrWriter` multiple
-/// times. The primary motivation is that this allows the panic handler to safely dump the stack
-/// trace and panic message even if the mutex was held at the panic site.
+/// The returned `Writer` does not need to be manually flushed: flushing is
+/// performed automatically when the matching `unlockStderr` call occurs.
///
-/// The returned `Writer` does not need to be manually flushed: flushing is performed automatically
-/// when the matching `unlockStderrWriter` call occurs.
-pub fn lockStderrWriter(buffer: []u8) struct { *Writer, tty.Config } {
- const global = struct {
- var conf: ?tty.Config = null;
+/// This is a low-level debugging primitive that bypasses the `Io` interface,
+/// writing directly to stderr using the most basic syscalls available. This
+/// function does not switch threads, switch stacks, or suspend.
+///
+/// Alternatively, use the higher-level `Io.lockStderr` to integrate with the
+/// application's chosen `Io` implementation.
+pub fn lockStderr(buffer: []u8) Io.LockedStderr {
+ const io = std.Options.debug_io;
+ const prev = io.swapCancelProtection(.blocked);
+ defer _ = io.swapCancelProtection(prev);
+ return io.lockStderr(buffer, null) catch |err| switch (err) {
+ error.Canceled => unreachable, // Cancel protection enabled above.
};
- const w = std.Progress.lockStderrWriter(buffer);
- // The stderr lock also locks access to `global.conf`.
- if (global.conf == null) {
- global.conf = .detect(.stderr());
- }
- return .{ w, global.conf.? };
}
-pub fn unlockStderrWriter() void {
- std.Progress.unlockStderrWriter();
+pub fn unlockStderr() void {
+ const io = std.Options.debug_io;
+ io.unlockStderr();
}
-/// Print to stderr, silently returning on failure. Intended for use in "printf
-/// debugging". Use `std.log` functions for proper logging.
+/// Writes to stderr, ignoring errors.
+///
+/// This is a low-level debugging primitive that bypasses the `Io` interface,
+/// writing directly to stderr using the most basic syscalls available. This
+/// function does not switch threads, switch stacks, or suspend.
///
/// Uses a 64-byte buffer for formatted printing which is flushed before this
/// function returns.
+///
+/// Alternatively, use the higher-level `std.log` or `Io.lockStderr` to
+/// integrate with the application's chosen `Io` implementation.
pub fn print(comptime fmt: []const u8, args: anytype) void {
var buffer: [64]u8 = undefined;
- const bw, _ = lockStderrWriter(&buffer);
- defer unlockStderrWriter();
- nosuspend bw.print(fmt, args) catch return;
+ const stderr = lockStderr(&buffer);
+ defer unlockStderr();
+ stderr.file_writer.interface.print(fmt, args) catch return;
}
/// Marked `inline` to propagate a comptime-known error to callers.
@@ -323,43 +323,44 @@ pub inline fn getSelfDebugInfo() !*SelfInfo {
/// Tries to print a hexadecimal view of the bytes, unbuffered, and ignores any error returned.
/// Obtains the stderr mutex while dumping.
pub fn dumpHex(bytes: []const u8) void {
- const bw, const ttyconf = lockStderrWriter(&.{});
- defer unlockStderrWriter();
- dumpHexFallible(bw, ttyconf, bytes) catch {};
+ const stderr = lockStderr(&.{}).terminal();
+ defer unlockStderr();
+ dumpHexFallible(stderr, bytes) catch {};
}
/// Prints a hexadecimal view of the bytes, returning any error that occurs.
-pub fn dumpHexFallible(bw: *Writer, tty_config: tty.Config, bytes: []const u8) !void {
+pub fn dumpHexFallible(t: Io.Terminal, bytes: []const u8) !void {
+ const w = t.writer;
var chunks = mem.window(u8, bytes, 16, 16);
while (chunks.next()) |window| {
// 1. Print the address.
const address = (@intFromPtr(bytes.ptr) + 0x10 * (std.math.divCeil(usize, chunks.index orelse bytes.len, 16) catch unreachable)) - 0x10;
- try tty_config.setColor(bw, .dim);
+ try t.setColor(.dim);
// We print the address in lowercase and the bytes in uppercase hexadecimal to distinguish them more.
// Also, make sure all lines are aligned by padding the address.
- try bw.print("{x:0>[1]} ", .{ address, @sizeOf(usize) * 2 });
- try tty_config.setColor(bw, .reset);
+ try w.print("{x:0>[1]} ", .{ address, @sizeOf(usize) * 2 });
+ try t.setColor(.reset);
// 2. Print the bytes.
for (window, 0..) |byte, index| {
- try bw.print("{X:0>2} ", .{byte});
- if (index == 7) try bw.writeByte(' ');
+ try w.print("{X:0>2} ", .{byte});
+ if (index == 7) try w.writeByte(' ');
}
- try bw.writeByte(' ');
+ try w.writeByte(' ');
if (window.len < 16) {
var missing_columns = (16 - window.len) * 3;
if (window.len < 8) missing_columns += 1;
- try bw.splatByteAll(' ', missing_columns);
+ try w.splatByteAll(' ', missing_columns);
}
// 3. Print the characters.
for (window) |byte| {
if (std.ascii.isPrint(byte)) {
- try bw.writeByte(byte);
+ try w.writeByte(byte);
} else {
// Related: https://github.com/ziglang/zig/issues/7600
- if (tty_config == .windows_api) {
- try bw.writeByte('.');
+ if (t.mode == .windows_api) {
+ try w.writeByte('.');
continue;
}
@@ -367,24 +368,25 @@ pub fn dumpHexFallible(bw: *Writer, tty_config: tty.Config, bytes: []const u8) !
// We don't want to do this for all control codes because most control codes apart from
// the ones that Zig has escape sequences for are likely not very useful to print as symbols.
switch (byte) {
- '\n' => try bw.writeAll("␊"),
- '\r' => try bw.writeAll("␍"),
- '\t' => try bw.writeAll("␉"),
- else => try bw.writeByte('.'),
+ '\n' => try w.writeAll("␊"),
+ '\r' => try w.writeAll("␍"),
+ '\t' => try w.writeAll("␉"),
+ else => try w.writeByte('.'),
}
}
}
- try bw.writeByte('\n');
+ try w.writeByte('\n');
}
}
test dumpHexFallible {
+ const gpa = testing.allocator;
const bytes: []const u8 = &.{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x01, 0x12, 0x13 };
- var aw: Writer.Allocating = .init(std.testing.allocator);
+ var aw: Writer.Allocating = .init(gpa);
defer aw.deinit();
- try dumpHexFallible(&aw.writer, .no_color, bytes);
- const expected = try std.fmt.allocPrint(std.testing.allocator,
+ try dumpHexFallible(.{ .writer = &aw.writer, .mode = .no_color }, bytes);
+ const expected = try std.fmt.allocPrint(gpa,
\\{x:0>[2]} 00 11 22 33 44 55 66 77 88 99 AA BB CC DD EE FF .."3DUfw........
\\{x:0>[2]} 01 12 13 ...
\\
@@ -393,8 +395,8 @@ test dumpHexFallible {
@intFromPtr(bytes.ptr) + 16,
@sizeOf(usize) * 2,
});
- defer std.testing.allocator.free(expected);
- try std.testing.expectEqualStrings(expected, aw.written());
+ defer gpa.free(expected);
+ try testing.expectEqualStrings(expected, aw.written());
}
/// The pointer through which a `cpu_context.Native` is received from callers of stack tracing logic.
@@ -409,7 +411,7 @@ pub const CpuContextPtr = if (cpu_context.Native == noreturn) noreturn else *con
/// away, and in fact the optimizer is able to use the assertion in its
/// heuristics.
///
-/// Inside a test block, it is best to use the `std.testing` module rather than
+/// Inside a test block, it is best to use the `testing` module rather than
/// this function, because this function may not detect a test failure in
/// ReleaseFast and ReleaseSmall mode. Outside of a test block, this assert
/// function is the correct function to use.
@@ -483,10 +485,7 @@ const use_trap_panic = switch (builtin.zig_backend) {
};
/// Dumps a stack trace to standard error, then aborts.
-pub fn defaultPanic(
- msg: []const u8,
- first_trace_addr: ?usize,
-) noreturn {
+pub fn defaultPanic(msg: []const u8, first_trace_addr: ?usize) noreturn {
@branchHint(.cold);
if (use_trap_panic) @trap();
@@ -522,7 +521,7 @@ pub fn defaultPanic(
}
@trap();
},
- .cuda, .amdhsa => std.posix.abort(),
+ .cuda, .amdhsa => std.process.abort(),
.plan9 => {
var status: [std.os.plan9.ERRMAX]u8 = undefined;
const len = @min(msg.len, status.len - 1);
@@ -546,26 +545,27 @@ pub fn defaultPanic(
_ = panicking.fetchAdd(1, .seq_cst);
trace: {
- const stderr, const tty_config = lockStderrWriter(&.{});
- defer unlockStderrWriter();
+ const stderr = lockStderr(&.{}).terminal();
+ defer unlockStderr();
+ const writer = stderr.writer;
if (builtin.single_threaded) {
- stderr.print("panic: ", .{}) catch break :trace;
+ writer.print("panic: ", .{}) catch break :trace;
} else {
const current_thread_id = std.Thread.getCurrentId();
- stderr.print("thread {d} panic: ", .{current_thread_id}) catch break :trace;
+ writer.print("thread {d} panic: ", .{current_thread_id}) catch break :trace;
}
- stderr.print("{s}\n", .{msg}) catch break :trace;
+ writer.print("{s}\n", .{msg}) catch break :trace;
if (@errorReturnTrace()) |t| if (t.index > 0) {
- stderr.writeAll("error return context:\n") catch break :trace;
- writeStackTrace(t, stderr, tty_config) catch break :trace;
- stderr.writeAll("\nstack trace:\n") catch break :trace;
+ writer.writeAll("error return context:\n") catch break :trace;
+ writeStackTrace(t, stderr) catch break :trace;
+ writer.writeAll("\nstack trace:\n") catch break :trace;
};
writeCurrentStackTrace(.{
.first_address = first_trace_addr orelse @returnAddress(),
.allow_unsafe_unwind = true, // we're crashing anyway, give it our all!
- }, stderr, tty_config) catch break :trace;
+ }, stderr) catch break :trace;
}
waitForOtherThreadToFinishPanicking();
@@ -575,12 +575,13 @@ pub fn defaultPanic(
// A panic happened while trying to print a previous panic message.
// We're still holding the mutex but that's fine as we're going to
// call abort().
- fs.File.stderr().writeAll("aborting due to recursive panic\n") catch {};
+ const stderr = lockStderr(&.{}).terminal();
+ stderr.writer.writeAll("aborting due to recursive panic\n") catch {};
},
else => {}, // Panicked while printing the recursive panic message.
}
- posix.abort();
+ std.process.abort();
}
/// Must be called only after adding 1 to `panicking`. There are three callsites.
@@ -621,13 +622,16 @@ pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf:
var it: StackIterator = .init(options.context);
defer it.deinit();
if (!it.stratOk(options.allow_unsafe_unwind)) return empty_trace;
+
+ const io = std.Options.debug_io;
+
var total_frames: usize = 0;
var index: usize = 0;
var wait_for = options.first_address;
// Ideally, we would iterate the whole stack so that the `index` in the returned trace was
// indicative of how many frames were skipped. However, this has a significant runtime cost
// in some cases, so at least for now, we don't do that.
- while (index < addr_buf.len) switch (it.next()) {
+ while (index < addr_buf.len) switch (it.next(io)) {
.switch_to_fp => if (!it.stratOk(options.allow_unsafe_unwind)) break,
.end => break,
.frame => |ret_addr| {
@@ -653,37 +657,36 @@ pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf:
/// Write the current stack trace to `writer`, annotated with source locations.
///
/// See `captureCurrentStackTrace` to capture the trace addresses into a buffer instead of printing.
-pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Writer, tty_config: tty.Config) Writer.Error!void {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
-
+pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, t: Io.Terminal) Writer.Error!void {
+ const writer = t.writer;
if (!std.options.allow_stack_tracing) {
- tty_config.setColor(writer, .dim) catch {};
+ t.setColor(.dim) catch {};
try writer.print("Cannot print stack trace: stack tracing is disabled\n", .{});
- tty_config.setColor(writer, .reset) catch {};
+ t.setColor(.reset) catch {};
return;
}
const di_gpa = getDebugInfoAllocator();
const di = getSelfDebugInfo() catch |err| switch (err) {
error.UnsupportedTarget => {
- tty_config.setColor(writer, .dim) catch {};
+ t.setColor(.dim) catch {};
try writer.print("Cannot print stack trace: debug info unavailable for target\n", .{});
- tty_config.setColor(writer, .reset) catch {};
+ t.setColor(.reset) catch {};
return;
},
};
var it: StackIterator = .init(options.context);
defer it.deinit();
if (!it.stratOk(options.allow_unsafe_unwind)) {
- tty_config.setColor(writer, .dim) catch {};
+ t.setColor(.dim) catch {};
try writer.print("Cannot print stack trace: safe unwind unavailable for target\n", .{});
- tty_config.setColor(writer, .reset) catch {};
+ t.setColor(.reset) catch {};
return;
}
var total_frames: usize = 0;
var wait_for = options.first_address;
var printed_any_frame = false;
- while (true) switch (it.next()) {
+ const io = std.Options.debug_io;
+ while (true) switch (it.next(io)) {
.switch_to_fp => |unwind_error| {
switch (StackIterator.fp_usability) {
.useless, .unsafe => {},
@@ -700,31 +703,31 @@ pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Wri
error.Unexpected => "unexpected error",
};
if (it.stratOk(options.allow_unsafe_unwind)) {
- tty_config.setColor(writer, .dim) catch {};
+ t.setColor(.dim) catch {};
try writer.print(
"Unwind error at address `{s}:0x{x}` ({s}), remaining frames may be incorrect\n",
.{ module_name, unwind_error.address, caption },
);
- tty_config.setColor(writer, .reset) catch {};
+ t.setColor(.reset) catch {};
} else {
- tty_config.setColor(writer, .dim) catch {};
+ t.setColor(.dim) catch {};
try writer.print(
"Unwind error at address `{s}:0x{x}` ({s}), stopping trace early\n",
.{ module_name, unwind_error.address, caption },
);
- tty_config.setColor(writer, .reset) catch {};
+ t.setColor(.reset) catch {};
return;
}
},
.end => break,
.frame => |ret_addr| {
if (total_frames > 10_000) {
- tty_config.setColor(writer, .dim) catch {};
+ t.setColor(.dim) catch {};
try writer.print(
"Stopping trace after {d} frames (large frame count may indicate broken debug info)\n",
.{total_frames},
);
- tty_config.setColor(writer, .reset) catch {};
+ t.setColor(.reset) catch {};
return;
}
total_frames += 1;
@@ -734,7 +737,7 @@ pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Wri
}
// `ret_addr` is the return address, which is *after* the function call.
// Subtract 1 to get an address *in* the function call for a better source location.
- try printSourceAtAddress(di_gpa, io, di, writer, ret_addr -| StackIterator.ra_call_offset, tty_config);
+ try printSourceAtAddress(di_gpa, io, di, t, ret_addr -| StackIterator.ra_call_offset);
printed_any_frame = true;
},
};
@@ -742,8 +745,8 @@ pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Wri
}
/// A thin wrapper around `writeCurrentStackTrace` which writes to stderr and ignores write errors.
pub fn dumpCurrentStackTrace(options: StackUnwindOptions) void {
- const stderr, const tty_config = lockStderrWriter(&.{});
- defer unlockStderrWriter();
+ const stderr = lockStderr(&.{}).terminal();
+ defer unlockStderr();
writeCurrentStackTrace(.{
.first_address = a: {
if (options.first_address) |a| break :a a;
@@ -752,33 +755,30 @@ pub fn dumpCurrentStackTrace(options: StackUnwindOptions) void {
},
.context = options.context,
.allow_unsafe_unwind = options.allow_unsafe_unwind,
- }, stderr, tty_config) catch |err| switch (err) {
+ }, stderr) catch |err| switch (err) {
error.WriteFailed => {},
};
}
pub const FormatStackTrace = struct {
stack_trace: StackTrace,
- tty_config: tty.Config,
+ terminal_mode: Io.Terminal.Mode = .no_color,
- pub fn format(context: @This(), writer: *Io.Writer) Io.Writer.Error!void {
- try writer.writeAll("\n");
- try writeStackTrace(&context.stack_trace, writer, context.tty_config);
+ pub fn format(fst: FormatStackTrace, writer: *Writer) Writer.Error!void {
+ try writer.writeByte('\n');
+ try writeStackTrace(&fst.stack_trace, .{ .writer = writer, .mode = fst.terminal_mode });
}
};
/// Write a previously captured stack trace to `writer`, annotated with source locations.
-pub fn writeStackTrace(st: *const StackTrace, writer: *Writer, tty_config: tty.Config) Writer.Error!void {
+pub fn writeStackTrace(st: *const StackTrace, t: Io.Terminal) Writer.Error!void {
+ const writer = t.writer;
if (!std.options.allow_stack_tracing) {
- tty_config.setColor(writer, .dim) catch {};
+ t.setColor(.dim) catch {};
try writer.print("Cannot print stack trace: stack tracing is disabled\n", .{});
- tty_config.setColor(writer, .reset) catch {};
+ t.setColor(.reset) catch {};
return;
}
- // We use an independent Io implementation here in case there was a problem
- // with the application's Io implementation itself.
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
// Fetch `st.index` straight away. Aside from avoiding redundant loads, this prevents issues if
// `st` is `@errorReturnTrace()` and errors are encountered while writing the stack trace.
@@ -787,29 +787,30 @@ pub fn writeStackTrace(st: *const StackTrace, writer: *Writer, tty_config: tty.C
const di_gpa = getDebugInfoAllocator();
const di = getSelfDebugInfo() catch |err| switch (err) {
error.UnsupportedTarget => {
- tty_config.setColor(writer, .dim) catch {};
+ t.setColor(.dim) catch {};
try writer.print("Cannot print stack trace: debug info unavailable for target\n\n", .{});
- tty_config.setColor(writer, .reset) catch {};
+ t.setColor(.reset) catch {};
return;
},
};
+ const io = std.Options.debug_io;
const captured_frames = @min(n_frames, st.instruction_addresses.len);
for (st.instruction_addresses[0..captured_frames]) |ret_addr| {
// `ret_addr` is the return address, which is *after* the function call.
// Subtract 1 to get an address *in* the function call for a better source location.
- try printSourceAtAddress(di_gpa, io, di, writer, ret_addr -| StackIterator.ra_call_offset, tty_config);
+ try printSourceAtAddress(di_gpa, io, di, t, ret_addr -| StackIterator.ra_call_offset);
}
if (n_frames > captured_frames) {
- tty_config.setColor(writer, .bold) catch {};
+ t.setColor(.bold) catch {};
try writer.print("({d} additional stack frames skipped...)\n", .{n_frames - captured_frames});
- tty_config.setColor(writer, .reset) catch {};
+ t.setColor(.reset) catch {};
}
}
/// A thin wrapper around `writeStackTrace` which writes to stderr and ignores write errors.
pub fn dumpStackTrace(st: *const StackTrace) void {
- const stderr, const tty_config = lockStderrWriter(&.{});
- defer unlockStderrWriter();
- writeStackTrace(st, stderr, tty_config) catch |err| switch (err) {
+ const stderr = lockStderr(&.{}).terminal();
+ defer unlockStderr();
+ writeStackTrace(st, stderr) catch |err| switch (err) {
error.WriteFailed => {},
};
}
@@ -960,7 +961,7 @@ const StackIterator = union(enum) {
},
};
- fn next(it: *StackIterator) Result {
+ fn next(it: *StackIterator, io: Io) Result {
switch (it.*) {
.ctx_first => |context_ptr| {
// After the first frame, start actually unwinding.
@@ -976,7 +977,7 @@ const StackIterator = union(enum) {
.di => |*unwind_context| {
const di = getSelfDebugInfo() catch unreachable;
const di_gpa = getDebugInfoAllocator();
- const ret_addr = di.unwindFrame(di_gpa, unwind_context) catch |err| {
+ const ret_addr = di.unwindFrame(di_gpa, io, unwind_context) catch |err| {
const pc = unwind_context.pc;
const fp = unwind_context.getFp();
it.* = .{ .fp = fp };
@@ -1104,170 +1105,146 @@ pub inline fn stripInstructionPtrAuthCode(ptr: usize) usize {
return ptr;
}
-fn printSourceAtAddress(gpa: Allocator, io: Io, debug_info: *SelfInfo, writer: *Writer, address: usize, tty_config: tty.Config) Writer.Error!void {
+fn printSourceAtAddress(
+ gpa: Allocator,
+ io: Io,
+ debug_info: *SelfInfo,
+ t: Io.Terminal,
+ address: usize,
+) Writer.Error!void {
const symbol: Symbol = debug_info.getSymbol(gpa, io, address) catch |err| switch (err) {
error.MissingDebugInfo,
error.UnsupportedDebugInfo,
error.InvalidDebugInfo,
=> .unknown,
error.ReadFailed, error.Unexpected, error.Canceled => s: {
- tty_config.setColor(writer, .dim) catch {};
- try writer.print("Failed to read debug info from filesystem, trace may be incomplete\n\n", .{});
- tty_config.setColor(writer, .reset) catch {};
+ t.setColor(.dim) catch {};
+ try t.writer.print("Failed to read debug info from filesystem, trace may be incomplete\n\n", .{});
+ t.setColor(.reset) catch {};
break :s .unknown;
},
error.OutOfMemory => s: {
- tty_config.setColor(writer, .dim) catch {};
- try writer.print("Ran out of memory loading debug info, trace may be incomplete\n\n", .{});
- tty_config.setColor(writer, .reset) catch {};
+ t.setColor(.dim) catch {};
+ try t.writer.print("Ran out of memory loading debug info, trace may be incomplete\n\n", .{});
+ t.setColor(.reset) catch {};
break :s .unknown;
},
};
defer if (symbol.source_location) |sl| gpa.free(sl.file_name);
return printLineInfo(
- writer,
+ io,
+ t,
symbol.source_location,
address,
symbol.name orelse "???",
symbol.compile_unit_name orelse debug_info.getModuleName(gpa, address) catch "???",
- tty_config,
);
}
fn printLineInfo(
- writer: *Writer,
+ io: Io,
+ t: Io.Terminal,
source_location: ?SourceLocation,
address: usize,
symbol_name: []const u8,
compile_unit_name: []const u8,
- tty_config: tty.Config,
) Writer.Error!void {
- nosuspend {
- tty_config.setColor(writer, .bold) catch {};
+ const writer = t.writer;
+ t.setColor(.bold) catch {};
- if (source_location) |*sl| {
- try writer.print("{s}:{d}:{d}", .{ sl.file_name, sl.line, sl.column });
- } else {
- try writer.writeAll("???:?:?");
- }
+ if (source_location) |*sl| {
+ try writer.print("{s}:{d}:{d}", .{ sl.file_name, sl.line, sl.column });
+ } else {
+ try writer.writeAll("???:?:?");
+ }
- tty_config.setColor(writer, .reset) catch {};
- try writer.writeAll(": ");
- tty_config.setColor(writer, .dim) catch {};
- try writer.print("0x{x} in {s} ({s})", .{ address, symbol_name, compile_unit_name });
- tty_config.setColor(writer, .reset) catch {};
- try writer.writeAll("\n");
-
- // Show the matching source code line if possible
- if (source_location) |sl| {
- if (printLineFromFile(writer, sl)) {
- if (sl.column > 0) {
- // The caret already takes one char
- const space_needed = @as(usize, @intCast(sl.column - 1));
-
- try writer.splatByteAll(' ', space_needed);
- tty_config.setColor(writer, .green) catch {};
- try writer.writeAll("^");
- tty_config.setColor(writer, .reset) catch {};
- }
- try writer.writeAll("\n");
- } else |_| {
- // Ignore all errors; it's a better UX to just print the source location without the
- // corresponding line number. The user can always open the source file themselves.
+ t.setColor(.reset) catch {};
+ try writer.writeAll(": ");
+ t.setColor(.dim) catch {};
+ try writer.print("0x{x} in {s} ({s})", .{ address, symbol_name, compile_unit_name });
+ t.setColor(.reset) catch {};
+ try writer.writeAll("\n");
+
+ // Show the matching source code line if possible
+ if (source_location) |sl| {
+ if (printLineFromFile(io, writer, sl)) {
+ if (sl.column > 0) {
+ // The caret already takes one char
+ const space_needed = @as(usize, @intCast(sl.column - 1));
+
+ try writer.splatByteAll(' ', space_needed);
+ t.setColor(.green) catch {};
+ try writer.writeAll("^");
+ t.setColor(.reset) catch {};
}
+ try writer.writeAll("\n");
+ } else |_| {
+ // Ignore all errors; it's a better UX to just print the source location without the
+ // corresponding line number. The user can always open the source file themselves.
}
}
}
-fn printLineFromFile(writer: *Writer, source_location: SourceLocation) !void {
+fn printLineFromFile(io: Io, writer: *Writer, source_location: SourceLocation) !void {
// Allow overriding the target-agnostic source line printing logic by exposing `root.debug.printLineFromFile`.
if (@hasDecl(root, "debug") and @hasDecl(root.debug, "printLineFromFile")) {
- return root.debug.printLineFromFile(writer, source_location);
+ return root.debug.printLineFromFile(io, writer, source_location);
}
// Need this to always block even in async I/O mode, because this could potentially
// be called from e.g. the event loop code crashing.
- var f = try fs.cwd().openFile(source_location.file_name, .{});
- defer f.close();
- // TODO fstat and make sure that the file has the correct size
-
- var buf: [4096]u8 = undefined;
- var amt_read = try f.read(buf[0..]);
- const line_start = seek: {
- var current_line_start: usize = 0;
- var next_line: usize = 1;
- while (next_line != source_location.line) {
- const slice = buf[current_line_start..amt_read];
- if (mem.findScalar(u8, slice, '\n')) |pos| {
- next_line += 1;
- if (pos == slice.len - 1) {
- amt_read = try f.read(buf[0..]);
- current_line_start = 0;
- } else current_line_start += pos + 1;
- } else if (amt_read < buf.len) {
- return error.EndOfFile;
- } else {
- amt_read = try f.read(buf[0..]);
- current_line_start = 0;
- }
- }
- break :seek current_line_start;
- };
- const slice = buf[line_start..amt_read];
- if (mem.findScalar(u8, slice, '\n')) |pos| {
- const line = slice[0 .. pos + 1];
- mem.replaceScalar(u8, line, '\t', ' ');
- return writer.writeAll(line);
- } else { // Line is the last inside the buffer, and requires another read to find delimiter. Alternatively the file ends.
- mem.replaceScalar(u8, slice, '\t', ' ');
- try writer.writeAll(slice);
- while (amt_read == buf.len) {
- amt_read = try f.read(buf[0..]);
- if (mem.findScalar(u8, buf[0..amt_read], '\n')) |pos| {
- const line = buf[0 .. pos + 1];
- mem.replaceScalar(u8, line, '\t', ' ');
- return writer.writeAll(line);
- } else {
- const line = buf[0..amt_read];
- mem.replaceScalar(u8, line, '\t', ' ');
- try writer.writeAll(line);
- }
+ const cwd: Io.Dir = .cwd();
+ var file = try cwd.openFile(io, source_location.file_name, .{});
+ defer file.close(io);
+
+ var buffer: [4096]u8 = undefined;
+ var file_reader: File.Reader = .init(file, io, &buffer);
+ var line_index: usize = 0;
+ const r = &file_reader.interface;
+ while (true) {
+ line_index += 1;
+ if (line_index == source_location.line) {
+ // TODO delete hard tabs from the language
+ _ = try r.streamDelimiterEnding(writer, '\n');
+ try writer.writeByte('\n');
+ return;
}
- // Make sure printing last line of file inserts extra newline
- try writer.writeByte('\n');
+ _ = try r.discardDelimiterInclusive('\n');
}
}
test printLineFromFile {
- var aw: Writer.Allocating = .init(std.testing.allocator);
+ const io = testing.io;
+ const gpa = testing.allocator;
+
+ var aw: Writer.Allocating = .init(gpa);
defer aw.deinit();
const output_stream = &aw.writer;
- const allocator = std.testing.allocator;
const join = std.fs.path.join;
- const expectError = std.testing.expectError;
- const expectEqualStrings = std.testing.expectEqualStrings;
+ const expectError = testing.expectError;
+ const expectEqualStrings = testing.expectEqualStrings;
- var test_dir = std.testing.tmpDir(.{});
+ var test_dir = testing.tmpDir(.{});
defer test_dir.cleanup();
// Relies on testing.tmpDir internals which is not ideal, but SourceLocation requires paths.
- const test_dir_path = try join(allocator, &.{ ".zig-cache", "tmp", test_dir.sub_path[0..] });
- defer allocator.free(test_dir_path);
+ const test_dir_path = try join(gpa, &.{ ".zig-cache", "tmp", test_dir.sub_path[0..] });
+ defer gpa.free(test_dir_path);
// Cases
{
- const path = try join(allocator, &.{ test_dir_path, "one_line.zig" });
- defer allocator.free(path);
- try test_dir.dir.writeFile(.{ .sub_path = "one_line.zig", .data = "no new lines in this file, but one is printed anyway" });
+ const path = try join(gpa, &.{ test_dir_path, "one_line.zig" });
+ defer gpa.free(path);
+ try test_dir.dir.writeFile(io, .{ .sub_path = "one_line.zig", .data = "no new lines in this file, but one is printed anyway" });
- try expectError(error.EndOfFile, printLineFromFile(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));
+ try expectError(error.EndOfStream, printLineFromFile(io, output_stream, .{ .file_name = path, .line = 2, .column = 0 }));
- try printLineFromFile(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
+ try printLineFromFile(io, output_stream, .{ .file_name = path, .line = 1, .column = 0 });
try expectEqualStrings("no new lines in this file, but one is printed anyway\n", aw.written());
aw.clearRetainingCapacity();
}
{
- const path = try fs.path.join(allocator, &.{ test_dir_path, "three_lines.zig" });
- defer allocator.free(path);
- try test_dir.dir.writeFile(.{
+ const path = try fs.path.join(gpa, &.{ test_dir_path, "three_lines.zig" });
+ defer gpa.free(path);
+ try test_dir.dir.writeFile(io, .{
.sub_path = "three_lines.zig",
.data =
\\1
@@ -1276,90 +1253,90 @@ test printLineFromFile {
,
});
- try printLineFromFile(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
+ try printLineFromFile(io, output_stream, .{ .file_name = path, .line = 1, .column = 0 });
try expectEqualStrings("1\n", aw.written());
aw.clearRetainingCapacity();
- try printLineFromFile(output_stream, .{ .file_name = path, .line = 3, .column = 0 });
+ try printLineFromFile(io, output_stream, .{ .file_name = path, .line = 3, .column = 0 });
try expectEqualStrings("3\n", aw.written());
aw.clearRetainingCapacity();
}
{
- const file = try test_dir.dir.createFile("line_overlaps_page_boundary.zig", .{});
- defer file.close();
- const path = try fs.path.join(allocator, &.{ test_dir_path, "line_overlaps_page_boundary.zig" });
- defer allocator.free(path);
+ const file = try test_dir.dir.createFile(io, "line_overlaps_page_boundary.zig", .{});
+ defer file.close(io);
+ const path = try fs.path.join(gpa, &.{ test_dir_path, "line_overlaps_page_boundary.zig" });
+ defer gpa.free(path);
const overlap = 10;
var buf: [16]u8 = undefined;
- var file_writer = file.writer(&buf);
+ var file_writer = file.writer(io, &buf);
const writer = &file_writer.interface;
try writer.splatByteAll('a', std.heap.page_size_min - overlap);
try writer.writeByte('\n');
try writer.splatByteAll('a', overlap);
try writer.flush();
- try printLineFromFile(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
+ try printLineFromFile(io, output_stream, .{ .file_name = path, .line = 2, .column = 0 });
try expectEqualStrings(("a" ** overlap) ++ "\n", aw.written());
aw.clearRetainingCapacity();
}
{
- const file = try test_dir.dir.createFile("file_ends_on_page_boundary.zig", .{});
- defer file.close();
- const path = try fs.path.join(allocator, &.{ test_dir_path, "file_ends_on_page_boundary.zig" });
- defer allocator.free(path);
+ const file = try test_dir.dir.createFile(io, "file_ends_on_page_boundary.zig", .{});
+ defer file.close(io);
+ const path = try fs.path.join(gpa, &.{ test_dir_path, "file_ends_on_page_boundary.zig" });
+ defer gpa.free(path);
- var file_writer = file.writer(&.{});
+ var file_writer = file.writer(io, &.{});
const writer = &file_writer.interface;
try writer.splatByteAll('a', std.heap.page_size_max);
- try printLineFromFile(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
+ try printLineFromFile(io, output_stream, .{ .file_name = path, .line = 1, .column = 0 });
try expectEqualStrings(("a" ** std.heap.page_size_max) ++ "\n", aw.written());
aw.clearRetainingCapacity();
}
{
- const file = try test_dir.dir.createFile("very_long_first_line_spanning_multiple_pages.zig", .{});
- defer file.close();
- const path = try fs.path.join(allocator, &.{ test_dir_path, "very_long_first_line_spanning_multiple_pages.zig" });
- defer allocator.free(path);
+ const file = try test_dir.dir.createFile(io, "very_long_first_line_spanning_multiple_pages.zig", .{});
+ defer file.close(io);
+ const path = try fs.path.join(gpa, &.{ test_dir_path, "very_long_first_line_spanning_multiple_pages.zig" });
+ defer gpa.free(path);
- var file_writer = file.writer(&.{});
+ var file_writer = file.writer(io, &.{});
const writer = &file_writer.interface;
try writer.splatByteAll('a', 3 * std.heap.page_size_max);
- try expectError(error.EndOfFile, printLineFromFile(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));
+ try expectError(error.EndOfStream, printLineFromFile(io, output_stream, .{ .file_name = path, .line = 2, .column = 0 }));
- try printLineFromFile(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
+ try printLineFromFile(io, output_stream, .{ .file_name = path, .line = 1, .column = 0 });
try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "\n", aw.written());
aw.clearRetainingCapacity();
try writer.writeAll("a\na");
- try printLineFromFile(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
+ try printLineFromFile(io, output_stream, .{ .file_name = path, .line = 1, .column = 0 });
try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "a\n", aw.written());
aw.clearRetainingCapacity();
- try printLineFromFile(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
+ try printLineFromFile(io, output_stream, .{ .file_name = path, .line = 2, .column = 0 });
try expectEqualStrings("a\n", aw.written());
aw.clearRetainingCapacity();
}
{
- const file = try test_dir.dir.createFile("file_of_newlines.zig", .{});
- defer file.close();
- const path = try fs.path.join(allocator, &.{ test_dir_path, "file_of_newlines.zig" });
- defer allocator.free(path);
+ const file = try test_dir.dir.createFile(io, "file_of_newlines.zig", .{});
+ defer file.close(io);
+ const path = try fs.path.join(gpa, &.{ test_dir_path, "file_of_newlines.zig" });
+ defer gpa.free(path);
- var file_writer = file.writer(&.{});
+ var file_writer = file.writer(io, &.{});
const writer = &file_writer.interface;
const real_file_start = 3 * std.heap.page_size_min;
try writer.splatByteAll('\n', real_file_start);
try writer.writeAll("abc\ndef");
- try printLineFromFile(output_stream, .{ .file_name = path, .line = real_file_start + 1, .column = 0 });
+ try printLineFromFile(io, output_stream, .{ .file_name = path, .line = real_file_start + 1, .column = 0 });
try expectEqualStrings("abc\n", aw.written());
aw.clearRetainingCapacity();
- try printLineFromFile(output_stream, .{ .file_name = path, .line = real_file_start + 2, .column = 0 });
+ try printLineFromFile(io, output_stream, .{ .file_name = path, .line = real_file_start + 2, .column = 0 });
try expectEqualStrings("def\n", aw.written());
aw.clearRetainingCapacity();
}
@@ -1563,19 +1540,19 @@ pub fn defaultHandleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?CpuContex
_ = panicking.fetchAdd(1, .seq_cst);
trace: {
- const stderr, const tty_config = lockStderrWriter(&.{});
- defer unlockStderrWriter();
+ const stderr = lockStderr(&.{}).terminal();
+ defer unlockStderr();
if (addr) |a| {
- stderr.print("{s} at address 0x{x}\n", .{ name, a }) catch break :trace;
+ stderr.writer.print("{s} at address 0x{x}\n", .{ name, a }) catch break :trace;
} else {
- stderr.print("{s} (no address available)\n", .{name}) catch break :trace;
+ stderr.writer.print("{s} (no address available)\n", .{name}) catch break :trace;
}
if (opt_ctx) |context| {
writeCurrentStackTrace(.{
.context = context,
.allow_unsafe_unwind = true, // we're crashing anyway, give it our all!
- }, stderr, tty_config) catch break :trace;
+ }, stderr) catch break :trace;
}
}
},
@@ -1584,7 +1561,8 @@ pub fn defaultHandleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?CpuContex
// A segfault happened while trying to print a previous panic message.
// We're still holding the mutex but that's fine as we're going to
// call abort().
- fs.File.stderr().writeAll("aborting due to recursive panic\n") catch {};
+ const stderr = lockStderr(&.{}).terminal();
+ stderr.writer.writeAll("aborting due to recursive panic\n") catch {};
},
else => {}, // Panicked while printing the recursive panic message.
}
@@ -1592,7 +1570,7 @@ pub fn defaultHandleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?CpuContex
// We cannot allow the signal handler to return because when it runs the original instruction
// again, the memory may be mapped and undefined behavior would occur rather than repeating
// the segfault. So we simply abort here.
- posix.abort();
+ std.process.abort();
}
pub fn dumpStackPointerAddr(prefix: []const u8) void {
@@ -1616,20 +1594,14 @@ test "manage resources correctly" {
return @returnAddress();
}
};
- const gpa = std.testing.allocator;
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
- var discarding: Io.Writer.Discarding = .init(&.{});
+ const gpa = testing.allocator;
+ const io = testing.io;
+
+ var discarding: Writer.Discarding = .init(&.{});
var di: SelfInfo = .init;
defer di.deinit(gpa);
- try printSourceAtAddress(
- gpa,
- io,
- &di,
- &discarding.writer,
- S.showMyTrace(),
- .no_color,
- );
+ const t: Io.Terminal = .{ .writer = &discarding.writer, .mode = .no_color };
+ try printSourceAtAddress(gpa, io, &di, t, S.showMyTrace());
}
/// This API helps you track where a value originated and where it was mutated,
@@ -1690,21 +1662,21 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
pub fn dump(t: @This()) void {
if (!enabled) return;
- const stderr, const tty_config = lockStderrWriter(&.{});
- defer unlockStderrWriter();
+ const stderr = lockStderr(&.{}).terminal();
+ defer unlockStderr();
const end = @min(t.index, size);
for (t.addrs[0..end], 0..) |frames_array, i| {
- stderr.print("{s}:\n", .{t.notes[i]}) catch return;
+ stderr.writer.print("{s}:\n", .{t.notes[i]}) catch return;
var frames_array_mutable = frames_array;
const frames = mem.sliceTo(frames_array_mutable[0..], 0);
const stack_trace: StackTrace = .{
.index = frames.len,
.instruction_addresses = frames,
};
- writeStackTrace(&stack_trace, stderr, tty_config) catch return;
+ writeStackTrace(&stack_trace, stderr) catch return;
}
if (t.index > end) {
- stderr.print("{d} more traces not shown; consider increasing trace size\n", .{
+ stderr.writer.print("{d} more traces not shown; consider increasing trace size\n", .{
t.index - end,
}) catch return;
}
diff --git a/lib/std/debug/ElfFile.zig b/lib/std/debug/ElfFile.zig
index e81943ab49..a101309d22 100644
--- a/lib/std/debug/ElfFile.zig
+++ b/lib/std/debug/ElfFile.zig
@@ -1,5 +1,13 @@
//! A helper type for loading an ELF file and collecting its DWARF debug information, unwind
//! information, and symbol table.
+const ElfFile = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const Endian = std.builtin.Endian;
+const Dwarf = std.debug.Dwarf;
+const Allocator = std.mem.Allocator;
+const elf = std.elf;
is_64: bool,
endian: Endian,
@@ -115,7 +123,8 @@ pub const LoadError = error{
pub fn load(
gpa: Allocator,
- elf_file: std.fs.File,
+ io: Io,
+ elf_file: Io.File,
opt_build_id: ?[]const u8,
di_search_paths: *const DebugInfoSearchPaths,
) LoadError!ElfFile {
@@ -123,7 +132,7 @@ pub fn load(
errdefer arena_instance.deinit();
const arena = arena_instance.allocator();
- var result = loadInner(arena, elf_file, null) catch |err| switch (err) {
+ var result = loadInner(arena, io, elf_file, null) catch |err| switch (err) {
error.CrcMismatch => unreachable, // we passed crc as null
else => |e| return e,
};
@@ -148,7 +157,7 @@ pub fn load(
if (build_id.len < 3) break :build_id;
for (di_search_paths.global_debug) |global_debug| {
- if (try loadSeparateDebugFile(arena, &result, null, "{s}/.build-id/{x}/{x}.debug", .{
+ if (try loadSeparateDebugFile(arena, io, &result, null, "{s}/.build-id/{x}/{x}.debug", .{
global_debug,
build_id[0..1],
build_id[1..],
@@ -156,7 +165,7 @@ pub fn load(
}
if (di_search_paths.debuginfod_client) |components| {
- if (try loadSeparateDebugFile(arena, &result, null, "{s}{s}/{x}/debuginfo", .{
+ if (try loadSeparateDebugFile(arena, io, &result, null, "{s}{s}/{x}/debuginfo", .{
components[0],
components[1],
build_id,
@@ -173,18 +182,18 @@ pub fn load(
const exe_dir = di_search_paths.exe_dir orelse break :debug_link;
- if (try loadSeparateDebugFile(arena, &result, debug_crc, "{s}/{s}", .{
+ if (try loadSeparateDebugFile(arena, io, &result, debug_crc, "{s}/{s}", .{
exe_dir,
debug_filename,
})) |mapped| break :load_di mapped;
- if (try loadSeparateDebugFile(arena, &result, debug_crc, "{s}/.debug/{s}", .{
+ if (try loadSeparateDebugFile(arena, io, &result, debug_crc, "{s}/.debug/{s}", .{
exe_dir,
debug_filename,
})) |mapped| break :load_di mapped;
for (di_search_paths.global_debug) |global_debug| {
// This looks like a bug; it isn't. They really do embed the absolute path to the
// exe's dirname, *under* the global debug path.
- if (try loadSeparateDebugFile(arena, &result, debug_crc, "{s}/{s}/{s}", .{
+ if (try loadSeparateDebugFile(arena, io, &result, debug_crc, "{s}/{s}/{s}", .{
global_debug,
exe_dir,
debug_filename,
@@ -358,12 +367,19 @@ const Section = struct {
const Array = std.enums.EnumArray(Section.Id, ?Section);
};
-fn loadSeparateDebugFile(arena: Allocator, main_loaded: *LoadInnerResult, opt_crc: ?u32, comptime fmt: []const u8, args: anytype) Allocator.Error!?[]align(std.heap.page_size_min) const u8 {
+fn loadSeparateDebugFile(
+ arena: Allocator,
+ io: Io,
+ main_loaded: *LoadInnerResult,
+ opt_crc: ?u32,
+ comptime fmt: []const u8,
+ args: anytype,
+) Allocator.Error!?[]align(std.heap.page_size_min) const u8 {
const path = try std.fmt.allocPrint(arena, fmt, args);
- const elf_file = std.fs.cwd().openFile(path, .{}) catch return null;
- defer elf_file.close();
+ const elf_file = Io.Dir.cwd().openFile(io, path, .{}) catch return null;
+ defer elf_file.close(io);
- const result = loadInner(arena, elf_file, opt_crc) catch |err| switch (err) {
+ const result = loadInner(arena, io, elf_file, opt_crc) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.CrcMismatch => return null,
else => return null,
@@ -408,13 +424,14 @@ const LoadInnerResult = struct {
};
fn loadInner(
arena: Allocator,
- elf_file: std.fs.File,
+ io: Io,
+ elf_file: Io.File,
opt_crc: ?u32,
) (LoadError || error{ CrcMismatch, Streaming, Canceled })!LoadInnerResult {
const mapped_mem: []align(std.heap.page_size_min) const u8 = mapped: {
const file_len = std.math.cast(
usize,
- elf_file.getEndPos() catch |err| switch (err) {
+ elf_file.length(io) catch |err| switch (err) {
error.PermissionDenied => unreachable, // not asking for PROT_EXEC
else => |e| return e,
},
@@ -529,10 +546,3 @@ fn loadInner(
.mapped_mem = mapped_mem,
};
}
-
-const std = @import("std");
-const Endian = std.builtin.Endian;
-const Dwarf = std.debug.Dwarf;
-const ElfFile = @This();
-const Allocator = std.mem.Allocator;
-const elf = std.elf;
diff --git a/lib/std/debug/Info.zig b/lib/std/debug/Info.zig
index 921cd36ab8..34e79227d1 100644
--- a/lib/std/debug/Info.zig
+++ b/lib/std/debug/Info.zig
@@ -5,19 +5,18 @@
//! Unlike `std.debug.SelfInfo`, this API does not assume the debug information
//! in question happens to match the host CPU architecture, OS, or other target
//! properties.
+const Info = @This();
const std = @import("../std.zig");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const Path = std.Build.Cache.Path;
const assert = std.debug.assert;
const Coverage = std.debug.Coverage;
const SourceLocation = std.debug.Coverage.SourceLocation;
-
const ElfFile = std.debug.ElfFile;
const MachOFile = std.debug.MachOFile;
-const Info = @This();
-
impl: union(enum) {
elf: ElfFile,
macho: MachOFile,
@@ -25,15 +24,25 @@ impl: union(enum) {
/// Externally managed, outlives this `Info` instance.
coverage: *Coverage,
-pub const LoadError = std.fs.File.OpenError || ElfFile.LoadError || MachOFile.Error || std.debug.Dwarf.ScanError || error{ MissingDebugInfo, UnsupportedDebugInfo };
+pub const LoadError = error{
+ MissingDebugInfo,
+ UnsupportedDebugInfo,
+} || Io.File.OpenError || ElfFile.LoadError || MachOFile.Error || std.debug.Dwarf.ScanError;
-pub fn load(gpa: Allocator, path: Path, coverage: *Coverage, format: std.Target.ObjectFormat, arch: std.Target.Cpu.Arch) LoadError!Info {
+pub fn load(
+ gpa: Allocator,
+ io: Io,
+ path: Path,
+ coverage: *Coverage,
+ format: std.Target.ObjectFormat,
+ arch: std.Target.Cpu.Arch,
+) LoadError!Info {
switch (format) {
.elf => {
- var file = try path.root_dir.handle.openFile(path.sub_path, .{});
- defer file.close();
+ var file = try path.root_dir.handle.openFile(io, path.sub_path, .{});
+ defer file.close(io);
- var elf_file: ElfFile = try .load(gpa, file, null, &.none);
+ var elf_file: ElfFile = try .load(gpa, io, file, null, &.none);
errdefer elf_file.deinit(gpa);
if (elf_file.dwarf == null) return error.MissingDebugInfo;
@@ -49,7 +58,7 @@ pub fn load(gpa: Allocator, path: Path, coverage: *Coverage, format: std.Target.
const path_str = try path.toString(gpa);
defer gpa.free(path_str);
- var macho_file: MachOFile = try .load(gpa, path_str, arch);
+ var macho_file: MachOFile = try .load(gpa, io, path_str, arch);
errdefer macho_file.deinit(gpa);
return .{
@@ -76,6 +85,7 @@ pub const ResolveAddressesError = Coverage.ResolveAddressesDwarfError || error{U
pub fn resolveAddresses(
info: *Info,
gpa: Allocator,
+ io: Io,
/// Asserts the addresses are in ascending order.
sorted_pc_addrs: []const u64,
/// Asserts its length equals length of `sorted_pc_addrs`.
@@ -88,7 +98,7 @@ pub fn resolveAddresses(
// Resolving all of the addresses at once unfortunately isn't so easy in Mach-O binaries
// due to split debug information. For now, we'll just resolve the addreses one by one.
for (sorted_pc_addrs, output) |pc_addr, *src_loc| {
- const dwarf, const dwarf_pc_addr = mf.getDwarfForAddress(gpa, pc_addr) catch |err| switch (err) {
+ const dwarf, const dwarf_pc_addr = mf.getDwarfForAddress(gpa, io, pc_addr) catch |err| switch (err) {
error.InvalidMachO, error.InvalidDwarf => return error.InvalidDebugInfo,
else => |e| return e,
};
diff --git a/lib/std/debug/MachOFile.zig b/lib/std/debug/MachOFile.zig
index 3be1b1daff..9e81fb8911 100644
--- a/lib/std/debug/MachOFile.zig
+++ b/lib/std/debug/MachOFile.zig
@@ -27,13 +27,13 @@ pub fn deinit(mf: *MachOFile, gpa: Allocator) void {
posix.munmap(mf.mapped_memory);
}
-pub fn load(gpa: Allocator, path: []const u8, arch: std.Target.Cpu.Arch) Error!MachOFile {
+pub fn load(gpa: Allocator, io: Io, path: []const u8, arch: std.Target.Cpu.Arch) Error!MachOFile {
switch (arch) {
.x86_64, .aarch64 => {},
else => unreachable,
}
- const all_mapped_memory = try mapDebugInfoFile(path);
+ const all_mapped_memory = try mapDebugInfoFile(io, path);
errdefer posix.munmap(all_mapped_memory);
// In most cases, the file we just mapped is a Mach-O binary. However, it could be a "universal
@@ -239,7 +239,7 @@ pub fn load(gpa: Allocator, path: []const u8, arch: std.Target.Cpu.Arch) Error!M
.text_vmaddr = text_vmaddr,
};
}
-pub fn getDwarfForAddress(mf: *MachOFile, gpa: Allocator, vaddr: u64) !struct { *Dwarf, u64 } {
+pub fn getDwarfForAddress(mf: *MachOFile, gpa: Allocator, io: Io, vaddr: u64) !struct { *Dwarf, u64 } {
const symbol = Symbol.find(mf.symbols, vaddr) orelse return error.MissingDebugInfo;
if (symbol.ofile == Symbol.unknown_ofile) return error.MissingDebugInfo;
@@ -254,7 +254,7 @@ pub fn getDwarfForAddress(mf: *MachOFile, gpa: Allocator, vaddr: u64) !struct {
const gop = try mf.ofiles.getOrPut(gpa, symbol.ofile);
if (!gop.found_existing) {
const name = mem.sliceTo(mf.strings[symbol.ofile..], 0);
- gop.value_ptr.* = loadOFile(gpa, name);
+ gop.value_ptr.* = loadOFile(gpa, io, name);
}
const of = &(gop.value_ptr.* catch |err| return err);
@@ -356,7 +356,7 @@ test {
_ = Symbol;
}
-fn loadOFile(gpa: Allocator, o_file_name: []const u8) !OFile {
+fn loadOFile(gpa: Allocator, io: Io, o_file_name: []const u8) !OFile {
const all_mapped_memory, const mapped_ofile = map: {
const open_paren = paren: {
if (std.mem.endsWith(u8, o_file_name, ")")) {
@@ -365,7 +365,7 @@ fn loadOFile(gpa: Allocator, o_file_name: []const u8) !OFile {
}
}
// Not an archive, just a normal path to a .o file
- const m = try mapDebugInfoFile(o_file_name);
+ const m = try mapDebugInfoFile(io, o_file_name);
break :map .{ m, m };
};
@@ -373,7 +373,7 @@ fn loadOFile(gpa: Allocator, o_file_name: []const u8) !OFile {
const archive_path = o_file_name[0..open_paren];
const target_name_in_archive = o_file_name[open_paren + 1 .. o_file_name.len - 1];
- const mapped_archive = try mapDebugInfoFile(archive_path);
+ const mapped_archive = try mapDebugInfoFile(io, archive_path);
errdefer posix.munmap(mapped_archive);
var ar_reader: Io.Reader = .fixed(mapped_archive);
@@ -511,16 +511,16 @@ fn loadOFile(gpa: Allocator, o_file_name: []const u8) !OFile {
}
/// Uses `mmap` to map the file at `path` into memory.
-fn mapDebugInfoFile(path: []const u8) ![]align(std.heap.page_size_min) const u8 {
- const file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) {
+fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 {
+ const file = Io.Dir.cwd().openFile(io, path, .{}) catch |err| switch (err) {
error.FileNotFound => return error.MissingDebugInfo,
else => return error.ReadFailed,
};
- defer file.close();
+ defer file.close(io);
const file_len = std.math.cast(
usize,
- file.getEndPos() catch return error.ReadFailed,
+ file.length(io) catch return error.ReadFailed,
) orelse return error.ReadFailed;
return posix.mmap(
diff --git a/lib/std/debug/Pdb.zig b/lib/std/debug/Pdb.zig
index c10b361f72..3ecfd1b363 100644
--- a/lib/std/debug/Pdb.zig
+++ b/lib/std/debug/Pdb.zig
@@ -1,5 +1,5 @@
const std = @import("../std.zig");
-const File = std.fs.File;
+const File = std.Io.File;
const Allocator = std.mem.Allocator;
const pdb = std.pdb;
const assert = std.debug.assert;
diff --git a/lib/std/debug/SelfInfo/Elf.zig b/lib/std/debug/SelfInfo/Elf.zig
index 59c0b42451..be76f3a8c2 100644
--- a/lib/std/debug/SelfInfo/Elf.zig
+++ b/lib/std/debug/SelfInfo/Elf.zig
@@ -29,13 +29,12 @@ pub fn deinit(si: *SelfInfo, gpa: Allocator) void {
}
pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!std.debug.Symbol {
- _ = io;
const module = try si.findModule(gpa, address, .exclusive);
defer si.rwlock.unlock();
const vaddr = address - module.load_offset;
- const loaded_elf = try module.getLoadedElf(gpa);
+ const loaded_elf = try module.getLoadedElf(gpa, io);
if (loaded_elf.file.dwarf) |*dwarf| {
if (!loaded_elf.scanned_dwarf) {
dwarf.open(gpa, native_endian) catch |err| switch (err) {
@@ -180,7 +179,7 @@ comptime {
}
}
pub const UnwindContext = Dwarf.SelfUnwinder;
-pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error!usize {
+pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, io: Io, context: *UnwindContext) Error!usize {
comptime assert(can_unwind);
{
@@ -201,7 +200,7 @@ pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error
@memset(si.unwind_cache.?, .empty);
}
- const unwind_sections = try module.getUnwindSections(gpa);
+ const unwind_sections = try module.getUnwindSections(gpa, io);
for (unwind_sections) |*unwind| {
if (context.computeRules(gpa, unwind, module.load_offset, null)) |entry| {
entry.populate(si.unwind_cache.?);
@@ -261,12 +260,12 @@ const Module = struct {
};
/// Assumes we already hold an exclusive lock.
- fn getUnwindSections(mod: *Module, gpa: Allocator) Error![]Dwarf.Unwind {
- if (mod.unwind == null) mod.unwind = loadUnwindSections(mod, gpa);
+ fn getUnwindSections(mod: *Module, gpa: Allocator, io: Io) Error![]Dwarf.Unwind {
+ if (mod.unwind == null) mod.unwind = loadUnwindSections(mod, gpa, io);
const us = &(mod.unwind.? catch |err| return err);
return us.buf[0..us.len];
}
- fn loadUnwindSections(mod: *Module, gpa: Allocator) Error!UnwindSections {
+ fn loadUnwindSections(mod: *Module, gpa: Allocator, io: Io) Error!UnwindSections {
var us: UnwindSections = .{
.buf = undefined,
.len = 0,
@@ -284,7 +283,7 @@ const Module = struct {
} else {
// There is no `.eh_frame_hdr` section. There may still be an `.eh_frame` or `.debug_frame`
// section, but we'll have to load the binary to get at it.
- const loaded = try mod.getLoadedElf(gpa);
+ const loaded = try mod.getLoadedElf(gpa, io);
// If both are present, we can't just pick one -- the info could be split between them.
// `.debug_frame` is likely to be the more complete section, so we'll prioritize that one.
if (loaded.file.debug_frame) |*debug_frame| {
@@ -319,24 +318,24 @@ const Module = struct {
}
/// Assumes we already hold an exclusive lock.
- fn getLoadedElf(mod: *Module, gpa: Allocator) Error!*LoadedElf {
- if (mod.loaded_elf == null) mod.loaded_elf = loadElf(mod, gpa);
+ fn getLoadedElf(mod: *Module, gpa: Allocator, io: Io) Error!*LoadedElf {
+ if (mod.loaded_elf == null) mod.loaded_elf = loadElf(mod, gpa, io);
return if (mod.loaded_elf.?) |*elf| elf else |err| err;
}
- fn loadElf(mod: *Module, gpa: Allocator) Error!LoadedElf {
+ fn loadElf(mod: *Module, gpa: Allocator, io: Io) Error!LoadedElf {
const load_result = if (mod.name.len > 0) res: {
- var file = std.fs.cwd().openFile(mod.name, .{}) catch return error.MissingDebugInfo;
- defer file.close();
- break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(mod.name));
+ var file = Io.Dir.cwd().openFile(io, mod.name, .{}) catch return error.MissingDebugInfo;
+ defer file.close(io);
+ break :res std.debug.ElfFile.load(gpa, io, file, mod.build_id, &.native(mod.name));
} else res: {
- const path = std.fs.selfExePathAlloc(gpa) catch |err| switch (err) {
+ const path = std.process.executablePathAlloc(io, gpa) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => return error.ReadFailed,
};
defer gpa.free(path);
- var file = std.fs.cwd().openFile(path, .{}) catch return error.MissingDebugInfo;
- defer file.close();
- break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(path));
+ var file = Io.Dir.cwd().openFile(io, path, .{}) catch return error.MissingDebugInfo;
+ defer file.close(io);
+ break :res std.debug.ElfFile.load(gpa, io, file, mod.build_id, &.native(path));
};
var elf_file = load_result catch |err| switch (err) {
diff --git a/lib/std/debug/SelfInfo/MachO.zig b/lib/std/debug/SelfInfo/MachO.zig
index dd11b4c8bf..d09493adb0 100644
--- a/lib/std/debug/SelfInfo/MachO.zig
+++ b/lib/std/debug/SelfInfo/MachO.zig
@@ -21,11 +21,10 @@ pub fn deinit(si: *SelfInfo, gpa: Allocator) void {
}
pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!std.debug.Symbol {
- _ = io;
const module = try si.findModule(gpa, address);
defer si.mutex.unlock();
- const file = try module.getFile(gpa);
+ const file = try module.getFile(gpa, io);
// This is not necessarily the same as the vmaddr_slide that dyld would report. This is
// because the segments in the file on disk might differ from the ones in memory. Normally
@@ -39,7 +38,7 @@ pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!st
const vaddr = address - vaddr_offset;
- const ofile_dwarf, const ofile_vaddr = file.getDwarfForAddress(gpa, vaddr) catch {
+ const ofile_dwarf, const ofile_vaddr = file.getDwarfForAddress(gpa, io, vaddr) catch {
// Return at least the symbol name if available.
return .{
.name = try file.lookupSymbolName(vaddr),
@@ -107,7 +106,8 @@ pub const UnwindContext = std.debug.Dwarf.SelfUnwinder;
/// Unwind a frame using MachO compact unwind info (from `__unwind_info`).
/// If the compact encoding can't encode a way to unwind a frame, it will
/// defer unwinding to DWARF, in which case `__eh_frame` will be used if available.
-pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error!usize {
+pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, io: Io, context: *UnwindContext) Error!usize {
+ _ = io;
return unwindFrameInner(si, gpa, context) catch |err| switch (err) {
error.InvalidDebugInfo,
error.MissingDebugInfo,
@@ -546,12 +546,12 @@ const Module = struct {
};
}
- fn getFile(module: *Module, gpa: Allocator) Error!*MachOFile {
+ fn getFile(module: *Module, gpa: Allocator, io: Io) Error!*MachOFile {
if (module.file == null) {
const path = std.mem.span(
std.c.dyld_image_path_containing_address(@ptrFromInt(module.text_base)).?,
);
- module.file = MachOFile.load(gpa, path, builtin.cpu.arch) catch |err| switch (err) {
+ module.file = MachOFile.load(gpa, io, path, builtin.cpu.arch) catch |err| switch (err) {
error.InvalidMachO, error.InvalidDwarf => error.InvalidDebugInfo,
error.MissingDebugInfo, error.OutOfMemory, error.UnsupportedDebugInfo, error.ReadFailed => |e| e,
};
@@ -615,14 +615,14 @@ test {
}
/// Uses `mmap` to map the file at `path` into memory.
-fn mapDebugInfoFile(path: []const u8) ![]align(std.heap.page_size_min) const u8 {
- const file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) {
+fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 {
+ const file = Io.Dir.cwd().openFile(io, path, .{}) catch |err| switch (err) {
error.FileNotFound => return error.MissingDebugInfo,
else => return error.ReadFailed,
};
- defer file.close();
+ defer file.close(io);
- const file_end_pos = file.getEndPos() catch |err| switch (err) {
+ const file_end_pos = file.length(io) catch |err| switch (err) {
error.Unexpected => |e| return e,
else => return error.ReadFailed,
};
diff --git a/lib/std/debug/SelfInfo/Windows.zig b/lib/std/debug/SelfInfo/Windows.zig
index ddb6bf73f6..99d3e9f926 100644
--- a/lib/std/debug/SelfInfo/Windows.zig
+++ b/lib/std/debug/SelfInfo/Windows.zig
@@ -149,8 +149,9 @@ pub const UnwindContext = struct {
return ctx.cur.getRegs().bp;
}
};
-pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error!usize {
+pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, io: Io, context: *UnwindContext) Error!usize {
_ = si;
+ _ = io;
_ = gpa;
const current_regs = context.cur.getRegs();
@@ -204,14 +205,14 @@ const Module = struct {
coff_section_headers: []coff.SectionHeader,
const MappedFile = struct {
- file: fs.File,
+ file: Io.File,
section_handle: windows.HANDLE,
section_view: []const u8,
- fn deinit(mf: *const MappedFile) void {
+ fn deinit(mf: *const MappedFile, io: Io) void {
const process_handle = windows.GetCurrentProcess();
assert(windows.ntdll.NtUnmapViewOfSection(process_handle, @constCast(mf.section_view.ptr)) == .SUCCESS);
windows.CloseHandle(mf.section_handle);
- mf.file.close();
+ mf.file.close(io);
}
};
@@ -222,7 +223,7 @@ const Module = struct {
pdb.file_reader.file.close(io);
pdb.deinit();
}
- if (di.mapped_file) |*mf| mf.deinit();
+ if (di.mapped_file) |*mf| mf.deinit(io);
var arena = di.arena.promote(gpa);
arena.deinit();
@@ -314,8 +315,8 @@ const Module = struct {
);
if (len == 0) return error.MissingDebugInfo;
const name_w = name_buffer[0 .. len + 4 :0];
- var threaded: Io.Threaded = .init_single_threaded;
- const coff_file = threaded.dirOpenFileWtf16(null, name_w, .{}) catch |err| switch (err) {
+ // TODO eliminate the reference to Io.Threaded.global_single_threaded here
+ const coff_file = Io.Threaded.global_single_threaded.dirOpenFileWtf16(null, name_w, .{}) catch |err| switch (err) {
error.Canceled => |e| return e,
error.Unexpected => |e| return e,
error.FileNotFound => return error.MissingDebugInfo,
@@ -331,7 +332,6 @@ const Module = struct {
error.SystemResources,
error.WouldBlock,
error.AccessDenied,
- error.ProcessNotFound,
error.PermissionDenied,
error.NoSpaceLeft,
error.DeviceBusy,
@@ -343,7 +343,7 @@ const Module = struct {
error.AntivirusInterference,
error.ProcessFdQuotaExceeded,
error.SystemFdQuotaExceeded,
- error.FileLocksNotSupported,
+ error.FileLocksUnsupported,
error.FileBusy,
=> return error.ReadFailed,
};
@@ -387,12 +387,12 @@ const Module = struct {
const section_view = section_view_ptr.?[0..coff_len];
coff_obj = coff.Coff.init(section_view, false) catch return error.InvalidDebugInfo;
break :mapped .{
- .file = .adaptFromNewApi(coff_file),
+ .file = coff_file,
.section_handle = section_handle,
.section_view = section_view,
};
};
- errdefer if (mapped_file) |*mf| mf.deinit();
+ errdefer if (mapped_file) |*mf| mf.deinit(io);
const coff_image_base = coff_obj.getImageBase();
@@ -432,22 +432,22 @@ const Module = struct {
break :pdb null;
};
const pdb_file_open_result = if (fs.path.isAbsolute(path)) res: {
- break :res std.fs.cwd().openFile(path, .{});
+ break :res Io.Dir.cwd().openFile(io, path, .{});
} else res: {
- const self_dir = fs.selfExeDirPathAlloc(gpa) catch |err| switch (err) {
+ const self_dir = std.process.executableDirPathAlloc(io, gpa) catch |err| switch (err) {
error.OutOfMemory, error.Unexpected => |e| return e,
else => return error.ReadFailed,
};
defer gpa.free(self_dir);
const abs_path = try fs.path.join(gpa, &.{ self_dir, path });
defer gpa.free(abs_path);
- break :res std.fs.cwd().openFile(abs_path, .{});
+ break :res Io.Dir.cwd().openFile(io, abs_path, .{});
};
const pdb_file = pdb_file_open_result catch |err| switch (err) {
error.FileNotFound, error.IsDir => break :pdb null,
else => return error.ReadFailed,
};
- errdefer pdb_file.close();
+ errdefer pdb_file.close(io);
const pdb_reader = try arena.create(Io.File.Reader);
pdb_reader.* = pdb_file.reader(io, try arena.alloc(u8, 4096));
diff --git a/lib/std/debug/simple_panic.zig b/lib/std/debug/simple_panic.zig
index 45e97777c4..a5a09fa116 100644
--- a/lib/std/debug/simple_panic.zig
+++ b/lib/std/debug/simple_panic.zig
@@ -14,9 +14,8 @@ const std = @import("../std.zig");
pub fn call(msg: []const u8, ra: ?usize) noreturn {
@branchHint(.cold);
_ = ra;
- std.debug.lockStdErr();
- const stderr: std.fs.File = .stderr();
- stderr.writeAll(msg) catch {};
+ const stderr_writer = &std.debug.lockStderr(&.{}).file_writer.interface;
+ stderr_writer.writeAll(msg) catch {};
@trap();
}
diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig
index a3490ed7db..ccd60ecd97 100644
--- a/lib/std/dynamic_library.zig
+++ b/lib/std/dynamic_library.zig
@@ -1,10 +1,12 @@
-const std = @import("std.zig");
const builtin = @import("builtin");
+const native_os = builtin.os.tag;
+
+const std = @import("std.zig");
+const Io = std.Io;
const mem = std.mem;
const testing = std.testing;
const elf = std.elf;
const windows = std.os.windows;
-const native_os = builtin.os.tag;
const posix = std.posix;
/// Cross-platform dynamic library loading and symbol lookup.
@@ -53,11 +55,11 @@ pub const DynLib = struct {
// An iterator is provided in order to traverse the linked list in a idiomatic
// fashion.
const LinkMap = extern struct {
- l_addr: usize,
- l_name: [*:0]const u8,
- l_ld: ?*elf.Dyn,
- l_next: ?*LinkMap,
- l_prev: ?*LinkMap,
+ addr: usize,
+ name: [*:0]const u8,
+ ld: ?*elf.Dyn,
+ next: ?*LinkMap,
+ prev: ?*LinkMap,
pub const Iterator = struct {
current: ?*LinkMap,
@@ -68,7 +70,7 @@ const LinkMap = extern struct {
pub fn next(self: *Iterator) ?*LinkMap {
if (self.current) |it| {
- self.current = it.l_next;
+ self.current = it.next;
return it;
}
return null;
@@ -77,10 +79,10 @@ const LinkMap = extern struct {
};
const RDebug = extern struct {
- r_version: i32,
- r_map: ?*LinkMap,
- r_brk: usize,
- r_ldbase: usize,
+ version: i32,
+ map: ?*LinkMap,
+ brk: usize,
+ ldbase: usize,
};
/// TODO fix comparisons of extern symbol pointers so we don't need this helper function.
@@ -105,8 +107,8 @@ pub fn linkmap_iterator() error{InvalidExe}!LinkMap.Iterator {
elf.DT_DEBUG => {
const ptr = @as(?*RDebug, @ptrFromInt(_DYNAMIC[i].d_val));
if (ptr) |r_debug| {
- if (r_debug.r_version != 1) return error.InvalidExe;
- break :init r_debug.r_map;
+ if (r_debug.version != 1) return error.InvalidExe;
+ break :init r_debug.map;
}
},
elf.DT_PLTGOT => {
@@ -155,24 +157,24 @@ pub const ElfDynLib = struct {
dt_gnu_hash: *elf.gnu_hash.Header,
};
- fn openPath(path: []const u8) !std.fs.Dir {
+ fn openPath(io: Io, path: []const u8) !Io.Dir {
if (path.len == 0) return error.NotDir;
var parts = std.mem.tokenizeScalar(u8, path, '/');
- var parent = if (path[0] == '/') try std.fs.cwd().openDir("/", .{}) else std.fs.cwd();
+ var parent = if (path[0] == '/') try Io.Dir.cwd().openDir(io, "/", .{}) else Io.Dir.cwd();
while (parts.next()) |part| {
- const child = try parent.openDir(part, .{});
- parent.close();
+ const child = try parent.openDir(io, part, .{});
+ parent.close(io);
parent = child;
}
return parent;
}
- fn resolveFromSearchPath(search_path: []const u8, file_name: []const u8, delim: u8) ?posix.fd_t {
+ fn resolveFromSearchPath(io: Io, search_path: []const u8, file_name: []const u8, delim: u8) ?posix.fd_t {
var paths = std.mem.tokenizeScalar(u8, search_path, delim);
while (paths.next()) |p| {
- var dir = openPath(p) catch continue;
- defer dir.close();
- const fd = posix.openat(dir.fd, file_name, .{
+ var dir = openPath(io, p) catch continue;
+ defer dir.close(io);
+ const fd = posix.openat(dir.handle, file_name, .{
.ACCMODE = .RDONLY,
.CLOEXEC = true,
}, 0) catch continue;
@@ -181,10 +183,10 @@ pub const ElfDynLib = struct {
return null;
}
- fn resolveFromParent(dir_path: []const u8, file_name: []const u8) ?posix.fd_t {
- var dir = std.fs.cwd().openDir(dir_path, .{}) catch return null;
- defer dir.close();
- return posix.openat(dir.fd, file_name, .{
+ fn resolveFromParent(io: Io, dir_path: []const u8, file_name: []const u8) ?posix.fd_t {
+ var dir = Io.Dir.cwd().openDir(io, dir_path, .{}) catch return null;
+ defer dir.close(io);
+ return posix.openat(dir.handle, file_name, .{
.ACCMODE = .RDONLY,
.CLOEXEC = true,
}, 0) catch null;
@@ -195,7 +197,7 @@ pub const ElfDynLib = struct {
// - DT_RPATH of the calling binary is not used as a search path
// - DT_RUNPATH of the calling binary is not used as a search path
// - /etc/ld.so.cache is not read
- fn resolveFromName(path_or_name: []const u8) !posix.fd_t {
+ fn resolveFromName(io: Io, path_or_name: []const u8) !posix.fd_t {
// If filename contains a slash ("/"), then it is interpreted as a (relative or absolute) pathname
if (std.mem.findScalarPos(u8, path_or_name, 0, '/')) |_| {
return posix.open(path_or_name, .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
@@ -206,25 +208,27 @@ pub const ElfDynLib = struct {
std.os.linux.getegid() == std.os.linux.getgid())
{
if (posix.getenvZ("LD_LIBRARY_PATH")) |ld_library_path| {
- if (resolveFromSearchPath(ld_library_path, path_or_name, ':')) |fd| {
+ if (resolveFromSearchPath(io, ld_library_path, path_or_name, ':')) |fd| {
return fd;
}
}
}
// Lastly the directories /lib and /usr/lib are searched (in this exact order)
- if (resolveFromParent("/lib", path_or_name)) |fd| return fd;
- if (resolveFromParent("/usr/lib", path_or_name)) |fd| return fd;
+ if (resolveFromParent(io, "/lib", path_or_name)) |fd| return fd;
+ if (resolveFromParent(io, "/usr/lib", path_or_name)) |fd| return fd;
return error.FileNotFound;
}
/// Trusts the file. Malicious file will be able to execute arbitrary code.
pub fn open(path: []const u8) Error!ElfDynLib {
- const fd = try resolveFromName(path);
+ const io = std.Options.debug_io;
+
+ const fd = try resolveFromName(io, path);
defer posix.close(fd);
- const file: std.fs.File = .{ .handle = fd };
- const stat = try file.stat();
+ const file: Io.File = .{ .handle = fd };
+ const stat = try file.stat(io);
const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig;
const page_size = std.heap.pageSize();
@@ -549,11 +553,9 @@ fn checkver(def_arg: *elf.Verdef, vsym_arg: elf.Versym, vername: []const u8, str
}
test "ElfDynLib" {
- if (native_os != .linux) {
- return error.SkipZigTest;
- }
-
+ if (native_os != .linux) return error.SkipZigTest;
try testing.expectError(error.FileNotFound, ElfDynLib.open("invalid_so.so"));
+ try testing.expectError(error.FileNotFound, ElfDynLib.openZ("invalid_so.so"));
}
/// Separated to avoid referencing `WindowsDynLib`, because its field types may not
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index edc3a5f985..5f2d36323a 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -1,576 +1,28 @@
//! File System.
-const builtin = @import("builtin");
-const native_os = builtin.os.tag;
const std = @import("std.zig");
-const Io = std.Io;
-const root = @import("root");
-const mem = std.mem;
-const base64 = std.base64;
-const crypto = std.crypto;
-const Allocator = std.mem.Allocator;
-const assert = std.debug.assert;
-const posix = std.posix;
-const windows = std.os.windows;
-const is_darwin = native_os.isDarwin();
-
-pub const AtomicFile = @import("fs/AtomicFile.zig");
-pub const Dir = @import("fs/Dir.zig");
-pub const File = @import("fs/File.zig");
+/// Deprecated, use `std.Io.Dir.path`.
pub const path = @import("fs/path.zig");
-
-pub const has_executable_bit = switch (native_os) {
- .windows, .wasi => false,
- else => true,
-};
-
pub const wasi = @import("fs/wasi.zig");
-// TODO audit these APIs with respect to Dir and absolute paths
-
-pub const realpath = posix.realpath;
-pub const realpathZ = posix.realpathZ;
-pub const realpathW = posix.realpathW;
-pub const realpathW2 = posix.realpathW2;
-
pub const getAppDataDir = @import("fs/get_app_data_dir.zig").getAppDataDir;
pub const GetAppDataDirError = @import("fs/get_app_data_dir.zig").GetAppDataDirError;
-/// The maximum length of a file path that the operating system will accept.
-///
-/// Paths, including those returned from file system operations, may be longer
-/// than this length, but such paths cannot be successfully passed back in
-/// other file system operations. However, all path components returned by file
-/// system operations are assumed to fit into a `u8` array of this length.
-///
-/// The byte count includes room for a null sentinel byte.
-///
-/// * On Windows, `[]u8` file paths are encoded as
-/// [WTF-8](https://wtf-8.codeberg.page/).
-/// * On WASI, `[]u8` file paths are encoded as valid UTF-8.
-/// * On other platforms, `[]u8` file paths are opaque sequences of bytes with
-/// no particular encoding.
-pub const max_path_bytes = switch (native_os) {
- .linux, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .freebsd, .openbsd, .netbsd, .dragonfly, .haiku, .illumos, .plan9, .emscripten, .wasi, .serenity => posix.PATH_MAX,
- // Each WTF-16LE code unit may be expanded to 3 WTF-8 bytes.
- // If it would require 4 WTF-8 bytes, then there would be a surrogate
- // pair in the WTF-16LE, and we (over)account 3 bytes for it that way.
- // +1 for the null byte at the end, which can be encoded in 1 byte.
- .windows => windows.PATH_MAX_WIDE * 3 + 1,
- else => if (@hasDecl(root, "os") and @hasDecl(root.os, "PATH_MAX"))
- root.os.PATH_MAX
- else
- @compileError("PATH_MAX not implemented for " ++ @tagName(native_os)),
-};
-
-/// This represents the maximum size of a `[]u8` file name component that
-/// the platform's common file systems support. File name components returned by file system
-/// operations are likely to fit into a `u8` array of this length, but
-/// (depending on the platform) this assumption may not hold for every configuration.
-/// The byte count does not include a null sentinel byte.
-/// On Windows, `[]u8` file name components are encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, file name components are encoded as valid UTF-8.
-/// On other platforms, `[]u8` components are an opaque sequence of bytes with no particular encoding.
-pub const max_name_bytes = switch (native_os) {
- .linux, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .freebsd, .openbsd, .netbsd, .dragonfly, .illumos, .serenity => posix.NAME_MAX,
- // Haiku's NAME_MAX includes the null terminator, so subtract one.
- .haiku => posix.NAME_MAX - 1,
- // Each WTF-16LE character may be expanded to 3 WTF-8 bytes.
- // If it would require 4 WTF-8 bytes, then there would be a surrogate
- // pair in the WTF-16LE, and we (over)account 3 bytes for it that way.
- .windows => windows.NAME_MAX * 3,
- // For WASI, the MAX_NAME will depend on the host OS, so it needs to be
- // as large as the largest max_name_bytes (Windows) in order to work on any host OS.
- // TODO determine if this is a reasonable approach
- .wasi => windows.NAME_MAX * 3,
- else => if (@hasDecl(root, "os") and @hasDecl(root.os, "NAME_MAX"))
- root.os.NAME_MAX
- else
- @compileError("NAME_MAX not implemented for " ++ @tagName(native_os)),
-};
-
pub const base64_alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".*;
/// Base64 encoder, replacing the standard `+/` with `-_` so that it can be used in a file name on any filesystem.
-pub const base64_encoder = base64.Base64Encoder.init(base64_alphabet, null);
+pub const base64_encoder = std.base64.Base64Encoder.init(base64_alphabet, null);
/// Base64 decoder, replacing the standard `+/` with `-_` so that it can be used in a file name on any filesystem.
-pub const base64_decoder = base64.Base64Decoder.init(base64_alphabet, null);
-
-/// Same as `Dir.copyFile`, except asserts that both `source_path` and `dest_path`
-/// are absolute. See `Dir.copyFile` for a function that operates on both
-/// absolute and relative paths.
-/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-pub fn copyFileAbsolute(
- source_path: []const u8,
- dest_path: []const u8,
- args: Dir.CopyFileOptions,
-) !void {
- assert(path.isAbsolute(source_path));
- assert(path.isAbsolute(dest_path));
- const my_cwd = cwd();
- return Dir.copyFile(my_cwd, source_path, my_cwd, dest_path, args);
-}
-
-test copyFileAbsolute {}
-
-/// Create a new directory, based on an absolute path.
-/// Asserts that the path is absolute. See `Dir.makeDir` for a function that operates
-/// on both absolute and relative paths.
-/// On Windows, `absolute_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `absolute_path` should be encoded as valid UTF-8.
-/// On other platforms, `absolute_path` is an opaque sequence of bytes with no particular encoding.
-pub fn makeDirAbsolute(absolute_path: []const u8) !void {
- assert(path.isAbsolute(absolute_path));
- return posix.mkdir(absolute_path, Dir.default_mode);
-}
-
-test makeDirAbsolute {}
-
-/// Same as `makeDirAbsolute` except the parameter is null-terminated.
-pub fn makeDirAbsoluteZ(absolute_path_z: [*:0]const u8) !void {
- assert(path.isAbsoluteZ(absolute_path_z));
- return posix.mkdirZ(absolute_path_z, Dir.default_mode);
-}
-
-test makeDirAbsoluteZ {}
-
-/// Same as `Dir.deleteDir` except the path is absolute.
-/// On Windows, `dir_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `dir_path` should be encoded as valid UTF-8.
-/// On other platforms, `dir_path` is an opaque sequence of bytes with no particular encoding.
-pub fn deleteDirAbsolute(dir_path: []const u8) !void {
- assert(path.isAbsolute(dir_path));
- return posix.rmdir(dir_path);
-}
-
-/// Same as `deleteDirAbsolute` except the path parameter is null-terminated.
-pub fn deleteDirAbsoluteZ(dir_path: [*:0]const u8) !void {
- assert(path.isAbsoluteZ(dir_path));
- return posix.rmdirZ(dir_path);
-}
-
-/// Same as `Dir.rename` except the paths are absolute.
-/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-pub fn renameAbsolute(old_path: []const u8, new_path: []const u8) !void {
- assert(path.isAbsolute(old_path));
- assert(path.isAbsolute(new_path));
- return posix.rename(old_path, new_path);
-}
-
-/// Same as `renameAbsolute` except the path parameters are null-terminated.
-pub fn renameAbsoluteZ(old_path: [*:0]const u8, new_path: [*:0]const u8) !void {
- assert(path.isAbsoluteZ(old_path));
- assert(path.isAbsoluteZ(new_path));
- return posix.renameZ(old_path, new_path);
-}
-
-/// Same as `Dir.rename`, except `new_sub_path` is relative to `new_dir`
-pub fn rename(old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8) !void {
- return posix.renameat(old_dir.fd, old_sub_path, new_dir.fd, new_sub_path);
-}
-
-/// Same as `rename` except the parameters are null-terminated.
-pub fn renameZ(old_dir: Dir, old_sub_path_z: [*:0]const u8, new_dir: Dir, new_sub_path_z: [*:0]const u8) !void {
- return posix.renameatZ(old_dir.fd, old_sub_path_z, new_dir.fd, new_sub_path_z);
-}
-
-/// Deprecated in favor of `Io.Dir.cwd`.
-pub fn cwd() Dir {
- if (native_os == .windows) {
- return .{ .fd = windows.peb().ProcessParameters.CurrentDirectory.Handle };
- } else if (native_os == .wasi) {
- return .{ .fd = std.options.wasiCwd() };
- } else {
- return .{ .fd = posix.AT.FDCWD };
- }
-}
-
-pub fn defaultWasiCwd() std.os.wasi.fd_t {
- // Expect the first preopen to be current working directory.
- return 3;
-}
-
-/// Opens a directory at the given path. The directory is a system resource that remains
-/// open until `close` is called on the result.
-/// See `openDirAbsoluteZ` for a function that accepts a null-terminated path.
-///
-/// Asserts that the path parameter has no null bytes.
-/// On Windows, `absolute_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `absolute_path` should be encoded as valid UTF-8.
-/// On other platforms, `absolute_path` is an opaque sequence of bytes with no particular encoding.
-pub fn openDirAbsolute(absolute_path: []const u8, flags: Dir.OpenOptions) File.OpenError!Dir {
- assert(path.isAbsolute(absolute_path));
- return cwd().openDir(absolute_path, flags);
-}
-
-/// Same as `openDirAbsolute` but the path parameter is null-terminated.
-pub fn openDirAbsoluteZ(absolute_path_c: [*:0]const u8, flags: Dir.OpenOptions) File.OpenError!Dir {
- assert(path.isAbsoluteZ(absolute_path_c));
- return cwd().openDirZ(absolute_path_c, flags);
-}
-/// Opens a file for reading or writing, without attempting to create a new file, based on an absolute path.
-/// Call `File.close` to release the resource.
-/// Asserts that the path is absolute. See `Dir.openFile` for a function that
-/// operates on both absolute and relative paths.
-/// Asserts that the path parameter has no null bytes. See `openFileAbsoluteZ` for a function
-/// that accepts a null-terminated path.
-/// On Windows, `absolute_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `absolute_path` should be encoded as valid UTF-8.
-/// On other platforms, `absolute_path` is an opaque sequence of bytes with no particular encoding.
-pub fn openFileAbsolute(absolute_path: []const u8, flags: File.OpenFlags) File.OpenError!File {
- assert(path.isAbsolute(absolute_path));
- return cwd().openFile(absolute_path, flags);
-}
-
-/// Test accessing `path`.
-/// Be careful of Time-Of-Check-Time-Of-Use race conditions when using this function.
-/// For example, instead of testing if a file exists and then opening it, just
-/// open it and handle the error for file not found.
-/// See `accessAbsoluteZ` for a function that accepts a null-terminated path.
-/// On Windows, `absolute_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `absolute_path` should be encoded as valid UTF-8.
-/// On other platforms, `absolute_path` is an opaque sequence of bytes with no particular encoding.
-pub fn accessAbsolute(absolute_path: []const u8, flags: Io.Dir.AccessOptions) Dir.AccessError!void {
- assert(path.isAbsolute(absolute_path));
- try cwd().access(absolute_path, flags);
-}
-/// Creates, opens, or overwrites a file with write access, based on an absolute path.
-/// Call `File.close` to release the resource.
-/// Asserts that the path is absolute. See `Dir.createFile` for a function that
-/// operates on both absolute and relative paths.
-/// Asserts that the path parameter has no null bytes. See `createFileAbsoluteC` for a function
-/// that accepts a null-terminated path.
-/// On Windows, `absolute_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `absolute_path` should be encoded as valid UTF-8.
-/// On other platforms, `absolute_path` is an opaque sequence of bytes with no particular encoding.
-pub fn createFileAbsolute(absolute_path: []const u8, flags: File.CreateFlags) File.OpenError!File {
- assert(path.isAbsolute(absolute_path));
- return cwd().createFile(absolute_path, flags);
-}
-
-/// Delete a file name and possibly the file it refers to, based on an absolute path.
-/// Asserts that the path is absolute. See `Dir.deleteFile` for a function that
-/// operates on both absolute and relative paths.
-/// Asserts that the path parameter has no null bytes.
-/// On Windows, `absolute_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `absolute_path` should be encoded as valid UTF-8.
-/// On other platforms, `absolute_path` is an opaque sequence of bytes with no particular encoding.
-pub fn deleteFileAbsolute(absolute_path: []const u8) Dir.DeleteFileError!void {
- assert(path.isAbsolute(absolute_path));
- return cwd().deleteFile(absolute_path);
-}
-
-/// Removes a symlink, file, or directory.
-/// This is equivalent to `Dir.deleteTree` with the base directory.
-/// Asserts that the path is absolute. See `Dir.deleteTree` for a function that
-/// operates on both absolute and relative paths.
-/// Asserts that the path parameter has no null bytes.
-/// On Windows, `absolute_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `absolute_path` should be encoded as valid UTF-8.
-/// On other platforms, `absolute_path` is an opaque sequence of bytes with no particular encoding.
-pub fn deleteTreeAbsolute(absolute_path: []const u8) !void {
- assert(path.isAbsolute(absolute_path));
- const dirname = path.dirname(absolute_path) orelse return error{
- /// Attempt to remove the root file system path.
- /// This error is unreachable if `absolute_path` is relative.
- CannotDeleteRootDirectory,
- }.CannotDeleteRootDirectory;
-
- var dir = try cwd().openDir(dirname, .{});
- defer dir.close();
-
- return dir.deleteTree(path.basename(absolute_path));
-}
-
-/// Same as `Dir.readLink`, except it asserts the path is absolute.
-/// On Windows, `pathname` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `pathname` should be encoded as valid UTF-8.
-/// On other platforms, `pathname` is an opaque sequence of bytes with no particular encoding.
-pub fn readLinkAbsolute(pathname: []const u8, buffer: *[max_path_bytes]u8) ![]u8 {
- assert(path.isAbsolute(pathname));
- return posix.readlink(pathname, buffer);
-}
-
-/// Creates a symbolic link named `sym_link_path` which contains the string `target_path`.
-/// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent
-/// one; the latter case is known as a dangling link.
-/// If `sym_link_path` exists, it will not be overwritten.
-/// See also `symLinkAbsoluteZ` and `symLinkAbsoluteW`.
-/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-pub fn symLinkAbsolute(
- target_path: []const u8,
- sym_link_path: []const u8,
- flags: Dir.SymLinkFlags,
-) !void {
- assert(path.isAbsolute(target_path));
- assert(path.isAbsolute(sym_link_path));
- if (native_os == .windows) {
- const target_path_w = try windows.sliceToPrefixedFileW(null, target_path);
- const sym_link_path_w = try windows.sliceToPrefixedFileW(null, sym_link_path);
- return windows.CreateSymbolicLink(null, sym_link_path_w.span(), target_path_w.span(), flags.is_directory);
- }
- return posix.symlink(target_path, sym_link_path);
-}
-
-/// Windows-only. Same as `symLinkAbsolute` except the parameters are null-terminated, WTF16 LE encoded.
-/// Note that this function will by default try creating a symbolic link to a file. If you would
-/// like to create a symbolic link to a directory, specify this with `SymLinkFlags{ .is_directory = true }`.
-/// See also `symLinkAbsolute`, `symLinkAbsoluteZ`.
-pub fn symLinkAbsoluteW(
- target_path_w: [*:0]const u16,
- sym_link_path_w: [*:0]const u16,
- flags: Dir.SymLinkFlags,
-) !void {
- assert(path.isAbsoluteWindowsW(target_path_w));
- assert(path.isAbsoluteWindowsW(sym_link_path_w));
- return windows.CreateSymbolicLink(null, mem.span(sym_link_path_w), mem.span(target_path_w), flags.is_directory);
-}
-
-pub const OpenSelfExeError = Io.File.OpenSelfExeError;
-
-/// Deprecated in favor of `Io.File.openSelfExe`.
-pub fn openSelfExe(flags: File.OpenFlags) OpenSelfExeError!File {
- if (native_os == .linux or native_os == .serenity or native_os == .windows) {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
- return .adaptFromNewApi(try Io.File.openSelfExe(io, flags));
- }
- // Use of max_path_bytes here is valid as the resulting path is immediately
- // opened with no modification.
- var buf: [max_path_bytes]u8 = undefined;
- const self_exe_path = try selfExePath(&buf);
- buf[self_exe_path.len] = 0;
- return openFileAbsolute(buf[0..self_exe_path.len :0], flags);
-}
-
-// This is `posix.ReadLinkError || posix.RealPathError` with impossible errors excluded
-pub const SelfExePathError = error{
- FileNotFound,
- AccessDenied,
- NameTooLong,
- NotSupported,
- NotDir,
- SymLinkLoop,
- InputOutput,
- FileTooBig,
- IsDir,
- ProcessFdQuotaExceeded,
- SystemFdQuotaExceeded,
- NoDevice,
- SystemResources,
- NoSpaceLeft,
- FileSystem,
- BadPathName,
- DeviceBusy,
- SharingViolation,
- PipeBusy,
- NotLink,
- PathAlreadyExists,
-
- /// On Windows, `\\server` or `\\server\share` was not found.
- NetworkNotFound,
- ProcessNotFound,
+pub const base64_decoder = std.base64.Base64Decoder.init(base64_alphabet, null);
- /// On Windows, antivirus software is enabled by default. It can be
- /// disabled, but Windows Update sometimes ignores the user's preference
- /// and re-enables it. When enabled, antivirus software on Windows
- /// intercepts file system operations and makes them significantly slower
- /// in addition to possibly failing with this error code.
- AntivirusInterference,
-
- /// On Windows, the volume does not contain a recognized file system. File
- /// system drivers might not be loaded, or the volume may be corrupt.
- UnrecognizedVolume,
-
- Canceled,
-} || posix.SysCtlError;
-
-/// `selfExePath` except allocates the result on the heap.
-/// Caller owns returned memory.
-pub fn selfExePathAlloc(allocator: Allocator) ![]u8 {
- // Use of max_path_bytes here is justified as, at least on one tested Linux
- // system, readlink will completely fail to return a result larger than
- // PATH_MAX even if given a sufficiently large buffer. This makes it
- // fundamentally impossible to get the selfExePath of a program running in
- // a very deeply nested directory chain in this way.
- // TODO(#4812): Investigate other systems and whether it is possible to get
- // this path by trying larger and larger buffers until one succeeds.
- var buf: [max_path_bytes]u8 = undefined;
- return allocator.dupe(u8, try selfExePath(&buf));
-}
-
-/// Get the path to the current executable. Follows symlinks.
-/// If you only need the directory, use selfExeDirPath.
-/// If you only want an open file handle, use openSelfExe.
-/// This function may return an error if the current executable
-/// was deleted after spawning.
-/// Returned value is a slice of out_buffer.
-/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On other platforms, the result is an opaque sequence of bytes with no particular encoding.
-///
-/// On Linux, depends on procfs being mounted. If the currently executing binary has
-/// been deleted, the file path looks something like `/a/b/c/exe (deleted)`.
-/// TODO make the return type of this a null terminated pointer
-pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 {
- if (is_darwin) {
- // Note that _NSGetExecutablePath() will return "a path" to
- // the executable not a "real path" to the executable.
- var symlink_path_buf: [max_path_bytes:0]u8 = undefined;
- var u32_len: u32 = max_path_bytes + 1; // include the sentinel
- const rc = std.c._NSGetExecutablePath(&symlink_path_buf, &u32_len);
- if (rc != 0) return error.NameTooLong;
-
- var real_path_buf: [max_path_bytes]u8 = undefined;
- const real_path = std.posix.realpathZ(&symlink_path_buf, &real_path_buf) catch |err| switch (err) {
- error.NetworkNotFound => unreachable, // Windows-only
- else => |e| return e,
- };
- if (real_path.len > out_buffer.len) return error.NameTooLong;
- const result = out_buffer[0..real_path.len];
- @memcpy(result, real_path);
- return result;
- }
- switch (native_os) {
- .linux, .serenity => return posix.readlinkZ("/proc/self/exe", out_buffer) catch |err| switch (err) {
- error.UnsupportedReparsePointType => unreachable, // Windows-only
- error.NetworkNotFound => unreachable, // Windows-only
- else => |e| return e,
- },
- .illumos => return posix.readlinkZ("/proc/self/path/a.out", out_buffer) catch |err| switch (err) {
- error.UnsupportedReparsePointType => unreachable, // Windows-only
- error.NetworkNotFound => unreachable, // Windows-only
- else => |e| return e,
- },
- .freebsd, .dragonfly => {
- var mib = [4]c_int{ posix.CTL.KERN, posix.KERN.PROC, posix.KERN.PROC_PATHNAME, -1 };
- var out_len: usize = out_buffer.len;
- try posix.sysctl(&mib, out_buffer.ptr, &out_len, null, 0);
- // TODO could this slice from 0 to out_len instead?
- return mem.sliceTo(out_buffer, 0);
- },
- .netbsd => {
- var mib = [4]c_int{ posix.CTL.KERN, posix.KERN.PROC_ARGS, -1, posix.KERN.PROC_PATHNAME };
- var out_len: usize = out_buffer.len;
- try posix.sysctl(&mib, out_buffer.ptr, &out_len, null, 0);
- // TODO could this slice from 0 to out_len instead?
- return mem.sliceTo(out_buffer, 0);
- },
- .openbsd, .haiku => {
- // OpenBSD doesn't support getting the path of a running process, so try to guess it
- if (std.os.argv.len == 0)
- return error.FileNotFound;
-
- const argv0 = mem.span(std.os.argv[0]);
- if (mem.find(u8, argv0, "/") != null) {
- // argv[0] is a path (relative or absolute): use realpath(3) directly
- var real_path_buf: [max_path_bytes]u8 = undefined;
- const real_path = posix.realpathZ(std.os.argv[0], &real_path_buf) catch |err| switch (err) {
- error.NetworkNotFound => unreachable, // Windows-only
- else => |e| return e,
- };
- if (real_path.len > out_buffer.len)
- return error.NameTooLong;
- const result = out_buffer[0..real_path.len];
- @memcpy(result, real_path);
- return result;
- } else if (argv0.len != 0) {
- // argv[0] is not empty (and not a path): search it inside PATH
- const PATH = posix.getenvZ("PATH") orelse return error.FileNotFound;
- var path_it = mem.tokenizeScalar(u8, PATH, path.delimiter);
- while (path_it.next()) |a_path| {
- var resolved_path_buf: [max_path_bytes - 1:0]u8 = undefined;
- const resolved_path = std.fmt.bufPrintSentinel(&resolved_path_buf, "{s}/{s}", .{
- a_path,
- std.os.argv[0],
- }, 0) catch continue;
-
- var real_path_buf: [max_path_bytes]u8 = undefined;
- if (posix.realpathZ(resolved_path, &real_path_buf)) |real_path| {
- // found a file, and hope it is the right file
- if (real_path.len > out_buffer.len)
- return error.NameTooLong;
- const result = out_buffer[0..real_path.len];
- @memcpy(result, real_path);
- return result;
- } else |_| continue;
- }
- }
- return error.FileNotFound;
- },
- .windows => {
- const image_path_unicode_string = &windows.peb().ProcessParameters.ImagePathName;
- const image_path_name = image_path_unicode_string.Buffer.?[0 .. image_path_unicode_string.Length / 2 :0];
-
- // If ImagePathName is a symlink, then it will contain the path of the
- // symlink, not the path that the symlink points to. We want the path
- // that the symlink points to, though, so we need to get the realpath.
- var pathname_w = try windows.wToPrefixedFileW(null, image_path_name);
-
- const wide_slice = try std.fs.cwd().realpathW2(pathname_w.span(), &pathname_w.data);
-
- const len = std.unicode.calcWtf8Len(wide_slice);
- if (len > out_buffer.len)
- return error.NameTooLong;
-
- const end_index = std.unicode.wtf16LeToWtf8(out_buffer, wide_slice);
- return out_buffer[0..end_index];
- },
- else => @compileError("std.fs.selfExePath not supported for this target"),
- }
-}
-
-/// `selfExeDirPath` except allocates the result on the heap.
-/// Caller owns returned memory.
-pub fn selfExeDirPathAlloc(allocator: Allocator) ![]u8 {
- // Use of max_path_bytes here is justified as, at least on one tested Linux
- // system, readlink will completely fail to return a result larger than
- // PATH_MAX even if given a sufficiently large buffer. This makes it
- // fundamentally impossible to get the selfExeDirPath of a program running
- // in a very deeply nested directory chain in this way.
- // TODO(#4812): Investigate other systems and whether it is possible to get
- // this path by trying larger and larger buffers until one succeeds.
- var buf: [max_path_bytes]u8 = undefined;
- return allocator.dupe(u8, try selfExeDirPath(&buf));
-}
-
-/// Get the directory path that contains the current executable.
-/// Returned value is a slice of out_buffer.
-/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On other platforms, the result is an opaque sequence of bytes with no particular encoding.
-pub fn selfExeDirPath(out_buffer: []u8) SelfExePathError![]const u8 {
- const self_exe_path = try selfExePath(out_buffer);
- // Assume that the OS APIs return absolute paths, and therefore dirname
- // will not return null.
- return path.dirname(self_exe_path).?;
-}
-
-/// `realpath`, except caller must free the returned memory.
-/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On other platforms, the result is an opaque sequence of bytes with no particular encoding.
-/// See also `Dir.realpath`.
-pub fn realpathAlloc(allocator: Allocator, pathname: []const u8) ![]u8 {
- // Use of max_path_bytes here is valid as the realpath function does not
- // have a variant that takes an arbitrary-size buffer.
- // TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
- // NULL out parameter (GNU's canonicalize_file_name) to handle overelong
- // paths. musl supports passing NULL but restricts the output to PATH_MAX
- // anyway.
- var buf: [max_path_bytes]u8 = undefined;
- return allocator.dupe(u8, try posix.realpath(pathname, &buf));
-}
+/// Deprecated, use `std.Io.Dir.max_path_bytes`.
+pub const max_path_bytes = std.Io.Dir.max_path_bytes;
+/// Deprecated, use `std.Io.Dir.max_name_bytes`.
+pub const max_name_bytes = std.Io.Dir.max_name_bytes;
test {
- _ = AtomicFile;
- _ = Dir;
- _ = File;
_ = path;
_ = @import("fs/test.zig");
_ = @import("fs/get_app_data_dir.zig");
diff --git a/lib/std/fs/Dir.zig b/lib/std/fs/Dir.zig
deleted file mode 100644
index ea9c6408bf..0000000000
--- a/lib/std/fs/Dir.zig
+++ /dev/null
@@ -1,2065 +0,0 @@
-//! Deprecated in favor of `Io.Dir`.
-const Dir = @This();
-
-const builtin = @import("builtin");
-const native_os = builtin.os.tag;
-
-const std = @import("../std.zig");
-const Io = std.Io;
-const File = std.fs.File;
-const AtomicFile = std.fs.AtomicFile;
-const base64_encoder = fs.base64_encoder;
-const posix = std.posix;
-const mem = std.mem;
-const path = fs.path;
-const fs = std.fs;
-const Allocator = std.mem.Allocator;
-const assert = std.debug.assert;
-const linux = std.os.linux;
-const windows = std.os.windows;
-const have_flock = @TypeOf(posix.system.flock) != void;
-
-fd: Handle,
-
-pub const Handle = posix.fd_t;
-
-pub const default_mode = 0o755;
-
-pub const Entry = struct {
- name: []const u8,
- kind: Kind,
-
- pub const Kind = File.Kind;
-};
-
-const IteratorError = error{
- AccessDenied,
- PermissionDenied,
- SystemResources,
-} || posix.UnexpectedError;
-
-pub const Iterator = switch (native_os) {
- .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd, .illumos => struct {
- dir: Dir,
- seek: i64,
- buf: [1024]u8 align(@alignOf(posix.system.dirent)),
- index: usize,
- end_index: usize,
- first_iter: bool,
-
- const Self = @This();
-
- pub const Error = IteratorError;
-
- /// Memory such as file names referenced in this returned entry becomes invalid
- /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized.
- pub fn next(self: *Self) Error!?Entry {
- switch (native_os) {
- .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => return self.nextDarwin(),
- .freebsd, .netbsd, .dragonfly, .openbsd => return self.nextBsd(),
- .illumos => return self.nextIllumos(),
- else => @compileError("unimplemented"),
- }
- }
-
- fn nextDarwin(self: *Self) !?Entry {
- start_over: while (true) {
- if (self.index >= self.end_index) {
- if (self.first_iter) {
- posix.lseek_SET(self.dir.fd, 0) catch unreachable; // EBADF here likely means that the Dir was not opened with iteration permissions
- self.first_iter = false;
- }
- const rc = posix.system.getdirentries(
- self.dir.fd,
- &self.buf,
- self.buf.len,
- &self.seek,
- );
- if (rc == 0) return null;
- if (rc < 0) {
- switch (posix.errno(rc)) {
- .BADF => unreachable, // Dir is invalid or was opened without iteration ability
- .FAULT => unreachable,
- .NOTDIR => unreachable,
- .INVAL => unreachable,
- else => |err| return posix.unexpectedErrno(err),
- }
- }
- self.index = 0;
- self.end_index = @as(usize, @intCast(rc));
- }
- const darwin_entry = @as(*align(1) posix.system.dirent, @ptrCast(&self.buf[self.index]));
- const next_index = self.index + darwin_entry.reclen;
- self.index = next_index;
-
- const name = @as([*]u8, @ptrCast(&darwin_entry.name))[0..darwin_entry.namlen];
-
- if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or (darwin_entry.ino == 0)) {
- continue :start_over;
- }
-
- const entry_kind: Entry.Kind = switch (darwin_entry.type) {
- posix.DT.BLK => .block_device,
- posix.DT.CHR => .character_device,
- posix.DT.DIR => .directory,
- posix.DT.FIFO => .named_pipe,
- posix.DT.LNK => .sym_link,
- posix.DT.REG => .file,
- posix.DT.SOCK => .unix_domain_socket,
- posix.DT.WHT => .whiteout,
- else => .unknown,
- };
- return Entry{
- .name = name,
- .kind = entry_kind,
- };
- }
- }
-
- fn nextIllumos(self: *Self) !?Entry {
- start_over: while (true) {
- if (self.index >= self.end_index) {
- if (self.first_iter) {
- posix.lseek_SET(self.dir.fd, 0) catch unreachable; // EBADF here likely means that the Dir was not opened with iteration permissions
- self.first_iter = false;
- }
- const rc = posix.system.getdents(self.dir.fd, &self.buf, self.buf.len);
- switch (posix.errno(rc)) {
- .SUCCESS => {},
- .BADF => unreachable, // Dir is invalid or was opened without iteration ability
- .FAULT => unreachable,
- .NOTDIR => unreachable,
- .INVAL => unreachable,
- else => |err| return posix.unexpectedErrno(err),
- }
- if (rc == 0) return null;
- self.index = 0;
- self.end_index = @as(usize, @intCast(rc));
- }
- const entry = @as(*align(1) posix.system.dirent, @ptrCast(&self.buf[self.index]));
- const next_index = self.index + entry.reclen;
- self.index = next_index;
-
- const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&entry.name)), 0);
- if (mem.eql(u8, name, ".") or mem.eql(u8, name, ".."))
- continue :start_over;
-
- // illumos dirent doesn't expose type, so we have to call stat to get it.
- const stat_info = posix.fstatat(
- self.dir.fd,
- name,
- posix.AT.SYMLINK_NOFOLLOW,
- ) catch |err| switch (err) {
- error.NameTooLong => unreachable,
- error.SymLinkLoop => unreachable,
- error.FileNotFound => unreachable, // lost the race
- else => |e| return e,
- };
- const entry_kind: Entry.Kind = switch (stat_info.mode & posix.S.IFMT) {
- posix.S.IFIFO => .named_pipe,
- posix.S.IFCHR => .character_device,
- posix.S.IFDIR => .directory,
- posix.S.IFBLK => .block_device,
- posix.S.IFREG => .file,
- posix.S.IFLNK => .sym_link,
- posix.S.IFSOCK => .unix_domain_socket,
- posix.S.IFDOOR => .door,
- posix.S.IFPORT => .event_port,
- else => .unknown,
- };
- return Entry{
- .name = name,
- .kind = entry_kind,
- };
- }
- }
-
- fn nextBsd(self: *Self) !?Entry {
- start_over: while (true) {
- if (self.index >= self.end_index) {
- if (self.first_iter) {
- posix.lseek_SET(self.dir.fd, 0) catch unreachable; // EBADF here likely means that the Dir was not opened with iteration permissions
- self.first_iter = false;
- }
- const rc = posix.system.getdents(self.dir.fd, &self.buf, self.buf.len);
- switch (posix.errno(rc)) {
- .SUCCESS => {},
- .BADF => unreachable, // Dir is invalid or was opened without iteration ability
- .FAULT => unreachable,
- .NOTDIR => unreachable,
- .INVAL => unreachable,
- // Introduced in freebsd 13.2: directory unlinked but still open.
- // To be consistent, iteration ends if the directory being iterated is deleted during iteration.
- .NOENT => return null,
- else => |err| return posix.unexpectedErrno(err),
- }
- if (rc == 0) return null;
- self.index = 0;
- self.end_index = @as(usize, @intCast(rc));
- }
- const bsd_entry = @as(*align(1) posix.system.dirent, @ptrCast(&self.buf[self.index]));
- const next_index = self.index +
- if (@hasField(posix.system.dirent, "reclen")) bsd_entry.reclen else bsd_entry.reclen();
- self.index = next_index;
-
- const name = @as([*]u8, @ptrCast(&bsd_entry.name))[0..bsd_entry.namlen];
-
- const skip_zero_fileno = switch (native_os) {
- // fileno=0 is used to mark invalid entries or deleted files.
- .openbsd, .netbsd => true,
- else => false,
- };
- if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or
- (skip_zero_fileno and bsd_entry.fileno == 0))
- {
- continue :start_over;
- }
-
- const entry_kind: Entry.Kind = switch (bsd_entry.type) {
- posix.DT.BLK => .block_device,
- posix.DT.CHR => .character_device,
- posix.DT.DIR => .directory,
- posix.DT.FIFO => .named_pipe,
- posix.DT.LNK => .sym_link,
- posix.DT.REG => .file,
- posix.DT.SOCK => .unix_domain_socket,
- posix.DT.WHT => .whiteout,
- else => .unknown,
- };
- return Entry{
- .name = name,
- .kind = entry_kind,
- };
- }
- }
-
- pub fn reset(self: *Self) void {
- self.index = 0;
- self.end_index = 0;
- self.first_iter = true;
- }
- },
- .haiku => struct {
- dir: Dir,
- buf: [@sizeOf(DirEnt) + posix.PATH_MAX]u8 align(@alignOf(DirEnt)),
- offset: usize,
- index: usize,
- end_index: usize,
- first_iter: bool,
-
- const Self = @This();
- const DirEnt = posix.system.DirEnt;
-
- pub const Error = IteratorError;
-
- /// Memory such as file names referenced in this returned entry becomes invalid
- /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized.
- pub fn next(self: *Self) Error!?Entry {
- while (true) {
- if (self.index >= self.end_index) {
- if (self.first_iter) {
- switch (@as(posix.E, @enumFromInt(posix.system._kern_rewind_dir(self.dir.fd)))) {
- .SUCCESS => {},
- .BADF => unreachable, // Dir is invalid
- .FAULT => unreachable,
- .NOTDIR => unreachable,
- .INVAL => unreachable,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- else => |err| return posix.unexpectedErrno(err),
- }
- self.first_iter = false;
- }
- const rc = posix.system._kern_read_dir(
- self.dir.fd,
- &self.buf,
- self.buf.len,
- self.buf.len / @sizeOf(DirEnt),
- );
- if (rc == 0) return null;
- if (rc < 0) {
- switch (@as(posix.E, @enumFromInt(rc))) {
- .BADF => unreachable, // Dir is invalid
- .FAULT => unreachable,
- .NOTDIR => unreachable,
- .INVAL => unreachable,
- .OVERFLOW => unreachable,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- else => |err| return posix.unexpectedErrno(err),
- }
- }
- self.offset = 0;
- self.index = 0;
- self.end_index = @intCast(rc);
- }
- const dirent: *DirEnt = @ptrCast(@alignCast(&self.buf[self.offset]));
- self.offset += dirent.reclen;
- self.index += 1;
- const name = mem.span(dirent.getName());
- if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or dirent.ino == 0) continue;
-
- var stat_info: posix.Stat = undefined;
- switch (@as(posix.E, @enumFromInt(posix.system._kern_read_stat(
- self.dir.fd,
- name,
- false,
- &stat_info,
- @sizeOf(posix.Stat),
- )))) {
- .SUCCESS => {},
- .INVAL => unreachable,
- .BADF => unreachable, // Dir is invalid
- .NOMEM => return error.SystemResources,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .FAULT => unreachable,
- .NAMETOOLONG => unreachable,
- .LOOP => unreachable,
- .NOENT => continue,
- else => |err| return posix.unexpectedErrno(err),
- }
- const statmode = stat_info.mode & posix.S.IFMT;
-
- const entry_kind: Entry.Kind = switch (statmode) {
- posix.S.IFDIR => .directory,
- posix.S.IFBLK => .block_device,
- posix.S.IFCHR => .character_device,
- posix.S.IFLNK => .sym_link,
- posix.S.IFREG => .file,
- posix.S.IFIFO => .named_pipe,
- else => .unknown,
- };
-
- return Entry{
- .name = name,
- .kind = entry_kind,
- };
- }
- }
-
- pub fn reset(self: *Self) void {
- self.index = 0;
- self.end_index = 0;
- self.first_iter = true;
- }
- },
- .linux => struct {
- dir: Dir,
- buf: [1024]u8 align(@alignOf(linux.dirent64)),
- index: usize,
- end_index: usize,
- first_iter: bool,
-
- const Self = @This();
-
- pub const Error = IteratorError;
-
- /// Memory such as file names referenced in this returned entry becomes invalid
- /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized.
- pub fn next(self: *Self) Error!?Entry {
- return self.nextLinux() catch |err| switch (err) {
- // To be consistent across platforms, iteration ends if the directory being iterated is deleted during iteration.
- // This matches the behavior of non-Linux UNIX platforms.
- error.DirNotFound => null,
- else => |e| return e,
- };
- }
-
- pub const ErrorLinux = error{DirNotFound} || IteratorError;
-
- /// Implementation of `next` that can return `error.DirNotFound` if the directory being
- /// iterated was deleted during iteration (this error is Linux specific).
- pub fn nextLinux(self: *Self) ErrorLinux!?Entry {
- start_over: while (true) {
- if (self.index >= self.end_index) {
- if (self.first_iter) {
- posix.lseek_SET(self.dir.fd, 0) catch unreachable; // EBADF here likely means that the Dir was not opened with iteration permissions
- self.first_iter = false;
- }
- const rc = linux.getdents64(self.dir.fd, &self.buf, self.buf.len);
- switch (linux.errno(rc)) {
- .SUCCESS => {},
- .BADF => unreachable, // Dir is invalid or was opened without iteration ability
- .FAULT => unreachable,
- .NOTDIR => unreachable,
- .NOENT => return error.DirNotFound, // The directory being iterated was deleted during iteration.
- .INVAL => return error.Unexpected, // Linux may in some cases return EINVAL when reading /proc/$PID/net.
- .ACCES => return error.AccessDenied, // Do not have permission to iterate this directory.
- else => |err| return posix.unexpectedErrno(err),
- }
- if (rc == 0) return null;
- self.index = 0;
- self.end_index = rc;
- }
- const linux_entry = @as(*align(1) linux.dirent64, @ptrCast(&self.buf[self.index]));
- const next_index = self.index + linux_entry.reclen;
- self.index = next_index;
-
- const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&linux_entry.name)), 0);
-
- // skip . and .. entries
- if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) {
- continue :start_over;
- }
-
- const entry_kind: Entry.Kind = switch (linux_entry.type) {
- linux.DT.BLK => .block_device,
- linux.DT.CHR => .character_device,
- linux.DT.DIR => .directory,
- linux.DT.FIFO => .named_pipe,
- linux.DT.LNK => .sym_link,
- linux.DT.REG => .file,
- linux.DT.SOCK => .unix_domain_socket,
- else => .unknown,
- };
- return Entry{
- .name = name,
- .kind = entry_kind,
- };
- }
- }
-
- pub fn reset(self: *Self) void {
- self.index = 0;
- self.end_index = 0;
- self.first_iter = true;
- }
- },
- .windows => struct {
- dir: Dir,
- buf: [1024]u8 align(@alignOf(windows.FILE_BOTH_DIR_INFORMATION)),
- index: usize,
- end_index: usize,
- first_iter: bool,
- name_data: [fs.max_name_bytes]u8,
-
- const Self = @This();
-
- pub const Error = IteratorError;
-
- /// Memory such as file names referenced in this returned entry becomes invalid
- /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized.
- pub fn next(self: *Self) Error!?Entry {
- const w = windows;
- while (true) {
- if (self.index >= self.end_index) {
- var io: w.IO_STATUS_BLOCK = undefined;
- const rc = w.ntdll.NtQueryDirectoryFile(
- self.dir.fd,
- null,
- null,
- null,
- &io,
- &self.buf,
- self.buf.len,
- .BothDirectory,
- w.FALSE,
- null,
- @intFromBool(self.first_iter),
- );
- self.first_iter = false;
- if (io.Information == 0) return null;
- self.index = 0;
- self.end_index = io.Information;
- switch (rc) {
- .SUCCESS => {},
- .ACCESS_DENIED => return error.AccessDenied, // Double-check that the Dir was opened with iteration ability
-
- else => return w.unexpectedStatus(rc),
- }
- }
-
- // While the official api docs guarantee FILE_BOTH_DIR_INFORMATION to be aligned properly
- // this may not always be the case (e.g. due to faulty VM/Sandboxing tools)
- const dir_info: *align(2) w.FILE_BOTH_DIR_INFORMATION = @ptrCast(@alignCast(&self.buf[self.index]));
- if (dir_info.NextEntryOffset != 0) {
- self.index += dir_info.NextEntryOffset;
- } else {
- self.index = self.buf.len;
- }
-
- const name_wtf16le = @as([*]u16, @ptrCast(&dir_info.FileName))[0 .. dir_info.FileNameLength / 2];
-
- if (mem.eql(u16, name_wtf16le, &[_]u16{'.'}) or mem.eql(u16, name_wtf16le, &[_]u16{ '.', '.' }))
- continue;
- const name_wtf8_len = std.unicode.wtf16LeToWtf8(self.name_data[0..], name_wtf16le);
- const name_wtf8 = self.name_data[0..name_wtf8_len];
- const kind: Entry.Kind = blk: {
- const attrs = dir_info.FileAttributes;
- if (attrs.DIRECTORY) break :blk .directory;
- if (attrs.REPARSE_POINT) break :blk .sym_link;
- break :blk .file;
- };
- return Entry{
- .name = name_wtf8,
- .kind = kind,
- };
- }
- }
-
- pub fn reset(self: *Self) void {
- self.index = 0;
- self.end_index = 0;
- self.first_iter = true;
- }
- },
- .wasi => struct {
- dir: Dir,
- buf: [1024]u8 align(@alignOf(std.os.wasi.dirent_t)),
- cookie: u64,
- index: usize,
- end_index: usize,
-
- const Self = @This();
-
- pub const Error = IteratorError;
-
- /// Memory such as file names referenced in this returned entry becomes invalid
- /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized.
- pub fn next(self: *Self) Error!?Entry {
- return self.nextWasi() catch |err| switch (err) {
- // To be consistent across platforms, iteration ends if the directory being iterated is deleted during iteration.
- // This matches the behavior of non-Linux UNIX platforms.
- error.DirNotFound => null,
- else => |e| return e,
- };
- }
-
- pub const ErrorWasi = error{DirNotFound} || IteratorError;
-
- /// Implementation of `next` that can return platform-dependent errors depending on the host platform.
- /// When the host platform is Linux, `error.DirNotFound` can be returned if the directory being
- /// iterated was deleted during iteration.
- pub fn nextWasi(self: *Self) ErrorWasi!?Entry {
- // We intentinally use fd_readdir even when linked with libc,
- // since its implementation is exactly the same as below,
- // and we avoid the code complexity here.
- const w = std.os.wasi;
- start_over: while (true) {
- // According to the WASI spec, the last entry might be truncated,
- // so we need to check if the left buffer contains the whole dirent.
- if (self.end_index - self.index < @sizeOf(w.dirent_t)) {
- var bufused: usize = undefined;
- switch (w.fd_readdir(self.dir.fd, &self.buf, self.buf.len, self.cookie, &bufused)) {
- .SUCCESS => {},
- .BADF => unreachable, // Dir is invalid or was opened without iteration ability
- .FAULT => unreachable,
- .NOTDIR => unreachable,
- .INVAL => unreachable,
- .NOENT => return error.DirNotFound, // The directory being iterated was deleted during iteration.
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return posix.unexpectedErrno(err),
- }
- if (bufused == 0) return null;
- self.index = 0;
- self.end_index = bufused;
- }
- const entry = @as(*align(1) w.dirent_t, @ptrCast(&self.buf[self.index]));
- const entry_size = @sizeOf(w.dirent_t);
- const name_index = self.index + entry_size;
- if (name_index + entry.namlen > self.end_index) {
- // This case, the name is truncated, so we need to call readdir to store the entire name.
- self.end_index = self.index; // Force fd_readdir in the next loop.
- continue :start_over;
- }
- const name = self.buf[name_index .. name_index + entry.namlen];
-
- const next_index = name_index + entry.namlen;
- self.index = next_index;
- self.cookie = entry.next;
-
- // skip . and .. entries
- if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) {
- continue :start_over;
- }
-
- const entry_kind: Entry.Kind = switch (entry.type) {
- .BLOCK_DEVICE => .block_device,
- .CHARACTER_DEVICE => .character_device,
- .DIRECTORY => .directory,
- .SYMBOLIC_LINK => .sym_link,
- .REGULAR_FILE => .file,
- .SOCKET_STREAM, .SOCKET_DGRAM => .unix_domain_socket,
- else => .unknown,
- };
- return Entry{
- .name = name,
- .kind = entry_kind,
- };
- }
- }
-
- pub fn reset(self: *Self) void {
- self.index = 0;
- self.end_index = 0;
- self.cookie = std.os.wasi.DIRCOOKIE_START;
- }
- },
- else => @compileError("unimplemented"),
-};
-
-pub fn iterate(self: Dir) Iterator {
- return self.iterateImpl(true);
-}
-
-/// Like `iterate`, but will not reset the directory cursor before the first
-/// iteration. This should only be used in cases where it is known that the
-/// `Dir` has not had its cursor modified yet (e.g. it was just opened).
-pub fn iterateAssumeFirstIteration(self: Dir) Iterator {
- return self.iterateImpl(false);
-}
-
-fn iterateImpl(self: Dir, first_iter_start_value: bool) Iterator {
- switch (native_os) {
- .driverkit,
- .ios,
- .maccatalyst,
- .macos,
- .tvos,
- .visionos,
- .watchos,
- .freebsd,
- .netbsd,
- .dragonfly,
- .openbsd,
- .illumos,
- => return Iterator{
- .dir = self,
- .seek = 0,
- .index = 0,
- .end_index = 0,
- .buf = undefined,
- .first_iter = first_iter_start_value,
- },
- .linux => return Iterator{
- .dir = self,
- .index = 0,
- .end_index = 0,
- .buf = undefined,
- .first_iter = first_iter_start_value,
- },
- .haiku => return Iterator{
- .dir = self,
- .offset = 0,
- .index = 0,
- .end_index = 0,
- .buf = undefined,
- .first_iter = first_iter_start_value,
- },
- .windows => return Iterator{
- .dir = self,
- .index = 0,
- .end_index = 0,
- .first_iter = first_iter_start_value,
- .buf = undefined,
- .name_data = undefined,
- },
- .wasi => return Iterator{
- .dir = self,
- .cookie = std.os.wasi.DIRCOOKIE_START,
- .index = 0,
- .end_index = 0,
- .buf = undefined,
- },
- else => @compileError("unimplemented"),
- }
-}
-
-pub const SelectiveWalker = struct {
- stack: std.ArrayList(Walker.StackItem),
- name_buffer: std.ArrayList(u8),
- allocator: Allocator,
-
- pub const Error = IteratorError || Allocator.Error;
-
- /// After each call to this function, and on deinit(), the memory returned
- /// from this function becomes invalid. A copy must be made in order to keep
- /// a reference to the path.
- pub fn next(self: *SelectiveWalker) Error!?Walker.Entry {
- while (self.stack.items.len > 0) {
- const top = &self.stack.items[self.stack.items.len - 1];
- var dirname_len = top.dirname_len;
- if (top.iter.next() catch |err| {
- // If we get an error, then we want the user to be able to continue
- // walking if they want, which means that we need to pop the directory
- // that errored from the stack. Otherwise, all future `next` calls would
- // likely just fail with the same error.
- var item = self.stack.pop().?;
- if (self.stack.items.len != 0) {
- item.iter.dir.close();
- }
- return err;
- }) |entry| {
- self.name_buffer.shrinkRetainingCapacity(dirname_len);
- if (self.name_buffer.items.len != 0) {
- try self.name_buffer.append(self.allocator, fs.path.sep);
- dirname_len += 1;
- }
- try self.name_buffer.ensureUnusedCapacity(self.allocator, entry.name.len + 1);
- self.name_buffer.appendSliceAssumeCapacity(entry.name);
- self.name_buffer.appendAssumeCapacity(0);
- const walker_entry: Walker.Entry = .{
- .dir = top.iter.dir,
- .basename = self.name_buffer.items[dirname_len .. self.name_buffer.items.len - 1 :0],
- .path = self.name_buffer.items[0 .. self.name_buffer.items.len - 1 :0],
- .kind = entry.kind,
- };
- return walker_entry;
- } else {
- var item = self.stack.pop().?;
- if (self.stack.items.len != 0) {
- item.iter.dir.close();
- }
- }
- }
- return null;
- }
-
- /// Traverses into the directory, continuing walking one level down.
- pub fn enter(self: *SelectiveWalker, entry: Walker.Entry) !void {
- if (entry.kind != .directory) {
- @branchHint(.cold);
- return;
- }
-
- var new_dir = entry.dir.openDir(entry.basename, .{ .iterate = true }) catch |err| {
- switch (err) {
- error.NameTooLong => unreachable,
- else => |e| return e,
- }
- };
- errdefer new_dir.close();
-
- try self.stack.append(self.allocator, .{
- .iter = new_dir.iterateAssumeFirstIteration(),
- .dirname_len = self.name_buffer.items.len - 1,
- });
- }
-
- pub fn deinit(self: *SelectiveWalker) void {
- self.name_buffer.deinit(self.allocator);
- self.stack.deinit(self.allocator);
- }
-
- /// Leaves the current directory, continuing walking one level up.
- /// If the current entry is a directory entry, then the "current directory"
- /// will pertain to that entry if `enter` is called before `leave`.
- pub fn leave(self: *SelectiveWalker) void {
- var item = self.stack.pop().?;
- if (self.stack.items.len != 0) {
- @branchHint(.likely);
- item.iter.dir.close();
- }
- }
-};
-
-/// Recursively iterates over a directory, but requires the user to
-/// opt-in to recursing into each directory entry.
-///
-/// `self` must have been opened with `OpenOptions{.iterate = true}`.
-///
-/// `Walker.deinit` releases allocated memory and directory handles.
-///
-/// The order of returned file system entries is undefined.
-///
-/// `self` will not be closed after walking it.
-///
-/// See also `walk`.
-pub fn walkSelectively(self: Dir, allocator: Allocator) !SelectiveWalker {
- var stack: std.ArrayList(Walker.StackItem) = .empty;
-
- try stack.append(allocator, .{
- .iter = self.iterate(),
- .dirname_len = 0,
- });
-
- return .{
- .stack = stack,
- .name_buffer = .{},
- .allocator = allocator,
- };
-}
-
-pub const Walker = struct {
- inner: SelectiveWalker,
-
- pub const Entry = struct {
- /// The containing directory. This can be used to operate directly on `basename`
- /// rather than `path`, avoiding `error.NameTooLong` for deeply nested paths.
- /// The directory remains open until `next` or `deinit` is called.
- dir: Dir,
- basename: [:0]const u8,
- path: [:0]const u8,
- kind: Dir.Entry.Kind,
-
- /// Returns the depth of the entry relative to the initial directory.
- /// Returns 1 for a direct child of the initial directory, 2 for an entry
- /// within a direct child of the initial directory, etc.
- pub fn depth(self: Walker.Entry) usize {
- return mem.countScalar(u8, self.path, fs.path.sep) + 1;
- }
- };
-
- const StackItem = struct {
- iter: Dir.Iterator,
- dirname_len: usize,
- };
-
- /// After each call to this function, and on deinit(), the memory returned
- /// from this function becomes invalid. A copy must be made in order to keep
- /// a reference to the path.
- pub fn next(self: *Walker) !?Walker.Entry {
- const entry = try self.inner.next();
- if (entry != null and entry.?.kind == .directory) {
- try self.inner.enter(entry.?);
- }
- return entry;
- }
-
- pub fn deinit(self: *Walker) void {
- self.inner.deinit();
- }
-
- /// Leaves the current directory, continuing walking one level up.
- /// If the current entry is a directory entry, then the "current directory"
- /// is the directory pertaining to the current entry.
- pub fn leave(self: *Walker) void {
- self.inner.leave();
- }
-};
-
-/// Recursively iterates over a directory.
-///
-/// `self` must have been opened with `OpenOptions{.iterate = true}`.
-///
-/// `Walker.deinit` releases allocated memory and directory handles.
-///
-/// The order of returned file system entries is undefined.
-///
-/// `self` will not be closed after walking it.
-///
-/// See also `walkSelectively`.
-pub fn walk(self: Dir, allocator: Allocator) Allocator.Error!Walker {
- return .{
- .inner = try walkSelectively(self, allocator),
- };
-}
-
-pub const OpenError = Io.Dir.OpenError;
-
-pub fn close(self: *Dir) void {
- posix.close(self.fd);
- self.* = undefined;
-}
-
-/// Deprecated in favor of `Io.Dir.openFile`.
-pub fn openFile(self: Dir, sub_path: []const u8, flags: File.OpenFlags) File.OpenError!File {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
- return .adaptFromNewApi(try Io.Dir.openFile(self.adaptToNewApi(), io, sub_path, flags));
-}
-
-/// Deprecated in favor of `Io.Dir.createFile`.
-pub fn createFile(self: Dir, sub_path: []const u8, flags: File.CreateFlags) File.OpenError!File {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
- const new_file = try Io.Dir.createFile(self.adaptToNewApi(), io, sub_path, flags);
- return .adaptFromNewApi(new_file);
-}
-
-/// Deprecated in favor of `Io.Dir.MakeError`.
-pub const MakeError = Io.Dir.MakeError;
-
-/// Deprecated in favor of `Io.Dir.makeDir`.
-pub fn makeDir(self: Dir, sub_path: []const u8) MakeError!void {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
- return Io.Dir.makeDir(.{ .handle = self.fd }, io, sub_path);
-}
-
-/// Deprecated in favor of `Io.Dir.makeDir`.
-pub fn makeDirZ(self: Dir, sub_path: [*:0]const u8) MakeError!void {
- try posix.mkdiratZ(self.fd, sub_path, default_mode);
-}
-
-/// Deprecated in favor of `Io.Dir.makeDir`.
-pub fn makeDirW(self: Dir, sub_path: [*:0]const u16) MakeError!void {
- try posix.mkdiratW(self.fd, mem.span(sub_path), default_mode);
-}
-
-/// Deprecated in favor of `Io.Dir.makePath`.
-pub fn makePath(self: Dir, sub_path: []const u8) MakePathError!void {
- _ = try self.makePathStatus(sub_path);
-}
-
-/// Deprecated in favor of `Io.Dir.MakePathStatus`.
-pub const MakePathStatus = Io.Dir.MakePathStatus;
-/// Deprecated in favor of `Io.Dir.MakePathError`.
-pub const MakePathError = Io.Dir.MakePathError;
-
-/// Deprecated in favor of `Io.Dir.makePathStatus`.
-pub fn makePathStatus(self: Dir, sub_path: []const u8) MakePathError!MakePathStatus {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
- return Io.Dir.makePathStatus(.{ .handle = self.fd }, io, sub_path);
-}
-
-/// Deprecated in favor of `Io.Dir.makeOpenPath`.
-pub fn makeOpenPath(dir: Dir, sub_path: []const u8, options: OpenOptions) Io.Dir.MakeOpenPathError!Dir {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
- return .adaptFromNewApi(try Io.Dir.makeOpenPath(dir.adaptToNewApi(), io, sub_path, options));
-}
-
-pub const RealPathError = posix.RealPathError || error{Canceled};
-
-/// This function returns the canonicalized absolute pathname of
-/// `pathname` relative to this `Dir`. If `pathname` is absolute, ignores this
-/// `Dir` handle and returns the canonicalized absolute pathname of `pathname`
-/// argument.
-/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
-/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On other platforms, the result is an opaque sequence of bytes with no particular encoding.
-/// This function is not universally supported by all platforms.
-/// Currently supported hosts are: Linux, macOS, and Windows.
-/// See also `Dir.realpathZ`, `Dir.realpathW`, and `Dir.realpathAlloc`.
-pub fn realpath(self: Dir, pathname: []const u8, out_buffer: []u8) RealPathError![]u8 {
- if (native_os == .wasi) {
- @compileError("realpath is not available on WASI");
- }
- if (native_os == .windows) {
- var pathname_w = try windows.sliceToPrefixedFileW(self.fd, pathname);
-
- const wide_slice = try self.realpathW2(pathname_w.span(), &pathname_w.data);
-
- const len = std.unicode.calcWtf8Len(wide_slice);
- if (len > out_buffer.len)
- return error.NameTooLong;
-
- const end_index = std.unicode.wtf16LeToWtf8(out_buffer, wide_slice);
- return out_buffer[0..end_index];
- }
- const pathname_c = try posix.toPosixPath(pathname);
- return self.realpathZ(&pathname_c, out_buffer);
-}
-
-/// Same as `Dir.realpath` except `pathname` is null-terminated.
-/// See also `Dir.realpath`, `realpathZ`.
-pub fn realpathZ(self: Dir, pathname: [*:0]const u8, out_buffer: []u8) RealPathError![]u8 {
- if (native_os == .windows) {
- var pathname_w = try windows.cStrToPrefixedFileW(self.fd, pathname);
-
- const wide_slice = try self.realpathW2(pathname_w.span(), &pathname_w.data);
-
- const len = std.unicode.calcWtf8Len(wide_slice);
- if (len > out_buffer.len)
- return error.NameTooLong;
-
- const end_index = std.unicode.wtf16LeToWtf8(out_buffer, wide_slice);
- return out_buffer[0..end_index];
- }
-
- var flags: posix.O = .{};
- if (@hasField(posix.O, "NONBLOCK")) flags.NONBLOCK = true;
- if (@hasField(posix.O, "CLOEXEC")) flags.CLOEXEC = true;
- if (@hasField(posix.O, "PATH")) flags.PATH = true;
-
- const fd = posix.openatZ(self.fd, pathname, flags, 0) catch |err| switch (err) {
- error.FileLocksNotSupported => return error.Unexpected,
- error.FileBusy => return error.Unexpected,
- error.WouldBlock => return error.Unexpected,
- else => |e| return e,
- };
- defer posix.close(fd);
-
- var buffer: [fs.max_path_bytes]u8 = undefined;
- const out_path = try std.os.getFdPath(fd, &buffer);
-
- if (out_path.len > out_buffer.len) {
- return error.NameTooLong;
- }
-
- const result = out_buffer[0..out_path.len];
- @memcpy(result, out_path);
- return result;
-}
-
-/// Deprecated: use `realpathW2`.
-///
-/// Windows-only. Same as `Dir.realpath` except `pathname` is WTF16 LE encoded.
-/// The result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// See also `Dir.realpath`, `realpathW`.
-pub fn realpathW(self: Dir, pathname: []const u16, out_buffer: []u8) RealPathError![]u8 {
- var wide_buf: [std.os.windows.PATH_MAX_WIDE]u16 = undefined;
- const wide_slice = try self.realpathW2(pathname, &wide_buf);
-
- const len = std.unicode.calcWtf8Len(wide_slice);
- if (len > out_buffer.len) return error.NameTooLong;
-
- const end_index = std.unicode.wtf16LeToWtf8(&out_buffer, wide_slice);
- return out_buffer[0..end_index];
-}
-
-/// Windows-only. Same as `Dir.realpath` except
-/// * `pathname` and the result are WTF-16 LE encoded
-/// * `pathname` is relative or has the NT namespace prefix. See `windows.wToPrefixedFileW` for details.
-///
-/// Additionally, `pathname` will never be accessed after `out_buffer` has been written to, so it
-/// is safe to reuse a single buffer for both.
-///
-/// See also `Dir.realpath`, `realpathW`.
-pub fn realpathW2(self: Dir, pathname: []const u16, out_buffer: []u16) RealPathError![]u16 {
- const w = windows;
-
- const h_file = blk: {
- const res = w.OpenFile(pathname, .{
- .dir = self.fd,
- .access_mask = .{
- .STANDARD = .{ .SYNCHRONIZE = true },
- .GENERIC = .{ .READ = true },
- },
- .creation = .OPEN,
- .filter = .any,
- }) catch |err| switch (err) {
- error.WouldBlock => unreachable,
- else => |e| return e,
- };
- break :blk res;
- };
- defer w.CloseHandle(h_file);
-
- return w.GetFinalPathNameByHandle(h_file, .{}, out_buffer);
-}
-
-pub const RealPathAllocError = RealPathError || Allocator.Error;
-
-/// Same as `Dir.realpath` except caller must free the returned memory.
-/// See also `Dir.realpath`.
-pub fn realpathAlloc(self: Dir, allocator: Allocator, pathname: []const u8) RealPathAllocError![]u8 {
- // Use of max_path_bytes here is valid as the realpath function does not
- // have a variant that takes an arbitrary-size buffer.
- // TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
- // NULL out parameter (GNU's canonicalize_file_name) to handle overelong
- // paths. musl supports passing NULL but restricts the output to PATH_MAX
- // anyway.
- var buf: [fs.max_path_bytes]u8 = undefined;
- return allocator.dupe(u8, try self.realpath(pathname, buf[0..]));
-}
-
-/// Changes the current working directory to the open directory handle.
-/// This modifies global state and can have surprising effects in multi-
-/// threaded applications. Most applications and especially libraries should
-/// not call this function as a general rule, however it can have use cases
-/// in, for example, implementing a shell, or child process execution.
-/// Not all targets support this. For example, WASI does not have the concept
-/// of a current working directory.
-pub fn setAsCwd(self: Dir) !void {
- if (native_os == .wasi) {
- @compileError("changing cwd is not currently possible in WASI");
- }
- if (native_os == .windows) {
- var dir_path_buffer: [windows.PATH_MAX_WIDE]u16 = undefined;
- const dir_path = try windows.GetFinalPathNameByHandle(self.fd, .{}, &dir_path_buffer);
- if (builtin.link_libc) {
- return posix.chdirW(dir_path);
- }
- return windows.SetCurrentDirectory(dir_path);
- }
- try posix.fchdir(self.fd);
-}
-
-/// Deprecated in favor of `Io.Dir.OpenOptions`.
-pub const OpenOptions = Io.Dir.OpenOptions;
-
-/// Deprecated in favor of `Io.Dir.openDir`.
-pub fn openDir(self: Dir, sub_path: []const u8, args: OpenOptions) OpenError!Dir {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
- return .adaptFromNewApi(try Io.Dir.openDir(.{ .handle = self.fd }, io, sub_path, args));
-}
-
-pub const DeleteFileError = posix.UnlinkError;
-
-/// Delete a file name and possibly the file it refers to, based on an open directory handle.
-/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `sub_path` should be encoded as valid UTF-8.
-/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
-/// Asserts that the path parameter has no null bytes.
-pub fn deleteFile(self: Dir, sub_path: []const u8) DeleteFileError!void {
- if (native_os == .windows) {
- const sub_path_w = try windows.sliceToPrefixedFileW(self.fd, sub_path);
- return self.deleteFileW(sub_path_w.span());
- } else if (native_os == .wasi and !builtin.link_libc) {
- posix.unlinkat(self.fd, sub_path, 0) catch |err| switch (err) {
- error.DirNotEmpty => unreachable, // not passing AT.REMOVEDIR
- else => |e| return e,
- };
- } else {
- const sub_path_c = try posix.toPosixPath(sub_path);
- return self.deleteFileZ(&sub_path_c);
- }
-}
-
-/// Same as `deleteFile` except the parameter is null-terminated.
-pub fn deleteFileZ(self: Dir, sub_path_c: [*:0]const u8) DeleteFileError!void {
- posix.unlinkatZ(self.fd, sub_path_c, 0) catch |err| switch (err) {
- error.DirNotEmpty => unreachable, // not passing AT.REMOVEDIR
- error.AccessDenied, error.PermissionDenied => |e| switch (native_os) {
- // non-Linux POSIX systems return permission errors when trying to delete a
- // directory, so we need to handle that case specifically and translate the error
- .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd, .illumos => {
- // Don't follow symlinks to match unlinkat (which acts on symlinks rather than follows them)
- const fstat = posix.fstatatZ(self.fd, sub_path_c, posix.AT.SYMLINK_NOFOLLOW) catch return e;
- const is_dir = fstat.mode & posix.S.IFMT == posix.S.IFDIR;
- return if (is_dir) error.IsDir else e;
- },
- else => return e,
- },
- else => |e| return e,
- };
-}
-
-/// Same as `deleteFile` except the parameter is WTF-16 LE encoded.
-pub fn deleteFileW(self: Dir, sub_path_w: []const u16) DeleteFileError!void {
- posix.unlinkatW(self.fd, sub_path_w, 0) catch |err| switch (err) {
- error.DirNotEmpty => unreachable, // not passing AT.REMOVEDIR
- else => |e| return e,
- };
-}
-
-pub const DeleteDirError = error{
- DirNotEmpty,
- FileNotFound,
- AccessDenied,
- PermissionDenied,
- FileBusy,
- FileSystem,
- SymLinkLoop,
- NameTooLong,
- NotDir,
- SystemResources,
- ReadOnlyFileSystem,
- /// WASI: file paths must be valid UTF-8.
- /// Windows: file paths provided by the user must be valid WTF-8.
- /// https://wtf-8.codeberg.page/
- BadPathName,
- /// On Windows, `\\server` or `\\server\share` was not found.
- NetworkNotFound,
- ProcessNotFound,
- Unexpected,
-};
-
-/// Returns `error.DirNotEmpty` if the directory is not empty.
-/// To delete a directory recursively, see `deleteTree`.
-/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `sub_path` should be encoded as valid UTF-8.
-/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
-/// Asserts that the path parameter has no null bytes.
-pub fn deleteDir(self: Dir, sub_path: []const u8) DeleteDirError!void {
- if (native_os == .windows) {
- const sub_path_w = try windows.sliceToPrefixedFileW(self.fd, sub_path);
- return self.deleteDirW(sub_path_w.span());
- } else if (native_os == .wasi and !builtin.link_libc) {
- posix.unlinkat(self.fd, sub_path, posix.AT.REMOVEDIR) catch |err| switch (err) {
- error.IsDir => unreachable, // not possible since we pass AT.REMOVEDIR
- else => |e| return e,
- };
- } else {
- const sub_path_c = try posix.toPosixPath(sub_path);
- return self.deleteDirZ(&sub_path_c);
- }
-}
-
-/// Same as `deleteDir` except the parameter is null-terminated.
-pub fn deleteDirZ(self: Dir, sub_path_c: [*:0]const u8) DeleteDirError!void {
- posix.unlinkatZ(self.fd, sub_path_c, posix.AT.REMOVEDIR) catch |err| switch (err) {
- error.IsDir => unreachable, // not possible since we pass AT.REMOVEDIR
- else => |e| return e,
- };
-}
-
-/// Same as `deleteDir` except the parameter is WTF16LE, NT prefixed.
-/// This function is Windows-only.
-pub fn deleteDirW(self: Dir, sub_path_w: []const u16) DeleteDirError!void {
- posix.unlinkatW(self.fd, sub_path_w, posix.AT.REMOVEDIR) catch |err| switch (err) {
- error.IsDir => unreachable, // not possible since we pass AT.REMOVEDIR
- else => |e| return e,
- };
-}
-
-pub const RenameError = posix.RenameError;
-
-/// Change the name or location of a file or directory.
-/// If new_sub_path already exists, it will be replaced.
-/// Renaming a file over an existing directory or a directory
-/// over an existing file will fail with `error.IsDir` or `error.NotDir`
-/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-pub fn rename(self: Dir, old_sub_path: []const u8, new_sub_path: []const u8) RenameError!void {
- return posix.renameat(self.fd, old_sub_path, self.fd, new_sub_path);
-}
-
-/// Same as `rename` except the parameters are null-terminated.
-pub fn renameZ(self: Dir, old_sub_path_z: [*:0]const u8, new_sub_path_z: [*:0]const u8) RenameError!void {
- return posix.renameatZ(self.fd, old_sub_path_z, self.fd, new_sub_path_z);
-}
-
-/// Same as `rename` except the parameters are WTF16LE, NT prefixed.
-/// This function is Windows-only.
-pub fn renameW(self: Dir, old_sub_path_w: []const u16, new_sub_path_w: []const u16) RenameError!void {
- return posix.renameatW(self.fd, old_sub_path_w, self.fd, new_sub_path_w, windows.TRUE);
-}
-
-/// Use with `Dir.symLink`, `Dir.atomicSymLink`, and `symLinkAbsolute` to
-/// specify whether the symlink will point to a file or a directory. This value
-/// is ignored on all hosts except Windows where creating symlinks to different
-/// resource types, requires different flags. By default, `symLinkAbsolute` is
-/// assumed to point to a file.
-pub const SymLinkFlags = struct {
- is_directory: bool = false,
-};
-
-/// Creates a symbolic link named `sym_link_path` which contains the string `target_path`.
-/// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent
-/// one; the latter case is known as a dangling link.
-/// If `sym_link_path` exists, it will not be overwritten.
-/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-pub fn symLink(
- self: Dir,
- target_path: []const u8,
- sym_link_path: []const u8,
- flags: SymLinkFlags,
-) !void {
- if (native_os == .wasi and !builtin.link_libc) {
- return self.symLinkWasi(target_path, sym_link_path, flags);
- }
- if (native_os == .windows) {
- // Target path does not use sliceToPrefixedFileW because certain paths
- // are handled differently when creating a symlink than they would be
- // when converting to an NT namespaced path. CreateSymbolicLink in
- // symLinkW will handle the necessary conversion.
- var target_path_w: windows.PathSpace = undefined;
- target_path_w.len = try windows.wtf8ToWtf16Le(&target_path_w.data, target_path);
- target_path_w.data[target_path_w.len] = 0;
- // However, we need to canonicalize any path separators to `\`, since if
- // the target path is relative, then it must use `\` as the path separator.
- mem.replaceScalar(
- u16,
- target_path_w.data[0..target_path_w.len],
- mem.nativeToLittle(u16, '/'),
- mem.nativeToLittle(u16, '\\'),
- );
-
- const sym_link_path_w = try windows.sliceToPrefixedFileW(self.fd, sym_link_path);
- return self.symLinkW(target_path_w.span(), sym_link_path_w.span(), flags);
- }
- const target_path_c = try posix.toPosixPath(target_path);
- const sym_link_path_c = try posix.toPosixPath(sym_link_path);
- return self.symLinkZ(&target_path_c, &sym_link_path_c, flags);
-}
-
-/// WASI-only. Same as `symLink` except targeting WASI.
-pub fn symLinkWasi(
- self: Dir,
- target_path: []const u8,
- sym_link_path: []const u8,
- _: SymLinkFlags,
-) !void {
- return posix.symlinkat(target_path, self.fd, sym_link_path);
-}
-
-/// Same as `symLink`, except the pathname parameters are null-terminated.
-pub fn symLinkZ(
- self: Dir,
- target_path_c: [*:0]const u8,
- sym_link_path_c: [*:0]const u8,
- flags: SymLinkFlags,
-) !void {
- if (native_os == .windows) {
- const target_path_w = try windows.cStrToPrefixedFileW(self.fd, target_path_c);
- const sym_link_path_w = try windows.cStrToPrefixedFileW(self.fd, sym_link_path_c);
- return self.symLinkW(target_path_w.span(), sym_link_path_w.span(), flags);
- }
- return posix.symlinkatZ(target_path_c, self.fd, sym_link_path_c);
-}
-
-/// Windows-only. Same as `symLink` except the pathname parameters
-/// are WTF16 LE encoded.
-pub fn symLinkW(
- self: Dir,
- /// WTF-16, does not need to be NT-prefixed. The NT-prefixing
- /// of this path is handled by CreateSymbolicLink.
- /// Any path separators must be `\`, not `/`.
- target_path_w: [:0]const u16,
- /// WTF-16, must be NT-prefixed or relative
- sym_link_path_w: []const u16,
- flags: SymLinkFlags,
-) !void {
- return windows.CreateSymbolicLink(self.fd, sym_link_path_w, target_path_w, flags.is_directory);
-}
-
-/// Same as `symLink`, except tries to create the symbolic link until it
-/// succeeds or encounters an error other than `error.PathAlreadyExists`.
-///
-/// * On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// * On WASI, both paths should be encoded as valid UTF-8.
-/// * On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-pub fn atomicSymLink(
- dir: Dir,
- target_path: []const u8,
- sym_link_path: []const u8,
- flags: SymLinkFlags,
-) !void {
- if (dir.symLink(target_path, sym_link_path, flags)) {
- return;
- } else |err| switch (err) {
- error.PathAlreadyExists => {},
- else => |e| return e,
- }
-
- const dirname = path.dirname(sym_link_path) orelse ".";
-
- const rand_len = @sizeOf(u64) * 2;
- const temp_path_len = dirname.len + 1 + rand_len;
- var temp_path_buf: [fs.max_path_bytes]u8 = undefined;
-
- if (temp_path_len > temp_path_buf.len) return error.NameTooLong;
- @memcpy(temp_path_buf[0..dirname.len], dirname);
- temp_path_buf[dirname.len] = path.sep;
-
- const temp_path = temp_path_buf[0..temp_path_len];
-
- while (true) {
- const random_integer = std.crypto.random.int(u64);
- temp_path[dirname.len + 1 ..][0..rand_len].* = std.fmt.hex(random_integer);
-
- if (dir.symLink(target_path, temp_path, flags)) {
- return dir.rename(temp_path, sym_link_path);
- } else |err| switch (err) {
- error.PathAlreadyExists => continue,
- else => |e| return e,
- }
- }
-}
-
-pub const ReadLinkError = posix.ReadLinkError;
-
-/// Read value of a symbolic link.
-/// The return value is a slice of `buffer`, from index `0`.
-/// Asserts that the path parameter has no null bytes.
-/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `sub_path` should be encoded as valid UTF-8.
-/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
-pub fn readLink(self: Dir, sub_path: []const u8, buffer: []u8) ReadLinkError![]u8 {
- if (native_os == .wasi and !builtin.link_libc) {
- return self.readLinkWasi(sub_path, buffer);
- }
- if (native_os == .windows) {
- var sub_path_w = try windows.sliceToPrefixedFileW(self.fd, sub_path);
- const result_w = try self.readLinkW(sub_path_w.span(), &sub_path_w.data);
-
- const len = std.unicode.calcWtf8Len(result_w);
- if (len > buffer.len) return error.NameTooLong;
-
- const end_index = std.unicode.wtf16LeToWtf8(buffer, result_w);
- return buffer[0..end_index];
- }
- const sub_path_c = try posix.toPosixPath(sub_path);
- return self.readLinkZ(&sub_path_c, buffer);
-}
-
-/// WASI-only. Same as `readLink` except targeting WASI.
-pub fn readLinkWasi(self: Dir, sub_path: []const u8, buffer: []u8) ![]u8 {
- return posix.readlinkat(self.fd, sub_path, buffer);
-}
-
-/// Same as `readLink`, except the `sub_path_c` parameter is null-terminated.
-pub fn readLinkZ(self: Dir, sub_path_c: [*:0]const u8, buffer: []u8) ![]u8 {
- if (native_os == .windows) {
- var sub_path_w = try windows.cStrToPrefixedFileW(self.fd, sub_path_c);
- const result_w = try self.readLinkW(sub_path_w.span(), &sub_path_w.data);
-
- const len = std.unicode.calcWtf8Len(result_w);
- if (len > buffer.len) return error.NameTooLong;
-
- const end_index = std.unicode.wtf16LeToWtf8(buffer, result_w);
- return buffer[0..end_index];
- }
- return posix.readlinkatZ(self.fd, sub_path_c, buffer);
-}
-
-/// Windows-only. Same as `readLink` except the path parameter
-/// is WTF-16 LE encoded, NT-prefixed.
-///
-/// `sub_path_w` will never be accessed after `buffer` has been written to, so it
-/// is safe to reuse a single buffer for both.
-pub fn readLinkW(self: Dir, sub_path_w: []const u16, buffer: []u16) ![]u16 {
- return windows.ReadLink(self.fd, sub_path_w, buffer);
-}
-
-/// Deprecated in favor of `Io.Dir.readFile`.
-pub fn readFile(self: Dir, file_path: []const u8, buffer: []u8) ![]u8 {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
- return Io.Dir.readFile(.{ .handle = self.fd }, io, file_path, buffer);
-}
-
-pub const ReadFileAllocError = File.OpenError || File.ReadError || Allocator.Error || error{
- /// File size reached or exceeded the provided limit.
- StreamTooLong,
-};
-
-/// Reads all the bytes from the named file. On success, caller owns returned
-/// buffer.
-///
-/// If the file size is already known, a better alternative is to initialize a
-/// `File.Reader`.
-///
-/// If the file size cannot be obtained, an error is returned. If
-/// this is a realistic possibility, a better alternative is to initialize a
-/// `File.Reader` which handles this seamlessly.
-pub fn readFileAlloc(
- dir: Dir,
- /// On Windows, should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
- /// On WASI, should be encoded as valid UTF-8.
- /// On other platforms, an opaque sequence of bytes with no particular encoding.
- sub_path: []const u8,
- /// Used to allocate the result.
- gpa: Allocator,
- /// If reached or exceeded, `error.StreamTooLong` is returned instead.
- limit: Io.Limit,
-) ReadFileAllocError![]u8 {
- return readFileAllocOptions(dir, sub_path, gpa, limit, .of(u8), null);
-}
-
-/// Reads all the bytes from the named file. On success, caller owns returned
-/// buffer.
-///
-/// If the file size is already known, a better alternative is to initialize a
-/// `File.Reader`.
-///
-/// TODO move this function to Io.Dir
-pub fn readFileAllocOptions(
- dir: Dir,
- /// On Windows, should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
- /// On WASI, should be encoded as valid UTF-8.
- /// On other platforms, an opaque sequence of bytes with no particular encoding.
- sub_path: []const u8,
- /// Used to allocate the result.
- gpa: Allocator,
- /// If reached or exceeded, `error.StreamTooLong` is returned instead.
- limit: Io.Limit,
- comptime alignment: std.mem.Alignment,
- comptime sentinel: ?u8,
-) ReadFileAllocError!(if (sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
-
- var file = try dir.openFile(sub_path, .{});
- defer file.close();
- var file_reader = file.reader(io, &.{});
- return file_reader.interface.allocRemainingAlignedSentinel(gpa, limit, alignment, sentinel) catch |err| switch (err) {
- error.ReadFailed => return file_reader.err.?,
- error.OutOfMemory, error.StreamTooLong => |e| return e,
- };
-}
-
-pub const DeleteTreeError = error{
- AccessDenied,
- PermissionDenied,
- FileTooBig,
- SymLinkLoop,
- ProcessFdQuotaExceeded,
- NameTooLong,
- SystemFdQuotaExceeded,
- NoDevice,
- SystemResources,
- ReadOnlyFileSystem,
- FileSystem,
- FileBusy,
- DeviceBusy,
- ProcessNotFound,
- /// One of the path components was not a directory.
- /// This error is unreachable if `sub_path` does not contain a path separator.
- NotDir,
- /// WASI: file paths must be valid UTF-8.
- /// Windows: file paths provided by the user must be valid WTF-8.
- /// https://wtf-8.codeberg.page/
- /// On Windows, file paths cannot contain these characters:
- /// '/', '*', '?', '"', '<', '>', '|'
- BadPathName,
- /// On Windows, `\\server` or `\\server\share` was not found.
- NetworkNotFound,
-
- Canceled,
-} || posix.UnexpectedError;
-
-/// Whether `sub_path` describes a symlink, file, or directory, this function
-/// removes it. If it cannot be removed because it is a non-empty directory,
-/// this function recursively removes its entries and then tries again.
-/// This operation is not atomic on most file systems.
-/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `sub_path` should be encoded as valid UTF-8.
-/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
-pub fn deleteTree(self: Dir, sub_path: []const u8) DeleteTreeError!void {
- var initial_iterable_dir = (try self.deleteTreeOpenInitialSubpath(sub_path, .file)) orelse return;
-
- const StackItem = struct {
- name: []const u8,
- parent_dir: Dir,
- iter: Dir.Iterator,
-
- fn closeAll(items: []@This()) void {
- for (items) |*item| item.iter.dir.close();
- }
- };
-
- var stack_buffer: [16]StackItem = undefined;
- var stack = std.ArrayList(StackItem).initBuffer(&stack_buffer);
- defer StackItem.closeAll(stack.items);
-
- stack.appendAssumeCapacity(.{
- .name = sub_path,
- .parent_dir = self,
- .iter = initial_iterable_dir.iterateAssumeFirstIteration(),
- });
-
- process_stack: while (stack.items.len != 0) {
- var top = &stack.items[stack.items.len - 1];
- while (try top.iter.next()) |entry| {
- var treat_as_dir = entry.kind == .directory;
- handle_entry: while (true) {
- if (treat_as_dir) {
- if (stack.unusedCapacitySlice().len >= 1) {
- var iterable_dir = top.iter.dir.openDir(entry.name, .{
- .follow_symlinks = false,
- .iterate = true,
- }) catch |err| switch (err) {
- error.NotDir => {
- treat_as_dir = false;
- continue :handle_entry;
- },
- error.FileNotFound => {
- // That's fine, we were trying to remove this directory anyway.
- break :handle_entry;
- },
-
- error.AccessDenied,
- error.PermissionDenied,
- error.SymLinkLoop,
- error.ProcessFdQuotaExceeded,
- error.NameTooLong,
- error.SystemFdQuotaExceeded,
- error.NoDevice,
- error.SystemResources,
- error.Unexpected,
- error.BadPathName,
- error.NetworkNotFound,
- error.DeviceBusy,
- error.Canceled,
- => |e| return e,
- };
- stack.appendAssumeCapacity(.{
- .name = entry.name,
- .parent_dir = top.iter.dir,
- .iter = iterable_dir.iterateAssumeFirstIteration(),
- });
- continue :process_stack;
- } else {
- try top.iter.dir.deleteTreeMinStackSizeWithKindHint(entry.name, entry.kind);
- break :handle_entry;
- }
- } else {
- if (top.iter.dir.deleteFile(entry.name)) {
- break :handle_entry;
- } else |err| switch (err) {
- error.FileNotFound => break :handle_entry,
-
- // Impossible because we do not pass any path separators.
- error.NotDir => unreachable,
-
- error.IsDir => {
- treat_as_dir = true;
- continue :handle_entry;
- },
-
- error.AccessDenied,
- error.PermissionDenied,
- error.SymLinkLoop,
- error.NameTooLong,
- error.SystemResources,
- error.ReadOnlyFileSystem,
- error.FileSystem,
- error.FileBusy,
- error.BadPathName,
- error.NetworkNotFound,
- error.Unexpected,
- => |e| return e,
- }
- }
- }
- }
-
- // On Windows, we can't delete until the dir's handle has been closed, so
- // close it before we try to delete.
- top.iter.dir.close();
-
- // In order to avoid double-closing the directory when cleaning up
- // the stack in the case of an error, we save the relevant portions and
- // pop the value from the stack.
- const parent_dir = top.parent_dir;
- const name = top.name;
- stack.items.len -= 1;
-
- var need_to_retry: bool = false;
- parent_dir.deleteDir(name) catch |err| switch (err) {
- error.FileNotFound => {},
- error.DirNotEmpty => need_to_retry = true,
- else => |e| return e,
- };
-
- if (need_to_retry) {
- // Since we closed the handle that the previous iterator used, we
- // need to re-open the dir and re-create the iterator.
- var iterable_dir = iterable_dir: {
- var treat_as_dir = true;
- handle_entry: while (true) {
- if (treat_as_dir) {
- break :iterable_dir parent_dir.openDir(name, .{
- .follow_symlinks = false,
- .iterate = true,
- }) catch |err| switch (err) {
- error.NotDir => {
- treat_as_dir = false;
- continue :handle_entry;
- },
- error.FileNotFound => {
- // That's fine, we were trying to remove this directory anyway.
- continue :process_stack;
- },
-
- error.AccessDenied,
- error.PermissionDenied,
- error.SymLinkLoop,
- error.ProcessFdQuotaExceeded,
- error.NameTooLong,
- error.SystemFdQuotaExceeded,
- error.NoDevice,
- error.SystemResources,
- error.Unexpected,
- error.BadPathName,
- error.NetworkNotFound,
- error.DeviceBusy,
- error.Canceled,
- => |e| return e,
- };
- } else {
- if (parent_dir.deleteFile(name)) {
- continue :process_stack;
- } else |err| switch (err) {
- error.FileNotFound => continue :process_stack,
-
- // Impossible because we do not pass any path separators.
- error.NotDir => unreachable,
-
- error.IsDir => {
- treat_as_dir = true;
- continue :handle_entry;
- },
-
- error.AccessDenied,
- error.PermissionDenied,
- error.SymLinkLoop,
- error.NameTooLong,
- error.SystemResources,
- error.ReadOnlyFileSystem,
- error.FileSystem,
- error.FileBusy,
- error.BadPathName,
- error.NetworkNotFound,
- error.Unexpected,
- => |e| return e,
- }
- }
- }
- };
- // We know there is room on the stack since we are just re-adding
- // the StackItem that we previously popped.
- stack.appendAssumeCapacity(.{
- .name = name,
- .parent_dir = parent_dir,
- .iter = iterable_dir.iterateAssumeFirstIteration(),
- });
- continue :process_stack;
- }
- }
-}
-
-/// Like `deleteTree`, but only keeps one `Iterator` active at a time to minimize the function's stack size.
-/// This is slower than `deleteTree` but uses less stack space.
-/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `sub_path` should be encoded as valid UTF-8.
-/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
-pub fn deleteTreeMinStackSize(self: Dir, sub_path: []const u8) DeleteTreeError!void {
- return self.deleteTreeMinStackSizeWithKindHint(sub_path, .file);
-}
-
-fn deleteTreeMinStackSizeWithKindHint(self: Dir, sub_path: []const u8, kind_hint: File.Kind) DeleteTreeError!void {
- start_over: while (true) {
- var dir = (try self.deleteTreeOpenInitialSubpath(sub_path, kind_hint)) orelse return;
- var cleanup_dir_parent: ?Dir = null;
- defer if (cleanup_dir_parent) |*d| d.close();
-
- var cleanup_dir = true;
- defer if (cleanup_dir) dir.close();
-
- // Valid use of max_path_bytes because dir_name_buf will only
- // ever store a single path component that was returned from the
- // filesystem.
- var dir_name_buf: [fs.max_path_bytes]u8 = undefined;
- var dir_name: []const u8 = sub_path;
-
- // Here we must avoid recursion, in order to provide O(1) memory guarantee of this function.
- // Go through each entry and if it is not a directory, delete it. If it is a directory,
- // open it, and close the original directory. Repeat. Then start the entire operation over.
-
- scan_dir: while (true) {
- var dir_it = dir.iterateAssumeFirstIteration();
- dir_it: while (try dir_it.next()) |entry| {
- var treat_as_dir = entry.kind == .directory;
- handle_entry: while (true) {
- if (treat_as_dir) {
- const new_dir = dir.openDir(entry.name, .{
- .follow_symlinks = false,
- .iterate = true,
- }) catch |err| switch (err) {
- error.NotDir => {
- treat_as_dir = false;
- continue :handle_entry;
- },
- error.FileNotFound => {
- // That's fine, we were trying to remove this directory anyway.
- continue :dir_it;
- },
-
- error.AccessDenied,
- error.PermissionDenied,
- error.SymLinkLoop,
- error.ProcessFdQuotaExceeded,
- error.NameTooLong,
- error.SystemFdQuotaExceeded,
- error.NoDevice,
- error.SystemResources,
- error.Unexpected,
- error.BadPathName,
- error.NetworkNotFound,
- error.DeviceBusy,
- error.Canceled,
- => |e| return e,
- };
- if (cleanup_dir_parent) |*d| d.close();
- cleanup_dir_parent = dir;
- dir = new_dir;
- const result = dir_name_buf[0..entry.name.len];
- @memcpy(result, entry.name);
- dir_name = result;
- continue :scan_dir;
- } else {
- if (dir.deleteFile(entry.name)) {
- continue :dir_it;
- } else |err| switch (err) {
- error.FileNotFound => continue :dir_it,
-
- // Impossible because we do not pass any path separators.
- error.NotDir => unreachable,
-
- error.IsDir => {
- treat_as_dir = true;
- continue :handle_entry;
- },
-
- error.AccessDenied,
- error.PermissionDenied,
- error.SymLinkLoop,
- error.NameTooLong,
- error.SystemResources,
- error.ReadOnlyFileSystem,
- error.FileSystem,
- error.FileBusy,
- error.BadPathName,
- error.NetworkNotFound,
- error.Unexpected,
- => |e| return e,
- }
- }
- }
- }
- // Reached the end of the directory entries, which means we successfully deleted all of them.
- // Now to remove the directory itself.
- dir.close();
- cleanup_dir = false;
-
- if (cleanup_dir_parent) |d| {
- d.deleteDir(dir_name) catch |err| switch (err) {
- // These two things can happen due to file system race conditions.
- error.FileNotFound, error.DirNotEmpty => continue :start_over,
- else => |e| return e,
- };
- continue :start_over;
- } else {
- self.deleteDir(sub_path) catch |err| switch (err) {
- error.FileNotFound => return,
- error.DirNotEmpty => continue :start_over,
- else => |e| return e,
- };
- return;
- }
- }
- }
-}
-
-/// On successful delete, returns null.
-fn deleteTreeOpenInitialSubpath(self: Dir, sub_path: []const u8, kind_hint: File.Kind) !?Dir {
- return iterable_dir: {
- // Treat as a file by default
- var treat_as_dir = kind_hint == .directory;
-
- handle_entry: while (true) {
- if (treat_as_dir) {
- break :iterable_dir self.openDir(sub_path, .{
- .follow_symlinks = false,
- .iterate = true,
- }) catch |err| switch (err) {
- error.NotDir => {
- treat_as_dir = false;
- continue :handle_entry;
- },
- error.FileNotFound => {
- // That's fine, we were trying to remove this directory anyway.
- return null;
- },
-
- error.AccessDenied,
- error.PermissionDenied,
- error.SymLinkLoop,
- error.ProcessFdQuotaExceeded,
- error.NameTooLong,
- error.SystemFdQuotaExceeded,
- error.NoDevice,
- error.SystemResources,
- error.Unexpected,
- error.BadPathName,
- error.DeviceBusy,
- error.NetworkNotFound,
- error.Canceled,
- => |e| return e,
- };
- } else {
- if (self.deleteFile(sub_path)) {
- return null;
- } else |err| switch (err) {
- error.FileNotFound => return null,
-
- error.IsDir => {
- treat_as_dir = true;
- continue :handle_entry;
- },
-
- error.AccessDenied,
- error.PermissionDenied,
- error.SymLinkLoop,
- error.NameTooLong,
- error.SystemResources,
- error.ReadOnlyFileSystem,
- error.NotDir,
- error.FileSystem,
- error.FileBusy,
- error.BadPathName,
- error.NetworkNotFound,
- error.Unexpected,
- => |e| return e,
- }
- }
- }
- };
-}
-
-pub const WriteFileError = File.WriteError || File.OpenError;
-
-pub const WriteFileOptions = struct {
- /// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
- /// On WASI, `sub_path` should be encoded as valid UTF-8.
- /// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
- sub_path: []const u8,
- data: []const u8,
- flags: File.CreateFlags = .{},
-};
-
-/// Writes content to the file system, using the file creation flags provided.
-pub fn writeFile(self: Dir, options: WriteFileOptions) WriteFileError!void {
- var file = try self.createFile(options.sub_path, options.flags);
- defer file.close();
- try file.writeAll(options.data);
-}
-
-/// Deprecated in favor of `Io.Dir.AccessError`.
-pub const AccessError = Io.Dir.AccessError;
-
-/// Deprecated in favor of `Io.Dir.access`.
-pub fn access(self: Dir, sub_path: []const u8, options: Io.Dir.AccessOptions) AccessError!void {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
- return Io.Dir.access(self.adaptToNewApi(), io, sub_path, options);
-}
-
-pub const CopyFileOptions = struct {
- /// When this is `null` the mode is copied from the source file.
- override_mode: ?File.Mode = null,
-};
-
-pub const CopyFileError = File.OpenError || File.StatError ||
- AtomicFile.InitError || AtomicFile.FinishError ||
- File.ReadError || File.WriteError || error{InvalidFileName};
-
-/// Atomically creates a new file at `dest_path` within `dest_dir` with the
-/// same contents as `source_path` within `source_dir`, overwriting any already
-/// existing file.
-///
-/// On Linux, until https://patchwork.kernel.org/patch/9636735/ is merged and
-/// readily available, there is a possibility of power loss or application
-/// termination leaving temporary files present in the same directory as
-/// dest_path.
-///
-/// On Windows, both paths should be encoded as
-/// [WTF-8](https://wtf-8.codeberg.page/). On WASI, both paths should be
-/// encoded as valid UTF-8. On other platforms, both paths are an opaque
-/// sequence of bytes with no particular encoding.
-///
-/// TODO move this function to Io.Dir
-pub fn copyFile(
- source_dir: Dir,
- source_path: []const u8,
- dest_dir: Dir,
- dest_path: []const u8,
- options: CopyFileOptions,
-) CopyFileError!void {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
-
- const file = try source_dir.openFile(source_path, .{});
- var file_reader: File.Reader = .init(.{ .handle = file.handle }, io, &.{});
- defer file_reader.file.close(io);
-
- const mode = options.override_mode orelse blk: {
- const st = try file_reader.file.stat(io);
- file_reader.size = st.size;
- break :blk st.mode;
- };
-
- var buffer: [1024]u8 = undefined; // Used only when direct fd-to-fd is not available.
- var atomic_file = try dest_dir.atomicFile(dest_path, .{
- .mode = mode,
- .write_buffer = &buffer,
- });
- defer atomic_file.deinit();
-
- _ = atomic_file.file_writer.interface.sendFileAll(&file_reader, .unlimited) catch |err| switch (err) {
- error.ReadFailed => return file_reader.err.?,
- error.WriteFailed => return atomic_file.file_writer.err.?,
- };
-
- try atomic_file.finish();
-}
-
-pub const AtomicFileOptions = struct {
- mode: File.Mode = File.default_mode,
- make_path: bool = false,
- write_buffer: []u8,
-};
-
-/// Directly access the `.file` field, and then call `AtomicFile.finish` to
-/// atomically replace `dest_path` with contents.
-/// Always call `AtomicFile.deinit` to clean up, regardless of whether
-/// `AtomicFile.finish` succeeded. `dest_path` must remain valid until
-/// `AtomicFile.deinit` is called.
-/// On Windows, `dest_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `dest_path` should be encoded as valid UTF-8.
-/// On other platforms, `dest_path` is an opaque sequence of bytes with no particular encoding.
-pub fn atomicFile(self: Dir, dest_path: []const u8, options: AtomicFileOptions) !AtomicFile {
- if (fs.path.dirname(dest_path)) |dirname| {
- const dir = if (options.make_path)
- try self.makeOpenPath(dirname, .{})
- else
- try self.openDir(dirname, .{});
-
- return .init(fs.path.basename(dest_path), options.mode, dir, true, options.write_buffer);
- } else {
- return .init(dest_path, options.mode, self, false, options.write_buffer);
- }
-}
-
-pub const Stat = File.Stat;
-pub const StatError = File.StatError;
-
-/// Deprecated in favor of `Io.Dir.stat`.
-pub fn stat(self: Dir) StatError!Stat {
- const file: File = .{ .handle = self.fd };
- return file.stat();
-}
-
-pub const StatFileError = File.OpenError || File.StatError || posix.FStatAtError;
-
-/// Deprecated in favor of `Io.Dir.statPath`.
-pub fn statFile(self: Dir, sub_path: []const u8) StatFileError!Stat {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
- return Io.Dir.statPath(.{ .handle = self.fd }, io, sub_path, .{});
-}
-
-pub const ChmodError = File.ChmodError;
-
-/// Changes the mode of the directory.
-/// The process must have the correct privileges in order to do this
-/// successfully, or must have the effective user ID matching the owner
-/// of the directory. Additionally, the directory must have been opened
-/// with `OpenOptions{ .iterate = true }`.
-pub fn chmod(self: Dir, new_mode: File.Mode) ChmodError!void {
- const file: File = .{ .handle = self.fd };
- try file.chmod(new_mode);
-}
-
-/// Changes the owner and group of the directory.
-/// The process must have the correct privileges in order to do this
-/// successfully. The group may be changed by the owner of the directory to
-/// any group of which the owner is a member. Additionally, the directory
-/// must have been opened with `OpenOptions{ .iterate = true }`. If the
-/// owner or group is specified as `null`, the ID is not changed.
-pub fn chown(self: Dir, owner: ?File.Uid, group: ?File.Gid) ChownError!void {
- const file: File = .{ .handle = self.fd };
- try file.chown(owner, group);
-}
-
-pub const ChownError = File.ChownError;
-
-const Permissions = File.Permissions;
-pub const SetPermissionsError = File.SetPermissionsError;
-
-/// Sets permissions according to the provided `Permissions` struct.
-/// This method is *NOT* available on WASI
-pub fn setPermissions(self: Dir, permissions: Permissions) SetPermissionsError!void {
- const file: File = .{ .handle = self.fd };
- try file.setPermissions(permissions);
-}
-
-pub fn adaptToNewApi(dir: Dir) Io.Dir {
- return .{ .handle = dir.fd };
-}
-
-pub fn adaptFromNewApi(dir: Io.Dir) Dir {
- return .{ .fd = dir.handle };
-}
diff --git a/lib/std/fs/File.zig b/lib/std/fs/File.zig
deleted file mode 100644
index 5e54ba5f7a..0000000000
--- a/lib/std/fs/File.zig
+++ /dev/null
@@ -1,1437 +0,0 @@
-const File = @This();
-
-const builtin = @import("builtin");
-const native_os = builtin.os.tag;
-const is_windows = native_os == .windows;
-
-const std = @import("../std.zig");
-const Io = std.Io;
-const Os = std.builtin.Os;
-const Allocator = std.mem.Allocator;
-const posix = std.posix;
-const math = std.math;
-const assert = std.debug.assert;
-const linux = std.os.linux;
-const windows = std.os.windows;
-const maxInt = std.math.maxInt;
-const Alignment = std.mem.Alignment;
-
-/// The OS-specific file descriptor or file handle.
-handle: Handle,
-
-pub const Handle = Io.File.Handle;
-pub const Mode = Io.File.Mode;
-pub const INode = Io.File.INode;
-pub const Uid = posix.uid_t;
-pub const Gid = posix.gid_t;
-pub const Kind = Io.File.Kind;
-
-/// This is the default mode given to POSIX operating systems for creating
-/// files. `0o666` is "-rw-rw-rw-" which is counter-intuitive at first,
-/// since most people would expect "-rw-r--r--", for example, when using
-/// the `touch` command, which would correspond to `0o644`. However, POSIX
-/// libc implementations use `0o666` inside `fopen` and then rely on the
-/// process-scoped "umask" setting to adjust this number for file creation.
-pub const default_mode: Mode = if (Mode == u0) 0 else 0o666;
-
-/// Deprecated in favor of `Io.File.OpenError`.
-pub const OpenError = Io.File.OpenError || error{WouldBlock};
-/// Deprecated in favor of `Io.File.OpenMode`.
-pub const OpenMode = Io.File.OpenMode;
-/// Deprecated in favor of `Io.File.Lock`.
-pub const Lock = Io.File.Lock;
-/// Deprecated in favor of `Io.File.OpenFlags`.
-pub const OpenFlags = Io.File.OpenFlags;
-
-pub const CreateFlags = struct {
- /// Whether the file will be created with read access.
- read: bool = false,
-
- /// If the file already exists, and is a regular file, and the access
- /// mode allows writing, it will be truncated to length 0.
- truncate: bool = true,
-
- /// Ensures that this open call creates the file, otherwise causes
- /// `error.PathAlreadyExists` to be returned.
- exclusive: bool = false,
-
- /// Open the file with an advisory lock to coordinate with other processes
- /// accessing it at the same time. An exclusive lock will prevent other
- /// processes from acquiring a lock. A shared lock will prevent other
- /// processes from acquiring a exclusive lock, but does not prevent
- /// other process from getting their own shared locks.
- ///
- /// The lock is advisory, except on Linux in very specific circumstances[1].
- /// This means that a process that does not respect the locking API can still get access
- /// to the file, despite the lock.
- ///
- /// On these operating systems, the lock is acquired atomically with
- /// opening the file:
- /// * Darwin
- /// * DragonFlyBSD
- /// * FreeBSD
- /// * Haiku
- /// * NetBSD
- /// * OpenBSD
- /// On these operating systems, the lock is acquired via a separate syscall
- /// after opening the file:
- /// * Linux
- /// * Windows
- ///
- /// [1]: https://www.kernel.org/doc/Documentation/filesystems/mandatory-locking.txt
- lock: Lock = .none,
-
- /// Sets whether or not to wait until the file is locked to return. If set to true,
- /// `error.WouldBlock` will be returned. Otherwise, the file will wait until the file
- /// is available to proceed.
- lock_nonblocking: bool = false,
-
- /// For POSIX systems this is the file system mode the file will
- /// be created with. On other systems this is always 0.
- mode: Mode = default_mode,
-};
-
-pub fn stdout() File {
- return .{ .handle = if (is_windows) windows.peb().ProcessParameters.hStdOutput else posix.STDOUT_FILENO };
-}
-
-pub fn stderr() File {
- return .{ .handle = if (is_windows) windows.peb().ProcessParameters.hStdError else posix.STDERR_FILENO };
-}
-
-pub fn stdin() File {
- return .{ .handle = if (is_windows) windows.peb().ProcessParameters.hStdInput else posix.STDIN_FILENO };
-}
-
-/// Upon success, the stream is in an uninitialized state. To continue using it,
-/// you must use the open() function.
-pub fn close(self: File) void {
- if (is_windows) {
- windows.CloseHandle(self.handle);
- } else {
- posix.close(self.handle);
- }
-}
-
-pub const SyncError = posix.SyncError;
-
-/// Blocks until all pending file contents and metadata modifications
-/// for the file have been synchronized with the underlying filesystem.
-///
-/// Note that this does not ensure that metadata for the
-/// directory containing the file has also reached disk.
-pub fn sync(self: File) SyncError!void {
- return posix.fsync(self.handle);
-}
-
-/// Test whether the file refers to a terminal.
-/// See also `getOrEnableAnsiEscapeSupport` and `supportsAnsiEscapeCodes`.
-pub fn isTty(self: File) bool {
- return posix.isatty(self.handle);
-}
-
-pub fn isCygwinPty(file: File) bool {
- if (builtin.os.tag != .windows) return false;
-
- const handle = file.handle;
-
- // If this is a MSYS2/cygwin pty, then it will be a named pipe with a name in one of these formats:
- // msys-[...]-ptyN-[...]
- // cygwin-[...]-ptyN-[...]
- //
- // Example: msys-1888ae32e00d56aa-pty0-to-master
-
- // First, just check that the handle is a named pipe.
- // This allows us to avoid the more costly NtQueryInformationFile call
- // for handles that aren't named pipes.
- {
- var io_status: windows.IO_STATUS_BLOCK = undefined;
- var device_info: windows.FILE.FS_DEVICE_INFORMATION = undefined;
- const rc = windows.ntdll.NtQueryVolumeInformationFile(handle, &io_status, &device_info, @sizeOf(windows.FILE.FS_DEVICE_INFORMATION), .Device);
- switch (rc) {
- .SUCCESS => {},
- else => return false,
- }
- if (device_info.DeviceType.FileDevice != .NAMED_PIPE) return false;
- }
-
- const name_bytes_offset = @offsetOf(windows.FILE_NAME_INFO, "FileName");
- // `NAME_MAX` UTF-16 code units (2 bytes each)
- // This buffer may not be long enough to handle *all* possible paths
- // (PATH_MAX_WIDE would be necessary for that), but because we only care
- // about certain paths and we know they must be within a reasonable length,
- // we can use this smaller buffer and just return false on any error from
- // NtQueryInformationFile.
- const num_name_bytes = windows.MAX_PATH * 2;
- var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = [_]u8{0} ** (name_bytes_offset + num_name_bytes);
-
- var io_status_block: windows.IO_STATUS_BLOCK = undefined;
- const rc = windows.ntdll.NtQueryInformationFile(handle, &io_status_block, &name_info_bytes, @intCast(name_info_bytes.len), .Name);
- switch (rc) {
- .SUCCESS => {},
- .INVALID_PARAMETER => unreachable,
- else => return false,
- }
-
- const name_info: *const windows.FILE_NAME_INFO = @ptrCast(&name_info_bytes);
- const name_bytes = name_info_bytes[name_bytes_offset .. name_bytes_offset + name_info.FileNameLength];
- const name_wide = std.mem.bytesAsSlice(u16, name_bytes);
- // The name we get from NtQueryInformationFile will be prefixed with a '\', e.g. \msys-1888ae32e00d56aa-pty0-to-master
- return (std.mem.startsWith(u16, name_wide, &[_]u16{ '\\', 'm', 's', 'y', 's', '-' }) or
- std.mem.startsWith(u16, name_wide, &[_]u16{ '\\', 'c', 'y', 'g', 'w', 'i', 'n', '-' })) and
- std.mem.find(u16, name_wide, &[_]u16{ '-', 'p', 't', 'y' }) != null;
-}
-
-/// Returns whether or not ANSI escape codes will be treated as such,
-/// and attempts to enable support for ANSI escape codes if necessary
-/// (on Windows).
-///
-/// Returns `true` if ANSI escape codes are supported or support was
-/// successfully enabled. Returns false if ANSI escape codes are not
-/// supported or support was unable to be enabled.
-///
-/// See also `supportsAnsiEscapeCodes`.
-pub fn getOrEnableAnsiEscapeSupport(self: File) bool {
- if (builtin.os.tag == .windows) {
- var original_console_mode: windows.DWORD = 0;
-
- // For Windows Terminal, VT Sequences processing is enabled by default.
- if (windows.kernel32.GetConsoleMode(self.handle, &original_console_mode) != 0) {
- if (original_console_mode & windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING != 0) return true;
-
- // For Windows Console, VT Sequences processing support was added in Windows 10 build 14361, but disabled by default.
- // https://devblogs.microsoft.com/commandline/tmux-support-arrives-for-bash-on-ubuntu-on-windows/
- //
- // Note: In Microsoft's example for enabling virtual terminal processing, it
- // shows attempting to enable `DISABLE_NEWLINE_AUTO_RETURN` as well:
- // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#example-of-enabling-virtual-terminal-processing
- // This is avoided because in the old Windows Console, that flag causes \n (as opposed to \r\n)
- // to behave unexpectedly (the cursor moves down 1 row but remains on the same column).
- // Additionally, the default console mode in Windows Terminal does not have
- // `DISABLE_NEWLINE_AUTO_RETURN` set, so by only enabling `ENABLE_VIRTUAL_TERMINAL_PROCESSING`
- // we end up matching the mode of Windows Terminal.
- const requested_console_modes = windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING;
- const console_mode = original_console_mode | requested_console_modes;
- if (windows.kernel32.SetConsoleMode(self.handle, console_mode) != 0) return true;
- }
-
- return self.isCygwinPty();
- }
- return self.supportsAnsiEscapeCodes();
-}
-
-/// Test whether ANSI escape codes will be treated as such without
-/// attempting to enable support for ANSI escape codes.
-///
-/// See also `getOrEnableAnsiEscapeSupport`.
-pub fn supportsAnsiEscapeCodes(self: File) bool {
- if (builtin.os.tag == .windows) {
- var console_mode: windows.DWORD = 0;
- if (windows.kernel32.GetConsoleMode(self.handle, &console_mode) != 0) {
- if (console_mode & windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING != 0) return true;
- }
-
- return self.isCygwinPty();
- }
- if (builtin.os.tag == .wasi) {
- // WASI sanitizes stdout when fd is a tty so ANSI escape codes
- // will not be interpreted as actual cursor commands, and
- // stderr is always sanitized.
- return false;
- }
- if (self.isTty()) {
- if (self.handle == posix.STDOUT_FILENO or self.handle == posix.STDERR_FILENO) {
- if (posix.getenvZ("TERM")) |term| {
- if (std.mem.eql(u8, term, "dumb"))
- return false;
- }
- }
- return true;
- }
- return false;
-}
-
-pub const SetEndPosError = posix.TruncateError;
-
-/// Shrinks or expands the file.
-/// The file offset after this call is left unchanged.
-pub fn setEndPos(self: File, length: u64) SetEndPosError!void {
- try posix.ftruncate(self.handle, length);
-}
-
-pub const SeekError = posix.SeekError;
-
-/// Repositions read/write file offset relative to the current offset.
-/// TODO: integrate with async I/O
-pub fn seekBy(self: File, offset: i64) SeekError!void {
- return posix.lseek_CUR(self.handle, offset);
-}
-
-/// Repositions read/write file offset relative to the end.
-/// TODO: integrate with async I/O
-pub fn seekFromEnd(self: File, offset: i64) SeekError!void {
- return posix.lseek_END(self.handle, offset);
-}
-
-/// Repositions read/write file offset relative to the beginning.
-/// TODO: integrate with async I/O
-pub fn seekTo(self: File, offset: u64) SeekError!void {
- return posix.lseek_SET(self.handle, offset);
-}
-
-pub const GetSeekPosError = posix.SeekError || StatError;
-
-/// TODO: integrate with async I/O
-pub fn getPos(self: File) GetSeekPosError!u64 {
- return posix.lseek_CUR_get(self.handle);
-}
-
-pub const GetEndPosError = std.os.windows.GetFileSizeError || StatError;
-
-/// TODO: integrate with async I/O
-pub fn getEndPos(self: File) GetEndPosError!u64 {
- if (builtin.os.tag == .windows) {
- return windows.GetFileSizeEx(self.handle);
- }
- return (try self.stat()).size;
-}
-
-pub const ModeError = StatError;
-
-/// TODO: integrate with async I/O
-pub fn mode(self: File) ModeError!Mode {
- if (builtin.os.tag == .windows) {
- return 0;
- }
- return (try self.stat()).mode;
-}
-
-pub const Stat = Io.File.Stat;
-
-pub const StatError = posix.FStatError;
-
-/// Returns `Stat` containing basic information about the `File`.
-pub fn stat(self: File) StatError!Stat {
- var threaded: Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
- return Io.File.stat(.{ .handle = self.handle }, io);
-}
-
-pub const ChmodError = posix.FChmodError;
-
-/// Changes the mode of the file.
-/// The process must have the correct privileges in order to do this
-/// successfully, or must have the effective user ID matching the owner
-/// of the file.
-pub fn chmod(self: File, new_mode: Mode) ChmodError!void {
- try posix.fchmod(self.handle, new_mode);
-}
-
-pub const ChownError = posix.FChownError;
-
-/// Changes the owner and group of the file.
-/// The process must have the correct privileges in order to do this
-/// successfully. The group may be changed by the owner of the file to
-/// any group of which the owner is a member. If the owner or group is
-/// specified as `null`, the ID is not changed.
-pub fn chown(self: File, owner: ?Uid, group: ?Gid) ChownError!void {
- try posix.fchown(self.handle, owner, group);
-}
-
-/// Cross-platform representation of permissions on a file.
-/// The `readonly` and `setReadonly` are the only methods available across all platforms.
-/// Platform-specific functionality is available through the `inner` field.
-pub const Permissions = struct {
- /// You may use the `inner` field to use platform-specific functionality
- inner: switch (builtin.os.tag) {
- .windows => PermissionsWindows,
- else => PermissionsUnix,
- },
-
- const Self = @This();
-
- /// Returns `true` if permissions represent an unwritable file.
- /// On Unix, `true` is returned only if no class has write permissions.
- pub fn readOnly(self: Self) bool {
- return self.inner.readOnly();
- }
-
- /// Sets whether write permissions are provided.
- /// On Unix, this affects *all* classes. If this is undesired, use `unixSet`.
- /// This method *DOES NOT* set permissions on the filesystem: use `File.setPermissions(permissions)`
- pub fn setReadOnly(self: *Self, read_only: bool) void {
- self.inner.setReadOnly(read_only);
- }
-};
-
-pub const PermissionsWindows = struct {
- attributes: windows.DWORD,
-
- const Self = @This();
-
- /// Returns `true` if permissions represent an unwritable file.
- pub fn readOnly(self: Self) bool {
- return self.attributes & windows.FILE_ATTRIBUTE_READONLY != 0;
- }
-
- /// Sets whether write permissions are provided.
- /// This method *DOES NOT* set permissions on the filesystem: use `File.setPermissions(permissions)`
- pub fn setReadOnly(self: *Self, read_only: bool) void {
- if (read_only) {
- self.attributes |= windows.FILE_ATTRIBUTE_READONLY;
- } else {
- self.attributes &= ~@as(windows.DWORD, windows.FILE_ATTRIBUTE_READONLY);
- }
- }
-};
-
-pub const PermissionsUnix = struct {
- mode: Mode,
-
- const Self = @This();
-
- /// Returns `true` if permissions represent an unwritable file.
- /// `true` is returned only if no class has write permissions.
- pub fn readOnly(self: Self) bool {
- return self.mode & 0o222 == 0;
- }
-
- /// Sets whether write permissions are provided.
- /// This affects *all* classes. If this is undesired, use `unixSet`.
- /// This method *DOES NOT* set permissions on the filesystem: use `File.setPermissions(permissions)`
- pub fn setReadOnly(self: *Self, read_only: bool) void {
- if (read_only) {
- self.mode &= ~@as(Mode, 0o222);
- } else {
- self.mode |= @as(Mode, 0o222);
- }
- }
-
- pub const Class = enum(u2) {
- user = 2,
- group = 1,
- other = 0,
- };
-
- pub const Permission = enum(u3) {
- read = 0o4,
- write = 0o2,
- execute = 0o1,
- };
-
- /// Returns `true` if the chosen class has the selected permission.
- /// This method is only available on Unix platforms.
- pub fn unixHas(self: Self, class: Class, permission: Permission) bool {
- const mask = @as(Mode, @intFromEnum(permission)) << @as(u3, @intFromEnum(class)) * 3;
- return self.mode & mask != 0;
- }
-
- /// Sets the permissions for the chosen class. Any permissions set to `null` are left unchanged.
- /// This method *DOES NOT* set permissions on the filesystem: use `File.setPermissions(permissions)`
- pub fn unixSet(self: *Self, class: Class, permissions: struct {
- read: ?bool = null,
- write: ?bool = null,
- execute: ?bool = null,
- }) void {
- const shift = @as(u3, @intFromEnum(class)) * 3;
- if (permissions.read) |r| {
- if (r) {
- self.mode |= @as(Mode, 0o4) << shift;
- } else {
- self.mode &= ~(@as(Mode, 0o4) << shift);
- }
- }
- if (permissions.write) |w| {
- if (w) {
- self.mode |= @as(Mode, 0o2) << shift;
- } else {
- self.mode &= ~(@as(Mode, 0o2) << shift);
- }
- }
- if (permissions.execute) |x| {
- if (x) {
- self.mode |= @as(Mode, 0o1) << shift;
- } else {
- self.mode &= ~(@as(Mode, 0o1) << shift);
- }
- }
- }
-
- /// Returns a `Permissions` struct representing the permissions from the passed mode.
- pub fn unixNew(new_mode: Mode) Self {
- return Self{
- .mode = new_mode,
- };
- }
-};
-
-pub const SetPermissionsError = ChmodError;
-
-/// Sets permissions according to the provided `Permissions` struct.
-/// This method is *NOT* available on WASI
-pub fn setPermissions(self: File, permissions: Permissions) SetPermissionsError!void {
- switch (builtin.os.tag) {
- .windows => {
- var io_status_block: windows.IO_STATUS_BLOCK = undefined;
- var info = windows.FILE_BASIC_INFORMATION{
- .CreationTime = 0,
- .LastAccessTime = 0,
- .LastWriteTime = 0,
- .ChangeTime = 0,
- .FileAttributes = permissions.inner.attributes,
- };
- const rc = windows.ntdll.NtSetInformationFile(
- self.handle,
- &io_status_block,
- &info,
- @sizeOf(windows.FILE_BASIC_INFORMATION),
- .Basic,
- );
- switch (rc) {
- .SUCCESS => return,
- .INVALID_HANDLE => unreachable,
- .ACCESS_DENIED => return error.AccessDenied,
- else => return windows.unexpectedStatus(rc),
- }
- },
- .wasi => @compileError("Unsupported OS"), // Wasi filesystem does not *yet* support chmod
- else => {
- try self.chmod(permissions.inner.mode);
- },
- }
-}
-
-pub const UpdateTimesError = posix.FutimensError || windows.SetFileTimeError;
-
-/// The underlying file system may have a different granularity than nanoseconds,
-/// and therefore this function cannot guarantee any precision will be stored.
-/// Further, the maximum value is limited by the system ABI. When a value is provided
-/// that exceeds this range, the value is clamped to the maximum.
-/// TODO: integrate with async I/O
-pub fn updateTimes(
- self: File,
- /// access timestamp in nanoseconds
- atime: Io.Timestamp,
- /// last modification timestamp in nanoseconds
- mtime: Io.Timestamp,
-) UpdateTimesError!void {
- if (builtin.os.tag == .windows) {
- const atime_ft = windows.nanoSecondsToFileTime(atime);
- const mtime_ft = windows.nanoSecondsToFileTime(mtime);
- return windows.SetFileTime(self.handle, null, &atime_ft, &mtime_ft);
- }
- const times = [2]posix.timespec{
- posix.timespec{
- .sec = math.cast(isize, @divFloor(atime.nanoseconds, std.time.ns_per_s)) orelse maxInt(isize),
- .nsec = math.cast(isize, @mod(atime.nanoseconds, std.time.ns_per_s)) orelse maxInt(isize),
- },
- posix.timespec{
- .sec = math.cast(isize, @divFloor(mtime.nanoseconds, std.time.ns_per_s)) orelse maxInt(isize),
- .nsec = math.cast(isize, @mod(mtime.nanoseconds, std.time.ns_per_s)) orelse maxInt(isize),
- },
- };
- try posix.futimens(self.handle, &times);
-}
-
-pub const ReadError = posix.ReadError;
-pub const PReadError = posix.PReadError;
-
-pub fn read(self: File, buffer: []u8) ReadError!usize {
- if (is_windows) {
- return windows.ReadFile(self.handle, buffer, null);
- }
-
- return posix.read(self.handle, buffer);
-}
-
-/// On Windows, this function currently does alter the file pointer.
-/// https://github.com/ziglang/zig/issues/12783
-pub fn pread(self: File, buffer: []u8, offset: u64) PReadError!usize {
- if (is_windows) {
- return windows.ReadFile(self.handle, buffer, offset);
- }
-
- return posix.pread(self.handle, buffer, offset);
-}
-
-/// Deprecated in favor of `Reader`.
-pub fn preadAll(self: File, buffer: []u8, offset: u64) PReadError!usize {
- var index: usize = 0;
- while (index != buffer.len) {
- const amt = try self.pread(buffer[index..], offset + index);
- if (amt == 0) break;
- index += amt;
- }
- return index;
-}
-
-/// See https://github.com/ziglang/zig/issues/7699
-pub fn readv(self: File, iovecs: []const posix.iovec) ReadError!usize {
- if (is_windows) {
- if (iovecs.len == 0) return 0;
- const first = iovecs[0];
- return windows.ReadFile(self.handle, first.base[0..first.len], null);
- }
-
- return posix.readv(self.handle, iovecs);
-}
-
-/// See https://github.com/ziglang/zig/issues/7699
-/// On Windows, this function currently does alter the file pointer.
-/// https://github.com/ziglang/zig/issues/12783
-pub fn preadv(self: File, iovecs: []const posix.iovec, offset: u64) PReadError!usize {
- if (is_windows) {
- if (iovecs.len == 0) return 0;
- const first = iovecs[0];
- return windows.ReadFile(self.handle, first.base[0..first.len], offset);
- }
-
- return posix.preadv(self.handle, iovecs, offset);
-}
-
-pub const WriteError = posix.WriteError;
-pub const PWriteError = posix.PWriteError;
-
-pub fn write(self: File, bytes: []const u8) WriteError!usize {
- if (is_windows) {
- return windows.WriteFile(self.handle, bytes, null);
- }
-
- return posix.write(self.handle, bytes);
-}
-
-pub fn writeAll(self: File, bytes: []const u8) WriteError!void {
- var index: usize = 0;
- while (index < bytes.len) {
- index += try self.write(bytes[index..]);
- }
-}
-
-/// Deprecated in favor of `Writer`.
-pub fn pwriteAll(self: File, bytes: []const u8, offset: u64) PWriteError!void {
- var index: usize = 0;
- while (index < bytes.len) {
- index += try self.pwrite(bytes[index..], offset + index);
- }
-}
-
-/// On Windows, this function currently does alter the file pointer.
-/// https://github.com/ziglang/zig/issues/12783
-pub fn pwrite(self: File, bytes: []const u8, offset: u64) PWriteError!usize {
- if (is_windows) {
- return windows.WriteFile(self.handle, bytes, offset);
- }
-
- return posix.pwrite(self.handle, bytes, offset);
-}
-
-/// See https://github.com/ziglang/zig/issues/7699
-pub fn writev(self: File, iovecs: []const posix.iovec_const) WriteError!usize {
- if (is_windows) {
- // TODO improve this to use WriteFileScatter
- if (iovecs.len == 0) return 0;
- const first = iovecs[0];
- return windows.WriteFile(self.handle, first.base[0..first.len], null);
- }
-
- return posix.writev(self.handle, iovecs);
-}
-
-/// See https://github.com/ziglang/zig/issues/7699
-/// On Windows, this function currently does alter the file pointer.
-/// https://github.com/ziglang/zig/issues/12783
-pub fn pwritev(self: File, iovecs: []posix.iovec_const, offset: u64) PWriteError!usize {
- if (is_windows) {
- if (iovecs.len == 0) return 0;
- const first = iovecs[0];
- return windows.WriteFile(self.handle, first.base[0..first.len], offset);
- }
-
- return posix.pwritev(self.handle, iovecs, offset);
-}
-
-/// Deprecated in favor of `Writer`.
-pub const CopyRangeError = posix.CopyFileRangeError;
-
-/// Deprecated in favor of `Writer`.
-pub fn copyRange(in: File, in_offset: u64, out: File, out_offset: u64, len: u64) CopyRangeError!u64 {
- const adjusted_len = math.cast(usize, len) orelse maxInt(usize);
- const result = try posix.copy_file_range(in.handle, in_offset, out.handle, out_offset, adjusted_len, 0);
- return result;
-}
-
-/// Deprecated in favor of `Writer`.
-pub fn copyRangeAll(in: File, in_offset: u64, out: File, out_offset: u64, len: u64) CopyRangeError!u64 {
- var total_bytes_copied: u64 = 0;
- var in_off = in_offset;
- var out_off = out_offset;
- while (total_bytes_copied < len) {
- const amt_copied = try copyRange(in, in_off, out, out_off, len - total_bytes_copied);
- if (amt_copied == 0) return total_bytes_copied;
- total_bytes_copied += amt_copied;
- in_off += amt_copied;
- out_off += amt_copied;
- }
- return total_bytes_copied;
-}
-
-/// Deprecated in favor of `Io.File.Reader`.
-pub const Reader = Io.File.Reader;
-
-pub const Writer = struct {
- file: File,
- err: ?WriteError = null,
- mode: Writer.Mode = .positional,
- /// Tracks the true seek position in the file. To obtain the logical
- /// position, add the buffer size to this value.
- pos: u64 = 0,
- sendfile_err: ?SendfileError = null,
- copy_file_range_err: ?CopyFileRangeError = null,
- fcopyfile_err: ?FcopyfileError = null,
- seek_err: ?Writer.SeekError = null,
- interface: Io.Writer,
-
- pub const Mode = Reader.Mode;
-
- pub const SendfileError = error{
- UnsupportedOperation,
- SystemResources,
- InputOutput,
- BrokenPipe,
- WouldBlock,
- Unexpected,
- };
-
- pub const CopyFileRangeError = std.os.freebsd.CopyFileRangeError || std.os.linux.wrapped.CopyFileRangeError;
-
- pub const FcopyfileError = error{
- OperationNotSupported,
- OutOfMemory,
- Unexpected,
- };
-
- pub const SeekError = File.SeekError;
-
- /// Number of slices to store on the stack, when trying to send as many byte
- /// vectors through the underlying write calls as possible.
- const max_buffers_len = 16;
-
- pub fn init(file: File, buffer: []u8) Writer {
- return .{
- .file = file,
- .interface = initInterface(buffer),
- .mode = .positional,
- };
- }
-
- /// Positional is more threadsafe, since the global seek position is not
- /// affected, but when such syscalls are not available, preemptively
- /// initializing in streaming mode will skip a failed syscall.
- pub fn initStreaming(file: File, buffer: []u8) Writer {
- return .{
- .file = file,
- .interface = initInterface(buffer),
- .mode = .streaming,
- };
- }
-
- pub fn initInterface(buffer: []u8) Io.Writer {
- return .{
- .vtable = &.{
- .drain = drain,
- .sendFile = sendFile,
- },
- .buffer = buffer,
- };
- }
-
- /// TODO when this logic moves from fs.File to Io.File the io parameter should be deleted
- pub fn moveToReader(w: *Writer, io: Io) Reader {
- defer w.* = undefined;
- return .{
- .io = io,
- .file = .{ .handle = w.file.handle },
- .mode = w.mode,
- .pos = w.pos,
- .interface = Reader.initInterface(w.interface.buffer),
- .seek_err = w.seek_err,
- };
- }
-
- pub fn drain(io_w: *Io.Writer, data: []const []const u8, splat: usize) Io.Writer.Error!usize {
- const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));
- const handle = w.file.handle;
- const buffered = io_w.buffered();
- if (is_windows) switch (w.mode) {
- .positional, .positional_reading => {
- if (buffered.len != 0) {
- const n = windows.WriteFile(handle, buffered, w.pos) catch |err| {
- w.err = err;
- return error.WriteFailed;
- };
- w.pos += n;
- return io_w.consume(n);
- }
- for (data[0 .. data.len - 1]) |buf| {
- if (buf.len == 0) continue;
- const n = windows.WriteFile(handle, buf, w.pos) catch |err| {
- w.err = err;
- return error.WriteFailed;
- };
- w.pos += n;
- return io_w.consume(n);
- }
- const pattern = data[data.len - 1];
- if (pattern.len == 0 or splat == 0) return 0;
- const n = windows.WriteFile(handle, pattern, w.pos) catch |err| {
- w.err = err;
- return error.WriteFailed;
- };
- w.pos += n;
- return io_w.consume(n);
- },
- .streaming, .streaming_reading => {
- if (buffered.len != 0) {
- const n = windows.WriteFile(handle, buffered, null) catch |err| {
- w.err = err;
- return error.WriteFailed;
- };
- w.pos += n;
- return io_w.consume(n);
- }
- for (data[0 .. data.len - 1]) |buf| {
- if (buf.len == 0) continue;
- const n = windows.WriteFile(handle, buf, null) catch |err| {
- w.err = err;
- return error.WriteFailed;
- };
- w.pos += n;
- return io_w.consume(n);
- }
- const pattern = data[data.len - 1];
- if (pattern.len == 0 or splat == 0) return 0;
- const n = windows.WriteFile(handle, pattern, null) catch |err| {
- w.err = err;
- return error.WriteFailed;
- };
- w.pos += n;
- return io_w.consume(n);
- },
- .failure => return error.WriteFailed,
- };
- var iovecs: [max_buffers_len]std.posix.iovec_const = undefined;
- var len: usize = 0;
- if (buffered.len > 0) {
- iovecs[len] = .{ .base = buffered.ptr, .len = buffered.len };
- len += 1;
- }
- for (data[0 .. data.len - 1]) |d| {
- if (d.len == 0) continue;
- iovecs[len] = .{ .base = d.ptr, .len = d.len };
- len += 1;
- if (iovecs.len - len == 0) break;
- }
- const pattern = data[data.len - 1];
- if (iovecs.len - len != 0) switch (splat) {
- 0 => {},
- 1 => if (pattern.len != 0) {
- iovecs[len] = .{ .base = pattern.ptr, .len = pattern.len };
- len += 1;
- },
- else => switch (pattern.len) {
- 0 => {},
- 1 => {
- const splat_buffer_candidate = io_w.buffer[io_w.end..];
- var backup_buffer: [64]u8 = undefined;
- const splat_buffer = if (splat_buffer_candidate.len >= backup_buffer.len)
- splat_buffer_candidate
- else
- &backup_buffer;
- const memset_len = @min(splat_buffer.len, splat);
- const buf = splat_buffer[0..memset_len];
- @memset(buf, pattern[0]);
- iovecs[len] = .{ .base = buf.ptr, .len = buf.len };
- len += 1;
- var remaining_splat = splat - buf.len;
- while (remaining_splat > splat_buffer.len and iovecs.len - len != 0) {
- assert(buf.len == splat_buffer.len);
- iovecs[len] = .{ .base = splat_buffer.ptr, .len = splat_buffer.len };
- len += 1;
- remaining_splat -= splat_buffer.len;
- }
- if (remaining_splat > 0 and iovecs.len - len != 0) {
- iovecs[len] = .{ .base = splat_buffer.ptr, .len = remaining_splat };
- len += 1;
- }
- },
- else => for (0..splat) |_| {
- iovecs[len] = .{ .base = pattern.ptr, .len = pattern.len };
- len += 1;
- if (iovecs.len - len == 0) break;
- },
- },
- };
- if (len == 0) return 0;
- switch (w.mode) {
- .positional, .positional_reading => {
- const n = std.posix.pwritev(handle, iovecs[0..len], w.pos) catch |err| switch (err) {
- error.Unseekable => {
- w.mode = w.mode.toStreaming();
- const pos = w.pos;
- if (pos != 0) {
- w.pos = 0;
- w.seekTo(@intCast(pos)) catch {
- w.mode = .failure;
- return error.WriteFailed;
- };
- }
- return 0;
- },
- else => |e| {
- w.err = e;
- return error.WriteFailed;
- },
- };
- w.pos += n;
- return io_w.consume(n);
- },
- .streaming, .streaming_reading => {
- const n = std.posix.writev(handle, iovecs[0..len]) catch |err| {
- w.err = err;
- return error.WriteFailed;
- };
- w.pos += n;
- return io_w.consume(n);
- },
- .failure => return error.WriteFailed,
- }
- }
-
- pub fn sendFile(
- io_w: *Io.Writer,
- file_reader: *Io.File.Reader,
- limit: Io.Limit,
- ) Io.Writer.FileError!usize {
- const reader_buffered = file_reader.interface.buffered();
- if (reader_buffered.len >= @intFromEnum(limit))
- return sendFileBuffered(io_w, file_reader, limit.slice(reader_buffered));
- const writer_buffered = io_w.buffered();
- const file_limit = @intFromEnum(limit) - reader_buffered.len;
- const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));
- const out_fd = w.file.handle;
- const in_fd = file_reader.file.handle;
-
- if (file_reader.size) |size| {
- if (size - file_reader.pos == 0) {
- if (reader_buffered.len != 0) {
- return sendFileBuffered(io_w, file_reader, reader_buffered);
- } else {
- return error.EndOfStream;
- }
- }
- }
-
- if (native_os == .freebsd and w.mode == .streaming) sf: {
- // Try using sendfile on FreeBSD.
- if (w.sendfile_err != null) break :sf;
- const offset = std.math.cast(std.c.off_t, file_reader.pos) orelse break :sf;
- var hdtr_data: std.c.sf_hdtr = undefined;
- var headers: [2]posix.iovec_const = undefined;
- var headers_i: u8 = 0;
- if (writer_buffered.len != 0) {
- headers[headers_i] = .{ .base = writer_buffered.ptr, .len = writer_buffered.len };
- headers_i += 1;
- }
- if (reader_buffered.len != 0) {
- headers[headers_i] = .{ .base = reader_buffered.ptr, .len = reader_buffered.len };
- headers_i += 1;
- }
- const hdtr: ?*std.c.sf_hdtr = if (headers_i == 0) null else b: {
- hdtr_data = .{
- .headers = &headers,
- .hdr_cnt = headers_i,
- .trailers = null,
- .trl_cnt = 0,
- };
- break :b &hdtr_data;
- };
- var sbytes: std.c.off_t = undefined;
- const nbytes: usize = @min(file_limit, maxInt(usize));
- const flags = 0;
- switch (posix.errno(std.c.sendfile(in_fd, out_fd, offset, nbytes, hdtr, &sbytes, flags))) {
- .SUCCESS, .INTR => {},
- .INVAL, .OPNOTSUPP, .NOTSOCK, .NOSYS => w.sendfile_err = error.UnsupportedOperation,
- .BADF => if (builtin.mode == .Debug) @panic("race condition") else {
- w.sendfile_err = error.Unexpected;
- },
- .FAULT => if (builtin.mode == .Debug) @panic("segmentation fault") else {
- w.sendfile_err = error.Unexpected;
- },
- .NOTCONN => w.sendfile_err = error.BrokenPipe,
- .AGAIN, .BUSY => if (sbytes == 0) {
- w.sendfile_err = error.WouldBlock;
- },
- .IO => w.sendfile_err = error.InputOutput,
- .PIPE => w.sendfile_err = error.BrokenPipe,
- .NOBUFS => w.sendfile_err = error.SystemResources,
- else => |err| w.sendfile_err = posix.unexpectedErrno(err),
- }
- if (w.sendfile_err != null) {
- // Give calling code chance to observe the error before trying
- // something else.
- return 0;
- }
- if (sbytes == 0) {
- file_reader.size = file_reader.pos;
- return error.EndOfStream;
- }
- const consumed = io_w.consume(@intCast(sbytes));
- file_reader.seekBy(@intCast(consumed)) catch return error.ReadFailed;
- return consumed;
- }
-
- if (native_os.isDarwin() and w.mode == .streaming) sf: {
- // Try using sendfile on macOS.
- if (w.sendfile_err != null) break :sf;
- const offset = std.math.cast(std.c.off_t, file_reader.pos) orelse break :sf;
- var hdtr_data: std.c.sf_hdtr = undefined;
- var headers: [2]posix.iovec_const = undefined;
- var headers_i: u8 = 0;
- if (writer_buffered.len != 0) {
- headers[headers_i] = .{ .base = writer_buffered.ptr, .len = writer_buffered.len };
- headers_i += 1;
- }
- if (reader_buffered.len != 0) {
- headers[headers_i] = .{ .base = reader_buffered.ptr, .len = reader_buffered.len };
- headers_i += 1;
- }
- const hdtr: ?*std.c.sf_hdtr = if (headers_i == 0) null else b: {
- hdtr_data = .{
- .headers = &headers,
- .hdr_cnt = headers_i,
- .trailers = null,
- .trl_cnt = 0,
- };
- break :b &hdtr_data;
- };
- const max_count = maxInt(i32); // Avoid EINVAL.
- var len: std.c.off_t = @min(file_limit, max_count);
- const flags = 0;
- switch (posix.errno(std.c.sendfile(in_fd, out_fd, offset, &len, hdtr, flags))) {
- .SUCCESS, .INTR => {},
- .OPNOTSUPP, .NOTSOCK, .NOSYS => w.sendfile_err = error.UnsupportedOperation,
- .BADF => if (builtin.mode == .Debug) @panic("race condition") else {
- w.sendfile_err = error.Unexpected;
- },
- .FAULT => if (builtin.mode == .Debug) @panic("segmentation fault") else {
- w.sendfile_err = error.Unexpected;
- },
- .INVAL => if (builtin.mode == .Debug) @panic("invalid API usage") else {
- w.sendfile_err = error.Unexpected;
- },
- .NOTCONN => w.sendfile_err = error.BrokenPipe,
- .AGAIN => if (len == 0) {
- w.sendfile_err = error.WouldBlock;
- },
- .IO => w.sendfile_err = error.InputOutput,
- .PIPE => w.sendfile_err = error.BrokenPipe,
- else => |err| w.sendfile_err = posix.unexpectedErrno(err),
- }
- if (w.sendfile_err != null) {
- // Give calling code chance to observe the error before trying
- // something else.
- return 0;
- }
- if (len == 0) {
- file_reader.size = file_reader.pos;
- return error.EndOfStream;
- }
- const consumed = io_w.consume(@bitCast(len));
- file_reader.seekBy(@intCast(consumed)) catch return error.ReadFailed;
- return consumed;
- }
-
- if (native_os == .linux and w.mode == .streaming) sf: {
- // Try using sendfile on Linux.
- if (w.sendfile_err != null) break :sf;
- // Linux sendfile does not support headers.
- if (writer_buffered.len != 0 or reader_buffered.len != 0)
- return sendFileBuffered(io_w, file_reader, reader_buffered);
- const max_count = 0x7ffff000; // Avoid EINVAL.
- var off: std.os.linux.off_t = undefined;
- const off_ptr: ?*std.os.linux.off_t, const count: usize = switch (file_reader.mode) {
- .positional => o: {
- const size = file_reader.getSize() catch return 0;
- off = std.math.cast(std.os.linux.off_t, file_reader.pos) orelse return error.ReadFailed;
- break :o .{ &off, @min(@intFromEnum(limit), size - file_reader.pos, max_count) };
- },
- .streaming => .{ null, limit.minInt(max_count) },
- .streaming_reading, .positional_reading => break :sf,
- .failure => return error.ReadFailed,
- };
- const n = std.os.linux.wrapped.sendfile(out_fd, in_fd, off_ptr, count) catch |err| switch (err) {
- error.Unseekable => {
- file_reader.mode = file_reader.mode.toStreaming();
- const pos = file_reader.pos;
- if (pos != 0) {
- file_reader.pos = 0;
- file_reader.seekBy(@intCast(pos)) catch {
- file_reader.mode = .failure;
- return error.ReadFailed;
- };
- }
- return 0;
- },
- else => |e| {
- w.sendfile_err = e;
- return 0;
- },
- };
- if (n == 0) {
- file_reader.size = file_reader.pos;
- return error.EndOfStream;
- }
- file_reader.pos += n;
- w.pos += n;
- return n;
- }
-
- const copy_file_range = switch (native_os) {
- .freebsd => std.os.freebsd.copy_file_range,
- .linux => std.os.linux.wrapped.copy_file_range,
- else => {},
- };
- if (@TypeOf(copy_file_range) != void) cfr: {
- if (w.copy_file_range_err != null) break :cfr;
- if (writer_buffered.len != 0 or reader_buffered.len != 0)
- return sendFileBuffered(io_w, file_reader, reader_buffered);
- var off_in: i64 = undefined;
- var off_out: i64 = undefined;
- const off_in_ptr: ?*i64 = switch (file_reader.mode) {
- .positional_reading, .streaming_reading => return error.Unimplemented,
- .positional => p: {
- off_in = @intCast(file_reader.pos);
- break :p &off_in;
- },
- .streaming => null,
- .failure => return error.WriteFailed,
- };
- const off_out_ptr: ?*i64 = switch (w.mode) {
- .positional_reading, .streaming_reading => return error.Unimplemented,
- .positional => p: {
- off_out = @intCast(w.pos);
- break :p &off_out;
- },
- .streaming => null,
- .failure => return error.WriteFailed,
- };
- const n = copy_file_range(in_fd, off_in_ptr, out_fd, off_out_ptr, @intFromEnum(limit), 0) catch |err| {
- w.copy_file_range_err = err;
- return 0;
- };
- if (n == 0) {
- file_reader.size = file_reader.pos;
- return error.EndOfStream;
- }
- file_reader.pos += n;
- w.pos += n;
- return n;
- }
-
- if (builtin.os.tag.isDarwin()) fcf: {
- if (w.fcopyfile_err != null) break :fcf;
- if (file_reader.pos != 0) break :fcf;
- if (w.pos != 0) break :fcf;
- if (limit != .unlimited) break :fcf;
- const size = file_reader.getSize() catch break :fcf;
- if (writer_buffered.len != 0 or reader_buffered.len != 0)
- return sendFileBuffered(io_w, file_reader, reader_buffered);
- const rc = std.c.fcopyfile(in_fd, out_fd, null, .{ .DATA = true });
- switch (posix.errno(rc)) {
- .SUCCESS => {},
- .INVAL => if (builtin.mode == .Debug) @panic("invalid API usage") else {
- w.fcopyfile_err = error.Unexpected;
- return 0;
- },
- .NOMEM => {
- w.fcopyfile_err = error.OutOfMemory;
- return 0;
- },
- .OPNOTSUPP => {
- w.fcopyfile_err = error.OperationNotSupported;
- return 0;
- },
- else => |err| {
- w.fcopyfile_err = posix.unexpectedErrno(err);
- return 0;
- },
- }
- file_reader.pos = size;
- w.pos = size;
- return size;
- }
-
- return error.Unimplemented;
- }
-
- fn sendFileBuffered(
- io_w: *Io.Writer,
- file_reader: *Io.File.Reader,
- reader_buffered: []const u8,
- ) Io.Writer.FileError!usize {
- const n = try drain(io_w, &.{reader_buffered}, 1);
- file_reader.seekBy(@intCast(n)) catch return error.ReadFailed;
- return n;
- }
-
- pub fn seekTo(w: *Writer, offset: u64) (Writer.SeekError || Io.Writer.Error)!void {
- try w.interface.flush();
- try seekToUnbuffered(w, offset);
- }
-
- /// Asserts that no data is currently buffered.
- pub fn seekToUnbuffered(w: *Writer, offset: u64) Writer.SeekError!void {
- assert(w.interface.buffered().len == 0);
- switch (w.mode) {
- .positional, .positional_reading => {
- w.pos = offset;
- },
- .streaming, .streaming_reading => {
- if (w.seek_err) |err| return err;
- posix.lseek_SET(w.file.handle, offset) catch |err| {
- w.seek_err = err;
- return err;
- };
- w.pos = offset;
- },
- .failure => return w.seek_err.?,
- }
- }
-
- pub const EndError = SetEndPosError || Io.Writer.Error;
-
- /// Flushes any buffered data and sets the end position of the file.
- ///
- /// If not overwriting existing contents, then calling `interface.flush`
- /// directly is sufficient.
- ///
- /// Flush failure is handled by setting `err` so that it can be handled
- /// along with other write failures.
- pub fn end(w: *Writer) EndError!void {
- try w.interface.flush();
- switch (w.mode) {
- .positional,
- .positional_reading,
- => w.file.setEndPos(w.pos) catch |err| switch (err) {
- error.NonResizable => return,
- else => |e| return e,
- },
-
- .streaming,
- .streaming_reading,
- .failure,
- => {},
- }
- }
-};
-
-/// Defaults to positional reading; falls back to streaming.
-///
-/// Positional is more threadsafe, since the global seek position is not
-/// affected.
-pub fn reader(file: File, io: Io, buffer: []u8) Reader {
- return .init(.{ .handle = file.handle }, io, buffer);
-}
-
-/// Positional is more threadsafe, since the global seek position is not
-/// affected, but when such syscalls are not available, preemptively
-/// initializing in streaming mode skips a failed syscall.
-pub fn readerStreaming(file: File, io: Io, buffer: []u8) Reader {
- return .initStreaming(.{ .handle = file.handle }, io, buffer);
-}
-
-/// Defaults to positional reading; falls back to streaming.
-///
-/// Positional is more threadsafe, since the global seek position is not
-/// affected.
-pub fn writer(file: File, buffer: []u8) Writer {
- return .init(file, buffer);
-}
-
-/// Positional is more threadsafe, since the global seek position is not
-/// affected, but when such syscalls are not available, preemptively
-/// initializing in streaming mode will skip a failed syscall.
-pub fn writerStreaming(file: File, buffer: []u8) Writer {
- return .initStreaming(file, buffer);
-}
-
-const range_off: windows.LARGE_INTEGER = 0;
-const range_len: windows.LARGE_INTEGER = 1;
-
-pub const LockError = error{
- SystemResources,
- FileLocksNotSupported,
-} || posix.UnexpectedError;
-
-/// Blocks when an incompatible lock is held by another process.
-/// A process may hold only one type of lock (shared or exclusive) on
-/// a file. When a process terminates in any way, the lock is released.
-///
-/// Assumes the file is unlocked.
-///
-/// TODO: integrate with async I/O
-pub fn lock(file: File, l: Lock) LockError!void {
- if (is_windows) {
- var io_status_block: windows.IO_STATUS_BLOCK = undefined;
- const exclusive = switch (l) {
- .none => return,
- .shared => false,
- .exclusive => true,
- };
- return windows.LockFile(
- file.handle,
- null,
- null,
- null,
- &io_status_block,
- &range_off,
- &range_len,
- null,
- windows.FALSE, // non-blocking=false
- @intFromBool(exclusive),
- ) catch |err| switch (err) {
- error.WouldBlock => unreachable, // non-blocking=false
- else => |e| return e,
- };
- } else {
- return posix.flock(file.handle, switch (l) {
- .none => posix.LOCK.UN,
- .shared => posix.LOCK.SH,
- .exclusive => posix.LOCK.EX,
- }) catch |err| switch (err) {
- error.WouldBlock => unreachable, // non-blocking=false
- else => |e| return e,
- };
- }
-}
-
-/// Assumes the file is locked.
-pub fn unlock(file: File) void {
- if (is_windows) {
- var io_status_block: windows.IO_STATUS_BLOCK = undefined;
- return windows.UnlockFile(
- file.handle,
- &io_status_block,
- &range_off,
- &range_len,
- 0,
- ) catch |err| switch (err) {
- error.RangeNotLocked => unreachable, // Function assumes unlocked.
- error.Unexpected => unreachable, // Resource deallocation must succeed.
- };
- } else {
- return posix.flock(file.handle, posix.LOCK.UN) catch |err| switch (err) {
- error.WouldBlock => unreachable, // unlocking can't block
- error.SystemResources => unreachable, // We are deallocating resources.
- error.FileLocksNotSupported => unreachable, // We already got the lock.
- error.Unexpected => unreachable, // Resource deallocation must succeed.
- };
- }
-}
-
-/// Attempts to obtain a lock, returning `true` if the lock is
-/// obtained, and `false` if there was an existing incompatible lock held.
-/// A process may hold only one type of lock (shared or exclusive) on
-/// a file. When a process terminates in any way, the lock is released.
-///
-/// Assumes the file is unlocked.
-///
-/// TODO: integrate with async I/O
-pub fn tryLock(file: File, l: Lock) LockError!bool {
- if (is_windows) {
- var io_status_block: windows.IO_STATUS_BLOCK = undefined;
- const exclusive = switch (l) {
- .none => return,
- .shared => false,
- .exclusive => true,
- };
- windows.LockFile(
- file.handle,
- null,
- null,
- null,
- &io_status_block,
- &range_off,
- &range_len,
- null,
- windows.TRUE, // non-blocking=true
- @intFromBool(exclusive),
- ) catch |err| switch (err) {
- error.WouldBlock => return false,
- else => |e| return e,
- };
- } else {
- posix.flock(file.handle, switch (l) {
- .none => posix.LOCK.UN,
- .shared => posix.LOCK.SH | posix.LOCK.NB,
- .exclusive => posix.LOCK.EX | posix.LOCK.NB,
- }) catch |err| switch (err) {
- error.WouldBlock => return false,
- else => |e| return e,
- };
- }
- return true;
-}
-
-/// Assumes the file is already locked in exclusive mode.
-/// Atomically modifies the lock to be in shared mode, without releasing it.
-///
-/// TODO: integrate with async I/O
-pub fn downgradeLock(file: File) LockError!void {
- if (is_windows) {
- // On Windows it works like a semaphore + exclusivity flag. To implement this
- // function, we first obtain another lock in shared mode. This changes the
- // exclusivity flag, but increments the semaphore to 2. So we follow up with
- // an NtUnlockFile which decrements the semaphore but does not modify the
- // exclusivity flag.
- var io_status_block: windows.IO_STATUS_BLOCK = undefined;
- windows.LockFile(
- file.handle,
- null,
- null,
- null,
- &io_status_block,
- &range_off,
- &range_len,
- null,
- windows.TRUE, // non-blocking=true
- windows.FALSE, // exclusive=false
- ) catch |err| switch (err) {
- error.WouldBlock => unreachable, // File was not locked in exclusive mode.
- else => |e| return e,
- };
- return windows.UnlockFile(
- file.handle,
- &io_status_block,
- &range_off,
- &range_len,
- 0,
- ) catch |err| switch (err) {
- error.RangeNotLocked => unreachable, // File was not locked.
- error.Unexpected => unreachable, // Resource deallocation must succeed.
- };
- } else {
- return posix.flock(file.handle, posix.LOCK.SH | posix.LOCK.NB) catch |err| switch (err) {
- error.WouldBlock => unreachable, // File was not locked in exclusive mode.
- else => |e| return e,
- };
- }
-}
-
-pub fn adaptToNewApi(file: File) Io.File {
- return .{ .handle = file.handle };
-}
-
-pub fn adaptFromNewApi(file: Io.File) File {
- return .{ .handle = file.handle };
-}
diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig
index f00007756f..a8465f52d4 100644
--- a/lib/std/fs/path.zig
+++ b/lib/std/fs/path.zig
@@ -872,7 +872,7 @@ pub fn resolve(allocator: Allocator, paths: []const []const u8) Allocator.Error!
/// This function is like a series of `cd` statements executed one after another.
/// It resolves "." and ".." to the best of its ability, but will not convert relative paths to
-/// an absolute path, use std.fs.Dir.realpath instead.
+/// an absolute path, use Io.Dir.realpath instead.
/// ".." components may persist in the resolved path if the resolved path is relative or drive-relative.
/// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters.
///
@@ -1095,7 +1095,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) Allocator
/// This function is like a series of `cd` statements executed one after another.
/// It resolves "." and ".." to the best of its ability, but will not convert relative paths to
-/// an absolute path, use std.fs.Dir.realpath instead.
+/// an absolute path, use Io.Dir.realpath instead.
/// ".." components may persist in the resolved path if the resolved path is relative.
/// The result does not have a trailing path separator.
/// This function does not perform any syscalls. Executing this series of path
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 4a2c0117b1..bcb9048e0e 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -3,66 +3,89 @@ const native_os = builtin.os.tag;
const std = @import("../std.zig");
const Io = std.Io;
-const testing = std.testing;
-const fs = std.fs;
const mem = std.mem;
+const Allocator = std.mem.Allocator;
const wasi = std.os.wasi;
const windows = std.os.windows;
-const posix = std.posix;
-
const ArenaAllocator = std.heap.ArenaAllocator;
-const Dir = std.fs.Dir;
-const File = std.fs.File;
-const tmpDir = testing.tmpDir;
-const SymLinkFlags = std.fs.Dir.SymLinkFlags;
+const Dir = std.Io.Dir;
+const File = std.Io.File;
+const SymLinkFlags = std.Io.Dir.SymLinkFlags;
+
+const testing = std.testing;
+const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
+const expectEqualSlices = std.testing.expectEqualSlices;
+const expectEqualStrings = std.testing.expectEqualStrings;
+const expectError = std.testing.expectError;
+const tmpDir = std.testing.tmpDir;
const PathType = enum {
relative,
absolute,
unc,
- pub fn isSupported(self: PathType, target_os: std.Target.Os) bool {
+ fn isSupported(self: PathType, target_os: std.Target.Os) bool {
return switch (self) {
.relative => true,
- .absolute => std.os.isGetFdPathSupportedOnTarget(target_os),
+ .absolute => switch (target_os.tag) {
+ .windows,
+ .driverkit,
+ .ios,
+ .maccatalyst,
+ .macos,
+ .tvos,
+ .visionos,
+ .watchos,
+ .linux,
+ .illumos,
+ .freebsd,
+ .serenity,
+ => true,
+
+ .dragonfly => target_os.version_range.semver.max.order(.{ .major = 6, .minor = 0, .patch = 0 }) != .lt,
+ .netbsd => target_os.version_range.semver.max.order(.{ .major = 10, .minor = 0, .patch = 0 }) != .lt,
+ else => false,
+ },
.unc => target_os.tag == .windows,
};
}
- pub const TransformError = posix.RealPathError || error{OutOfMemory};
- pub const TransformFn = fn (allocator: mem.Allocator, dir: Dir, relative_path: [:0]const u8) TransformError![:0]const u8;
+ const TransformError = Dir.RealPathError || error{OutOfMemory};
+ const TransformFn = fn (Allocator, Io, Dir, relative_path: [:0]const u8) TransformError![:0]const u8;
- pub fn getTransformFn(comptime path_type: PathType) TransformFn {
+ fn getTransformFn(comptime path_type: PathType) TransformFn {
switch (path_type) {
.relative => return struct {
- fn transform(allocator: mem.Allocator, dir: Dir, relative_path: [:0]const u8) TransformError![:0]const u8 {
+ fn transform(allocator: Allocator, io: Io, dir: Dir, relative_path: [:0]const u8) TransformError![:0]const u8 {
_ = allocator;
+ _ = io;
_ = dir;
return relative_path;
}
}.transform,
.absolute => return struct {
- fn transform(allocator: mem.Allocator, dir: Dir, relative_path: [:0]const u8) TransformError![:0]const u8 {
+ fn transform(allocator: Allocator, io: Io, dir: Dir, relative_path: [:0]const u8) TransformError![:0]const u8 {
// The final path may not actually exist which would cause realpath to fail.
// So instead, we get the path of the dir and join it with the relative path.
- var fd_path_buf: [fs.max_path_bytes]u8 = undefined;
- const dir_path = try std.os.getFdPath(dir.fd, &fd_path_buf);
- return fs.path.joinZ(allocator, &.{ dir_path, relative_path });
+ var fd_path_buf: [Dir.max_path_bytes]u8 = undefined;
+ const dir_path = fd_path_buf[0..try dir.realPath(io, &fd_path_buf)];
+ return Dir.path.joinZ(allocator, &.{ dir_path, relative_path });
}
}.transform,
.unc => return struct {
- fn transform(allocator: mem.Allocator, dir: Dir, relative_path: [:0]const u8) TransformError![:0]const u8 {
+ fn transform(allocator: Allocator, io: Io, dir: Dir, relative_path: [:0]const u8) TransformError![:0]const u8 {
// Any drive absolute path (C:\foo) can be converted into a UNC path by
// using '127.0.0.1' as the server name and '<drive letter>$' as the share name.
- var fd_path_buf: [fs.max_path_bytes]u8 = undefined;
- const dir_path = try std.os.getFdPath(dir.fd, &fd_path_buf);
+ var fd_path_buf: [Dir.max_path_bytes]u8 = undefined;
+ const dir_path = fd_path_buf[0..try dir.realPath(io, &fd_path_buf)];
const windows_path_type = windows.getWin32PathType(u8, dir_path);
switch (windows_path_type) {
- .unc_absolute => return fs.path.joinZ(allocator, &.{ dir_path, relative_path }),
+ .unc_absolute => return Dir.path.joinZ(allocator, &.{ dir_path, relative_path }),
.drive_absolute => {
// `C:\<...>` -> `\\127.0.0.1\C$\<...>`
const prepended = "\\\\127.0.0.1\\";
- var path = try fs.path.joinZ(allocator, &.{ prepended, dir_path, relative_path });
+ var path = try Dir.path.joinZ(allocator, &.{ prepended, dir_path, relative_path });
path[prepended.len + 1] = '$';
return path;
},
@@ -80,10 +103,10 @@ const TestContext = struct {
path_sep: u8,
arena: ArenaAllocator,
tmp: testing.TmpDir,
- dir: std.fs.Dir,
+ dir: Dir,
transform_fn: *const PathType.TransformFn,
- pub fn init(path_type: PathType, path_sep: u8, allocator: mem.Allocator, transform_fn: *const PathType.TransformFn) TestContext {
+ pub fn init(path_type: PathType, path_sep: u8, allocator: Allocator, transform_fn: *const PathType.TransformFn) TestContext {
const tmp = tmpDir(.{ .iterate = true });
return .{
.io = testing.io,
@@ -107,7 +130,7 @@ const TestContext = struct {
/// `TestContext.deinit`.
pub fn transformPath(self: *TestContext, relative_path: [:0]const u8) ![:0]const u8 {
const allocator = self.arena.allocator();
- const transformed_path = try self.transform_fn(allocator, self.dir, relative_path);
+ const transformed_path = try self.transform_fn(allocator, self.io, self.dir, relative_path);
if (native_os == .windows) {
const transformed_sep_path = try allocator.dupeZ(u8, transformed_path);
std.mem.replaceScalar(u8, transformed_sep_path, switch (self.path_sep) {
@@ -150,7 +173,7 @@ fn testWithAllSupportedPathTypes(test_func: anytype) !void {
fn testWithPathTypeIfSupported(comptime path_type: PathType, comptime path_sep: u8, test_func: anytype) !void {
if (!(comptime path_type.isSupported(builtin.os))) return;
- if (!(comptime fs.path.isSep(path_sep))) return;
+ if (!(comptime Dir.path.isSep(path_sep))) return;
var ctx = TestContext.init(path_type, path_sep, testing.allocator, path_type.getTransformFn());
defer ctx.deinit();
@@ -160,8 +183,8 @@ fn testWithPathTypeIfSupported(comptime path_type: PathType, comptime path_sep:
// For use in test setup. If the symlink creation fails on Windows with
// AccessDenied, then make the test failure silent (it is not a Zig failure).
-fn setupSymlink(dir: Dir, target: []const u8, link: []const u8, flags: SymLinkFlags) !void {
- return dir.symLink(target, link, flags) catch |err| switch (err) {
+fn setupSymlink(io: Io, dir: Dir, target: []const u8, link: []const u8, flags: SymLinkFlags) !void {
+ return dir.symLink(io, target, link, flags) catch |err| switch (err) {
// Symlink requires admin privileges on windows, so this test can legitimately fail.
error.AccessDenied => if (native_os == .windows) return error.SkipZigTest else return err,
else => return err,
@@ -170,50 +193,43 @@ fn setupSymlink(dir: Dir, target: []const u8, link: []const u8, flags: SymLinkFl
// For use in test setup. If the symlink creation fails on Windows with
// AccessDenied, then make the test failure silent (it is not a Zig failure).
-fn setupSymlinkAbsolute(target: []const u8, link: []const u8, flags: SymLinkFlags) !void {
- return fs.symLinkAbsolute(target, link, flags) catch |err| switch (err) {
+fn setupSymlinkAbsolute(io: Io, target: []const u8, link: []const u8, flags: SymLinkFlags) !void {
+ return Dir.symLinkAbsolute(io, target, link, flags) catch |err| switch (err) {
error.AccessDenied => if (native_os == .windows) return error.SkipZigTest else return err,
else => return err,
};
}
test "Dir.readLink" {
+ const io = testing.io;
+
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
// Create some targets
const file_target_path = try ctx.transformPath("file.txt");
- try ctx.dir.writeFile(.{ .sub_path = file_target_path, .data = "nonsense" });
+ try ctx.dir.writeFile(io, .{ .sub_path = file_target_path, .data = "nonsense" });
const dir_target_path = try ctx.transformPath("subdir");
- try ctx.dir.makeDir(dir_target_path);
+ try ctx.dir.createDir(io, dir_target_path, .default_dir);
// On Windows, symlink targets always use the canonical path separator
const canonical_file_target_path = try ctx.toCanonicalPathSep(file_target_path);
const canonical_dir_target_path = try ctx.toCanonicalPathSep(dir_target_path);
// test 1: symlink to a file
- try setupSymlink(ctx.dir, file_target_path, "symlink1", .{});
- try testReadLink(ctx.dir, canonical_file_target_path, "symlink1");
- if (builtin.os.tag == .windows) {
- try testReadLinkW(testing.allocator, ctx.dir, canonical_file_target_path, "symlink1");
- }
+ try setupSymlink(io, ctx.dir, file_target_path, "symlink1", .{});
+ try testReadLink(io, ctx.dir, canonical_file_target_path, "symlink1");
// test 2: symlink to a directory (can be different on Windows)
- try setupSymlink(ctx.dir, dir_target_path, "symlink2", .{ .is_directory = true });
- try testReadLink(ctx.dir, canonical_dir_target_path, "symlink2");
- if (builtin.os.tag == .windows) {
- try testReadLinkW(testing.allocator, ctx.dir, canonical_dir_target_path, "symlink2");
- }
+ try setupSymlink(io, ctx.dir, dir_target_path, "symlink2", .{ .is_directory = true });
+ try testReadLink(io, ctx.dir, canonical_dir_target_path, "symlink2");
// test 3: relative path symlink
- const parent_file = ".." ++ fs.path.sep_str ++ "target.txt";
+ const parent_file = ".." ++ Dir.path.sep_str ++ "target.txt";
const canonical_parent_file = try ctx.toCanonicalPathSep(parent_file);
- var subdir = try ctx.dir.makeOpenPath("subdir", .{});
- defer subdir.close();
- try setupSymlink(subdir, canonical_parent_file, "relative-link.txt", .{});
- try testReadLink(subdir, canonical_parent_file, "relative-link.txt");
- if (builtin.os.tag == .windows) {
- try testReadLinkW(testing.allocator, subdir, canonical_parent_file, "relative-link.txt");
- }
+ var subdir = try ctx.dir.createDirPathOpen(io, "subdir", .{});
+ defer subdir.close(io);
+ try setupSymlink(io, subdir, canonical_parent_file, "relative-link.txt", .{});
+ try testReadLink(io, subdir, canonical_parent_file, "relative-link.txt");
}
}.impl);
}
@@ -221,55 +237,39 @@ test "Dir.readLink" {
test "Dir.readLink on non-symlinks" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const file_path = try ctx.transformPath("file.txt");
- try ctx.dir.writeFile(.{ .sub_path = file_path, .data = "nonsense" });
+ try ctx.dir.writeFile(io, .{ .sub_path = file_path, .data = "nonsense" });
const dir_path = try ctx.transformPath("subdir");
- try ctx.dir.makeDir(dir_path);
+ try ctx.dir.createDir(io, dir_path, .default_dir);
// file
- var buffer: [fs.max_path_bytes]u8 = undefined;
- try std.testing.expectError(error.NotLink, ctx.dir.readLink(file_path, &buffer));
- if (builtin.os.tag == .windows) {
- var file_path_w = try std.os.windows.sliceToPrefixedFileW(ctx.dir.fd, file_path);
- try std.testing.expectError(error.NotLink, ctx.dir.readLinkW(file_path_w.span(), &file_path_w.data));
- }
+ var buffer: [Dir.max_path_bytes]u8 = undefined;
+ try std.testing.expectError(error.NotLink, ctx.dir.readLink(io, file_path, &buffer));
// dir
- try std.testing.expectError(error.NotLink, ctx.dir.readLink(dir_path, &buffer));
- if (builtin.os.tag == .windows) {
- var dir_path_w = try std.os.windows.sliceToPrefixedFileW(ctx.dir.fd, dir_path);
- try std.testing.expectError(error.NotLink, ctx.dir.readLinkW(dir_path_w.span(), &dir_path_w.data));
- }
+ try std.testing.expectError(error.NotLink, ctx.dir.readLink(io, dir_path, &buffer));
}
}.impl);
}
-fn testReadLink(dir: Dir, target_path: []const u8, symlink_path: []const u8) !void {
- var buffer: [fs.max_path_bytes]u8 = undefined;
- const actual = try dir.readLink(symlink_path, buffer[0..]);
- try testing.expectEqualStrings(target_path, actual);
-}
-
-fn testReadLinkW(allocator: mem.Allocator, dir: Dir, target_path: []const u8, symlink_path: []const u8) !void {
- const target_path_w = try std.unicode.wtf8ToWtf16LeAlloc(allocator, target_path);
- defer allocator.free(target_path_w);
- // Calling the W functions directly requires the path to be NT-prefixed
- const symlink_path_w = try std.os.windows.sliceToPrefixedFileW(dir.fd, symlink_path);
- const wtf16_buffer = try allocator.alloc(u16, target_path_w.len);
- defer allocator.free(wtf16_buffer);
- const actual = try dir.readLinkW(symlink_path_w.span(), wtf16_buffer);
- try testing.expectEqualSlices(u16, target_path_w, actual);
+fn testReadLink(io: Io, dir: Dir, target_path: []const u8, symlink_path: []const u8) !void {
+ var buffer: [Dir.max_path_bytes]u8 = undefined;
+ const actual = buffer[0..try dir.readLink(io, symlink_path, &buffer)];
+ try expectEqualStrings(target_path, actual);
}
-fn testReadLinkAbsolute(target_path: []const u8, symlink_path: []const u8) !void {
- var buffer: [fs.max_path_bytes]u8 = undefined;
- const given = try fs.readLinkAbsolute(symlink_path, buffer[0..]);
- try testing.expectEqualStrings(target_path, given);
+fn testReadLinkAbsolute(io: Io, target_path: []const u8, symlink_path: []const u8) !void {
+ var buffer: [Dir.max_path_bytes]u8 = undefined;
+ const given = buffer[0..try Dir.readLinkAbsolute(io, symlink_path, &buffer)];
+ try expectEqualStrings(target_path, given);
}
test "File.stat on a File that is a symlink returns Kind.sym_link" {
- // This test requires getting a file descriptor of a symlink which
- // is not possible on all targets
+ const io = testing.io;
+
+ // This test requires getting a file descriptor of a symlink which is not
+ // possible on all targets.
switch (builtin.target.os.tag) {
.windows, .linux => {},
else => return error.SkipZigTest,
@@ -278,99 +278,35 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const dir_target_path = try ctx.transformPath("subdir");
- try ctx.dir.makeDir(dir_target_path);
-
- try setupSymlink(ctx.dir, dir_target_path, "symlink", .{ .is_directory = true });
-
- var symlink: Dir = switch (builtin.target.os.tag) {
- .windows => windows_symlink: {
- const sub_path_w = try windows.cStrToPrefixedFileW(ctx.dir.fd, "symlink");
-
- var handle: windows.HANDLE = undefined;
-
- const path_len_bytes = @as(u16, @intCast(sub_path_w.span().len * 2));
- var nt_name = windows.UNICODE_STRING{
- .Length = path_len_bytes,
- .MaximumLength = path_len_bytes,
- .Buffer = @constCast(&sub_path_w.data),
- };
- var attr: windows.OBJECT_ATTRIBUTES = .{
- .Length = @sizeOf(windows.OBJECT_ATTRIBUTES),
- .RootDirectory = if (fs.path.isAbsoluteWindowsW(sub_path_w.span())) null else ctx.dir.fd,
- .Attributes = .{},
- .ObjectName = &nt_name,
- .SecurityDescriptor = null,
- .SecurityQualityOfService = null,
- };
- var io: windows.IO_STATUS_BLOCK = undefined;
- const rc = windows.ntdll.NtCreateFile(
- &handle,
- .{
- .SPECIFIC = .{ .FILE_DIRECTORY = .{
- .READ_EA = true,
- .TRAVERSE = true,
- .READ_ATTRIBUTES = true,
- } },
- .STANDARD = .{
- .RIGHTS = .READ,
- .SYNCHRONIZE = true,
- },
- },
- &attr,
- &io,
- null,
- .{ .NORMAL = true },
- .VALID_FLAGS,
- .OPEN,
- .{
- .DIRECTORY_FILE = true,
- .IO = .SYNCHRONOUS_NONALERT,
- .OPEN_FOR_BACKUP_INTENT = true,
- .OPEN_REPARSE_POINT = true, // the important thing here
- },
- null,
- 0,
- );
+ try ctx.dir.createDir(io, dir_target_path, .default_dir);
- switch (rc) {
- .SUCCESS => break :windows_symlink .{ .fd = handle },
- else => return windows.unexpectedStatus(rc),
- }
- },
- .linux => linux_symlink: {
- const sub_path_c = try posix.toPosixPath("symlink");
- // the O_NOFOLLOW | O_PATH combination can obtain a fd to a symlink
- // note that if O_DIRECTORY is set, then this will error with ENOTDIR
- const flags: posix.O = .{
- .NOFOLLOW = true,
- .PATH = true,
- .ACCMODE = .RDONLY,
- .CLOEXEC = true,
- };
- const fd = try posix.openatZ(ctx.dir.fd, &sub_path_c, flags, 0);
- break :linux_symlink Dir{ .fd = fd };
- },
- else => unreachable,
- };
- defer symlink.close();
+ try setupSymlink(io, ctx.dir, dir_target_path, "symlink", .{ .is_directory = true });
+
+ var symlink: File = try ctx.dir.openFile(io, "symlink", .{
+ .follow_symlinks = false,
+ .path_only = true,
+ });
+ defer symlink.close(io);
- const stat = try symlink.stat();
- try testing.expectEqual(File.Kind.sym_link, stat.kind);
+ const stat = try symlink.stat(io);
+ try expectEqual(File.Kind.sym_link, stat.kind);
}
}.impl);
}
test "openDir" {
+ const io = testing.io;
+
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const allocator = ctx.arena.allocator();
const subdir_path = try ctx.transformPath("subdir");
- try ctx.dir.makeDir(subdir_path);
+ try ctx.dir.createDir(io, subdir_path, .default_dir);
for ([_][]const u8{ "", ".", ".." }) |sub_path| {
- const dir_path = try fs.path.join(allocator, &.{ subdir_path, sub_path });
- var dir = try ctx.dir.openDir(dir_path, .{});
- defer dir.close();
+ const dir_path = try Dir.path.join(allocator, &.{ subdir_path, sub_path });
+ var dir = try ctx.dir.openDir(io, dir_path, .{});
+ defer dir.close(io);
}
}
}.impl);
@@ -380,79 +316,87 @@ test "accessAbsolute" {
if (native_os == .wasi) return error.SkipZigTest;
if (native_os == .openbsd) return error.SkipZigTest;
+ const io = testing.io;
+ const gpa = testing.allocator;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
- const base_path = try tmp.dir.realpathAlloc(testing.allocator, ".");
- defer testing.allocator.free(base_path);
+ const base_path = try tmp.dir.realPathFileAlloc(io, ".", gpa);
+ defer gpa.free(base_path);
- try fs.accessAbsolute(base_path, .{});
+ try Dir.accessAbsolute(io, base_path, .{});
}
test "openDirAbsolute" {
if (native_os == .wasi) return error.SkipZigTest;
if (native_os == .openbsd) return error.SkipZigTest;
+ const io = testing.io;
+ const gpa = testing.allocator;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
- const tmp_ino = (try tmp.dir.stat()).inode;
+ const tmp_ino = (try tmp.dir.stat(io)).inode;
- try tmp.dir.makeDir("subdir");
- const sub_path = try tmp.dir.realpathAlloc(testing.allocator, "subdir");
- defer testing.allocator.free(sub_path);
+ try tmp.dir.createDir(io, "subdir", .default_dir);
+ const sub_path = try tmp.dir.realPathFileAlloc(io, "subdir", gpa);
+ defer gpa.free(sub_path);
// Can open sub_path
- var tmp_sub = try fs.openDirAbsolute(sub_path, .{});
- defer tmp_sub.close();
+ var tmp_sub = try Dir.openDirAbsolute(io, sub_path, .{});
+ defer tmp_sub.close(io);
- const sub_ino = (try tmp_sub.stat()).inode;
+ const sub_ino = (try tmp_sub.stat(io)).inode;
{
// Can open sub_path + ".."
- const dir_path = try fs.path.join(testing.allocator, &.{ sub_path, ".." });
+ const dir_path = try Dir.path.join(testing.allocator, &.{ sub_path, ".." });
defer testing.allocator.free(dir_path);
- var dir = try fs.openDirAbsolute(dir_path, .{});
- defer dir.close();
+ var dir = try Dir.openDirAbsolute(io, dir_path, .{});
+ defer dir.close(io);
- const ino = (try dir.stat()).inode;
- try testing.expectEqual(tmp_ino, ino);
+ const ino = (try dir.stat(io)).inode;
+ try expectEqual(tmp_ino, ino);
}
{
// Can open sub_path + "."
- const dir_path = try fs.path.join(testing.allocator, &.{ sub_path, "." });
+ const dir_path = try Dir.path.join(testing.allocator, &.{ sub_path, "." });
defer testing.allocator.free(dir_path);
- var dir = try fs.openDirAbsolute(dir_path, .{});
- defer dir.close();
+ var dir = try Dir.openDirAbsolute(io, dir_path, .{});
+ defer dir.close(io);
- const ino = (try dir.stat()).inode;
- try testing.expectEqual(sub_ino, ino);
+ const ino = (try dir.stat(io)).inode;
+ try expectEqual(sub_ino, ino);
}
{
// Can open subdir + "..", with some extra "."
- const dir_path = try fs.path.join(testing.allocator, &.{ sub_path, ".", "..", "." });
+ const dir_path = try Dir.path.join(testing.allocator, &.{ sub_path, ".", "..", "." });
defer testing.allocator.free(dir_path);
- var dir = try fs.openDirAbsolute(dir_path, .{});
- defer dir.close();
+ var dir = try Dir.openDirAbsolute(io, dir_path, .{});
+ defer dir.close(io);
- const ino = (try dir.stat()).inode;
- try testing.expectEqual(tmp_ino, ino);
+ const ino = (try dir.stat(io)).inode;
+ try expectEqual(tmp_ino, ino);
}
}
test "openDir cwd parent '..'" {
- var dir = fs.cwd().openDir("..", .{}) catch |err| {
+ const io = testing.io;
+
+ var dir = Dir.cwd().openDir(io, "..", .{}) catch |err| {
if (native_os == .wasi and err == error.PermissionDenied) {
return; // This is okay. WASI disallows escaping from the fs sandbox
}
return err;
};
- defer dir.close();
+ defer dir.close(io);
}
test "openDir non-cwd parent '..'" {
@@ -461,69 +405,76 @@ test "openDir non-cwd parent '..'" {
else => {},
}
+ const io = testing.io;
+ const gpa = testing.allocator;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
- var subdir = try tmp.dir.makeOpenPath("subdir", .{});
- defer subdir.close();
+ var subdir = try tmp.dir.createDirPathOpen(io, "subdir", .{});
+ defer subdir.close(io);
- var dir = try subdir.openDir("..", .{});
- defer dir.close();
+ var dir = try subdir.openDir(io, "..", .{});
+ defer dir.close(io);
- const expected_path = try tmp.dir.realpathAlloc(testing.allocator, ".");
- defer testing.allocator.free(expected_path);
+ const expected_path = try tmp.dir.realPathFileAlloc(io, ".", gpa);
+ defer gpa.free(expected_path);
- const actual_path = try dir.realpathAlloc(testing.allocator, ".");
- defer testing.allocator.free(actual_path);
+ const actual_path = try dir.realPathFileAlloc(io, ".", gpa);
+ defer gpa.free(actual_path);
- try testing.expectEqualStrings(expected_path, actual_path);
+ try expectEqualStrings(expected_path, actual_path);
}
test "readLinkAbsolute" {
if (native_os == .wasi) return error.SkipZigTest;
if (native_os == .openbsd) return error.SkipZigTest;
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
// Create some targets
- try tmp.dir.writeFile(.{ .sub_path = "file.txt", .data = "nonsense" });
- try tmp.dir.makeDir("subdir");
+ try tmp.dir.writeFile(io, .{ .sub_path = "file.txt", .data = "nonsense" });
+ try tmp.dir.createDir(io, "subdir", .default_dir);
// Get base abs path
- var arena = ArenaAllocator.init(testing.allocator);
- defer arena.deinit();
- const allocator = arena.allocator();
+ var arena_allocator = ArenaAllocator.init(testing.allocator);
+ defer arena_allocator.deinit();
+ const arena = arena_allocator.allocator();
- const base_path = try tmp.dir.realpathAlloc(allocator, ".");
+ const base_path = try tmp.dir.realPathFileAlloc(io, ".", arena);
{
- const target_path = try fs.path.join(allocator, &.{ base_path, "file.txt" });
- const symlink_path = try fs.path.join(allocator, &.{ base_path, "symlink1" });
+ const target_path = try Dir.path.join(arena, &.{ base_path, "file.txt" });
+ const symlink_path = try Dir.path.join(arena, &.{ base_path, "symlink1" });
// Create symbolic link by path
- try setupSymlinkAbsolute(target_path, symlink_path, .{});
- try testReadLinkAbsolute(target_path, symlink_path);
+ try setupSymlinkAbsolute(io, target_path, symlink_path, .{});
+ try testReadLinkAbsolute(io, target_path, symlink_path);
}
{
- const target_path = try fs.path.join(allocator, &.{ base_path, "subdir" });
- const symlink_path = try fs.path.join(allocator, &.{ base_path, "symlink2" });
+ const target_path = try Dir.path.join(arena, &.{ base_path, "subdir" });
+ const symlink_path = try Dir.path.join(arena, &.{ base_path, "symlink2" });
// Create symbolic link to a directory by path
- try setupSymlinkAbsolute(target_path, symlink_path, .{ .is_directory = true });
- try testReadLinkAbsolute(target_path, symlink_path);
+ try setupSymlinkAbsolute(io, target_path, symlink_path, .{ .is_directory = true });
+ try testReadLinkAbsolute(io, target_path, symlink_path);
}
}
test "Dir.Iterator" {
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();
// First, create a couple of entries to iterate over.
- const file = try tmp_dir.dir.createFile("some_file", .{});
- file.close();
+ const file = try tmp_dir.dir.createFile(io, "some_file", .{});
+ file.close(io);
- try tmp_dir.dir.makeDir("some_dir");
+ try tmp_dir.dir.createDir(io, "some_dir", .default_dir);
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
@@ -533,19 +484,21 @@ test "Dir.Iterator" {
// Create iterator.
var iter = tmp_dir.dir.iterate();
- while (try iter.next()) |entry| {
+ while (try iter.next(io)) |entry| {
// We cannot just store `entry` as on Windows, we're re-using the name buffer
// which means we'll actually share the `name` pointer between entries!
const name = try allocator.dupe(u8, entry.name);
- try entries.append(Dir.Entry{ .name = name, .kind = entry.kind });
+ try entries.append(Dir.Entry{ .name = name, .kind = entry.kind, .inode = 0 });
}
- try testing.expectEqual(@as(usize, 2), entries.items.len); // note that the Iterator skips '.' and '..'
- try testing.expect(contains(&entries, .{ .name = "some_file", .kind = .file }));
- try testing.expect(contains(&entries, .{ .name = "some_dir", .kind = .directory }));
+ try expectEqual(@as(usize, 2), entries.items.len); // note that the Iterator skips '.' and '..'
+ try expect(contains(&entries, .{ .name = "some_file", .kind = .file, .inode = 0 }));
+ try expect(contains(&entries, .{ .name = "some_dir", .kind = .directory, .inode = 0 }));
}
test "Dir.Iterator many entries" {
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();
@@ -554,8 +507,8 @@ test "Dir.Iterator many entries" {
var buf: [4]u8 = undefined; // Enough to store "1024".
while (i < num) : (i += 1) {
const name = try std.fmt.bufPrint(&buf, "{}", .{i});
- const file = try tmp_dir.dir.createFile(name, .{});
- file.close();
+ const file = try tmp_dir.dir.createFile(io, name, .{});
+ file.close(io);
}
var arena = ArenaAllocator.init(testing.allocator);
@@ -566,29 +519,31 @@ test "Dir.Iterator many entries" {
// Create iterator.
var iter = tmp_dir.dir.iterate();
- while (try iter.next()) |entry| {
+ while (try iter.next(io)) |entry| {
// We cannot just store `entry` as on Windows, we're re-using the name buffer
// which means we'll actually share the `name` pointer between entries!
const name = try allocator.dupe(u8, entry.name);
- try entries.append(.{ .name = name, .kind = entry.kind });
+ try entries.append(.{ .name = name, .kind = entry.kind, .inode = 0 });
}
i = 0;
while (i < num) : (i += 1) {
const name = try std.fmt.bufPrint(&buf, "{}", .{i});
- try testing.expect(contains(&entries, .{ .name = name, .kind = .file }));
+ try expect(contains(&entries, .{ .name = name, .kind = .file, .inode = 0 }));
}
}
test "Dir.Iterator twice" {
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();
// First, create a couple of entries to iterate over.
- const file = try tmp_dir.dir.createFile("some_file", .{});
- file.close();
+ const file = try tmp_dir.dir.createFile(io, "some_file", .{});
+ file.close(io);
- try tmp_dir.dir.makeDir("some_dir");
+ try tmp_dir.dir.createDir(io, "some_dir", .default_dir);
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
@@ -600,28 +555,30 @@ test "Dir.Iterator twice" {
// Create iterator.
var iter = tmp_dir.dir.iterate();
- while (try iter.next()) |entry| {
+ while (try iter.next(io)) |entry| {
// We cannot just store `entry` as on Windows, we're re-using the name buffer
// which means we'll actually share the `name` pointer between entries!
const name = try allocator.dupe(u8, entry.name);
- try entries.append(Dir.Entry{ .name = name, .kind = entry.kind });
+ try entries.append(Dir.Entry{ .name = name, .kind = entry.kind, .inode = 0 });
}
- try testing.expectEqual(@as(usize, 2), entries.items.len); // note that the Iterator skips '.' and '..'
- try testing.expect(contains(&entries, .{ .name = "some_file", .kind = .file }));
- try testing.expect(contains(&entries, .{ .name = "some_dir", .kind = .directory }));
+ try expectEqual(@as(usize, 2), entries.items.len); // note that the Iterator skips '.' and '..'
+ try expect(contains(&entries, .{ .name = "some_file", .kind = .file, .inode = 0 }));
+ try expect(contains(&entries, .{ .name = "some_dir", .kind = .directory, .inode = 0 }));
}
}
test "Dir.Iterator reset" {
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();
// First, create a couple of entries to iterate over.
- const file = try tmp_dir.dir.createFile("some_file", .{});
- file.close();
+ const file = try tmp_dir.dir.createFile(io, "some_file", .{});
+ file.close(io);
- try tmp_dir.dir.makeDir("some_dir");
+ try tmp_dir.dir.createDir(io, "some_dir", .default_dir);
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
@@ -634,48 +591,45 @@ test "Dir.Iterator reset" {
while (i < 2) : (i += 1) {
var entries = std.array_list.Managed(Dir.Entry).init(allocator);
- while (try iter.next()) |entry| {
+ while (try iter.next(io)) |entry| {
// We cannot just store `entry` as on Windows, we're re-using the name buffer
// which means we'll actually share the `name` pointer between entries!
const name = try allocator.dupe(u8, entry.name);
- try entries.append(.{ .name = name, .kind = entry.kind });
+ try entries.append(.{ .name = name, .kind = entry.kind, .inode = 0 });
}
- try testing.expectEqual(@as(usize, 2), entries.items.len); // note that the Iterator skips '.' and '..'
- try testing.expect(contains(&entries, .{ .name = "some_file", .kind = .file }));
- try testing.expect(contains(&entries, .{ .name = "some_dir", .kind = .directory }));
+ try expectEqual(@as(usize, 2), entries.items.len); // note that the Iterator skips '.' and '..'
+ try expect(contains(&entries, .{ .name = "some_file", .kind = .file, .inode = 0 }));
+ try expect(contains(&entries, .{ .name = "some_dir", .kind = .directory, .inode = 0 }));
- iter.reset();
+ iter.reader.reset();
}
}
test "Dir.Iterator but dir is deleted during iteration" {
+ const io = testing.io;
+
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
// Create directory and setup an iterator for it
- var subdir = try tmp.dir.makeOpenPath("subdir", .{ .iterate = true });
- defer subdir.close();
+ var subdir = try tmp.dir.createDirPathOpen(io, "subdir", .{ .open_options = .{ .iterate = true } });
+ defer subdir.close(io);
var iterator = subdir.iterate();
// Create something to iterate over within the subdir
- try tmp.dir.makePath("subdir" ++ fs.path.sep_str ++ "b");
+ try tmp.dir.createDirPath(io, "subdir" ++ Dir.path.sep_str ++ "b");
// Then, before iterating, delete the directory that we're iterating.
// This is a contrived reproduction, but this could happen outside of the program, in another thread, etc.
// If we get an error while trying to delete, we can skip this test (this will happen on platforms
// like Windows which will give FileBusy if the directory is currently open for iteration).
- tmp.dir.deleteTree("subdir") catch return error.SkipZigTest;
+ tmp.dir.deleteTree(io, "subdir") catch return error.SkipZigTest;
// Now, when we try to iterate, the next call should return null immediately.
- const entry = try iterator.next();
- try std.testing.expect(entry == null);
-
- // On Linux, we can opt-in to receiving a more specific error by calling `nextLinux`
- if (native_os == .linux) {
- try std.testing.expectError(error.DirNotFound, iterator.nextLinux());
- }
+ const entry = try iterator.next(io);
+ try testing.expect(entry == null);
}
fn entryEql(lhs: Dir.Entry, rhs: Dir.Entry) bool {
@@ -689,112 +643,122 @@ fn contains(entries: *const std.array_list.Managed(Dir.Entry), el: Dir.Entry) bo
return false;
}
-test "Dir.realpath smoke test" {
- if (!comptime std.os.isGetFdPathSupportedOnTarget(builtin.os)) return error.SkipZigTest;
+test "Dir.realPath smoke test" {
+ if (native_os == .wasi) return error.SkipZigTest;
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
- const allocator = ctx.arena.allocator();
+ const io = ctx.io;
+ const arena = ctx.arena.allocator();
const test_file_path = try ctx.transformPath("test_file");
const test_dir_path = try ctx.transformPath("test_dir");
- var buf: [fs.max_path_bytes]u8 = undefined;
+ var buf: [Dir.max_path_bytes]u8 = undefined;
// FileNotFound if the path doesn't exist
- try testing.expectError(error.FileNotFound, ctx.dir.realpathAlloc(allocator, test_file_path));
- try testing.expectError(error.FileNotFound, ctx.dir.realpath(test_file_path, &buf));
- try testing.expectError(error.FileNotFound, ctx.dir.realpathAlloc(allocator, test_dir_path));
- try testing.expectError(error.FileNotFound, ctx.dir.realpath(test_dir_path, &buf));
+ try expectError(error.FileNotFound, ctx.dir.realPathFileAlloc(io, test_file_path, arena));
+ try expectError(error.FileNotFound, ctx.dir.realPathFile(io, test_file_path, &buf));
+ try expectError(error.FileNotFound, ctx.dir.realPathFileAlloc(io, test_dir_path, arena));
+ try expectError(error.FileNotFound, ctx.dir.realPathFile(io, test_dir_path, &buf));
// Now create the file and dir
- try ctx.dir.writeFile(.{ .sub_path = test_file_path, .data = "" });
- try ctx.dir.makeDir(test_dir_path);
+ try ctx.dir.writeFile(io, .{ .sub_path = test_file_path, .data = "" });
+ try ctx.dir.createDir(io, test_dir_path, .default_dir);
const base_path = try ctx.transformPath(".");
- const base_realpath = try ctx.dir.realpathAlloc(allocator, base_path);
- const expected_file_path = try fs.path.join(
- allocator,
- &.{ base_realpath, "test_file" },
- );
- const expected_dir_path = try fs.path.join(
- allocator,
- &.{ base_realpath, "test_dir" },
- );
+ const base_realpath = try ctx.dir.realPathFileAlloc(io, base_path, arena);
+ const expected_file_path = try Dir.path.join(arena, &.{ base_realpath, "test_file" });
+ const expected_dir_path = try Dir.path.join(arena, &.{ base_realpath, "test_dir" });
// First, test non-alloc version
{
- const file_path = try ctx.dir.realpath(test_file_path, &buf);
- try testing.expectEqualStrings(expected_file_path, file_path);
+ const file_path = buf[0..try ctx.dir.realPathFile(io, test_file_path, &buf)];
+ try expectEqualStrings(expected_file_path, file_path);
- const dir_path = try ctx.dir.realpath(test_dir_path, &buf);
- try testing.expectEqualStrings(expected_dir_path, dir_path);
+ const dir_path = buf[0..try ctx.dir.realPathFile(io, test_dir_path, &buf)];
+ try expectEqualStrings(expected_dir_path, dir_path);
}
// Next, test alloc version
{
- const file_path = try ctx.dir.realpathAlloc(allocator, test_file_path);
- try testing.expectEqualStrings(expected_file_path, file_path);
+ const file_path = try ctx.dir.realPathFileAlloc(io, test_file_path, arena);
+ try expectEqualStrings(expected_file_path, file_path);
- const dir_path = try ctx.dir.realpathAlloc(allocator, test_dir_path);
- try testing.expectEqualStrings(expected_dir_path, dir_path);
+ const dir_path = try ctx.dir.realPathFileAlloc(io, test_dir_path, arena);
+ try expectEqualStrings(expected_dir_path, dir_path);
}
}
}.impl);
}
test "readFileAlloc" {
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();
- var file = try tmp_dir.dir.createFile("test_file", .{ .read = true });
- defer file.close();
+ var file = try tmp_dir.dir.createFile(io, "test_file", .{ .read = true });
+ defer file.close(io);
- const buf1 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024));
+ const buf1 = try tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(1024));
defer testing.allocator.free(buf1);
- try testing.expectEqualStrings("", buf1);
+ try expectEqualStrings("", buf1);
const write_buf: []const u8 = "this is a test.\nthis is a test.\nthis is a test.\nthis is a test.\n";
- try file.writeAll(write_buf);
+ try file.writeStreamingAll(io, write_buf);
{
// max_bytes > file_size
- const buf2 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024));
+ const buf2 = try tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(1024));
defer testing.allocator.free(buf2);
- try testing.expectEqualStrings(write_buf, buf2);
+ try expectEqualStrings(write_buf, buf2);
}
{
// max_bytes == file_size
- try testing.expectError(
+ try expectError(
error.StreamTooLong,
- tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len)),
+ tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(write_buf.len)),
);
}
{
// max_bytes == file_size + 1
- const buf2 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len + 1));
+ const buf2 = try tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(write_buf.len + 1));
defer testing.allocator.free(buf2);
- try testing.expectEqualStrings(write_buf, buf2);
+ try expectEqualStrings(write_buf, buf2);
}
// max_bytes < file_size
- try testing.expectError(
+ try expectError(
error.StreamTooLong,
- tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len - 1)),
+ tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(write_buf.len - 1)),
);
}
test "Dir.statFile" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
- const test_file_name = try ctx.transformPath("test_file");
+ const io = ctx.io;
+ {
+ const test_file_name = try ctx.transformPath("test_file");
- try testing.expectError(error.FileNotFound, ctx.dir.statFile(test_file_name));
+ try expectError(error.FileNotFound, ctx.dir.statFile(io, test_file_name, .{}));
- try ctx.dir.writeFile(.{ .sub_path = test_file_name, .data = "" });
+ try ctx.dir.writeFile(io, .{ .sub_path = test_file_name, .data = "" });
- const stat = try ctx.dir.statFile(test_file_name);
- try testing.expectEqual(File.Kind.file, stat.kind);
+ const stat = try ctx.dir.statFile(io, test_file_name, .{});
+ try expectEqual(.file, stat.kind);
+ }
+ {
+ const test_dir_name = try ctx.transformPath("test_dir");
+
+ try expectError(error.FileNotFound, ctx.dir.statFile(io, test_dir_name, .{}));
+
+ try ctx.dir.createDir(io, test_dir_name, .default_dir);
+
+ const stat = try ctx.dir.statFile(io, test_dir_name, .{});
+ try expectEqual(.directory, stat.kind);
+ }
}
}.impl);
}
@@ -802,12 +766,13 @@ test "Dir.statFile" {
test "statFile on dangling symlink" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const symlink_name = try ctx.transformPath("dangling-symlink");
- const symlink_target = "." ++ fs.path.sep_str ++ "doesnotexist";
+ const symlink_target = "." ++ Dir.path.sep_str ++ "doesnotexist";
- try setupSymlink(ctx.dir, symlink_target, symlink_name, .{});
+ try setupSymlink(io, ctx.dir, symlink_target, symlink_name, .{});
- try std.testing.expectError(error.FileNotFound, ctx.dir.statFile(symlink_name));
+ try expectError(error.FileNotFound, ctx.dir.statFile(io, symlink_name, .{}));
}
}.impl);
}
@@ -815,25 +780,27 @@ test "statFile on dangling symlink" {
test "directory operations on files" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
+
const test_file_name = try ctx.transformPath("test_file");
- var file = try ctx.dir.createFile(test_file_name, .{ .read = true });
- file.close();
+ var file = try ctx.dir.createFile(io, test_file_name, .{ .read = true });
+ file.close(io);
- try testing.expectError(error.PathAlreadyExists, ctx.dir.makeDir(test_file_name));
- try testing.expectError(error.NotDir, ctx.dir.openDir(test_file_name, .{}));
- try testing.expectError(error.NotDir, ctx.dir.deleteDir(test_file_name));
+ try expectError(error.PathAlreadyExists, ctx.dir.createDir(io, test_file_name, .default_dir));
+ try expectError(error.NotDir, ctx.dir.openDir(io, test_file_name, .{}));
+ try expectError(error.NotDir, ctx.dir.deleteDir(io, test_file_name));
if (ctx.path_type == .absolute and comptime PathType.absolute.isSupported(builtin.os)) {
- try testing.expectError(error.PathAlreadyExists, fs.makeDirAbsolute(test_file_name));
- try testing.expectError(error.NotDir, fs.deleteDirAbsolute(test_file_name));
+ try expectError(error.PathAlreadyExists, Dir.createDirAbsolute(io, test_file_name, .default_dir));
+ try expectError(error.NotDir, Dir.deleteDirAbsolute(io, test_file_name));
}
// ensure the file still exists and is a file as a sanity check
- file = try ctx.dir.openFile(test_file_name, .{});
- const stat = try file.stat();
- try testing.expectEqual(File.Kind.file, stat.kind);
- file.close();
+ file = try ctx.dir.openFile(io, test_file_name, .{});
+ const stat = try file.stat(io);
+ try expectEqual(File.Kind.file, stat.kind);
+ file.close(io);
}
}.impl);
}
@@ -842,81 +809,91 @@ test "file operations on directories" {
// TODO: fix this test on FreeBSD. https://github.com/ziglang/zig/issues/1759
if (native_os == .freebsd) return error.SkipZigTest;
+ const io = testing.io;
+
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const test_dir_name = try ctx.transformPath("test_dir");
- try ctx.dir.makeDir(test_dir_name);
+ try ctx.dir.createDir(io, test_dir_name, .default_dir);
- try testing.expectError(error.IsDir, ctx.dir.createFile(test_dir_name, .{}));
- try testing.expectError(error.IsDir, ctx.dir.deleteFile(test_dir_name));
+ try expectError(error.IsDir, ctx.dir.createFile(io, test_dir_name, .{}));
+ try expectError(error.IsDir, ctx.dir.deleteFile(io, test_dir_name));
switch (native_os) {
.dragonfly, .netbsd => {
// no error when reading a directory. See https://github.com/ziglang/zig/issues/5732
- const buf = try ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited);
+ const buf = try ctx.dir.readFileAlloc(io, test_dir_name, testing.allocator, .unlimited);
testing.allocator.free(buf);
},
.wasi => {
// WASI return EBADF, which gets mapped to NotOpenForReading.
// See https://github.com/bytecodealliance/wasmtime/issues/1935
- try testing.expectError(error.NotOpenForReading, ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited));
+ try expectError(error.NotOpenForReading, ctx.dir.readFileAlloc(io, test_dir_name, testing.allocator, .unlimited));
},
else => {
- try testing.expectError(error.IsDir, ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited));
+ try expectError(error.IsDir, ctx.dir.readFileAlloc(io, test_dir_name, testing.allocator, .unlimited));
},
}
if (native_os == .wasi and builtin.link_libc) {
// wasmtime unexpectedly succeeds here, see https://github.com/ziglang/zig/issues/20747
- const handle = try ctx.dir.openFile(test_dir_name, .{ .mode = .read_write });
- handle.close();
+ const handle = try ctx.dir.openFile(io, test_dir_name, .{ .mode = .read_write });
+ handle.close(io);
} else {
// Note: The `.mode = .read_write` is necessary to ensure the error occurs on all platforms.
- // TODO: Add a read-only test as well, see https://github.com/ziglang/zig/issues/5732
- try testing.expectError(error.IsDir, ctx.dir.openFile(test_dir_name, .{ .mode = .read_write }));
+ try expectError(error.IsDir, ctx.dir.openFile(io, test_dir_name, .{ .mode = .read_write }));
}
+ {
+ const handle = try ctx.dir.openFile(io, test_dir_name, .{ .allow_directory = true, .mode = .read_only });
+ handle.close(io);
+ }
+ try expectError(error.IsDir, ctx.dir.openFile(io, test_dir_name, .{ .allow_directory = false, .mode = .read_only }));
+
if (ctx.path_type == .absolute and comptime PathType.absolute.isSupported(builtin.os)) {
- try testing.expectError(error.IsDir, fs.createFileAbsolute(test_dir_name, .{}));
- try testing.expectError(error.IsDir, fs.deleteFileAbsolute(test_dir_name));
+ try expectError(error.IsDir, Dir.createFileAbsolute(io, test_dir_name, .{}));
+ try expectError(error.IsDir, Dir.deleteFileAbsolute(io, test_dir_name));
}
// ensure the directory still exists as a sanity check
- var dir = try ctx.dir.openDir(test_dir_name, .{});
- dir.close();
+ var dir = try ctx.dir.openDir(io, test_dir_name, .{});
+ dir.close(io);
}
}.impl);
}
-test "makeOpenPath parent dirs do not exist" {
+test "createDirPathOpen parent dirs do not exist" {
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();
- var dir = try tmp_dir.dir.makeOpenPath("root_dir/parent_dir/some_dir", .{});
- dir.close();
+ var dir = try tmp_dir.dir.createDirPathOpen(io, "root_dir/parent_dir/some_dir", .{});
+ dir.close(io);
// double check that the full directory structure was created
- var dir_verification = try tmp_dir.dir.openDir("root_dir/parent_dir/some_dir", .{});
- dir_verification.close();
+ var dir_verification = try tmp_dir.dir.openDir(io, "root_dir/parent_dir/some_dir", .{});
+ dir_verification.close(io);
}
test "deleteDir" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const test_dir_path = try ctx.transformPath("test_dir");
- const test_file_path = try ctx.transformPath("test_dir" ++ fs.path.sep_str ++ "test_file");
+ const test_file_path = try ctx.transformPath("test_dir" ++ Dir.path.sep_str ++ "test_file");
// deleting a non-existent directory
- try testing.expectError(error.FileNotFound, ctx.dir.deleteDir(test_dir_path));
+ try expectError(error.FileNotFound, ctx.dir.deleteDir(io, test_dir_path));
// deleting a non-empty directory
- try ctx.dir.makeDir(test_dir_path);
- try ctx.dir.writeFile(.{ .sub_path = test_file_path, .data = "" });
- try testing.expectError(error.DirNotEmpty, ctx.dir.deleteDir(test_dir_path));
+ try ctx.dir.createDir(io, test_dir_path, .default_dir);
+ try ctx.dir.writeFile(io, .{ .sub_path = test_file_path, .data = "" });
+ try expectError(error.DirNotEmpty, ctx.dir.deleteDir(io, test_dir_path));
// deleting an empty directory
- try ctx.dir.deleteFile(test_file_path);
- try ctx.dir.deleteDir(test_dir_path);
+ try ctx.dir.deleteFile(io, test_file_path);
+ try ctx.dir.deleteDir(io, test_dir_path);
}
}.impl);
}
@@ -924,6 +901,7 @@ test "deleteDir" {
test "Dir.rename files" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
// Rename on Windows can hit intermittent AccessDenied errors
// when certain conditions are true about the host system.
// For now, skip this test when the path type is UNC to avoid them.
@@ -933,32 +911,32 @@ test "Dir.rename files" {
const missing_file_path = try ctx.transformPath("missing_file_name");
const something_else_path = try ctx.transformPath("something_else");
- try testing.expectError(error.FileNotFound, ctx.dir.rename(missing_file_path, something_else_path));
+ try expectError(error.FileNotFound, ctx.dir.rename(missing_file_path, ctx.dir, something_else_path, io));
// Renaming files
const test_file_name = try ctx.transformPath("test_file");
const renamed_test_file_name = try ctx.transformPath("test_file_renamed");
- var file = try ctx.dir.createFile(test_file_name, .{ .read = true });
- file.close();
- try ctx.dir.rename(test_file_name, renamed_test_file_name);
+ var file = try ctx.dir.createFile(io, test_file_name, .{ .read = true });
+ file.close(io);
+ try ctx.dir.rename(test_file_name, ctx.dir, renamed_test_file_name, io);
// Ensure the file was renamed
- try testing.expectError(error.FileNotFound, ctx.dir.openFile(test_file_name, .{}));
- file = try ctx.dir.openFile(renamed_test_file_name, .{});
- file.close();
+ try expectError(error.FileNotFound, ctx.dir.openFile(io, test_file_name, .{}));
+ file = try ctx.dir.openFile(io, renamed_test_file_name, .{});
+ file.close(io);
// Rename to self succeeds
- try ctx.dir.rename(renamed_test_file_name, renamed_test_file_name);
+ try ctx.dir.rename(renamed_test_file_name, ctx.dir, renamed_test_file_name, io);
// Rename to existing file succeeds
const existing_file_path = try ctx.transformPath("existing_file");
- var existing_file = try ctx.dir.createFile(existing_file_path, .{ .read = true });
- existing_file.close();
- try ctx.dir.rename(renamed_test_file_name, existing_file_path);
+ var existing_file = try ctx.dir.createFile(io, existing_file_path, .{ .read = true });
+ existing_file.close(io);
+ try ctx.dir.rename(renamed_test_file_name, ctx.dir, existing_file_path, io);
- try testing.expectError(error.FileNotFound, ctx.dir.openFile(renamed_test_file_name, .{}));
- file = try ctx.dir.openFile(existing_file_path, .{});
- file.close();
+ try expectError(error.FileNotFound, ctx.dir.openFile(io, renamed_test_file_name, .{}));
+ file = try ctx.dir.openFile(io, existing_file_path, .{});
+ file.close(io);
}
}.impl);
}
@@ -966,6 +944,8 @@ test "Dir.rename files" {
test "Dir.rename directories" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
+
// Rename on Windows can hit intermittent AccessDenied errors
// when certain conditions are true about the host system.
// For now, skip this test when the path type is UNC to avoid them.
@@ -976,27 +956,27 @@ test "Dir.rename directories" {
const test_dir_renamed_path = try ctx.transformPath("test_dir_renamed");
// Renaming directories
- try ctx.dir.makeDir(test_dir_path);
- try ctx.dir.rename(test_dir_path, test_dir_renamed_path);
+ try ctx.dir.createDir(io, test_dir_path, .default_dir);
+ try ctx.dir.rename(test_dir_path, ctx.dir, test_dir_renamed_path, io);
// Ensure the directory was renamed
- try testing.expectError(error.FileNotFound, ctx.dir.openDir(test_dir_path, .{}));
- var dir = try ctx.dir.openDir(test_dir_renamed_path, .{});
+ try expectError(error.FileNotFound, ctx.dir.openDir(io, test_dir_path, .{}));
+ var dir = try ctx.dir.openDir(io, test_dir_renamed_path, .{});
// Put a file in the directory
- var file = try dir.createFile("test_file", .{ .read = true });
- file.close();
- dir.close();
+ var file = try dir.createFile(io, "test_file", .{ .read = true });
+ file.close(io);
+ dir.close(io);
const test_dir_renamed_again_path = try ctx.transformPath("test_dir_renamed_again");
- try ctx.dir.rename(test_dir_renamed_path, test_dir_renamed_again_path);
+ try ctx.dir.rename(test_dir_renamed_path, ctx.dir, test_dir_renamed_again_path, io);
// Ensure the directory was renamed and the file still exists in it
- try testing.expectError(error.FileNotFound, ctx.dir.openDir(test_dir_renamed_path, .{}));
- dir = try ctx.dir.openDir(test_dir_renamed_again_path, .{});
- file = try dir.openFile("test_file", .{});
- file.close();
- dir.close();
+ try expectError(error.FileNotFound, ctx.dir.openDir(io, test_dir_renamed_path, .{}));
+ dir = try ctx.dir.openDir(io, test_dir_renamed_again_path, .{});
+ file = try dir.openFile(io, "test_file", .{});
+ file.close(io);
+ dir.close(io);
}
}.impl);
}
@@ -1007,17 +987,19 @@ test "Dir.rename directory onto empty dir" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
+
const test_dir_path = try ctx.transformPath("test_dir");
const target_dir_path = try ctx.transformPath("target_dir_path");
- try ctx.dir.makeDir(test_dir_path);
- try ctx.dir.makeDir(target_dir_path);
- try ctx.dir.rename(test_dir_path, target_dir_path);
+ try ctx.dir.createDir(io, test_dir_path, .default_dir);
+ try ctx.dir.createDir(io, target_dir_path, .default_dir);
+ try ctx.dir.rename(test_dir_path, ctx.dir, target_dir_path, io);
// Ensure the directory was renamed
- try testing.expectError(error.FileNotFound, ctx.dir.openDir(test_dir_path, .{}));
- var dir = try ctx.dir.openDir(target_dir_path, .{});
- dir.close();
+ try expectError(error.FileNotFound, ctx.dir.openDir(io, test_dir_path, .{}));
+ var dir = try ctx.dir.openDir(io, target_dir_path, .{});
+ dir.close(io);
}
}.impl);
}
@@ -1028,22 +1010,23 @@ test "Dir.rename directory onto non-empty dir" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const test_dir_path = try ctx.transformPath("test_dir");
const target_dir_path = try ctx.transformPath("target_dir_path");
- try ctx.dir.makeDir(test_dir_path);
+ try ctx.dir.createDir(io, test_dir_path, .default_dir);
- var target_dir = try ctx.dir.makeOpenPath(target_dir_path, .{});
- var file = try target_dir.createFile("test_file", .{ .read = true });
- file.close();
- target_dir.close();
+ var target_dir = try ctx.dir.createDirPathOpen(io, target_dir_path, .{});
+ var file = try target_dir.createFile(io, "test_file", .{ .read = true });
+ file.close(io);
+ target_dir.close(io);
// Rename should fail with PathAlreadyExists if target_dir is non-empty
- try testing.expectError(error.PathAlreadyExists, ctx.dir.rename(test_dir_path, target_dir_path));
+ try expectError(error.PathAlreadyExists, ctx.dir.rename(test_dir_path, ctx.dir, target_dir_path, io));
// Ensure the directory was not renamed
- var dir = try ctx.dir.openDir(test_dir_path, .{});
- dir.close();
+ var dir = try ctx.dir.openDir(io, test_dir_path, .{});
+ dir.close(io);
}
}.impl);
}
@@ -1054,19 +1037,22 @@ test "Dir.rename file <-> dir" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const test_file_path = try ctx.transformPath("test_file");
const test_dir_path = try ctx.transformPath("test_dir");
- var file = try ctx.dir.createFile(test_file_path, .{ .read = true });
- file.close();
- try ctx.dir.makeDir(test_dir_path);
- try testing.expectError(error.IsDir, ctx.dir.rename(test_file_path, test_dir_path));
- try testing.expectError(error.NotDir, ctx.dir.rename(test_dir_path, test_file_path));
+ var file = try ctx.dir.createFile(io, test_file_path, .{ .read = true });
+ file.close(io);
+ try ctx.dir.createDir(io, test_dir_path, .default_dir);
+ try expectError(error.IsDir, ctx.dir.rename(test_file_path, ctx.dir, test_dir_path, io));
+ try expectError(error.NotDir, ctx.dir.rename(test_dir_path, ctx.dir, test_file_path, io));
}
}.impl);
}
test "rename" {
+ const io = testing.io;
+
var tmp_dir1 = tmpDir(.{});
defer tmp_dir1.cleanup();
@@ -1076,20 +1062,22 @@ test "rename" {
// Renaming files
const test_file_name = "test_file";
const renamed_test_file_name = "test_file_renamed";
- var file = try tmp_dir1.dir.createFile(test_file_name, .{ .read = true });
- file.close();
- try fs.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name);
+ var file = try tmp_dir1.dir.createFile(io, test_file_name, .{ .read = true });
+ file.close(io);
+ try Dir.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name, io);
// ensure the file was renamed
- try testing.expectError(error.FileNotFound, tmp_dir1.dir.openFile(test_file_name, .{}));
- file = try tmp_dir2.dir.openFile(renamed_test_file_name, .{});
- file.close();
+ try expectError(error.FileNotFound, tmp_dir1.dir.openFile(io, test_file_name, .{}));
+ file = try tmp_dir2.dir.openFile(io, renamed_test_file_name, .{});
+ file.close(io);
}
test "renameAbsolute" {
if (native_os == .wasi) return error.SkipZigTest;
if (native_os == .openbsd) return error.SkipZigTest;
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();
@@ -1098,289 +1086,359 @@ test "renameAbsolute" {
defer arena.deinit();
const allocator = arena.allocator();
- const base_path = try tmp_dir.dir.realpathAlloc(allocator, ".");
+ const base_path = try tmp_dir.dir.realPathFileAlloc(io, ".", allocator);
- try testing.expectError(error.FileNotFound, fs.renameAbsolute(
- try fs.path.join(allocator, &.{ base_path, "missing_file_name" }),
- try fs.path.join(allocator, &.{ base_path, "something_else" }),
+ try expectError(error.FileNotFound, Dir.renameAbsolute(
+ try Dir.path.join(allocator, &.{ base_path, "missing_file_name" }),
+ try Dir.path.join(allocator, &.{ base_path, "something_else" }),
+ io,
));
// Renaming files
const test_file_name = "test_file";
const renamed_test_file_name = "test_file_renamed";
- var file = try tmp_dir.dir.createFile(test_file_name, .{ .read = true });
- file.close();
- try fs.renameAbsolute(
- try fs.path.join(allocator, &.{ base_path, test_file_name }),
- try fs.path.join(allocator, &.{ base_path, renamed_test_file_name }),
+ var file = try tmp_dir.dir.createFile(io, test_file_name, .{ .read = true });
+ file.close(io);
+ try Dir.renameAbsolute(
+ try Dir.path.join(allocator, &.{ base_path, test_file_name }),
+ try Dir.path.join(allocator, &.{ base_path, renamed_test_file_name }),
+ io,
);
// ensure the file was renamed
- try testing.expectError(error.FileNotFound, tmp_dir.dir.openFile(test_file_name, .{}));
- file = try tmp_dir.dir.openFile(renamed_test_file_name, .{});
- const stat = try file.stat();
- try testing.expectEqual(File.Kind.file, stat.kind);
- file.close();
+ try expectError(error.FileNotFound, tmp_dir.dir.openFile(io, test_file_name, .{}));
+ file = try tmp_dir.dir.openFile(io, renamed_test_file_name, .{});
+ const stat = try file.stat(io);
+ try expectEqual(File.Kind.file, stat.kind);
+ file.close(io);
// Renaming directories
const test_dir_name = "test_dir";
const renamed_test_dir_name = "test_dir_renamed";
- try tmp_dir.dir.makeDir(test_dir_name);
- try fs.renameAbsolute(
- try fs.path.join(allocator, &.{ base_path, test_dir_name }),
- try fs.path.join(allocator, &.{ base_path, renamed_test_dir_name }),
+ try tmp_dir.dir.createDir(io, test_dir_name, .default_dir);
+ try Dir.renameAbsolute(
+ try Dir.path.join(allocator, &.{ base_path, test_dir_name }),
+ try Dir.path.join(allocator, &.{ base_path, renamed_test_dir_name }),
+ io,
);
// ensure the directory was renamed
- try testing.expectError(error.FileNotFound, tmp_dir.dir.openDir(test_dir_name, .{}));
- var dir = try tmp_dir.dir.openDir(renamed_test_dir_name, .{});
- dir.close();
+ try expectError(error.FileNotFound, tmp_dir.dir.openDir(io, test_dir_name, .{}));
+ var dir = try tmp_dir.dir.openDir(io, renamed_test_dir_name, .{});
+ dir.close(io);
}
-test "openSelfExe" {
+test "openExecutable" {
if (native_os == .wasi) return error.SkipZigTest;
- const self_exe_file = try std.fs.openSelfExe(.{});
- self_exe_file.close();
+ const io = testing.io;
+
+ const self_exe_file = try std.process.openExecutable(io, .{});
+ self_exe_file.close(io);
}
-test "selfExePath" {
+test "executablePath" {
if (native_os == .wasi) return error.SkipZigTest;
- var buf: [fs.max_path_bytes]u8 = undefined;
- const buf_self_exe_path = try std.fs.selfExePath(&buf);
- const alloc_self_exe_path = try std.fs.selfExePathAlloc(testing.allocator);
+ const io = testing.io;
+ var buf: [Dir.max_path_bytes]u8 = undefined;
+ const len = try std.process.executablePath(io, &buf);
+ const buf_self_exe_path = buf[0..len];
+ const alloc_self_exe_path = try std.process.executablePathAlloc(io, testing.allocator);
defer testing.allocator.free(alloc_self_exe_path);
- try testing.expectEqualSlices(u8, buf_self_exe_path, alloc_self_exe_path);
+ try expectEqualSlices(u8, buf_self_exe_path, alloc_self_exe_path);
}
test "deleteTree does not follow symlinks" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
- try tmp.dir.makePath("b");
+ try tmp.dir.createDirPath(io, "b");
{
- var a = try tmp.dir.makeOpenPath("a", .{});
- defer a.close();
+ var a = try tmp.dir.createDirPathOpen(io, "a", .{});
+ defer a.close(io);
- try setupSymlink(a, "../b", "b", .{ .is_directory = true });
+ try setupSymlink(io, a, "../b", "b", .{ .is_directory = true });
}
- try tmp.dir.deleteTree("a");
+ try tmp.dir.deleteTree(io, "a");
- try testing.expectError(error.FileNotFound, tmp.dir.access("a", .{}));
- try tmp.dir.access("b", .{});
+ try expectError(error.FileNotFound, tmp.dir.access(io, "a", .{}));
+ try tmp.dir.access(io, "b", .{});
}
test "deleteTree on a symlink" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
// Symlink to a file
- try tmp.dir.writeFile(.{ .sub_path = "file", .data = "" });
- try setupSymlink(tmp.dir, "file", "filelink", .{});
+ try tmp.dir.writeFile(io, .{ .sub_path = "file", .data = "" });
+ try setupSymlink(io, tmp.dir, "file", "filelink", .{});
- try tmp.dir.deleteTree("filelink");
- try testing.expectError(error.FileNotFound, tmp.dir.access("filelink", .{}));
- try tmp.dir.access("file", .{});
+ try tmp.dir.deleteTree(io, "filelink");
+ try expectError(error.FileNotFound, tmp.dir.access(io, "filelink", .{}));
+ try tmp.dir.access(io, "file", .{});
// Symlink to a directory
- try tmp.dir.makePath("dir");
- try setupSymlink(tmp.dir, "dir", "dirlink", .{ .is_directory = true });
+ try tmp.dir.createDirPath(io, "dir");
+ try setupSymlink(io, tmp.dir, "dir", "dirlink", .{ .is_directory = true });
- try tmp.dir.deleteTree("dirlink");
- try testing.expectError(error.FileNotFound, tmp.dir.access("dirlink", .{}));
- try tmp.dir.access("dir", .{});
+ try tmp.dir.deleteTree(io, "dirlink");
+ try expectError(error.FileNotFound, tmp.dir.access(io, "dirlink", .{}));
+ try tmp.dir.access(io, "dir", .{});
}
-test "makePath, put some files in it, deleteTree" {
+test "createDirPath, put some files in it, deleteTree" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const allocator = ctx.arena.allocator();
const dir_path = try ctx.transformPath("os_test_tmp");
- try ctx.dir.makePath(try fs.path.join(allocator, &.{ "os_test_tmp", "b", "c" }));
- try ctx.dir.writeFile(.{
- .sub_path = try fs.path.join(allocator, &.{ "os_test_tmp", "b", "c", "file.txt" }),
+ try ctx.dir.createDirPath(io, try Dir.path.join(allocator, &.{ "os_test_tmp", "b", "c" }));
+ try ctx.dir.writeFile(io, .{
+ .sub_path = try Dir.path.join(allocator, &.{ "os_test_tmp", "b", "c", "file.txt" }),
.data = "nonsense",
});
- try ctx.dir.writeFile(.{
- .sub_path = try fs.path.join(allocator, &.{ "os_test_tmp", "b", "file2.txt" }),
+ try ctx.dir.writeFile(io, .{
+ .sub_path = try Dir.path.join(allocator, &.{ "os_test_tmp", "b", "file2.txt" }),
.data = "blah",
});
- try ctx.dir.deleteTree(dir_path);
- try testing.expectError(error.FileNotFound, ctx.dir.openDir(dir_path, .{}));
+ try ctx.dir.deleteTree(io, dir_path);
+ try expectError(error.FileNotFound, ctx.dir.openDir(io, dir_path, .{}));
}
}.impl);
}
-test "makePath, put some files in it, deleteTreeMinStackSize" {
+test "createDirPath, put some files in it, deleteTreeMinStackSize" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const allocator = ctx.arena.allocator();
const dir_path = try ctx.transformPath("os_test_tmp");
- try ctx.dir.makePath(try fs.path.join(allocator, &.{ "os_test_tmp", "b", "c" }));
- try ctx.dir.writeFile(.{
- .sub_path = try fs.path.join(allocator, &.{ "os_test_tmp", "b", "c", "file.txt" }),
+ try ctx.dir.createDirPath(io, try Dir.path.join(allocator, &.{ "os_test_tmp", "b", "c" }));
+ try ctx.dir.writeFile(io, .{
+ .sub_path = try Dir.path.join(allocator, &.{ "os_test_tmp", "b", "c", "file.txt" }),
.data = "nonsense",
});
- try ctx.dir.writeFile(.{
- .sub_path = try fs.path.join(allocator, &.{ "os_test_tmp", "b", "file2.txt" }),
+ try ctx.dir.writeFile(io, .{
+ .sub_path = try Dir.path.join(allocator, &.{ "os_test_tmp", "b", "file2.txt" }),
.data = "blah",
});
- try ctx.dir.deleteTreeMinStackSize(dir_path);
- try testing.expectError(error.FileNotFound, ctx.dir.openDir(dir_path, .{}));
+ try ctx.dir.deleteTreeMinStackSize(io, dir_path);
+ try expectError(error.FileNotFound, ctx.dir.openDir(io, dir_path, .{}));
}
}.impl);
}
-test "makePath in a directory that no longer exists" {
+test "createDirPath in a directory that no longer exists" {
if (native_os == .windows) return error.SkipZigTest; // Windows returns FileBusy if attempting to remove an open dir
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
- try tmp.parent_dir.deleteTree(&tmp.sub_path);
+ try tmp.parent_dir.deleteTree(io, &tmp.sub_path);
- try testing.expectError(error.FileNotFound, tmp.dir.makePath("sub-path"));
+ try expectError(error.FileNotFound, tmp.dir.createDirPath(io, "sub-path"));
}
-test "makePath but sub_path contains pre-existing file" {
+test "createDirPath but sub_path contains pre-existing file" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
- try tmp.dir.makeDir("foo");
- try tmp.dir.writeFile(.{ .sub_path = "foo/bar", .data = "" });
+ try tmp.dir.createDir(io, "foo", .default_dir);
+ try tmp.dir.writeFile(io, .{ .sub_path = "foo/bar", .data = "" });
- try testing.expectError(error.NotDir, tmp.dir.makePath("foo/bar/baz"));
+ try expectError(error.NotDir, tmp.dir.createDirPath(io, "foo/bar/baz"));
}
-fn expectDir(dir: Dir, path: []const u8) !void {
- var d = try dir.openDir(path, .{});
- d.close();
+fn expectDir(io: Io, dir: Dir, path: []const u8) !void {
+ var d = try dir.openDir(io, path, .{});
+ d.close(io);
}
test "makepath existing directories" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
- try tmp.dir.makeDir("A");
- var tmpA = try tmp.dir.openDir("A", .{});
- defer tmpA.close();
- try tmpA.makeDir("B");
+ try tmp.dir.createDir(io, "A", .default_dir);
+ var tmpA = try tmp.dir.openDir(io, "A", .{});
+ defer tmpA.close(io);
+ try tmpA.createDir(io, "B", .default_dir);
- const testPath = "A" ++ fs.path.sep_str ++ "B" ++ fs.path.sep_str ++ "C";
- try tmp.dir.makePath(testPath);
+ const testPath = "A" ++ Dir.path.sep_str ++ "B" ++ Dir.path.sep_str ++ "C";
+ try tmp.dir.createDirPath(io, testPath);
- try expectDir(tmp.dir, testPath);
+ try expectDir(io, tmp.dir, testPath);
}
test "makepath through existing valid symlink" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
- try tmp.dir.makeDir("realfolder");
- try setupSymlink(tmp.dir, "." ++ fs.path.sep_str ++ "realfolder", "working-symlink", .{});
+ try tmp.dir.createDir(io, "realfolder", .default_dir);
+ try setupSymlink(io, tmp.dir, "." ++ Dir.path.sep_str ++ "realfolder", "working-symlink", .{});
- try tmp.dir.makePath("working-symlink" ++ fs.path.sep_str ++ "in-realfolder");
+ try tmp.dir.createDirPath(io, "working-symlink" ++ Dir.path.sep_str ++ "in-realfolder");
- try expectDir(tmp.dir, "realfolder" ++ fs.path.sep_str ++ "in-realfolder");
+ try expectDir(io, tmp.dir, "realfolder" ++ Dir.path.sep_str ++ "in-realfolder");
}
test "makepath relative walks" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
- const relPath = try fs.path.join(testing.allocator, &.{
+ const relPath = try Dir.path.join(testing.allocator, &.{
"first", "..", "second", "..", "third", "..", "first", "A", "..", "B", "..", "C",
});
defer testing.allocator.free(relPath);
- try tmp.dir.makePath(relPath);
+ try tmp.dir.createDirPath(io, relPath);
// How .. is handled is different on Windows than non-Windows
switch (native_os) {
.windows => {
// On Windows, .. is resolved before passing the path to NtCreateFile,
// meaning everything except `first/C` drops out.
- try expectDir(tmp.dir, "first" ++ fs.path.sep_str ++ "C");
- try testing.expectError(error.FileNotFound, tmp.dir.access("second", .{}));
- try testing.expectError(error.FileNotFound, tmp.dir.access("third", .{}));
+ try expectDir(io, tmp.dir, "first" ++ Dir.path.sep_str ++ "C");
+ try expectError(error.FileNotFound, tmp.dir.access(io, "second", .{}));
+ try expectError(error.FileNotFound, tmp.dir.access(io, "third", .{}));
},
else => {
- try expectDir(tmp.dir, "first" ++ fs.path.sep_str ++ "A");
- try expectDir(tmp.dir, "first" ++ fs.path.sep_str ++ "B");
- try expectDir(tmp.dir, "first" ++ fs.path.sep_str ++ "C");
- try expectDir(tmp.dir, "second");
- try expectDir(tmp.dir, "third");
+ try expectDir(io, tmp.dir, "first" ++ Dir.path.sep_str ++ "A");
+ try expectDir(io, tmp.dir, "first" ++ Dir.path.sep_str ++ "B");
+ try expectDir(io, tmp.dir, "first" ++ Dir.path.sep_str ++ "C");
+ try expectDir(io, tmp.dir, "second");
+ try expectDir(io, tmp.dir, "third");
},
}
}
test "makepath ignores '.'" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
// Path to create, with "." elements:
- const dotPath = try fs.path.join(testing.allocator, &.{
+ const dotPath = try Dir.path.join(testing.allocator, &.{
"first", ".", "second", ".", "third",
});
defer testing.allocator.free(dotPath);
// Path to expect to find:
- const expectedPath = try fs.path.join(testing.allocator, &.{
+ const expectedPath = try Dir.path.join(testing.allocator, &.{
"first", "second", "third",
});
defer testing.allocator.free(expectedPath);
- try tmp.dir.makePath(dotPath);
+ try tmp.dir.createDirPath(io, dotPath);
- try expectDir(tmp.dir, expectedPath);
+ try expectDir(io, tmp.dir, expectedPath);
}
-fn testFilenameLimits(iterable_dir: Dir, maxed_filename: []const u8) !void {
- // setup, create a dir and a nested file both with maxed filenames, and walk the dir
+fn testFilenameLimits(io: Io, iterable_dir: Dir, maxed_filename: []const u8, maxed_dirname: []const u8) !void {
+ // create a file, a dir, and a nested file all with maxed filenames
{
- var maxed_dir = try iterable_dir.makeOpenPath(maxed_filename, .{});
- defer maxed_dir.close();
+ try iterable_dir.writeFile(io, .{ .sub_path = maxed_filename, .data = "" });
- try maxed_dir.writeFile(.{ .sub_path = maxed_filename, .data = "" });
+ var maxed_dir = try iterable_dir.createDirPathOpen(io, maxed_dirname, .{});
+ defer maxed_dir.close(io);
+ try maxed_dir.writeFile(io, .{ .sub_path = maxed_filename, .data = "" });
+ }
+ // Low level API with minimum buffer length
+ {
+ var reader_buf: [Dir.Reader.min_buffer_len]u8 align(@alignOf(usize)) = undefined;
+ var reader: Dir.Reader = .init(iterable_dir, &reader_buf);
+
+ var file_count: usize = 0;
+ var dir_count: usize = 0;
+ while (try reader.next(io)) |entry| {
+ switch (entry.kind) {
+ .file => {
+ try expectEqualStrings(maxed_filename, entry.name);
+ file_count += 1;
+ },
+ .directory => {
+ try expectEqualStrings(maxed_dirname, entry.name);
+ dir_count += 1;
+ },
+ else => return error.TestFailed,
+ }
+ }
+ try expectEqual(@as(usize, 1), file_count);
+ try expectEqual(@as(usize, 1), dir_count);
+ }
+ // High level walk API
+ {
var walker = try iterable_dir.walk(testing.allocator);
defer walker.deinit();
- var count: usize = 0;
- while (try walker.next()) |entry| {
- try testing.expectEqualStrings(maxed_filename, entry.basename);
- count += 1;
+ var file_count: usize = 0;
+ var dir_count: usize = 0;
+ while (try walker.next(io)) |entry| {
+ switch (entry.kind) {
+ .file => {
+ try expectEqualStrings(maxed_filename, entry.basename);
+ file_count += 1;
+ },
+ .directory => {
+ try expectEqualStrings(maxed_dirname, entry.basename);
+ dir_count += 1;
+ },
+ else => return error.TestFailed,
+ }
}
- try testing.expectEqual(@as(usize, 2), count);
+ try expectEqual(@as(usize, 2), file_count);
+ try expectEqual(@as(usize, 1), dir_count);
}
// ensure that we can delete the tree
- try iterable_dir.deleteTree(maxed_filename);
+ try iterable_dir.deleteTree(io, maxed_filename);
}
test "max file name component lengths" {
+ const io = testing.io;
+
var tmp = tmpDir(.{ .iterate = true });
defer tmp.cleanup();
if (native_os == .windows) {
// U+FFFF is the character with the largest code point that is encoded as a single
- // UTF-16 code unit, so Windows allows for NAME_MAX of them.
- const maxed_windows_filename = ("\u{FFFF}".*) ** windows.NAME_MAX;
- try testFilenameLimits(tmp.dir, &maxed_windows_filename);
+ // WTF-16 code unit, so Windows allows for NAME_MAX of them.
+ const maxed_windows_filename1 = ("\u{FFFF}".*) ** windows.NAME_MAX;
+ // This is also a code point that is encoded as one WTF-16 code unit, but
+ // three WTF-8 bytes, so it exercises the limits of both WTF-16 and WTF-8 encodings.
+ const maxed_windows_filename2 = ("€".*) ** windows.NAME_MAX;
+ try testFilenameLimits(io, tmp.dir, &maxed_windows_filename1, &maxed_windows_filename2);
} else if (native_os == .wasi) {
// On WASI, the maxed filename depends on the host OS, so in order for this test to
// work on any host, we need to use a length that will work for all platforms
// (i.e. the minimum max_name_bytes of all supported platforms).
- const maxed_wasi_filename = [_]u8{'1'} ** 255;
- try testFilenameLimits(tmp.dir, &maxed_wasi_filename);
+ const maxed_wasi_filename1: [255]u8 = @splat('1');
+ const maxed_wasi_filename2: [255]u8 = @splat('2');
+ try testFilenameLimits(io, tmp.dir, &maxed_wasi_filename1, &maxed_wasi_filename2);
} else {
- const maxed_ascii_filename = [_]u8{'1'} ** std.fs.max_name_bytes;
- try testFilenameLimits(tmp.dir, &maxed_ascii_filename);
+ const maxed_ascii_filename1: [Dir.max_name_bytes]u8 = @splat('1');
+ const maxed_ascii_filename2: [Dir.max_name_bytes]u8 = @splat('2');
+ try testFilenameLimits(io, tmp.dir, &maxed_ascii_filename1, &maxed_ascii_filename2);
}
}
@@ -1398,21 +1456,21 @@ test "writev, readv" {
var write_vecs: [2][]const u8 = .{ line1, line2 };
var read_vecs: [2][]u8 = .{ &buf2, &buf1 };
- var src_file = try tmp.dir.createFile("test.txt", .{ .read = true });
- defer src_file.close();
+ var src_file = try tmp.dir.createFile(io, "test.txt", .{ .read = true });
+ defer src_file.close(io);
- var writer = src_file.writerStreaming(&.{});
+ var writer = src_file.writerStreaming(io, &.{});
try writer.interface.writeVecAll(&write_vecs);
try writer.interface.flush();
- try testing.expectEqual(@as(u64, line1.len + line2.len), try src_file.getEndPos());
+ try expectEqual(@as(u64, line1.len + line2.len), try src_file.length(io));
- var reader = writer.moveToReader(io);
+ var reader = writer.moveToReader();
try reader.seekTo(0);
try reader.interface.readVecAll(&read_vecs);
- try testing.expectEqualStrings(&buf1, "line2\n");
- try testing.expectEqualStrings(&buf2, "line1\n");
- try testing.expectError(error.EndOfStream, reader.interface.readSliceAll(&buf1));
+ try expectEqualStrings(&buf1, "line2\n");
+ try expectEqualStrings(&buf2, "line1\n");
+ try expectError(error.EndOfStream, reader.interface.readSliceAll(&buf1));
}
test "pwritev, preadv" {
@@ -1428,87 +1486,37 @@ test "pwritev, preadv" {
var buf2: [line2.len]u8 = undefined;
var read_vecs: [2][]u8 = .{ &buf2, &buf1 };
- var src_file = try tmp.dir.createFile("test.txt", .{ .read = true });
- defer src_file.close();
+ var src_file = try tmp.dir.createFile(io, "test.txt", .{ .read = true });
+ defer src_file.close(io);
- var writer = src_file.writer(&.{});
+ var writer = src_file.writer(io, &.{});
try writer.seekTo(16);
try writer.interface.writeVecAll(&lines);
try writer.interface.flush();
- try testing.expectEqual(@as(u64, 16 + line1.len + line2.len), try src_file.getEndPos());
+ try expectEqual(@as(u64, 16 + line1.len + line2.len), try src_file.length(io));
- var reader = writer.moveToReader(io);
+ var reader = writer.moveToReader();
try reader.seekTo(16);
try reader.interface.readVecAll(&read_vecs);
- try testing.expectEqualStrings(&buf1, "line2\n");
- try testing.expectEqualStrings(&buf2, "line1\n");
- try testing.expectError(error.EndOfStream, reader.interface.readSliceAll(&buf1));
-}
-
-test "setEndPos" {
- // https://github.com/ziglang/zig/issues/20747 (open fd does not have write permission)
- if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest;
- if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23806
-
- const io = testing.io;
-
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- const file_name = "afile.txt";
- try tmp.dir.writeFile(.{ .sub_path = file_name, .data = "ninebytes" });
- const f = try tmp.dir.openFile(file_name, .{ .mode = .read_write });
- defer f.close();
-
- const initial_size = try f.getEndPos();
- var buffer: [32]u8 = undefined;
- var reader = f.reader(io, &.{});
-
- {
- try f.setEndPos(initial_size);
- try testing.expectEqual(initial_size, try f.getEndPos());
- try reader.seekTo(0);
- try testing.expectEqual(initial_size, try reader.interface.readSliceShort(&buffer));
- try testing.expectEqualStrings("ninebytes", buffer[0..@intCast(initial_size)]);
- }
-
- {
- const larger = initial_size + 4;
- try f.setEndPos(larger);
- try testing.expectEqual(larger, try f.getEndPos());
- try reader.seekTo(0);
- try testing.expectEqual(larger, try reader.interface.readSliceShort(&buffer));
- try testing.expectEqualStrings("ninebytes\x00\x00\x00\x00", buffer[0..@intCast(larger)]);
- }
-
- {
- const smaller = initial_size - 5;
- try f.setEndPos(smaller);
- try testing.expectEqual(smaller, try f.getEndPos());
- try reader.seekTo(0);
- try testing.expectEqual(smaller, try reader.interface.readSliceShort(&buffer));
- try testing.expectEqualStrings("nine", buffer[0..@intCast(smaller)]);
- }
-
- try f.setEndPos(0);
- try testing.expectEqual(0, try f.getEndPos());
- try reader.seekTo(0);
- try testing.expectEqual(0, try reader.interface.readSliceShort(&buffer));
+ try expectEqualStrings(&buf1, "line2\n");
+ try expectEqualStrings(&buf2, "line1\n");
+ try expectError(error.EndOfStream, reader.interface.readSliceAll(&buf1));
}
test "access file" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const dir_path = try ctx.transformPath("os_test_tmp");
- const file_path = try ctx.transformPath("os_test_tmp" ++ fs.path.sep_str ++ "file.txt");
+ const file_path = try ctx.transformPath("os_test_tmp" ++ Dir.path.sep_str ++ "file.txt");
- try ctx.dir.makePath(dir_path);
- try testing.expectError(error.FileNotFound, ctx.dir.access(file_path, .{}));
+ try ctx.dir.createDirPath(io, dir_path);
+ try expectError(error.FileNotFound, ctx.dir.access(io, file_path, .{}));
- try ctx.dir.writeFile(.{ .sub_path = file_path, .data = "" });
- try ctx.dir.access(file_path, .{});
- try ctx.dir.deleteTree(dir_path);
+ try ctx.dir.writeFile(io, .{ .sub_path = file_path, .data = "" });
+ try ctx.dir.access(io, file_path, .{});
+ try ctx.dir.deleteTree(io, dir_path);
}
}.impl);
}
@@ -1519,24 +1527,24 @@ test "sendfile" {
var tmp = tmpDir(.{});
defer tmp.cleanup();
- try tmp.dir.makePath("os_test_tmp");
+ try tmp.dir.createDirPath(io, "os_test_tmp");
- var dir = try tmp.dir.openDir("os_test_tmp", .{});
- defer dir.close();
+ var dir = try tmp.dir.openDir(io, "os_test_tmp", .{});
+ defer dir.close(io);
const line1 = "line1\n";
const line2 = "second line\n";
var vecs = [_][]const u8{ line1, line2 };
- var src_file = try dir.createFile("sendfile1.txt", .{ .read = true });
- defer src_file.close();
+ var src_file = try dir.createFile(io, "sendfile1.txt", .{ .read = true });
+ defer src_file.close(io);
{
- var fw = src_file.writer(&.{});
+ var fw = src_file.writer(io, &.{});
try fw.interface.writeVecAll(&vecs);
}
- var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true });
- defer dest_file.close();
+ var dest_file = try dir.createFile(io, "sendfile2.txt", .{ .read = true });
+ defer dest_file.close(io);
const header1 = "header1\n";
const header2 = "second header\n";
@@ -1548,16 +1556,16 @@ test "sendfile" {
var written_buf: [100]u8 = undefined;
var file_reader = src_file.reader(io, &.{});
var fallback_buffer: [50]u8 = undefined;
- var file_writer = dest_file.writer(&fallback_buffer);
+ var file_writer = dest_file.writer(io, &fallback_buffer);
try file_writer.interface.writeVecAll(&headers);
try file_reader.seekTo(1);
- try testing.expectEqual(10, try file_writer.interface.sendFileAll(&file_reader, .limited(10)));
+ try expectEqual(10, try file_writer.interface.sendFileAll(&file_reader, .limited(10)));
try file_writer.interface.writeVecAll(&trailers);
try file_writer.interface.flush();
- var fr = file_writer.moveToReader(io);
+ var fr = file_writer.moveToReader();
try fr.seekTo(0);
const amt = try fr.interface.readSliceShort(&written_buf);
- try testing.expectEqualStrings("header1\nsecond header\nine1\nsecontrailer1\nsecond trailer\n", written_buf[0..amt]);
+ try expectEqualStrings("header1\nsecond header\nine1\nsecontrailer1\nsecond trailer\n", written_buf[0..amt]);
}
test "sendfile with buffered data" {
@@ -1566,18 +1574,18 @@ test "sendfile with buffered data" {
var tmp = tmpDir(.{});
defer tmp.cleanup();
- try tmp.dir.makePath("os_test_tmp");
+ try tmp.dir.createDirPath(io, "os_test_tmp");
- var dir = try tmp.dir.openDir("os_test_tmp", .{});
- defer dir.close();
+ var dir = try tmp.dir.openDir(io, "os_test_tmp", .{});
+ defer dir.close(io);
- var src_file = try dir.createFile("sendfile1.txt", .{ .read = true });
- defer src_file.close();
+ var src_file = try dir.createFile(io, "sendfile1.txt", .{ .read = true });
+ defer src_file.close(io);
- try src_file.writeAll("AAAABBBB");
+ try src_file.writeStreamingAll(io, "AAAABBBB");
- var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true });
- defer dest_file.close();
+ var dest_file = try dir.createFile(io, "sendfile2.txt", .{ .read = true });
+ defer dest_file.close(io);
var src_buffer: [32]u8 = undefined;
var file_reader = src_file.reader(io, &src_buffer);
@@ -1586,52 +1594,54 @@ test "sendfile with buffered data" {
try file_reader.interface.fill(8);
var fallback_buffer: [32]u8 = undefined;
- var file_writer = dest_file.writer(&fallback_buffer);
+ var file_writer = dest_file.writer(io, &fallback_buffer);
- try std.testing.expectEqual(4, try file_writer.interface.sendFileAll(&file_reader, .limited(4)));
+ try expectEqual(4, try file_writer.interface.sendFileAll(&file_reader, .limited(4)));
var written_buf: [8]u8 = undefined;
- var fr = file_writer.moveToReader(io);
+ var fr = file_writer.moveToReader();
try fr.seekTo(0);
const amt = try fr.interface.readSliceShort(&written_buf);
- try std.testing.expectEqual(4, amt);
- try std.testing.expectEqualSlices(u8, "AAAA", written_buf[0..amt]);
+ try expectEqual(4, amt);
+ try expectEqualSlices(u8, "AAAA", written_buf[0..amt]);
}
test "copyFile" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const data = "u6wj+JmdF3qHsFPE BUlH2g4gJCmEz0PP";
const src_file = try ctx.transformPath("tmp_test_copy_file.txt");
const dest_file = try ctx.transformPath("tmp_test_copy_file2.txt");
const dest_file2 = try ctx.transformPath("tmp_test_copy_file3.txt");
- try ctx.dir.writeFile(.{ .sub_path = src_file, .data = data });
- defer ctx.dir.deleteFile(src_file) catch {};
+ try ctx.dir.writeFile(io, .{ .sub_path = src_file, .data = data });
+ defer ctx.dir.deleteFile(io, src_file) catch {};
- try ctx.dir.copyFile(src_file, ctx.dir, dest_file, .{});
- defer ctx.dir.deleteFile(dest_file) catch {};
+ try ctx.dir.copyFile(src_file, ctx.dir, dest_file, io, .{});
+ defer ctx.dir.deleteFile(io, dest_file) catch {};
- try ctx.dir.copyFile(src_file, ctx.dir, dest_file2, .{ .override_mode = File.default_mode });
- defer ctx.dir.deleteFile(dest_file2) catch {};
+ try ctx.dir.copyFile(src_file, ctx.dir, dest_file2, io, .{});
+ defer ctx.dir.deleteFile(io, dest_file2) catch {};
- try expectFileContents(ctx.dir, dest_file, data);
- try expectFileContents(ctx.dir, dest_file2, data);
+ try expectFileContents(io, ctx.dir, dest_file, data);
+ try expectFileContents(io, ctx.dir, dest_file2, data);
}
}.impl);
}
-fn expectFileContents(dir: Dir, file_path: []const u8, data: []const u8) !void {
- const contents = try dir.readFileAlloc(file_path, testing.allocator, .limited(1000));
+fn expectFileContents(io: Io, dir: Dir, file_path: []const u8, data: []const u8) !void {
+ const contents = try dir.readFileAlloc(io, file_path, testing.allocator, .limited(1000));
defer testing.allocator.free(contents);
- try testing.expectEqualSlices(u8, data, contents);
+ try expectEqualSlices(u8, data, contents);
}
test "AtomicFile" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const allocator = ctx.arena.allocator();
const test_out_file = try ctx.transformPath("tmp_atomic_file_test_dest.txt");
const test_content =
@@ -1641,15 +1651,15 @@ test "AtomicFile" {
{
var buffer: [100]u8 = undefined;
- var af = try ctx.dir.atomicFile(test_out_file, .{ .write_buffer = &buffer });
+ var af = try ctx.dir.atomicFile(io, test_out_file, .{ .write_buffer = &buffer });
defer af.deinit();
try af.file_writer.interface.writeAll(test_content);
try af.finish();
}
- const content = try ctx.dir.readFileAlloc(test_out_file, allocator, .limited(9999));
- try testing.expectEqualStrings(test_content, content);
+ const content = try ctx.dir.readFileAlloc(io, test_out_file, allocator, .limited(9999));
+ try expectEqualStrings(test_content, content);
- try ctx.dir.deleteFile(test_out_file);
+ try ctx.dir.deleteFile(io, test_out_file);
}
}.impl);
}
@@ -1659,13 +1669,14 @@ test "open file with exclusive nonblocking lock twice" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const filename = try ctx.transformPath("file_nonblocking_lock_test.txt");
- const file1 = try ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true });
- defer file1.close();
+ const file1 = try ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true });
+ defer file1.close(io);
- const file2 = ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true });
- try testing.expectError(error.WouldBlock, file2);
+ const file2 = ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true });
+ try expectError(error.WouldBlock, file2);
}
}.impl);
}
@@ -1675,13 +1686,14 @@ test "open file with shared and exclusive nonblocking lock" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const filename = try ctx.transformPath("file_nonblocking_lock_test.txt");
- const file1 = try ctx.dir.createFile(filename, .{ .lock = .shared, .lock_nonblocking = true });
- defer file1.close();
+ const file1 = try ctx.dir.createFile(io, filename, .{ .lock = .shared, .lock_nonblocking = true });
+ defer file1.close(io);
- const file2 = ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true });
- try testing.expectError(error.WouldBlock, file2);
+ const file2 = ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true });
+ try expectError(error.WouldBlock, file2);
}
}.impl);
}
@@ -1691,13 +1703,14 @@ test "open file with exclusive and shared nonblocking lock" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const filename = try ctx.transformPath("file_nonblocking_lock_test.txt");
- const file1 = try ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true });
- defer file1.close();
+ const file1 = try ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true });
+ defer file1.close(io);
- const file2 = ctx.dir.createFile(filename, .{ .lock = .shared, .lock_nonblocking = true });
- try testing.expectError(error.WouldBlock, file2);
+ const file2 = ctx.dir.createFile(io, filename, .{ .lock = .shared, .lock_nonblocking = true });
+ try expectError(error.WouldBlock, file2);
}
}.impl);
}
@@ -1707,39 +1720,35 @@ test "open file with exclusive lock twice, make sure second lock waits" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const filename = try ctx.transformPath("file_lock_test.txt");
- const file = try ctx.dir.createFile(filename, .{ .lock = .exclusive });
- errdefer file.close();
+ const file = try ctx.dir.createFile(io, filename, .{ .lock = .exclusive });
+ errdefer file.close(io);
const S = struct {
- fn checkFn(dir: *fs.Dir, path: []const u8, started: *std.Thread.ResetEvent, locked: *std.Thread.ResetEvent) !void {
+ fn checkFn(inner_ctx: *TestContext, path: []const u8, started: *std.Thread.ResetEvent, locked: *std.Thread.ResetEvent) !void {
started.set();
- const file1 = try dir.createFile(path, .{ .lock = .exclusive });
+ const file1 = try inner_ctx.dir.createFile(inner_ctx.io, path, .{ .lock = .exclusive });
locked.set();
- file1.close();
+ file1.close(inner_ctx.io);
}
};
var started: std.Thread.ResetEvent = .unset;
var locked: std.Thread.ResetEvent = .unset;
- const t = try std.Thread.spawn(.{}, S.checkFn, .{
- &ctx.dir,
- filename,
- &started,
- &locked,
- });
+ const t = try std.Thread.spawn(.{}, S.checkFn, .{ ctx, filename, &started, &locked });
defer t.join();
// Wait for the spawned thread to start trying to acquire the exclusive file lock.
// Then wait a bit to make sure that can't acquire it since we currently hold the file lock.
started.wait();
- try testing.expectError(error.Timeout, locked.timedWait(10 * std.time.ns_per_ms));
+ try expectError(error.Timeout, locked.timedWait(10 * std.time.ns_per_ms));
// Release the file lock which should unlock the thread to lock it and set the locked event.
- file.close();
+ file.close(io);
locked.wait();
}
}.impl);
@@ -1748,11 +1757,13 @@ test "open file with exclusive lock twice, make sure second lock waits" {
test "open file with exclusive nonblocking lock twice (absolute paths)" {
if (native_os == .wasi) return error.SkipZigTest;
+ const io = testing.io;
+
var random_bytes: [12]u8 = undefined;
std.crypto.random.bytes(&random_bytes);
- var random_b64: [fs.base64_encoder.calcSize(random_bytes.len)]u8 = undefined;
- _ = fs.base64_encoder.encode(&random_b64, &random_bytes);
+ var random_b64: [std.fs.base64_encoder.calcSize(random_bytes.len)]u8 = undefined;
+ _ = std.fs.base64_encoder.encode(&random_b64, &random_bytes);
const sub_path = random_b64 ++ "-zig-test-absolute-paths.txt";
@@ -1761,47 +1772,50 @@ test "open file with exclusive nonblocking lock twice (absolute paths)" {
const cwd = try std.process.getCwdAlloc(gpa);
defer gpa.free(cwd);
- const filename = try fs.path.resolve(gpa, &.{ cwd, sub_path });
+ const filename = try Dir.path.resolve(gpa, &.{ cwd, sub_path });
defer gpa.free(filename);
- defer fs.deleteFileAbsolute(filename) catch {}; // createFileAbsolute can leave files on failures
- const file1 = try fs.createFileAbsolute(filename, .{
+ defer Dir.deleteFileAbsolute(io, filename) catch {}; // createFileAbsolute can leave files on failures
+ const file1 = try Dir.createFileAbsolute(io, filename, .{
.lock = .exclusive,
.lock_nonblocking = true,
});
- const file2 = fs.createFileAbsolute(filename, .{
+ const file2 = Dir.createFileAbsolute(io, filename, .{
.lock = .exclusive,
.lock_nonblocking = true,
});
- file1.close();
- try testing.expectError(error.WouldBlock, file2);
+ file1.close(io);
+ try expectError(error.WouldBlock, file2);
}
test "read from locked file" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const filename = try ctx.transformPath("read_lock_file_test.txt");
{
- const f = try ctx.dir.createFile(filename, .{ .read = true });
- defer f.close();
+ const f = try ctx.dir.createFile(io, filename, .{ .read = true });
+ defer f.close(io);
var buffer: [1]u8 = undefined;
- _ = try f.read(&buffer);
+ _ = try f.readPositional(io, &.{&buffer}, 0);
}
{
- const f = try ctx.dir.createFile(filename, .{
+ const f = try ctx.dir.createFile(io, filename, .{
.read = true,
.lock = .exclusive,
});
- defer f.close();
- const f2 = try ctx.dir.openFile(filename, .{});
- defer f2.close();
+ defer f.close(io);
+ const f2 = try ctx.dir.openFile(io, filename, .{});
+ defer f2.close(io);
+ // On POSIX locks may be ignored, however on Windows they cause
+ // LockViolation.
var buffer: [1]u8 = undefined;
if (builtin.os.tag == .windows) {
- try std.testing.expectError(error.LockViolation, f2.read(&buffer));
+ try expectError(error.LockViolation, f2.readPositional(io, &.{&buffer}, 0));
} else {
- try std.testing.expectEqual(0, f2.read(&buffer));
+ try expectEqual(0, f2.readPositional(io, &.{&buffer}, 0));
}
}
}
@@ -1809,6 +1823,8 @@ test "read from locked file" {
}
test "walker" {
+ const io = testing.io;
+
var tmp = tmpDir(.{ .iterate = true });
defer tmp.cleanup();
@@ -1819,9 +1835,9 @@ test "walker" {
.{ "dir2", 1 },
.{ "dir3", 1 },
.{ "dir4", 1 },
- .{ "dir3" ++ fs.path.sep_str ++ "sub1", 2 },
- .{ "dir3" ++ fs.path.sep_str ++ "sub2", 2 },
- .{ "dir3" ++ fs.path.sep_str ++ "sub2" ++ fs.path.sep_str ++ "subsub1", 3 },
+ .{ "dir3" ++ Dir.path.sep_str ++ "sub1", 2 },
+ .{ "dir3" ++ Dir.path.sep_str ++ "sub2", 2 },
+ .{ "dir3" ++ Dir.path.sep_str ++ "sub2" ++ Dir.path.sep_str ++ "subsub1", 3 },
});
const expected_basenames = std.StaticStringMap(void).initComptime(.{
@@ -1835,35 +1851,37 @@ test "walker" {
});
for (expected_paths.keys()) |key| {
- try tmp.dir.makePath(key);
+ try tmp.dir.createDirPath(io, key);
}
var walker = try tmp.dir.walk(testing.allocator);
defer walker.deinit();
var num_walked: usize = 0;
- while (try walker.next()) |entry| {
- testing.expect(expected_basenames.has(entry.basename)) catch |err| {
+ while (try walker.next(io)) |entry| {
+ expect(expected_basenames.has(entry.basename)) catch |err| {
std.debug.print("found unexpected basename: {f}\n", .{std.ascii.hexEscape(entry.basename, .lower)});
return err;
};
- testing.expect(expected_paths.has(entry.path)) catch |err| {
+ expect(expected_paths.has(entry.path)) catch |err| {
std.debug.print("found unexpected path: {f}\n", .{std.ascii.hexEscape(entry.path, .lower)});
return err;
};
- testing.expectEqual(expected_paths.get(entry.path).?, entry.depth()) catch |err| {
+ expectEqual(expected_paths.get(entry.path).?, entry.depth()) catch |err| {
std.debug.print("path reported unexpected depth: {f}\n", .{std.ascii.hexEscape(entry.path, .lower)});
return err;
};
// make sure that the entry.dir is the containing dir
- var entry_dir = try entry.dir.openDir(entry.basename, .{});
- defer entry_dir.close();
+ var entry_dir = try entry.dir.openDir(io, entry.basename, .{});
+ defer entry_dir.close(io);
num_walked += 1;
}
- try testing.expectEqual(expected_paths.kvs.len, num_walked);
+ try expectEqual(expected_paths.kvs.len, num_walked);
}
test "selective walker, skip entries that start with ." {
+ const io = testing.io;
+
var tmp = tmpDir(.{ .iterate = true });
defer tmp.cleanup();
@@ -1878,11 +1896,11 @@ test "selective walker, skip entries that start with ." {
const expected_paths = std.StaticStringMap(usize).initComptime(.{
.{ "dir1", 1 },
- .{ "dir1" ++ fs.path.sep_str ++ "foo", 2 },
+ .{ "dir1" ++ Dir.path.sep_str ++ "foo", 2 },
.{ "a", 1 },
- .{ "a" ++ fs.path.sep_str ++ "b", 2 },
- .{ "a" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "c", 3 },
- .{ "a" ++ fs.path.sep_str ++ "baz", 2 },
+ .{ "a" ++ Dir.path.sep_str ++ "b", 2 },
+ .{ "a" ++ Dir.path.sep_str ++ "b" ++ Dir.path.sep_str ++ "c", 3 },
+ .{ "a" ++ Dir.path.sep_str ++ "baz", 2 },
});
const expected_basenames = std.StaticStringMap(void).initComptime(.{
@@ -1895,41 +1913,43 @@ test "selective walker, skip entries that start with ." {
});
for (paths_to_create) |path| {
- try tmp.dir.makePath(path);
+ try tmp.dir.createDirPath(io, path);
}
var walker = try tmp.dir.walkSelectively(testing.allocator);
defer walker.deinit();
var num_walked: usize = 0;
- while (try walker.next()) |entry| {
+ while (try walker.next(io)) |entry| {
if (entry.basename[0] == '.') continue;
if (entry.kind == .directory) {
- try walker.enter(entry);
+ try walker.enter(io, entry);
}
- testing.expect(expected_basenames.has(entry.basename)) catch |err| {
+ expect(expected_basenames.has(entry.basename)) catch |err| {
std.debug.print("found unexpected basename: {f}\n", .{std.ascii.hexEscape(entry.basename, .lower)});
return err;
};
- testing.expect(expected_paths.has(entry.path)) catch |err| {
+ expect(expected_paths.has(entry.path)) catch |err| {
std.debug.print("found unexpected path: {f}\n", .{std.ascii.hexEscape(entry.path, .lower)});
return err;
};
- testing.expectEqual(expected_paths.get(entry.path).?, entry.depth()) catch |err| {
+ expectEqual(expected_paths.get(entry.path).?, entry.depth()) catch |err| {
std.debug.print("path reported unexpected depth: {f}\n", .{std.ascii.hexEscape(entry.path, .lower)});
return err;
};
// make sure that the entry.dir is the containing dir
- var entry_dir = try entry.dir.openDir(entry.basename, .{});
- defer entry_dir.close();
+ var entry_dir = try entry.dir.openDir(io, entry.basename, .{});
+ defer entry_dir.close(io);
num_walked += 1;
}
- try testing.expectEqual(expected_paths.kvs.len, num_walked);
+ try expectEqual(expected_paths.kvs.len, num_walked);
}
test "walker without fully iterating" {
+ const io = testing.io;
+
var tmp = tmpDir(.{ .iterate = true });
defer tmp.cleanup();
@@ -1939,18 +1959,18 @@ test "walker without fully iterating" {
// Create 2 directories inside the tmp directory, but then only iterate once before breaking.
// This ensures that walker doesn't try to close the initial directory when not fully iterating.
- try tmp.dir.makePath("a");
- try tmp.dir.makePath("b");
+ try tmp.dir.createDirPath(io, "a");
+ try tmp.dir.createDirPath(io, "b");
var num_walked: usize = 0;
- while (try walker.next()) |_| {
+ while (try walker.next(io)) |_| {
num_walked += 1;
break;
}
- try testing.expectEqual(@as(usize, 1), num_walked);
+ try expectEqual(@as(usize, 1), num_walked);
}
-test "'.' and '..' in fs.Dir functions" {
+test "'.' and '..' in Dir functions" {
if (native_os == .windows and builtin.cpu.arch == .aarch64) {
// https://github.com/ziglang/zig/issues/17134
return error.SkipZigTest;
@@ -1965,27 +1985,27 @@ test "'.' and '..' in fs.Dir functions" {
const rename_path = try ctx.transformPath("./subdir/../rename");
const update_path = try ctx.transformPath("./subdir/../update");
- try ctx.dir.makeDir(subdir_path);
- try ctx.dir.access(subdir_path, .{});
- var created_subdir = try ctx.dir.openDir(subdir_path, .{});
- created_subdir.close();
+ try ctx.dir.createDir(io, subdir_path, .default_dir);
+ try ctx.dir.access(io, subdir_path, .{});
+ var created_subdir = try ctx.dir.openDir(io, subdir_path, .{});
+ created_subdir.close(io);
- const created_file = try ctx.dir.createFile(file_path, .{});
- created_file.close();
- try ctx.dir.access(file_path, .{});
+ const created_file = try ctx.dir.createFile(io, file_path, .{});
+ created_file.close(io);
+ try ctx.dir.access(io, file_path, .{});
- try ctx.dir.copyFile(file_path, ctx.dir, copy_path, .{});
- try ctx.dir.rename(copy_path, rename_path);
- const renamed_file = try ctx.dir.openFile(rename_path, .{});
- renamed_file.close();
- try ctx.dir.deleteFile(rename_path);
+ try ctx.dir.copyFile(file_path, ctx.dir, copy_path, io, .{});
+ try ctx.dir.rename(copy_path, ctx.dir, rename_path, io);
+ const renamed_file = try ctx.dir.openFile(io, rename_path, .{});
+ renamed_file.close(io);
+ try ctx.dir.deleteFile(io, rename_path);
- try ctx.dir.writeFile(.{ .sub_path = update_path, .data = "something" });
- var dir = ctx.dir.adaptToNewApi();
+ try ctx.dir.writeFile(io, .{ .sub_path = update_path, .data = "something" });
+ var dir = ctx.dir;
const prev_status = try dir.updateFile(io, file_path, dir, update_path, .{});
- try testing.expectEqual(Io.Dir.PrevStatus.stale, prev_status);
+ try expectEqual(Dir.PrevStatus.stale, prev_status);
- try ctx.dir.deleteDir(subdir_path);
+ try ctx.dir.deleteDir(io, subdir_path);
}
}.impl);
}
@@ -1994,6 +2014,8 @@ test "'.' and '..' in absolute functions" {
if (native_os == .wasi) return error.SkipZigTest;
if (native_os == .openbsd) return error.SkipZigTest;
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -2001,83 +2023,71 @@ test "'.' and '..' in absolute functions" {
defer arena.deinit();
const allocator = arena.allocator();
- const base_path = try tmp.dir.realpathAlloc(allocator, ".");
+ const base_path = try tmp.dir.realPathFileAlloc(io, ".", allocator);
- const subdir_path = try fs.path.join(allocator, &.{ base_path, "./subdir" });
- try fs.makeDirAbsolute(subdir_path);
- try fs.accessAbsolute(subdir_path, .{});
- var created_subdir = try fs.openDirAbsolute(subdir_path, .{});
- created_subdir.close();
+ const subdir_path = try Dir.path.join(allocator, &.{ base_path, "./subdir" });
+ try Dir.createDirAbsolute(io, subdir_path, .default_dir);
+ try Dir.accessAbsolute(io, subdir_path, .{});
+ var created_subdir = try Dir.openDirAbsolute(io, subdir_path, .{});
+ created_subdir.close(io);
- const created_file_path = try fs.path.join(allocator, &.{ subdir_path, "../file" });
- const created_file = try fs.createFileAbsolute(created_file_path, .{});
- created_file.close();
- try fs.accessAbsolute(created_file_path, .{});
+ const created_file_path = try Dir.path.join(allocator, &.{ subdir_path, "../file" });
+ const created_file = try Dir.createFileAbsolute(io, created_file_path, .{});
+ created_file.close(io);
+ try Dir.accessAbsolute(io, created_file_path, .{});
- const copied_file_path = try fs.path.join(allocator, &.{ subdir_path, "../copy" });
- try fs.copyFileAbsolute(created_file_path, copied_file_path, .{});
- const renamed_file_path = try fs.path.join(allocator, &.{ subdir_path, "../rename" });
- try fs.renameAbsolute(copied_file_path, renamed_file_path);
- const renamed_file = try fs.openFileAbsolute(renamed_file_path, .{});
- renamed_file.close();
- try fs.deleteFileAbsolute(renamed_file_path);
+ const copied_file_path = try Dir.path.join(allocator, &.{ subdir_path, "../copy" });
+ try Dir.copyFileAbsolute(created_file_path, copied_file_path, io, .{});
+ const renamed_file_path = try Dir.path.join(allocator, &.{ subdir_path, "../rename" });
+ try Dir.renameAbsolute(copied_file_path, renamed_file_path, io);
+ const renamed_file = try Dir.openFileAbsolute(io, renamed_file_path, .{});
+ renamed_file.close(io);
+ try Dir.deleteFileAbsolute(io, renamed_file_path);
- try fs.deleteDirAbsolute(subdir_path);
+ try Dir.deleteDirAbsolute(io, subdir_path);
}
test "chmod" {
- if (native_os == .windows or native_os == .wasi)
- return error.SkipZigTest;
+ if (native_os == .windows or native_os == .wasi) return;
+
+ const io = testing.io;
var tmp = tmpDir(.{});
defer tmp.cleanup();
- const file = try tmp.dir.createFile("test_file", .{ .mode = 0o600 });
- defer file.close();
- try testing.expectEqual(@as(File.Mode, 0o600), (try file.stat()).mode & 0o7777);
+ const file = try tmp.dir.createFile(io, "test_file", .{ .permissions = .fromMode(0o600) });
+ defer file.close(io);
+ try expectEqual(0o600, (try file.stat(io)).permissions.toMode() & 0o7777);
- try file.chmod(0o644);
- try testing.expectEqual(@as(File.Mode, 0o644), (try file.stat()).mode & 0o7777);
+ try file.setPermissions(io, .fromMode(0o644));
+ try expectEqual(0o644, (try file.stat(io)).permissions.toMode() & 0o7777);
- try tmp.dir.makeDir("test_dir");
- var dir = try tmp.dir.openDir("test_dir", .{ .iterate = true });
- defer dir.close();
+ try tmp.dir.createDir(io, "test_dir", .default_dir);
+ var dir = try tmp.dir.openDir(io, "test_dir", .{ .iterate = true });
+ defer dir.close(io);
- try dir.chmod(0o700);
- try testing.expectEqual(@as(File.Mode, 0o700), (try dir.stat()).mode & 0o7777);
+ try dir.setPermissions(io, .fromMode(0o700));
+ try expectEqual(0o700, (try dir.stat(io)).permissions.toMode() & 0o7777);
}
-test "chown" {
+test "change ownership" {
if (native_os == .windows or native_os == .wasi)
return error.SkipZigTest;
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
- const file = try tmp.dir.createFile("test_file", .{});
- defer file.close();
- try file.chown(null, null);
+ const file = try tmp.dir.createFile(io, "test_file", .{});
+ defer file.close(io);
+ try file.setOwner(io, null, null);
- try tmp.dir.makeDir("test_dir");
+ try tmp.dir.createDir(io, "test_dir", .default_dir);
- var dir = try tmp.dir.openDir("test_dir", .{ .iterate = true });
- defer dir.close();
- try dir.chown(null, null);
-}
-
-test "delete a setAsCwd directory on Windows" {
- if (native_os != .windows) return error.SkipZigTest;
-
- var tmp = tmpDir(.{});
- // Set tmp dir as current working directory.
- try tmp.dir.setAsCwd();
- tmp.dir.close();
- try testing.expectError(error.FileBusy, tmp.parent_dir.deleteTree(&tmp.sub_path));
- // Now set the parent dir as the current working dir for clean up.
- try tmp.parent_dir.setAsCwd();
- try tmp.parent_dir.deleteTree(&tmp.sub_path);
- // Close the parent "tmp" so we don't leak the HANDLE.
- tmp.parent_dir.close();
+ var dir = try tmp.dir.openDir(io, "test_dir", .{ .iterate = true });
+ defer dir.close(io);
+ try dir.setOwner(io, null, null);
}
test "invalid UTF-8/WTF-8 paths" {
@@ -2093,71 +2103,65 @@ test "invalid UTF-8/WTF-8 paths" {
// This is both invalid UTF-8 and WTF-8, since \xFF is an invalid start byte
const invalid_path = try ctx.transformPath("\xFF");
- try testing.expectError(expected_err, ctx.dir.openFile(invalid_path, .{}));
+ try expectError(expected_err, ctx.dir.openFile(io, invalid_path, .{}));
- try testing.expectError(expected_err, ctx.dir.createFile(invalid_path, .{}));
+ try expectError(expected_err, ctx.dir.createFile(io, invalid_path, .{}));
- try testing.expectError(expected_err, ctx.dir.makeDir(invalid_path));
+ try expectError(expected_err, ctx.dir.createDir(io, invalid_path, .default_dir));
- try testing.expectError(expected_err, ctx.dir.makePath(invalid_path));
- try testing.expectError(expected_err, ctx.dir.makeOpenPath(invalid_path, .{}));
+ try expectError(expected_err, ctx.dir.createDirPath(io, invalid_path));
+ try expectError(expected_err, ctx.dir.createDirPathOpen(io, invalid_path, .{}));
- try testing.expectError(expected_err, ctx.dir.openDir(invalid_path, .{}));
+ try expectError(expected_err, ctx.dir.openDir(io, invalid_path, .{}));
- try testing.expectError(expected_err, ctx.dir.deleteFile(invalid_path));
+ try expectError(expected_err, ctx.dir.deleteFile(io, invalid_path));
- try testing.expectError(expected_err, ctx.dir.deleteDir(invalid_path));
+ try expectError(expected_err, ctx.dir.deleteDir(io, invalid_path));
- try testing.expectError(expected_err, ctx.dir.rename(invalid_path, invalid_path));
+ try expectError(expected_err, ctx.dir.rename(invalid_path, ctx.dir, invalid_path, io));
- try testing.expectError(expected_err, ctx.dir.symLink(invalid_path, invalid_path, .{}));
- if (native_os == .wasi) {
- try testing.expectError(expected_err, ctx.dir.symLinkWasi(invalid_path, invalid_path, .{}));
- }
+ try expectError(expected_err, ctx.dir.symLink(io, invalid_path, invalid_path, .{}));
- try testing.expectError(expected_err, ctx.dir.readLink(invalid_path, &[_]u8{}));
- if (native_os == .wasi) {
- try testing.expectError(expected_err, ctx.dir.readLinkWasi(invalid_path, &[_]u8{}));
- }
+ try expectError(expected_err, ctx.dir.readLink(io, invalid_path, &[_]u8{}));
- try testing.expectError(expected_err, ctx.dir.readFile(invalid_path, &[_]u8{}));
- try testing.expectError(expected_err, ctx.dir.readFileAlloc(invalid_path, testing.allocator, .limited(0)));
+ try expectError(expected_err, ctx.dir.readFile(io, invalid_path, &[_]u8{}));
+ try expectError(expected_err, ctx.dir.readFileAlloc(io, invalid_path, testing.allocator, .limited(0)));
- try testing.expectError(expected_err, ctx.dir.deleteTree(invalid_path));
- try testing.expectError(expected_err, ctx.dir.deleteTreeMinStackSize(invalid_path));
+ try expectError(expected_err, ctx.dir.deleteTree(io, invalid_path));
+ try expectError(expected_err, ctx.dir.deleteTreeMinStackSize(io, invalid_path));
- try testing.expectError(expected_err, ctx.dir.writeFile(.{ .sub_path = invalid_path, .data = "" }));
+ try expectError(expected_err, ctx.dir.writeFile(io, .{ .sub_path = invalid_path, .data = "" }));
- try testing.expectError(expected_err, ctx.dir.access(invalid_path, .{}));
+ try expectError(expected_err, ctx.dir.access(io, invalid_path, .{}));
- var dir = ctx.dir.adaptToNewApi();
- try testing.expectError(expected_err, dir.updateFile(io, invalid_path, dir, invalid_path, .{}));
- try testing.expectError(expected_err, ctx.dir.copyFile(invalid_path, ctx.dir, invalid_path, .{}));
+ var dir = ctx.dir;
+ try expectError(expected_err, dir.updateFile(io, invalid_path, dir, invalid_path, .{}));
+ try expectError(expected_err, ctx.dir.copyFile(invalid_path, ctx.dir, invalid_path, io, .{}));
- try testing.expectError(expected_err, ctx.dir.statFile(invalid_path));
+ try expectError(expected_err, ctx.dir.statFile(io, invalid_path, .{}));
if (native_os != .wasi) {
- try testing.expectError(expected_err, ctx.dir.realpath(invalid_path, &[_]u8{}));
- try testing.expectError(expected_err, ctx.dir.realpathAlloc(testing.allocator, invalid_path));
+ try expectError(expected_err, ctx.dir.realPathFile(io, invalid_path, &[_]u8{}));
+ try expectError(expected_err, ctx.dir.realPathFileAlloc(io, invalid_path, testing.allocator));
}
- try testing.expectError(expected_err, fs.rename(ctx.dir, invalid_path, ctx.dir, invalid_path));
+ try expectError(expected_err, Dir.rename(ctx.dir, invalid_path, ctx.dir, invalid_path, io));
if (native_os != .wasi and ctx.path_type != .relative) {
- try testing.expectError(expected_err, fs.copyFileAbsolute(invalid_path, invalid_path, .{}));
- try testing.expectError(expected_err, fs.makeDirAbsolute(invalid_path));
- try testing.expectError(expected_err, fs.deleteDirAbsolute(invalid_path));
- try testing.expectError(expected_err, fs.renameAbsolute(invalid_path, invalid_path));
- try testing.expectError(expected_err, fs.openDirAbsolute(invalid_path, .{}));
- try testing.expectError(expected_err, fs.openFileAbsolute(invalid_path, .{}));
- try testing.expectError(expected_err, fs.accessAbsolute(invalid_path, .{}));
- try testing.expectError(expected_err, fs.createFileAbsolute(invalid_path, .{}));
- try testing.expectError(expected_err, fs.deleteFileAbsolute(invalid_path));
- try testing.expectError(expected_err, fs.deleteTreeAbsolute(invalid_path));
- var readlink_buf: [fs.max_path_bytes]u8 = undefined;
- try testing.expectError(expected_err, fs.readLinkAbsolute(invalid_path, &readlink_buf));
- try testing.expectError(expected_err, fs.symLinkAbsolute(invalid_path, invalid_path, .{}));
- try testing.expectError(expected_err, fs.realpathAlloc(testing.allocator, invalid_path));
+ var buf: [Dir.max_path_bytes]u8 = undefined;
+ try expectError(expected_err, Dir.copyFileAbsolute(invalid_path, invalid_path, io, .{}));
+ try expectError(expected_err, Dir.createDirAbsolute(io, invalid_path, .default_dir));
+ try expectError(expected_err, Dir.deleteDirAbsolute(io, invalid_path));
+ try expectError(expected_err, Dir.renameAbsolute(invalid_path, invalid_path, io));
+ try expectError(expected_err, Dir.openDirAbsolute(io, invalid_path, .{}));
+ try expectError(expected_err, Dir.openFileAbsolute(io, invalid_path, .{}));
+ try expectError(expected_err, Dir.accessAbsolute(io, invalid_path, .{}));
+ try expectError(expected_err, Dir.createFileAbsolute(io, invalid_path, .{}));
+ try expectError(expected_err, Dir.deleteFileAbsolute(io, invalid_path));
+ try expectError(expected_err, Dir.readLinkAbsolute(io, invalid_path, &buf));
+ try expectError(expected_err, Dir.symLinkAbsolute(io, invalid_path, invalid_path, .{}));
+ try expectError(expected_err, Dir.realPathFileAbsolute(io, invalid_path, &buf));
+ try expectError(expected_err, Dir.realPathFileAbsoluteAlloc(io, invalid_path, testing.allocator));
}
}
}.impl);
@@ -2171,15 +2175,15 @@ test "read file non vectored" {
const contents = "hello, world!\n";
- const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
- defer file.close();
+ const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true });
+ defer file.close(io);
{
- var file_writer: std.fs.File.Writer = .init(file, &.{});
+ var file_writer: File.Writer = .init(file, io, &.{});
try file_writer.interface.writeAll(contents);
try file_writer.interface.flush();
}
- var file_reader: std.Io.File.Reader = .initAdapted(file, io, &.{});
+ var file_reader: std.Io.File.Reader = .init(file, io, &.{});
var write_buffer: [100]u8 = undefined;
var w: std.Io.Writer = .fixed(&write_buffer);
@@ -2191,8 +2195,8 @@ test "read file non vectored" {
else => |e| return e,
};
}
- try testing.expectEqualStrings(contents, w.buffered());
- try testing.expectEqual(contents.len, i);
+ try expectEqualStrings(contents, w.buffered());
+ try expectEqual(contents.len, i);
}
test "seek keeping partial buffer" {
@@ -2203,18 +2207,18 @@ test "seek keeping partial buffer" {
const contents = "0123456789";
- const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
- defer file.close();
+ const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true });
+ defer file.close(io);
{
- var file_writer: std.fs.File.Writer = .init(file, &.{});
+ var file_writer: File.Writer = .init(file, io, &.{});
try file_writer.interface.writeAll(contents);
try file_writer.interface.flush();
}
var read_buffer: [3]u8 = undefined;
- var file_reader: Io.File.Reader = .initAdapted(file, io, &read_buffer);
+ var file_reader: Io.File.Reader = .init(file, io, &read_buffer);
- try testing.expectEqual(0, file_reader.logicalPos());
+ try expectEqual(0, file_reader.logicalPos());
var buf: [4]u8 = undefined;
try file_reader.interface.readSliceAll(&buf);
@@ -2224,18 +2228,18 @@ test "seek keeping partial buffer" {
return;
}
- try testing.expectEqual(4, file_reader.logicalPos());
- try testing.expectEqual(7, file_reader.pos);
+ try expectEqual(4, file_reader.logicalPos());
+ try expectEqual(7, file_reader.pos);
try file_reader.seekTo(6);
- try testing.expectEqual(6, file_reader.logicalPos());
- try testing.expectEqual(7, file_reader.pos);
+ try expectEqual(6, file_reader.logicalPos());
+ try expectEqual(7, file_reader.pos);
- try testing.expectEqualStrings("0123", &buf);
+ try expectEqualStrings("0123", &buf);
const n = try file_reader.interface.readSliceShort(&buf);
- try testing.expectEqual(4, n);
+ try expectEqual(4, n);
- try testing.expectEqualStrings("6789", &buf);
+ try expectEqualStrings("6789", &buf);
}
test "seekBy" {
@@ -2244,16 +2248,16 @@ test "seekBy" {
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
- try tmp_dir.dir.writeFile(.{ .sub_path = "blah.txt", .data = "let's test seekBy" });
- const f = try tmp_dir.dir.openFile("blah.txt", .{ .mode = .read_only });
- defer f.close();
+ try tmp_dir.dir.writeFile(io, .{ .sub_path = "blah.txt", .data = "let's test seekBy" });
+ const f = try tmp_dir.dir.openFile(io, "blah.txt", .{ .mode = .read_only });
+ defer f.close(io);
var reader = f.readerStreaming(io, &.{});
try reader.seekBy(2);
var buffer: [20]u8 = undefined;
const n = try reader.interface.readSliceShort(&buffer);
- try testing.expectEqual(15, n);
- try testing.expectEqualStrings("t's test seekBy", buffer[0..15]);
+ try expectEqual(15, n);
+ try expectEqualStrings("t's test seekBy", buffer[0..15]);
}
test "seekTo flushes buffered data" {
@@ -2264,11 +2268,11 @@ test "seekTo flushes buffered data" {
const contents = "data";
- const file = try tmp.dir.createFile("seek.bin", .{ .read = true });
- defer file.close();
+ const file = try tmp.dir.createFile(io, "seek.bin", .{ .read = true });
+ defer file.close(io);
{
var buf: [16]u8 = undefined;
- var file_writer = std.fs.File.writer(file, &buf);
+ var file_writer = file.writer(io, &buf);
try file_writer.interface.writeAll(contents);
try file_writer.seekTo(8);
@@ -2276,11 +2280,11 @@ test "seekTo flushes buffered data" {
}
var read_buffer: [16]u8 = undefined;
- var file_reader: std.Io.File.Reader = .initAdapted(file, io, &read_buffer);
+ var file_reader: std.Io.File.Reader = .init(file, io, &read_buffer);
var buf: [4]u8 = undefined;
try file_reader.interface.readSliceAll(&buf);
- try std.testing.expectEqualStrings(contents, &buf);
+ try expectEqualStrings(contents, &buf);
}
test "File.Writer sendfile with buffered contents" {
@@ -2290,11 +2294,11 @@ test "File.Writer sendfile with buffered contents" {
defer tmp_dir.cleanup();
{
- try tmp_dir.dir.writeFile(.{ .sub_path = "a", .data = "bcd" });
- const in = try tmp_dir.dir.openFile("a", .{});
- defer in.close();
- const out = try tmp_dir.dir.createFile("b", .{});
- defer out.close();
+ try tmp_dir.dir.writeFile(io, .{ .sub_path = "a", .data = "bcd" });
+ const in = try tmp_dir.dir.openFile(io, "a", .{});
+ defer in.close(io);
+ const out = try tmp_dir.dir.createFile(io, "b", .{});
+ defer out.close(io);
var in_buf: [2]u8 = undefined;
var in_r = in.reader(io, &in_buf);
@@ -2302,16 +2306,332 @@ test "File.Writer sendfile with buffered contents" {
try in_r.interface.fill(2);
var out_buf: [1]u8 = undefined;
- var out_w = out.writerStreaming(&out_buf);
+ var out_w = out.writerStreaming(io, &out_buf);
try out_w.interface.writeByte('a');
- try testing.expectEqual(3, try out_w.interface.sendFileAll(&in_r, .unlimited));
+ try expectEqual(3, try out_w.interface.sendFileAll(&in_r, .unlimited));
try out_w.interface.flush();
}
- var check = try tmp_dir.dir.openFile("b", .{});
- defer check.close();
+ var check = try tmp_dir.dir.openFile(io, "b", .{});
+ defer check.close(io);
var check_buf: [4]u8 = undefined;
var check_r = check.reader(io, &check_buf);
- try testing.expectEqualStrings("abcd", try check_r.interface.take(4));
- try testing.expectError(error.EndOfStream, check_r.interface.takeByte());
+ try expectEqualStrings("abcd", try check_r.interface.take(4));
+ try expectError(error.EndOfStream, check_r.interface.takeByte());
+}
+
+test "readlink on Windows" {
+ if (native_os != .windows) return error.SkipZigTest;
+
+ const io = testing.io;
+
+ try testReadLinkWindows(io, "C:\\ProgramData", "C:\\Users\\All Users");
+ try testReadLinkWindows(io, "C:\\Users\\Default", "C:\\Users\\Default User");
+ try testReadLinkWindows(io, "C:\\Users", "C:\\Documents and Settings");
+}
+
+fn testReadLinkWindows(io: Io, target_path: []const u8, symlink_path: []const u8) !void {
+ var buffer: [Dir.max_path_bytes]u8 = undefined;
+ const len = try Dir.readLinkAbsolute(io, symlink_path, &buffer);
+ const given = buffer[0..len];
+ try expect(mem.eql(u8, target_path, given));
+}
+
+test "readlinkat" {
+ const io = testing.io;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ // create file
+ try tmp.dir.writeFile(io, .{ .sub_path = "file.txt", .data = "nonsense" });
+
+ // create a symbolic link
+ tmp.dir.symLink(io, "file.txt", "link", .{}) catch |err| switch (err) {
+ error.AccessDenied => {
+ // Symlink requires admin privileges on windows, so this test can legitimately fail.
+ if (native_os == .windows) return error.SkipZigTest;
+ },
+ else => |e| return e,
+ };
+
+ // read the link
+ var buffer: [Dir.max_path_bytes]u8 = undefined;
+ const read_link = buffer[0..try tmp.dir.readLink(io, "link", &buffer)];
+ try expectEqualStrings("file.txt", read_link);
+}
+
+test "fchmodat smoke test" {
+ if (!Io.File.Permissions.has_executable_bit) return error.SkipZigTest;
+
+ const io = testing.io;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ try expectError(error.FileNotFound, tmp.dir.setFilePermissions(io, "regfile", .fromMode(0o666), .{}));
+ const file = try tmp.dir.createFile(io, "regfile", .{
+ .exclusive = true,
+ .permissions = .fromMode(0o644),
+ });
+ file.close(io);
+
+ if ((builtin.cpu.arch == .riscv32 or builtin.cpu.arch.isLoongArch()) and
+ builtin.os.tag == .linux and !builtin.link_libc)
+ {
+ return error.SkipZigTest; // No `fstatat()`.
+ }
+
+ try tmp.dir.symLink(io, "regfile", "symlink", .{});
+ const sym_mode = blk: {
+ const st = try tmp.dir.statFile(io, "symlink", .{ .follow_symlinks = false });
+ break :blk st.permissions.toMode() & 0b111_111_111;
+ };
+
+ try tmp.dir.setFilePermissions(io, "regfile", .fromMode(0o640), .{});
+ try expectMode(io, tmp.dir, "regfile", .fromMode(0o640));
+ try tmp.dir.setFilePermissions(io, "regfile", .fromMode(0o600), .{ .follow_symlinks = false });
+ try expectMode(io, tmp.dir, "regfile", .fromMode(0o600));
+
+ try tmp.dir.setFilePermissions(io, "symlink", .fromMode(0o640), .{});
+ try expectMode(io, tmp.dir, "regfile", .fromMode(0o640));
+ try expectMode(io, tmp.dir, "symlink", .fromMode(sym_mode));
+
+ var test_link = true;
+ tmp.dir.setFilePermissions(io, "symlink", .fromMode(0o600), .{ .follow_symlinks = false }) catch |err| switch (err) {
+ error.OperationUnsupported => test_link = false,
+ else => |e| return e,
+ };
+ if (test_link) try expectMode(io, tmp.dir, "symlink", .fromMode(0o600));
+ try expectMode(io, tmp.dir, "regfile", .fromMode(0o640));
+}
+
+fn expectMode(io: Io, dir: Dir, file: []const u8, permissions: File.Permissions) !void {
+ const mode = permissions.toMode();
+ const st = try dir.statFile(io, file, .{ .follow_symlinks = false });
+ const found_mode = st.permissions.toMode();
+ try expectEqual(mode, found_mode & 0b111_111_111);
+}
+
+test "isatty" {
+ const io = testing.io;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ var file = try tmp.dir.createFile(io, "foo", .{});
+ defer file.close(io);
+
+ try expectEqual(false, try file.isTty(io));
+}
+
+test "read positional empty buffer" {
+ const io = testing.io;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ var file = try tmp.dir.createFile(io, "pread_empty", .{ .read = true });
+ defer file.close(io);
+
+ var buffer: [0]u8 = undefined;
+ try expectEqual(0, try file.readPositional(io, &.{&buffer}, 0));
+}
+
+test "write streaming empty buffer" {
+ const io = testing.io;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ var file = try tmp.dir.createFile(io, "write_empty", .{});
+ defer file.close(io);
+
+ const buffer: [0]u8 = .{};
+ try file.writeStreamingAll(io, &buffer);
+}
+
+test "write positional empty buffer" {
+ const io = testing.io;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ var file = try tmp.dir.createFile(io, "pwrite_empty", .{});
+ defer file.close(io);
+
+ const buffer: [0]u8 = .{};
+ try expectEqual(0, try file.writePositional(io, &.{&buffer}, 0));
+}
+
+test "access smoke test" {
+ if (native_os == .wasi) return error.SkipZigTest;
+ if (native_os == .windows) return error.SkipZigTest;
+ if (native_os == .openbsd) return error.SkipZigTest;
+
+ const io = testing.io;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ {
+ // Create some file using `open`.
+ const file = try tmp.dir.createFile(io, "some_file", .{ .read = true, .exclusive = true });
+ file.close(io);
+ }
+
+ {
+ // Try to access() the file
+ if (native_os == .windows) {
+ try tmp.dir.access(io, "some_file", .{});
+ } else {
+ try tmp.dir.access(io, "some_file", .{ .read = true, .write = true });
+ }
+ }
+
+ {
+ // Try to access() a non-existent file - should fail with error.FileNotFound
+ try expectError(error.FileNotFound, tmp.dir.access(io, "some_other_file", .{}));
+ }
+
+ {
+ // Create some directory
+ try tmp.dir.createDir(io, "some_dir", .default_dir);
+ }
+
+ {
+ // Try to access() the directory
+ try tmp.dir.access(io, "some_dir", .{});
+ }
+}
+
+test "write streaming a long vector" {
+ const io = testing.io;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ var file = try tmp.dir.createFile(io, "pwritev", .{});
+ defer file.close(io);
+
+ var vecs: [2000][]const u8 = undefined;
+ for (&vecs) |*v| v.* = "a";
+
+ const n = try file.writePositional(io, &vecs, 0);
+ try expect(n <= vecs.len);
+}
+
+test "open smoke test" {
+ if (native_os == .wasi) return error.SkipZigTest;
+ if (native_os == .windows) return error.SkipZigTest;
+ if (native_os == .openbsd) return error.SkipZigTest;
+
+ // TODO verify file attributes using `fstat`
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ const io = testing.io;
+
+ {
+ // Create some file using `open`.
+ const file = try tmp.dir.createFile(io, "some_file", .{ .exclusive = true });
+ file.close(io);
+ }
+
+ // Try this again with the same flags. This op should fail with error.PathAlreadyExists.
+ try expectError(
+ error.PathAlreadyExists,
+ tmp.dir.createFile(io, "some_file", .{ .exclusive = true }),
+ );
+
+ {
+ // Try opening without exclusive flag.
+ const file = try tmp.dir.createFile(io, "some_file", .{});
+ file.close(io);
+ }
+
+ try expectError(error.NotDir, tmp.dir.openDir(io, "some_file", .{}));
+ try tmp.dir.createDir(io, "some_dir", .default_dir);
+
+ {
+ const dir = try tmp.dir.openDir(io, "some_dir", .{});
+ dir.close(io);
+ }
+
+ // Try opening as file which should fail.
+ try expectError(error.IsDir, tmp.dir.openFile(io, "some_dir", .{ .allow_directory = false }));
+}
+
+test "hard link with different directories" {
+ if (native_os == .wasi or native_os == .windows) return error.SkipZigTest;
+
+ const io = testing.io;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ const target_name = "link-target";
+ const link_name = "newlink";
+
+ const subdir = try tmp.dir.createDirPathOpen(io, "subdir", .{});
+
+ defer tmp.dir.deleteFile(io, target_name) catch {};
+ try tmp.dir.writeFile(io, .{ .sub_path = target_name, .data = "example" });
+
+ // Test 1: link from file in subdir back up to target in parent directory
+ tmp.dir.hardLink(target_name, subdir, link_name, io, .{}) catch |err| switch (err) {
+ error.OperationUnsupported => return error.SkipZigTest,
+ else => |e| return e,
+ };
+
+ const efd = try tmp.dir.openFile(io, target_name, .{});
+ defer efd.close(io);
+
+ const nfd = try subdir.openFile(io, link_name, .{});
+ defer nfd.close(io);
+
+ {
+ const e_stat = try efd.stat(io);
+ const n_stat = try nfd.stat(io);
+
+ try expectEqual(e_stat.inode, n_stat.inode);
+ try expectEqual(2, e_stat.nlink);
+ try expectEqual(2, n_stat.nlink);
+ }
+
+ // Test 2: remove link
+ try subdir.deleteFile(io, link_name);
+ const e_stat = try efd.stat(io);
+ try expectEqual(1, e_stat.nlink);
+}
+
+test "stat smoke test" {
+ if (native_os == .wasi and !builtin.link_libc) return error.SkipZigTest;
+
+ const io = testing.io;
+
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ // create dummy file
+ const contents = "nonsense";
+ try tmp.dir.writeFile(io, .{ .sub_path = "file.txt", .data = contents });
+
+ // fetch file's info on the opened fd directly
+ const file = try tmp.dir.openFile(io, "file.txt", .{});
+ const stat = try file.stat(io);
+ defer file.close(io);
+
+ // now repeat but using directory handle instead
+ const statat = try tmp.dir.statFile(io, "file.txt", .{ .follow_symlinks = false });
+
+ try expectEqual(stat.inode, statat.inode);
+ try expectEqual(stat.nlink, statat.nlink);
+ try expectEqual(stat.size, statat.size);
+ try expectEqual(stat.permissions, statat.permissions);
+ try expectEqual(stat.kind, statat.kind);
+ try expectEqual(stat.atime, statat.atime);
+ try expectEqual(stat.mtime, statat.mtime);
+ try expectEqual(stat.ctime, statat.ctime);
}
diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig
index a21d6e9ada..6744b87fac 100644
--- a/lib/std/hash/benchmark.zig
+++ b/lib/std/hash/benchmark.zig
@@ -1,7 +1,8 @@
// zig run -O ReleaseFast --zig-lib-dir ../.. benchmark.zig
+const builtin = @import("builtin");
const std = @import("std");
-const builtin = @import("builtin");
+const Io = std.Io;
const time = std.time;
const Timer = time.Timer;
const hash = std.hash;
@@ -354,7 +355,7 @@ fn mode(comptime x: comptime_int) comptime_int {
pub fn main() !void {
var stdout_buffer: [0x100]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(&stdout_buffer);
const stdout = &stdout_writer.interface;
var buffer: [1024]u8 = undefined;
diff --git a/lib/std/heap/debug_allocator.zig b/lib/std/heap/debug_allocator.zig
index 3183becd82..24f1554544 100644
--- a/lib/std/heap/debug_allocator.zig
+++ b/lib/std/heap/debug_allocator.zig
@@ -84,7 +84,7 @@ const builtin = @import("builtin");
const StackTrace = std.builtin.StackTrace;
const std = @import("std");
-const log = std.log.scoped(.gpa);
+const log = std.log.scoped(.DebugAllocator);
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
@@ -425,7 +425,6 @@ pub fn DebugAllocator(comptime config: Config) type {
bucket: *BucketHeader,
size_class_index: usize,
used_bits_count: usize,
- tty_config: std.Io.tty.Config,
) usize {
const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
const slot_count = slot_counts[size_class_index];
@@ -445,7 +444,7 @@ pub fn DebugAllocator(comptime config: Config) type {
addr,
std.debug.FormatStackTrace{
.stack_trace = stack_trace,
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
});
leaks += 1;
@@ -460,14 +459,12 @@ pub fn DebugAllocator(comptime config: Config) type {
pub fn detectLeaks(self: *Self) usize {
var leaks: usize = 0;
- const tty_config: std.Io.tty.Config = .detect(.stderr());
-
for (self.buckets, 0..) |init_optional_bucket, size_class_index| {
var optional_bucket = init_optional_bucket;
const slot_count = slot_counts[size_class_index];
const used_bits_count = usedBitsCount(slot_count);
while (optional_bucket) |bucket| {
- leaks += detectLeaksInBucket(bucket, size_class_index, used_bits_count, tty_config);
+ leaks += detectLeaksInBucket(bucket, size_class_index, used_bits_count);
optional_bucket = bucket.prev;
}
}
@@ -480,7 +477,7 @@ pub fn DebugAllocator(comptime config: Config) type {
@intFromPtr(large_alloc.bytes.ptr),
std.debug.FormatStackTrace{
.stack_trace = stack_trace,
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
});
leaks += 1;
@@ -534,21 +531,21 @@ pub fn DebugAllocator(comptime config: Config) type {
}
fn reportDoubleFree(ret_addr: usize, alloc_stack_trace: StackTrace, free_stack_trace: StackTrace) void {
+ @branchHint(.cold);
var addr_buf: [stack_n]usize = undefined;
const second_free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
- const tty_config: std.Io.tty.Config = .detect(.stderr());
log.err("Double free detected. Allocation: {f} First free: {f} Second free: {f}", .{
std.debug.FormatStackTrace{
.stack_trace = alloc_stack_trace,
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
std.debug.FormatStackTrace{
.stack_trace = second_free_stack_trace,
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
});
}
@@ -588,19 +585,19 @@ pub fn DebugAllocator(comptime config: Config) type {
}
if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
+ @branchHint(.cold);
var addr_buf: [stack_n]usize = undefined;
const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
- const tty_config: std.Io.tty.Config = .detect(.stderr());
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
entry.value_ptr.bytes.len,
old_mem.len,
std.debug.FormatStackTrace{
.stack_trace = entry.value_ptr.getStackTrace(.alloc),
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
});
}
@@ -701,19 +698,19 @@ pub fn DebugAllocator(comptime config: Config) type {
}
if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
+ @branchHint(.cold);
var addr_buf: [stack_n]usize = undefined;
const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
- const tty_config: std.Io.tty.Config = .detect(.stderr());
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
entry.value_ptr.bytes.len,
old_mem.len,
std.debug.FormatStackTrace{
.stack_trace = entry.value_ptr.getStackTrace(.alloc),
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
});
}
@@ -935,32 +932,32 @@ pub fn DebugAllocator(comptime config: Config) type {
var addr_buf: [stack_n]usize = undefined;
const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &addr_buf);
if (old_memory.len != requested_size) {
- const tty_config: std.Io.tty.Config = .detect(.stderr());
+ @branchHint(.cold);
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
requested_size,
old_memory.len,
std.debug.FormatStackTrace{
.stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
});
}
if (alignment != slot_alignment) {
- const tty_config: std.Io.tty.Config = .detect(.stderr());
+ @branchHint(.cold);
log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{
slot_alignment.toByteUnits(),
alignment.toByteUnits(),
std.debug.FormatStackTrace{
.stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
});
}
@@ -1044,32 +1041,32 @@ pub fn DebugAllocator(comptime config: Config) type {
var addr_buf: [stack_n]usize = undefined;
const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &addr_buf);
if (memory.len != requested_size) {
- const tty_config: std.Io.tty.Config = .detect(.stderr());
+ @branchHint(.cold);
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
requested_size,
memory.len,
std.debug.FormatStackTrace{
.stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
});
}
if (alignment != slot_alignment) {
- const tty_config: std.Io.tty.Config = .detect(.stderr());
+ @branchHint(.cold);
log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{
slot_alignment.toByteUnits(),
alignment.toByteUnits(),
std.debug.FormatStackTrace{
.stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
- .tty_config = tty_config,
+ .terminal_mode = std.log.terminalMode(),
},
});
}
diff --git a/lib/std/http.zig b/lib/std/http.zig
index a768372ecc..291e22539b 100644
--- a/lib/std/http.zig
+++ b/lib/std/http.zig
@@ -2,7 +2,7 @@ const builtin = @import("builtin");
const std = @import("std.zig");
const assert = std.debug.assert;
const Writer = std.Io.Writer;
-const File = std.fs.File;
+const File = std.Io.File;
pub const Client = @import("http/Client.zig");
pub const Server = @import("http/Server.zig");
diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig
index 10ab23f476..7f8c9827ef 100644
--- a/lib/std/http/Client.zig
+++ b/lib/std/http/Client.zig
@@ -1473,6 +1473,8 @@ pub const ConnectUnixError = Allocator.Error || std.posix.SocketError || error{N
///
/// This function is threadsafe.
pub fn connectUnix(client: *Client, path: []const u8) ConnectUnixError!*Connection {
+ const io = client.io;
+
if (client.connection_pool.findConnection(.{
.host = path,
.port = 0,
@@ -1485,7 +1487,7 @@ pub fn connectUnix(client: *Client, path: []const u8) ConnectUnixError!*Connecti
conn.* = .{ .data = undefined };
const stream = try Io.net.connectUnixSocket(path);
- errdefer stream.close();
+ errdefer stream.close(io);
conn.data = .{
.stream = stream,
diff --git a/lib/std/json/dynamic.zig b/lib/std/json/dynamic.zig
index c3cccd1a91..e38ea9cb17 100644
--- a/lib/std/json/dynamic.zig
+++ b/lib/std/json/dynamic.zig
@@ -47,10 +47,9 @@ pub const Value = union(enum) {
}
pub fn dump(v: Value) void {
- const w, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
-
- json.Stringify.value(v, .{}, w) catch return;
+ const stderr = std.debug.lockStderr(&.{}, null);
+ defer std.debug.unlockStderr();
+ json.Stringify.value(v, .{}, &stderr.file_writer.interface) catch return;
}
pub fn jsonStringify(value: @This(), jws: anytype) !void {
diff --git a/lib/std/log.zig b/lib/std/log.zig
index 9568f9ba52..df11fe205b 100644
--- a/lib/std/log.zig
+++ b/lib/std/log.zig
@@ -15,7 +15,7 @@
//!
//! For an example implementation of the `logFn` function, see `defaultLog`,
//! which is the default implementation. It outputs to stderr, using color if
-//! the detected `std.Io.tty.Config` supports it. Its output looks like this:
+//! supported. Its output looks like this:
//! ```
//! error: this is an error
//! error(scope): this is an error with a non-default scope
@@ -80,6 +80,14 @@ pub fn logEnabled(comptime level: Level, comptime scope: @EnumLiteral()) bool {
return @intFromEnum(level) <= @intFromEnum(std.options.log_level);
}
+pub const terminalMode = std.options.logTerminalMode;
+
+pub fn defaultTerminalMode() std.Io.Terminal.Mode {
+ const stderr = std.debug.lockStderr(&.{}).terminal();
+ std.debug.unlockStderr();
+ return stderr.mode;
+}
+
/// The default implementation for the log function. Custom log functions may
/// forward log messages to this function.
///
@@ -92,25 +100,33 @@ pub fn defaultLog(
args: anytype,
) void {
var buffer: [64]u8 = undefined;
- const stderr, const ttyconf = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
- ttyconf.setColor(stderr, switch (level) {
+ const stderr = std.debug.lockStderr(&buffer).terminal();
+ defer std.debug.unlockStderr();
+ return defaultLogFileTerminal(level, scope, format, args, stderr) catch {};
+}
+
+pub fn defaultLogFileTerminal(
+ comptime level: Level,
+ comptime scope: @EnumLiteral(),
+ comptime format: []const u8,
+ args: anytype,
+ t: std.Io.Terminal,
+) std.Io.Writer.Error!void {
+ t.setColor(switch (level) {
.err => .red,
.warn => .yellow,
.info => .green,
.debug => .magenta,
}) catch {};
- ttyconf.setColor(stderr, .bold) catch {};
- stderr.writeAll(level.asText()) catch return;
- ttyconf.setColor(stderr, .reset) catch {};
- ttyconf.setColor(stderr, .dim) catch {};
- ttyconf.setColor(stderr, .bold) catch {};
- if (scope != .default) {
- stderr.print("({s})", .{@tagName(scope)}) catch return;
- }
- stderr.writeAll(": ") catch return;
- ttyconf.setColor(stderr, .reset) catch {};
- stderr.print(format ++ "\n", args) catch return;
+ t.setColor(.bold) catch {};
+ try t.writer.writeAll(level.asText());
+ t.setColor(.reset) catch {};
+ t.setColor(.dim) catch {};
+ t.setColor(.bold) catch {};
+ if (scope != .default) try t.writer.print("({t})", .{scope});
+ try t.writer.writeAll(": ");
+ t.setColor(.reset) catch {};
+ try t.writer.print(format ++ "\n", args);
}
/// Returns a scoped logging namespace that logs all messages using the scope
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 3e846cc9bd..667d743f3d 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -21,7 +21,6 @@ const mem = std.mem;
const elf = std.elf;
const fs = std.fs;
const dl = @import("dynamic_library.zig");
-const max_path_bytes = std.fs.max_path_bytes;
const posix = std.posix;
const native_os = builtin.os.tag;
@@ -31,7 +30,6 @@ pub const uefi = @import("os/uefi.zig");
pub const wasi = @import("os/wasi.zig");
pub const emscripten = @import("os/emscripten.zig");
pub const windows = @import("os/windows.zig");
-pub const freebsd = @import("os/freebsd.zig");
test {
_ = linux;
@@ -56,135 +54,6 @@ pub var argv: [][*:0]u8 = if (builtin.link_libc) undefined else switch (native_o
else => undefined,
};
-pub fn isGetFdPathSupportedOnTarget(os: std.Target.Os) bool {
- return switch (os.tag) {
- .windows,
- .driverkit,
- .ios,
- .maccatalyst,
- .macos,
- .tvos,
- .visionos,
- .watchos,
- .linux,
- .illumos,
- .freebsd,
- .serenity,
- => true,
-
- .dragonfly => os.version_range.semver.max.order(.{ .major = 6, .minor = 0, .patch = 0 }) != .lt,
- .netbsd => os.version_range.semver.max.order(.{ .major = 10, .minor = 0, .patch = 0 }) != .lt,
- else => false,
- };
-}
-
-/// Return canonical path of handle `fd`.
-///
-/// This function is very host-specific and is not universally supported by all hosts.
-/// For example, while it generally works on Linux, macOS, FreeBSD or Windows, it is
-/// unsupported on WASI.
-///
-/// * On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// * On other platforms, the result is an opaque sequence of bytes with no particular encoding.
-///
-/// Calling this function is usually a bug.
-pub fn getFdPath(fd: std.posix.fd_t, out_buffer: *[max_path_bytes]u8) std.posix.RealPathError![]u8 {
- if (!comptime isGetFdPathSupportedOnTarget(builtin.os)) {
- @compileError("querying for canonical path of a handle is unsupported on this host");
- }
- switch (native_os) {
- .windows => {
- var wide_buf: [windows.PATH_MAX_WIDE]u16 = undefined;
- const wide_slice = try windows.GetFinalPathNameByHandle(fd, .{}, wide_buf[0..]);
-
- const end_index = std.unicode.wtf16LeToWtf8(out_buffer, wide_slice);
- return out_buffer[0..end_index];
- },
- .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => {
- // On macOS, we can use F.GETPATH fcntl command to query the OS for
- // the path to the file descriptor.
- @memset(out_buffer[0..max_path_bytes], 0);
- switch (posix.errno(posix.system.fcntl(fd, posix.F.GETPATH, out_buffer))) {
- .SUCCESS => {},
- .BADF => return error.FileNotFound,
- .NOSPC => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- // TODO man pages for fcntl on macOS don't really tell you what
- // errno values to expect when command is F.GETPATH...
- else => |err| return posix.unexpectedErrno(err),
- }
- const len = mem.findScalar(u8, out_buffer[0..], 0) orelse max_path_bytes;
- return out_buffer[0..len];
- },
- .linux, .serenity => {
- var procfs_buf: ["/proc/self/fd/-2147483648\x00".len]u8 = undefined;
- const proc_path = std.fmt.bufPrintSentinel(procfs_buf[0..], "/proc/self/fd/{d}", .{fd}, 0) catch unreachable;
-
- const target = posix.readlinkZ(proc_path, out_buffer) catch |err| {
- switch (err) {
- error.NotLink => unreachable,
- error.BadPathName => unreachable,
- error.UnsupportedReparsePointType => unreachable, // Windows-only
- error.NetworkNotFound => unreachable, // Windows-only
- else => |e| return e,
- }
- };
- return target;
- },
- .illumos => {
- var procfs_buf: ["/proc/self/path/-2147483648\x00".len]u8 = undefined;
- const proc_path = std.fmt.bufPrintSentinel(procfs_buf[0..], "/proc/self/path/{d}", .{fd}, 0) catch unreachable;
-
- const target = posix.readlinkZ(proc_path, out_buffer) catch |err| switch (err) {
- error.UnsupportedReparsePointType => unreachable,
- error.NotLink => unreachable,
- else => |e| return e,
- };
- return target;
- },
- .freebsd => {
- var kfile: std.c.kinfo_file = undefined;
- kfile.structsize = std.c.KINFO_FILE_SIZE;
- switch (posix.errno(std.c.fcntl(fd, std.c.F.KINFO, @intFromPtr(&kfile)))) {
- .SUCCESS => {},
- .BADF => return error.FileNotFound,
- else => |err| return posix.unexpectedErrno(err),
- }
- const len = mem.findScalar(u8, &kfile.path, 0) orelse max_path_bytes;
- if (len == 0) return error.NameTooLong;
- const result = out_buffer[0..len];
- @memcpy(result, kfile.path[0..len]);
- return result;
- },
- .dragonfly => {
- @memset(out_buffer[0..max_path_bytes], 0);
- switch (posix.errno(std.c.fcntl(fd, posix.F.GETPATH, out_buffer))) {
- .SUCCESS => {},
- .BADF => return error.FileNotFound,
- .RANGE => return error.NameTooLong,
- else => |err| return posix.unexpectedErrno(err),
- }
- const len = mem.findScalar(u8, out_buffer[0..], 0) orelse max_path_bytes;
- return out_buffer[0..len];
- },
- .netbsd => {
- @memset(out_buffer[0..max_path_bytes], 0);
- switch (posix.errno(std.c.fcntl(fd, posix.F.GETPATH, out_buffer))) {
- .SUCCESS => {},
- .ACCES => return error.AccessDenied,
- .BADF => return error.FileNotFound,
- .NOENT => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- .RANGE => return error.NameTooLong,
- else => |err| return posix.unexpectedErrno(err),
- }
- const len = mem.findScalar(u8, out_buffer[0..], 0) orelse max_path_bytes;
- return out_buffer[0..len];
- },
- else => unreachable, // made unreachable by isGetFdPathSupportedOnTarget above
- }
-}
-
pub const FstatError = error{
SystemResources,
AccessDenied,
@@ -203,3 +72,8 @@ pub fn fstat_wasi(fd: posix.fd_t) FstatError!wasi.filestat_t {
else => |err| return posix.unexpectedErrno(err),
}
}
+
+pub fn defaultWasiCwd() std.os.wasi.fd_t {
+ // Expect the first preopen to be current working directory.
+ return 3;
+}
diff --git a/lib/std/os/freebsd.zig b/lib/std/os/freebsd.zig
deleted file mode 100644
index 2d082bf0cd..0000000000
--- a/lib/std/os/freebsd.zig
+++ /dev/null
@@ -1,50 +0,0 @@
-const std = @import("../std.zig");
-const fd_t = std.c.fd_t;
-const off_t = std.c.off_t;
-const unexpectedErrno = std.posix.unexpectedErrno;
-const errno = std.posix.errno;
-const builtin = @import("builtin");
-
-pub const CopyFileRangeError = std.posix.UnexpectedError || error{
- /// If infd is not open for reading or outfd is not open for writing, or
- /// opened for writing with O_APPEND, or if infd and outfd refer to the
- /// same file.
- BadFileFlags,
- /// If the copy exceeds the process's file size limit or the maximum
- /// file size for the file system outfd re- sides on.
- FileTooBig,
- /// A signal interrupted the system call before it could be completed.
- /// This may happen for files on some NFS mounts. When this happens,
- /// the values pointed to by inoffp and outoffp are reset to the
- /// initial values for the system call.
- Interrupted,
- /// One of:
- /// * infd and outfd refer to the same file and the byte ranges overlap.
- /// * The flags argument is not zero.
- /// * Either infd or outfd refers to a file object that is not a regular file.
- InvalidArguments,
- /// An I/O error occurred while reading/writing the files.
- InputOutput,
- /// Corrupted data was detected while reading from a file system.
- CorruptedData,
- /// Either infd or outfd refers to a directory.
- IsDir,
- /// File system that stores outfd is full.
- NoSpaceLeft,
-};
-
-pub fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: u32) CopyFileRangeError!usize {
- const rc = std.c.copy_file_range(fd_in, off_in, fd_out, off_out, len, flags);
- switch (errno(rc)) {
- .SUCCESS => return @intCast(rc),
- .BADF => return error.BadFileFlags,
- .FBIG => return error.FileTooBig,
- .INTR => return error.Interrupted,
- .INVAL => return error.InvalidArguments,
- .IO => return error.InputOutput,
- .INTEGRITY => return error.CorruptedData,
- .ISDIR => return error.IsDir,
- .NOSPC => return error.NoSpaceLeft,
- else => |err| return unexpectedErrno(err),
- }
-}
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index d0cd278224..96b4c8ee6f 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -1420,7 +1420,7 @@ pub fn chmod(path: [*:0]const u8, mode: mode_t) usize {
if (@hasField(SYS, "chmod")) {
return syscall2(.chmod, @intFromPtr(path), mode);
} else {
- return fchmodat(AT.FDCWD, path, mode, 0);
+ return fchmodat(AT.FDCWD, path, mode);
}
}
@@ -1432,7 +1432,7 @@ pub fn fchown(fd: i32, owner: uid_t, group: gid_t) usize {
}
}
-pub fn fchmodat(fd: i32, path: [*:0]const u8, mode: mode_t, _: u32) usize {
+pub fn fchmodat(fd: i32, path: [*:0]const u8, mode: mode_t) usize {
return syscall3(.fchmodat, @bitCast(@as(isize, fd)), @intFromPtr(path), mode);
}
@@ -1561,14 +1561,14 @@ pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8) usize {
}
}
-pub fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: i32) usize {
+pub fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: u32) usize {
return syscall5(
.linkat,
@as(usize, @bitCast(@as(isize, oldfd))),
@intFromPtr(oldpath),
@as(usize, @bitCast(@as(isize, newfd))),
@intFromPtr(newpath),
- @as(usize, @bitCast(@as(isize, flags))),
+ flags,
);
}
@@ -6040,7 +6040,7 @@ pub const dirent64 = extern struct {
off: u64,
reclen: u16,
type: u8,
- name: u8, // field address is the address of first byte of name https://github.com/ziglang/zig/issues/173
+ name: [0]u8,
};
pub const dl_phdr_info = extern struct {
@@ -6891,10 +6891,6 @@ pub const utsname = extern struct {
};
pub const HOST_NAME_MAX = 64;
-/// Flags used to request specific members in `Statx` be filled out.
-/// The `Statx.mask` member will be updated with what information the kernel
-/// returned. Callers must check this field since support varies by kernel
-/// version and filesystem.
pub const STATX = packed struct(u32) {
/// Want `mode & S.IFMT`.
TYPE: bool = false,
@@ -6982,7 +6978,9 @@ pub const statx_timestamp = extern struct {
/// Renamed to `Statx` to not conflict with the `statx` function.
pub const Statx = extern struct {
- /// Mask of bits indicating filled fields.
+ /// Mask of bits indicating filled fields. Updated with what information
+ /// the kernel returned. Callers must check this field since support varies
+ /// by kernel version and filesystem.
mask: STATX,
/// Block size for filesystem I/O.
blksize: u32,
@@ -9872,165 +9870,3 @@ pub const cmsghdr = extern struct {
level: i32,
type: i32,
};
-
-/// The syscalls, but with Zig error sets, going through libc if linking libc,
-/// and with some footguns eliminated.
-pub const wrapped = struct {
- pub const lfs64_abi = builtin.link_libc and (builtin.abi.isGnu() or builtin.abi.isAndroid());
- const system = if (builtin.link_libc) std.c else std.os.linux;
-
- pub const SendfileError = std.posix.UnexpectedError || error{
- /// `out_fd` is an unconnected socket, or out_fd closed its read end.
- BrokenPipe,
- /// Descriptor is not valid or locked, or an mmap(2)-like operation is not available for in_fd.
- UnsupportedOperation,
- /// Nonblocking I/O has been selected but the write would block.
- WouldBlock,
- /// Unspecified error while reading from in_fd.
- InputOutput,
- /// Insufficient kernel memory to read from in_fd.
- SystemResources,
- /// `offset` is not `null` but the input file is not seekable.
- Unseekable,
- };
-
- pub fn sendfile(
- out_fd: fd_t,
- in_fd: fd_t,
- in_offset: ?*off_t,
- in_len: usize,
- ) SendfileError!usize {
- const adjusted_len = @min(in_len, 0x7ffff000); // Prevents EOVERFLOW.
- const sendfileSymbol = if (lfs64_abi) system.sendfile64 else system.sendfile;
- const rc = sendfileSymbol(out_fd, in_fd, in_offset, adjusted_len);
- switch (system.errno(rc)) {
- .SUCCESS => return @intCast(rc),
- .BADF => return invalidApiUsage(), // Always a race condition.
- .FAULT => return invalidApiUsage(), // Segmentation fault.
- .OVERFLOW => return unexpectedErrno(.OVERFLOW), // We avoid passing too large of a `count`.
- .NOTCONN => return error.BrokenPipe, // `out_fd` is an unconnected socket
- .INVAL => return error.UnsupportedOperation,
- .AGAIN => return error.WouldBlock,
- .IO => return error.InputOutput,
- .PIPE => return error.BrokenPipe,
- .NOMEM => return error.SystemResources,
- .NXIO => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- pub const CopyFileRangeError = std.posix.UnexpectedError || error{
- /// One of:
- /// * One or more file descriptors are not valid.
- /// * fd_in is not open for reading; or fd_out is not open for writing.
- /// * The O_APPEND flag is set for the open file description referred
- /// to by the file descriptor fd_out.
- BadFileFlags,
- /// One of:
- /// * An attempt was made to write at a position past the maximum file
- /// offset the kernel supports.
- /// * An attempt was made to write a range that exceeds the allowed
- /// maximum file size. The maximum file size differs between
- /// filesystem implementations and can be different from the maximum
- /// allowed file offset.
- /// * An attempt was made to write beyond the process's file size
- /// resource limit. This may also result in the process receiving a
- /// SIGXFSZ signal.
- FileTooBig,
- /// One of:
- /// * either fd_in or fd_out is not a regular file
- /// * flags argument is not zero
- /// * fd_in and fd_out refer to the same file and the source and target ranges overlap.
- InvalidArguments,
- /// A low-level I/O error occurred while copying.
- InputOutput,
- /// Either fd_in or fd_out refers to a directory.
- IsDir,
- OutOfMemory,
- /// There is not enough space on the target filesystem to complete the copy.
- NoSpaceLeft,
- /// (since Linux 5.19) the filesystem does not support this operation.
- OperationNotSupported,
- /// The requested source or destination range is too large to represent
- /// in the specified data types.
- Overflow,
- /// fd_out refers to an immutable file.
- PermissionDenied,
- /// Either fd_in or fd_out refers to an active swap file.
- SwapFile,
- /// The files referred to by fd_in and fd_out are not on the same
- /// filesystem, and the source and target filesystems are not of the
- /// same type, or do not support cross-filesystem copy.
- NotSameFileSystem,
- };
-
- pub fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: u32) CopyFileRangeError!usize {
- const use_c = std.c.versionCheck(if (builtin.abi.isAndroid()) .{ .major = 34, .minor = 0, .patch = 0 } else .{ .major = 2, .minor = 27, .patch = 0 });
- const sys = if (use_c) std.c else std.os.linux;
- const rc = sys.copy_file_range(fd_in, off_in, fd_out, off_out, len, flags);
- switch (sys.errno(rc)) {
- .SUCCESS => return @intCast(rc),
- .BADF => return error.BadFileFlags,
- .FBIG => return error.FileTooBig,
- .INVAL => return error.InvalidArguments,
- .IO => return error.InputOutput,
- .ISDIR => return error.IsDir,
- .NOMEM => return error.OutOfMemory,
- .NOSPC => return error.NoSpaceLeft,
- .OPNOTSUPP => return error.OperationNotSupported,
- .OVERFLOW => return error.Overflow,
- .PERM => return error.PermissionDenied,
- .TXTBSY => return error.SwapFile,
- .XDEV => return error.NotSameFileSystem,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- pub const StatxError = std.posix.UnexpectedError || error{
- /// Search permission is denied for one of the directories in `path`.
- AccessDenied,
- /// Too many symbolic links were encountered traversing `path`.
- SymLinkLoop,
- /// `path` is too long.
- NameTooLong,
- /// One of:
- /// - A component of `path` does not exist.
- /// - A component of `path` is not a directory.
- /// - `path` is a relative and `dirfd` is not a directory file descriptor.
- FileNotFound,
- /// Insufficient memory is available.
- SystemResources,
- };
-
- pub fn statx(dirfd: fd_t, path: [*:0]const u8, flags: u32, mask: STATX) StatxError!Statx {
- const use_c = std.c.versionCheck(if (builtin.abi.isAndroid())
- .{ .major = 30, .minor = 0, .patch = 0 }
- else
- .{ .major = 2, .minor = 28, .patch = 0 });
- const sys = if (use_c) std.c else std.os.linux;
-
- var stx = std.mem.zeroes(Statx);
- const rc = sys.statx(dirfd, path, flags, mask, &stx);
- return switch (sys.errno(rc)) {
- .SUCCESS => stx,
- .ACCES => error.AccessDenied,
- .BADF => invalidApiUsage(),
- .FAULT => invalidApiUsage(),
- .INVAL => invalidApiUsage(),
- .LOOP => error.SymLinkLoop,
- .NAMETOOLONG => error.NameTooLong,
- .NOENT => error.FileNotFound,
- .NOTDIR => error.FileNotFound,
- .NOMEM => error.SystemResources,
- else => |err| unexpectedErrno(err),
- };
- }
-
- const unexpectedErrno = std.posix.unexpectedErrno;
-
- fn invalidApiUsage() error{Unexpected} {
- if (builtin.mode == .Debug) @panic("invalid API usage");
- return error.Unexpected;
- }
-};
diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig
index c927dab376..b3d6994275 100644
--- a/lib/std/os/linux/IoUring.zig
+++ b/lib/std/os/linux/IoUring.zig
@@ -1,14 +1,17 @@
const IoUring = @This();
-const std = @import("std");
+
const builtin = @import("builtin");
+const is_linux = builtin.os.tag == .linux;
+
+const std = @import("../../std.zig");
+const Io = std.Io;
+const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
-const mem = std.mem;
-const net = std.Io.net;
const posix = std.posix;
const linux = std.os.linux;
const testing = std.testing;
-const is_linux = builtin.os.tag == .linux;
const page_size_min = std.heap.page_size_min;
+const createSocketTestHarness = @import("IoUring/test.zig").createSocketTestHarness;
fd: linux.fd_t = -1,
sq: SubmissionQueue,
@@ -22,7 +25,7 @@ features: u32,
/// see https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L8027-L8050.
/// Matches the interface of io_uring_queue_init() in liburing.
pub fn init(entries: u16, flags: u32) !IoUring {
- var params = mem.zeroInit(linux.io_uring_params, .{
+ var params = std.mem.zeroInit(linux.io_uring_params, .{
.flags = flags,
.sq_thread_idle = 1000,
});
@@ -1309,7 +1312,7 @@ pub fn unregister_buffers(self: *IoUring) !void {
/// io_uring subsystem of the running kernel. The io_uring_probe contains the
/// list of supported operations.
pub fn get_probe(self: *IoUring) !linux.io_uring_probe {
- var probe = mem.zeroInit(linux.io_uring_probe, .{});
+ var probe = std.mem.zeroInit(linux.io_uring_probe, .{});
const res = linux.io_uring_register(self.fd, .REGISTER_PROBE, &probe, probe.ops.len);
try handle_register_buf_ring_result(res);
return probe;
@@ -1636,7 +1639,7 @@ pub const BufferGroup = struct {
pub fn init(
ring: *IoUring,
- allocator: mem.Allocator,
+ allocator: Allocator,
group_id: u16,
buffer_size: u32,
buffers_count: u16,
@@ -1670,7 +1673,7 @@ pub const BufferGroup = struct {
};
}
- pub fn deinit(self: *BufferGroup, allocator: mem.Allocator) void {
+ pub fn deinit(self: *BufferGroup, allocator: Allocator) void {
free_buf_ring(self.ring.fd, self.br, self.buffers_count, self.group_id);
allocator.free(self.buffers);
allocator.free(self.heads);
@@ -1695,7 +1698,7 @@ pub const BufferGroup = struct {
}
// Get buffer by id.
- fn get_by_id(self: *BufferGroup, buffer_id: u16) []u8 {
+ pub fn get_by_id(self: *BufferGroup, buffer_id: u16) []u8 {
const pos = self.buffer_size * buffer_id;
return self.buffers[pos .. pos + self.buffer_size][self.heads[buffer_id]..];
}
@@ -1764,7 +1767,7 @@ fn register_buf_ring(
group_id: u16,
flags: linux.io_uring_buf_reg.Flags,
) !void {
- var reg = mem.zeroInit(linux.io_uring_buf_reg, .{
+ var reg = std.mem.zeroInit(linux.io_uring_buf_reg, .{
.ring_addr = addr,
.ring_entries = entries,
.bgid = group_id,
@@ -1781,7 +1784,7 @@ fn register_buf_ring(
}
fn unregister_buf_ring(fd: linux.fd_t, group_id: u16) !void {
- var reg = mem.zeroInit(linux.io_uring_buf_reg, .{
+ var reg = std.mem.zeroInit(linux.io_uring_buf_reg, .{
.bgid = group_id,
});
const res = linux.io_uring_register(
@@ -1848,2262 +1851,11 @@ pub fn buf_ring_advance(br: *linux.io_uring_buf_ring, count: u16) void {
@atomicStore(u16, &br.tail, tail, .release);
}
-test "structs/offsets/entries" {
- if (!is_linux) return error.SkipZigTest;
-
- try testing.expectEqual(@as(usize, 120), @sizeOf(linux.io_uring_params));
- try testing.expectEqual(@as(usize, 64), @sizeOf(linux.io_uring_sqe));
- try testing.expectEqual(@as(usize, 16), @sizeOf(linux.io_uring_cqe));
-
- try testing.expectEqual(0, linux.IORING_OFF_SQ_RING);
- try testing.expectEqual(0x8000000, linux.IORING_OFF_CQ_RING);
- try testing.expectEqual(0x10000000, linux.IORING_OFF_SQES);
-
- try testing.expectError(error.EntriesZero, IoUring.init(0, 0));
- try testing.expectError(error.EntriesNotPowerOfTwo, IoUring.init(3, 0));
-}
-
-test "nop" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer {
- ring.deinit();
- testing.expectEqual(@as(linux.fd_t, -1), ring.fd) catch @panic("test failed");
- }
-
- const sqe = try ring.nop(0xaaaaaaaa);
- try testing.expectEqual(linux.io_uring_sqe{
- .opcode = .NOP,
- .flags = 0,
- .ioprio = 0,
- .fd = 0,
- .off = 0,
- .addr = 0,
- .len = 0,
- .rw_flags = 0,
- .user_data = 0xaaaaaaaa,
- .buf_index = 0,
- .personality = 0,
- .splice_fd_in = 0,
- .addr3 = 0,
- .resv = 0,
- }, sqe.*);
-
- try testing.expectEqual(@as(u32, 0), ring.sq.sqe_head);
- try testing.expectEqual(@as(u32, 1), ring.sq.sqe_tail);
- try testing.expectEqual(@as(u32, 0), ring.sq.tail.*);
- try testing.expectEqual(@as(u32, 0), ring.cq.head.*);
- try testing.expectEqual(@as(u32, 1), ring.sq_ready());
- try testing.expectEqual(@as(u32, 0), ring.cq_ready());
-
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- try testing.expectEqual(@as(u32, 1), ring.sq.sqe_head);
- try testing.expectEqual(@as(u32, 1), ring.sq.sqe_tail);
- try testing.expectEqual(@as(u32, 1), ring.sq.tail.*);
- try testing.expectEqual(@as(u32, 0), ring.cq.head.*);
- try testing.expectEqual(@as(u32, 0), ring.sq_ready());
-
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xaaaaaaaa,
- .res = 0,
- .flags = 0,
- }, try ring.copy_cqe());
- try testing.expectEqual(@as(u32, 1), ring.cq.head.*);
- try testing.expectEqual(@as(u32, 0), ring.cq_ready());
-
- const sqe_barrier = try ring.nop(0xbbbbbbbb);
- sqe_barrier.flags |= linux.IOSQE_IO_DRAIN;
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xbbbbbbbb,
- .res = 0,
- .flags = 0,
- }, try ring.copy_cqe());
- try testing.expectEqual(@as(u32, 2), ring.sq.sqe_head);
- try testing.expectEqual(@as(u32, 2), ring.sq.sqe_tail);
- try testing.expectEqual(@as(u32, 2), ring.sq.tail.*);
- try testing.expectEqual(@as(u32, 2), ring.cq.head.*);
-}
-
-test "readv" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const fd = try posix.openZ("/dev/zero", .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
- defer posix.close(fd);
-
- // Linux Kernel 5.4 supports IORING_REGISTER_FILES but not sparse fd sets (i.e. an fd of -1).
- // Linux Kernel 5.5 adds support for sparse fd sets.
- // Compare:
- // https://github.com/torvalds/linux/blob/v5.4/fs/io_uring.c#L3119-L3124 vs
- // https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L6687-L6691
- // We therefore avoid stressing sparse fd sets here:
- var registered_fds = [_]linux.fd_t{0} ** 1;
- const fd_index = 0;
- registered_fds[fd_index] = fd;
- try ring.register_files(registered_fds[0..]);
-
- var buffer = [_]u8{42} ** 128;
- var iovecs = [_]posix.iovec{posix.iovec{ .base = &buffer, .len = buffer.len }};
- const sqe = try ring.read(0xcccccccc, fd_index, .{ .iovecs = iovecs[0..] }, 0);
- try testing.expectEqual(linux.IORING_OP.READV, sqe.opcode);
- sqe.flags |= linux.IOSQE_FIXED_FILE;
-
- try testing.expectError(error.SubmissionQueueFull, ring.nop(0));
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xcccccccc,
- .res = buffer.len,
- .flags = 0,
- }, try ring.copy_cqe());
- try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer.len), buffer[0..]);
-
- try ring.unregister_files();
-}
-
-test "writev/fsync/readv" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(4, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- const path = "test_io_uring_writev_fsync_readv";
- const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true });
- defer file.close();
- const fd = file.handle;
-
- const buffer_write = [_]u8{42} ** 128;
- const iovecs_write = [_]posix.iovec_const{
- posix.iovec_const{ .base = &buffer_write, .len = buffer_write.len },
- };
- var buffer_read = [_]u8{0} ** 128;
- var iovecs_read = [_]posix.iovec{
- posix.iovec{ .base = &buffer_read, .len = buffer_read.len },
- };
-
- const sqe_writev = try ring.writev(0xdddddddd, fd, iovecs_write[0..], 17);
- try testing.expectEqual(linux.IORING_OP.WRITEV, sqe_writev.opcode);
- try testing.expectEqual(@as(u64, 17), sqe_writev.off);
- sqe_writev.flags |= linux.IOSQE_IO_LINK;
-
- const sqe_fsync = try ring.fsync(0xeeeeeeee, fd, 0);
- try testing.expectEqual(linux.IORING_OP.FSYNC, sqe_fsync.opcode);
- try testing.expectEqual(fd, sqe_fsync.fd);
- sqe_fsync.flags |= linux.IOSQE_IO_LINK;
-
- const sqe_readv = try ring.read(0xffffffff, fd, .{ .iovecs = iovecs_read[0..] }, 17);
- try testing.expectEqual(linux.IORING_OP.READV, sqe_readv.opcode);
- try testing.expectEqual(@as(u64, 17), sqe_readv.off);
-
- try testing.expectEqual(@as(u32, 3), ring.sq_ready());
- try testing.expectEqual(@as(u32, 3), try ring.submit_and_wait(3));
- try testing.expectEqual(@as(u32, 0), ring.sq_ready());
- try testing.expectEqual(@as(u32, 3), ring.cq_ready());
-
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xdddddddd,
- .res = buffer_write.len,
- .flags = 0,
- }, try ring.copy_cqe());
- try testing.expectEqual(@as(u32, 2), ring.cq_ready());
-
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xeeeeeeee,
- .res = 0,
- .flags = 0,
- }, try ring.copy_cqe());
- try testing.expectEqual(@as(u32, 1), ring.cq_ready());
-
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xffffffff,
- .res = buffer_read.len,
- .flags = 0,
- }, try ring.copy_cqe());
- try testing.expectEqual(@as(u32, 0), ring.cq_ready());
-
- try testing.expectEqualSlices(u8, buffer_write[0..], buffer_read[0..]);
-}
-
-test "write/read" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(2, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
- const path = "test_io_uring_write_read";
- const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true });
- defer file.close();
- const fd = file.handle;
-
- const buffer_write = [_]u8{97} ** 20;
- var buffer_read = [_]u8{98} ** 20;
- const sqe_write = try ring.write(0x11111111, fd, buffer_write[0..], 10);
- try testing.expectEqual(linux.IORING_OP.WRITE, sqe_write.opcode);
- try testing.expectEqual(@as(u64, 10), sqe_write.off);
- sqe_write.flags |= linux.IOSQE_IO_LINK;
- const sqe_read = try ring.read(0x22222222, fd, .{ .buffer = buffer_read[0..] }, 10);
- try testing.expectEqual(linux.IORING_OP.READ, sqe_read.opcode);
- try testing.expectEqual(@as(u64, 10), sqe_read.off);
- try testing.expectEqual(@as(u32, 2), try ring.submit());
-
- const cqe_write = try ring.copy_cqe();
- const cqe_read = try ring.copy_cqe();
- // Prior to Linux Kernel 5.6 this is the only way to test for read/write support:
- // https://lwn.net/Articles/809820/
- if (cqe_write.err() == .INVAL) return error.SkipZigTest;
- if (cqe_read.err() == .INVAL) return error.SkipZigTest;
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x11111111,
- .res = buffer_write.len,
- .flags = 0,
- }, cqe_write);
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x22222222,
- .res = buffer_read.len,
- .flags = 0,
- }, cqe_read);
- try testing.expectEqualSlices(u8, buffer_write[0..], buffer_read[0..]);
-}
-
-test "splice/read" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(4, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var tmp = std.testing.tmpDir(.{});
- const path_src = "test_io_uring_splice_src";
- const file_src = try tmp.dir.createFile(path_src, .{ .read = true, .truncate = true });
- defer file_src.close();
- const fd_src = file_src.handle;
-
- const path_dst = "test_io_uring_splice_dst";
- const file_dst = try tmp.dir.createFile(path_dst, .{ .read = true, .truncate = true });
- defer file_dst.close();
- const fd_dst = file_dst.handle;
-
- const buffer_write = [_]u8{97} ** 20;
- var buffer_read = [_]u8{98} ** 20;
- _ = try file_src.write(&buffer_write);
-
- const fds = try posix.pipe();
- const pipe_offset: u64 = std.math.maxInt(u64);
-
- const sqe_splice_to_pipe = try ring.splice(0x11111111, fd_src, 0, fds[1], pipe_offset, buffer_write.len);
- try testing.expectEqual(linux.IORING_OP.SPLICE, sqe_splice_to_pipe.opcode);
- try testing.expectEqual(@as(u64, 0), sqe_splice_to_pipe.addr);
- try testing.expectEqual(pipe_offset, sqe_splice_to_pipe.off);
- sqe_splice_to_pipe.flags |= linux.IOSQE_IO_LINK;
-
- const sqe_splice_from_pipe = try ring.splice(0x22222222, fds[0], pipe_offset, fd_dst, 10, buffer_write.len);
- try testing.expectEqual(linux.IORING_OP.SPLICE, sqe_splice_from_pipe.opcode);
- try testing.expectEqual(pipe_offset, sqe_splice_from_pipe.addr);
- try testing.expectEqual(@as(u64, 10), sqe_splice_from_pipe.off);
- sqe_splice_from_pipe.flags |= linux.IOSQE_IO_LINK;
-
- const sqe_read = try ring.read(0x33333333, fd_dst, .{ .buffer = buffer_read[0..] }, 10);
- try testing.expectEqual(linux.IORING_OP.READ, sqe_read.opcode);
- try testing.expectEqual(@as(u64, 10), sqe_read.off);
- try testing.expectEqual(@as(u32, 3), try ring.submit());
-
- const cqe_splice_to_pipe = try ring.copy_cqe();
- const cqe_splice_from_pipe = try ring.copy_cqe();
- const cqe_read = try ring.copy_cqe();
- // Prior to Linux Kernel 5.6 this is the only way to test for splice/read support:
- // https://lwn.net/Articles/809820/
- if (cqe_splice_to_pipe.err() == .INVAL) return error.SkipZigTest;
- if (cqe_splice_from_pipe.err() == .INVAL) return error.SkipZigTest;
- if (cqe_read.err() == .INVAL) return error.SkipZigTest;
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x11111111,
- .res = buffer_write.len,
- .flags = 0,
- }, cqe_splice_to_pipe);
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x22222222,
- .res = buffer_write.len,
- .flags = 0,
- }, cqe_splice_from_pipe);
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x33333333,
- .res = buffer_read.len,
- .flags = 0,
- }, cqe_read);
- try testing.expectEqualSlices(u8, buffer_write[0..], buffer_read[0..]);
-}
-
-test "write_fixed/read_fixed" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(2, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- const path = "test_io_uring_write_read_fixed";
- const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true });
- defer file.close();
- const fd = file.handle;
-
- var raw_buffers: [2][11]u8 = undefined;
- // First buffer will be written to the file.
- @memset(&raw_buffers[0], 'z');
- raw_buffers[0][0.."foobar".len].* = "foobar".*;
-
- var buffers = [2]posix.iovec{
- .{ .base = &raw_buffers[0], .len = raw_buffers[0].len },
- .{ .base = &raw_buffers[1], .len = raw_buffers[1].len },
- };
- ring.register_buffers(&buffers) catch |err| switch (err) {
- error.SystemResources => {
- // See https://github.com/ziglang/zig/issues/15362
- return error.SkipZigTest;
- },
- else => |e| return e,
- };
-
- const sqe_write = try ring.write_fixed(0x45454545, fd, &buffers[0], 3, 0);
- try testing.expectEqual(linux.IORING_OP.WRITE_FIXED, sqe_write.opcode);
- try testing.expectEqual(@as(u64, 3), sqe_write.off);
- sqe_write.flags |= linux.IOSQE_IO_LINK;
-
- const sqe_read = try ring.read_fixed(0x12121212, fd, &buffers[1], 0, 1);
- try testing.expectEqual(linux.IORING_OP.READ_FIXED, sqe_read.opcode);
- try testing.expectEqual(@as(u64, 0), sqe_read.off);
-
- try testing.expectEqual(@as(u32, 2), try ring.submit());
-
- const cqe_write = try ring.copy_cqe();
- const cqe_read = try ring.copy_cqe();
-
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x45454545,
- .res = @as(i32, @intCast(buffers[0].len)),
- .flags = 0,
- }, cqe_write);
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x12121212,
- .res = @as(i32, @intCast(buffers[1].len)),
- .flags = 0,
- }, cqe_read);
-
- try testing.expectEqualSlices(u8, "\x00\x00\x00", buffers[1].base[0..3]);
- try testing.expectEqualSlices(u8, "foobar", buffers[1].base[3..9]);
- try testing.expectEqualSlices(u8, "zz", buffers[1].base[9..11]);
-}
-
-test "openat" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- const path = "test_io_uring_openat";
-
- // Workaround for LLVM bug: https://github.com/ziglang/zig/issues/12014
- const path_addr = if (builtin.zig_backend == .stage2_llvm) p: {
- var workaround = path;
- _ = &workaround;
- break :p @intFromPtr(workaround);
- } else @intFromPtr(path);
-
- const flags: linux.O = .{ .CLOEXEC = true, .ACCMODE = .RDWR, .CREAT = true };
- const mode: posix.mode_t = 0o666;
- const sqe_openat = try ring.openat(0x33333333, tmp.dir.fd, path, flags, mode);
- try testing.expectEqual(linux.io_uring_sqe{
- .opcode = .OPENAT,
- .flags = 0,
- .ioprio = 0,
- .fd = tmp.dir.fd,
- .off = 0,
- .addr = path_addr,
- .len = mode,
- .rw_flags = @bitCast(flags),
- .user_data = 0x33333333,
- .buf_index = 0,
- .personality = 0,
- .splice_fd_in = 0,
- .addr3 = 0,
- .resv = 0,
- }, sqe_openat.*);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe_openat = try ring.copy_cqe();
- try testing.expectEqual(@as(u64, 0x33333333), cqe_openat.user_data);
- if (cqe_openat.err() == .INVAL) return error.SkipZigTest;
- if (cqe_openat.err() == .BADF) return error.SkipZigTest;
- if (cqe_openat.res <= 0) std.debug.print("\ncqe_openat.res={}\n", .{cqe_openat.res});
- try testing.expect(cqe_openat.res > 0);
- try testing.expectEqual(@as(u32, 0), cqe_openat.flags);
-
- posix.close(cqe_openat.res);
-}
-
-test "close" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- const path = "test_io_uring_close";
- const file = try tmp.dir.createFile(path, .{});
- errdefer file.close();
-
- const sqe_close = try ring.close(0x44444444, file.handle);
- try testing.expectEqual(linux.IORING_OP.CLOSE, sqe_close.opcode);
- try testing.expectEqual(file.handle, sqe_close.fd);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe_close = try ring.copy_cqe();
- if (cqe_close.err() == .INVAL) return error.SkipZigTest;
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x44444444,
- .res = 0,
- .flags = 0,
- }, cqe_close);
-}
-
-test "accept/connect/send/recv" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(16, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const socket_test_harness = try createSocketTestHarness(&ring);
- defer socket_test_harness.close();
-
- const buffer_send = [_]u8{ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 };
- var buffer_recv = [_]u8{ 0, 1, 0, 1, 0 };
-
- const sqe_send = try ring.send(0xeeeeeeee, socket_test_harness.client, buffer_send[0..], 0);
- sqe_send.flags |= linux.IOSQE_IO_LINK;
- _ = try ring.recv(0xffffffff, socket_test_harness.server, .{ .buffer = buffer_recv[0..] }, 0);
- try testing.expectEqual(@as(u32, 2), try ring.submit());
-
- const cqe_send = try ring.copy_cqe();
- if (cqe_send.err() == .INVAL) return error.SkipZigTest;
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xeeeeeeee,
- .res = buffer_send.len,
- .flags = 0,
- }, cqe_send);
-
- const cqe_recv = try ring.copy_cqe();
- if (cqe_recv.err() == .INVAL) return error.SkipZigTest;
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xffffffff,
- .res = buffer_recv.len,
- // ignore IORING_CQE_F_SOCK_NONEMPTY since it is only set on some systems
- .flags = cqe_recv.flags & linux.IORING_CQE_F_SOCK_NONEMPTY,
- }, cqe_recv);
-
- try testing.expectEqualSlices(u8, buffer_send[0..buffer_recv.len], buffer_recv[0..]);
-}
-
-test "sendmsg/recvmsg" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(2, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var address_server: linux.sockaddr.in = .{
- .port = 0,
- .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
- };
-
- const server = try posix.socket(address_server.family, posix.SOCK.DGRAM, 0);
- defer posix.close(server);
- try posix.setsockopt(server, posix.SOL.SOCKET, posix.SO.REUSEPORT, &mem.toBytes(@as(c_int, 1)));
- try posix.setsockopt(server, posix.SOL.SOCKET, posix.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
- try posix.bind(server, addrAny(&address_server), @sizeOf(linux.sockaddr.in));
-
- // set address_server to the OS-chosen IP/port.
- var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
- try posix.getsockname(server, addrAny(&address_server), &slen);
-
- const client = try posix.socket(address_server.family, posix.SOCK.DGRAM, 0);
- defer posix.close(client);
-
- const buffer_send = [_]u8{42} ** 128;
- const iovecs_send = [_]posix.iovec_const{
- posix.iovec_const{ .base = &buffer_send, .len = buffer_send.len },
- };
- const msg_send: linux.msghdr_const = .{
- .name = addrAny(&address_server),
- .namelen = @sizeOf(linux.sockaddr.in),
- .iov = &iovecs_send,
- .iovlen = 1,
- .control = null,
- .controllen = 0,
- .flags = 0,
- };
- const sqe_sendmsg = try ring.sendmsg(0x11111111, client, &msg_send, 0);
- sqe_sendmsg.flags |= linux.IOSQE_IO_LINK;
- try testing.expectEqual(linux.IORING_OP.SENDMSG, sqe_sendmsg.opcode);
- try testing.expectEqual(client, sqe_sendmsg.fd);
-
- var buffer_recv = [_]u8{0} ** 128;
- var iovecs_recv = [_]posix.iovec{
- posix.iovec{ .base = &buffer_recv, .len = buffer_recv.len },
- };
- var address_recv: linux.sockaddr.in = .{
- .port = 0,
- .addr = 0,
- };
- var msg_recv: linux.msghdr = .{
- .name = addrAny(&address_recv),
- .namelen = @sizeOf(linux.sockaddr.in),
- .iov = &iovecs_recv,
- .iovlen = 1,
- .control = null,
- .controllen = 0,
- .flags = 0,
- };
- const sqe_recvmsg = try ring.recvmsg(0x22222222, server, &msg_recv, 0);
- try testing.expectEqual(linux.IORING_OP.RECVMSG, sqe_recvmsg.opcode);
- try testing.expectEqual(server, sqe_recvmsg.fd);
-
- try testing.expectEqual(@as(u32, 2), ring.sq_ready());
- try testing.expectEqual(@as(u32, 2), try ring.submit_and_wait(2));
- try testing.expectEqual(@as(u32, 0), ring.sq_ready());
- try testing.expectEqual(@as(u32, 2), ring.cq_ready());
-
- const cqe_sendmsg = try ring.copy_cqe();
- if (cqe_sendmsg.res == -@as(i32, @intFromEnum(linux.E.INVAL))) return error.SkipZigTest;
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x11111111,
- .res = buffer_send.len,
- .flags = 0,
- }, cqe_sendmsg);
-
- const cqe_recvmsg = try ring.copy_cqe();
- if (cqe_recvmsg.res == -@as(i32, @intFromEnum(linux.E.INVAL))) return error.SkipZigTest;
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x22222222,
- .res = buffer_recv.len,
- // ignore IORING_CQE_F_SOCK_NONEMPTY since it is set non-deterministically
- .flags = cqe_recvmsg.flags & linux.IORING_CQE_F_SOCK_NONEMPTY,
- }, cqe_recvmsg);
-
- try testing.expectEqualSlices(u8, buffer_send[0..buffer_recv.len], buffer_recv[0..]);
-}
-
-test "timeout (after a relative time)" {
+test BufferGroup {
if (!is_linux) return error.SkipZigTest;
const io = testing.io;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const ms = 10;
- const margin = 5;
- const ts: linux.kernel_timespec = .{ .sec = 0, .nsec = ms * 1000000 };
-
- const started = try std.Io.Clock.awake.now(io);
- const sqe = try ring.timeout(0x55555555, &ts, 0, 0);
- try testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe.opcode);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- const cqe = try ring.copy_cqe();
- const stopped = try std.Io.Clock.awake.now(io);
-
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x55555555,
- .res = -@as(i32, @intFromEnum(linux.E.TIME)),
- .flags = 0,
- }, cqe);
-
- // Tests should not depend on timings: skip test if outside margin.
- const ms_elapsed = started.durationTo(stopped).toMilliseconds();
- if (ms_elapsed > margin) return error.SkipZigTest;
-}
-
-test "timeout (after a number of completions)" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(2, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const ts: linux.kernel_timespec = .{ .sec = 3, .nsec = 0 };
- const count_completions: u64 = 1;
- const sqe_timeout = try ring.timeout(0x66666666, &ts, count_completions, 0);
- try testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe_timeout.opcode);
- try testing.expectEqual(count_completions, sqe_timeout.off);
- _ = try ring.nop(0x77777777);
- try testing.expectEqual(@as(u32, 2), try ring.submit());
-
- const cqe_nop = try ring.copy_cqe();
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x77777777,
- .res = 0,
- .flags = 0,
- }, cqe_nop);
-
- const cqe_timeout = try ring.copy_cqe();
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x66666666,
- .res = 0,
- .flags = 0,
- }, cqe_timeout);
-}
-
-test "timeout_remove" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(2, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const ts: linux.kernel_timespec = .{ .sec = 3, .nsec = 0 };
- const sqe_timeout = try ring.timeout(0x88888888, &ts, 0, 0);
- try testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe_timeout.opcode);
- try testing.expectEqual(@as(u64, 0x88888888), sqe_timeout.user_data);
-
- const sqe_timeout_remove = try ring.timeout_remove(0x99999999, 0x88888888, 0);
- try testing.expectEqual(linux.IORING_OP.TIMEOUT_REMOVE, sqe_timeout_remove.opcode);
- try testing.expectEqual(@as(u64, 0x88888888), sqe_timeout_remove.addr);
- try testing.expectEqual(@as(u64, 0x99999999), sqe_timeout_remove.user_data);
-
- try testing.expectEqual(@as(u32, 2), try ring.submit());
-
- // The order in which the CQE arrive is not clearly documented and it changed with kernel 5.18:
- // * kernel 5.10 gives user data 0x88888888 first, 0x99999999 second
- // * kernel 5.18 gives user data 0x99999999 first, 0x88888888 second
-
- var cqes: [2]linux.io_uring_cqe = undefined;
- cqes[0] = try ring.copy_cqe();
- cqes[1] = try ring.copy_cqe();
-
- for (cqes) |cqe| {
- // IORING_OP_TIMEOUT_REMOVE is not supported by this kernel version:
- // Timeout remove operations set the fd to -1, which results in EBADF before EINVAL.
- // We use IORING_FEAT_RW_CUR_POS as a safety check here to make sure we are at least pre-5.6.
- // We don't want to skip this test for newer kernels.
- if (cqe.user_data == 0x99999999 and
- cqe.err() == .BADF and
- (ring.features & linux.IORING_FEAT_RW_CUR_POS) == 0)
- {
- return error.SkipZigTest;
- }
-
- try testing.expect(cqe.user_data == 0x88888888 or cqe.user_data == 0x99999999);
-
- if (cqe.user_data == 0x88888888) {
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x88888888,
- .res = -@as(i32, @intFromEnum(linux.E.CANCELED)),
- .flags = 0,
- }, cqe);
- } else if (cqe.user_data == 0x99999999) {
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x99999999,
- .res = 0,
- .flags = 0,
- }, cqe);
- }
- }
-}
-
-test "accept/connect/recv/link_timeout" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(16, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const socket_test_harness = try createSocketTestHarness(&ring);
- defer socket_test_harness.close();
-
- var buffer_recv = [_]u8{ 0, 1, 0, 1, 0 };
-
- const sqe_recv = try ring.recv(0xffffffff, socket_test_harness.server, .{ .buffer = buffer_recv[0..] }, 0);
- sqe_recv.flags |= linux.IOSQE_IO_LINK;
-
- const ts = linux.kernel_timespec{ .sec = 0, .nsec = 1000000 };
- _ = try ring.link_timeout(0x22222222, &ts, 0);
-
- const nr_wait = try ring.submit();
- try testing.expectEqual(@as(u32, 2), nr_wait);
-
- var i: usize = 0;
- while (i < nr_wait) : (i += 1) {
- const cqe = try ring.copy_cqe();
- switch (cqe.user_data) {
- 0xffffffff => {
- if (cqe.res != -@as(i32, @intFromEnum(linux.E.INTR)) and
- cqe.res != -@as(i32, @intFromEnum(linux.E.CANCELED)))
- {
- std.debug.print("Req 0x{x} got {d}\n", .{ cqe.user_data, cqe.res });
- try testing.expect(false);
- }
- },
- 0x22222222 => {
- if (cqe.res != -@as(i32, @intFromEnum(linux.E.ALREADY)) and
- cqe.res != -@as(i32, @intFromEnum(linux.E.TIME)))
- {
- std.debug.print("Req 0x{x} got {d}\n", .{ cqe.user_data, cqe.res });
- try testing.expect(false);
- }
- },
- else => @panic("should not happen"),
- }
- }
-}
-
-test "fallocate" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- const path = "test_io_uring_fallocate";
- const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
- defer file.close();
-
- try testing.expectEqual(@as(u64, 0), (try file.stat()).size);
-
- const len: u64 = 65536;
- const sqe = try ring.fallocate(0xaaaaaaaa, file.handle, 0, 0, len);
- try testing.expectEqual(linux.IORING_OP.FALLOCATE, sqe.opcode);
- try testing.expectEqual(file.handle, sqe.fd);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- // This kernel's io_uring does not yet implement fallocate():
- .INVAL => return error.SkipZigTest,
- // This kernel does not implement fallocate():
- .NOSYS => return error.SkipZigTest,
- // The filesystem containing the file referred to by fd does not support this operation;
- // or the mode is not supported by the filesystem containing the file referred to by fd:
- .OPNOTSUPP => return error.SkipZigTest,
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xaaaaaaaa,
- .res = 0,
- .flags = 0,
- }, cqe);
-
- try testing.expectEqual(len, (try file.stat()).size);
-}
-
-test "statx" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
- const path = "test_io_uring_statx";
- const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
- defer file.close();
-
- try testing.expectEqual(@as(u64, 0), (try file.stat()).size);
-
- try file.writeAll("foobar");
-
- var buf: linux.Statx = undefined;
- const sqe = try ring.statx(
- 0xaaaaaaaa,
- tmp.dir.fd,
- path,
- 0,
- .{ .SIZE = true },
- &buf,
- );
- try testing.expectEqual(linux.IORING_OP.STATX, sqe.opcode);
- try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- // This kernel's io_uring does not yet implement statx():
- .INVAL => return error.SkipZigTest,
- // This kernel does not implement statx():
- .NOSYS => return error.SkipZigTest,
- // The filesystem containing the file referred to by fd does not support this operation;
- // or the mode is not supported by the filesystem containing the file referred to by fd:
- .OPNOTSUPP => return error.SkipZigTest,
- // not supported on older kernels (5.4)
- .BADF => return error.SkipZigTest,
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xaaaaaaaa,
- .res = 0,
- .flags = 0,
- }, cqe);
-
- try testing.expect(buf.mask.SIZE);
- try testing.expectEqual(@as(u64, 6), buf.size);
-}
-
-test "accept/connect/recv/cancel" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(16, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const socket_test_harness = try createSocketTestHarness(&ring);
- defer socket_test_harness.close();
-
- var buffer_recv = [_]u8{ 0, 1, 0, 1, 0 };
-
- _ = try ring.recv(0xffffffff, socket_test_harness.server, .{ .buffer = buffer_recv[0..] }, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const sqe_cancel = try ring.cancel(0x99999999, 0xffffffff, 0);
- try testing.expectEqual(linux.IORING_OP.ASYNC_CANCEL, sqe_cancel.opcode);
- try testing.expectEqual(@as(u64, 0xffffffff), sqe_cancel.addr);
- try testing.expectEqual(@as(u64, 0x99999999), sqe_cancel.user_data);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- var cqe_recv = try ring.copy_cqe();
- if (cqe_recv.err() == .INVAL) return error.SkipZigTest;
- var cqe_cancel = try ring.copy_cqe();
- if (cqe_cancel.err() == .INVAL) return error.SkipZigTest;
-
- // The recv/cancel CQEs may arrive in any order, the recv CQE will sometimes come first:
- if (cqe_recv.user_data == 0x99999999 and cqe_cancel.user_data == 0xffffffff) {
- const a = cqe_recv;
- const b = cqe_cancel;
- cqe_recv = b;
- cqe_cancel = a;
- }
-
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xffffffff,
- .res = -@as(i32, @intFromEnum(linux.E.CANCELED)),
- .flags = 0,
- }, cqe_recv);
-
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x99999999,
- .res = 0,
- .flags = 0,
- }, cqe_cancel);
-}
-
-test "register_files_update" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const fd = try posix.openZ("/dev/zero", .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
- defer posix.close(fd);
-
- var registered_fds = [_]linux.fd_t{0} ** 2;
- const fd_index = 0;
- const fd_index2 = 1;
- registered_fds[fd_index] = fd;
- registered_fds[fd_index2] = -1;
-
- ring.register_files(registered_fds[0..]) catch |err| switch (err) {
- // Happens when the kernel doesn't support sparse entry (-1) in the file descriptors array.
- error.FileDescriptorInvalid => return error.SkipZigTest,
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- };
-
- // Test IORING_REGISTER_FILES_UPDATE
- // Only available since Linux 5.5
-
- const fd2 = try posix.openZ("/dev/zero", .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
- defer posix.close(fd2);
-
- registered_fds[fd_index] = fd2;
- registered_fds[fd_index2] = -1;
- try ring.register_files_update(0, registered_fds[0..]);
-
- var buffer = [_]u8{42} ** 128;
- {
- const sqe = try ring.read(0xcccccccc, fd_index, .{ .buffer = &buffer }, 0);
- try testing.expectEqual(linux.IORING_OP.READ, sqe.opcode);
- sqe.flags |= linux.IOSQE_FIXED_FILE;
-
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xcccccccc,
- .res = buffer.len,
- .flags = 0,
- }, try ring.copy_cqe());
- try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer.len), buffer[0..]);
- }
-
- // Test with a non-zero offset
-
- registered_fds[fd_index] = -1;
- registered_fds[fd_index2] = -1;
- try ring.register_files_update(1, registered_fds[1..]);
-
- {
- // Next read should still work since fd_index in the registered file descriptors hasn't been updated yet.
- const sqe = try ring.read(0xcccccccc, fd_index, .{ .buffer = &buffer }, 0);
- try testing.expectEqual(linux.IORING_OP.READ, sqe.opcode);
- sqe.flags |= linux.IOSQE_FIXED_FILE;
-
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xcccccccc,
- .res = buffer.len,
- .flags = 0,
- }, try ring.copy_cqe());
- try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer.len), buffer[0..]);
- }
-
- try ring.register_files_update(0, registered_fds[0..]);
-
- {
- // Now this should fail since both fds are sparse (-1)
- const sqe = try ring.read(0xcccccccc, fd_index, .{ .buffer = &buffer }, 0);
- try testing.expectEqual(linux.IORING_OP.READ, sqe.opcode);
- sqe.flags |= linux.IOSQE_FIXED_FILE;
-
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- const cqe = try ring.copy_cqe();
- try testing.expectEqual(linux.E.BADF, cqe.err());
- }
-
- try ring.unregister_files();
-}
-
-test "shutdown" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(16, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var address: linux.sockaddr.in = .{
- .port = 0,
- .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
- };
-
- // Socket bound, expect shutdown to work
- {
- const server = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
- defer posix.close(server);
- try posix.setsockopt(server, posix.SOL.SOCKET, posix.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
- try posix.bind(server, addrAny(&address), @sizeOf(linux.sockaddr.in));
- try posix.listen(server, 1);
-
- // set address to the OS-chosen IP/port.
- var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
- try posix.getsockname(server, addrAny(&address), &slen);
-
- const shutdown_sqe = try ring.shutdown(0x445445445, server, linux.SHUT.RD);
- try testing.expectEqual(linux.IORING_OP.SHUTDOWN, shutdown_sqe.opcode);
- try testing.expectEqual(@as(i32, server), shutdown_sqe.fd);
-
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- // This kernel's io_uring does not yet implement shutdown (kernel version < 5.11)
- .INVAL => return error.SkipZigTest,
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
-
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x445445445,
- .res = 0,
- .flags = 0,
- }, cqe);
- }
-
- // Socket not bound, expect to fail with ENOTCONN
- {
- const server = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
- defer posix.close(server);
-
- const shutdown_sqe = ring.shutdown(0x445445445, server, linux.SHUT.RD) catch |err| switch (err) {
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- };
- try testing.expectEqual(linux.IORING_OP.SHUTDOWN, shutdown_sqe.opcode);
- try testing.expectEqual(@as(i32, server), shutdown_sqe.fd);
-
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- try testing.expectEqual(@as(u64, 0x445445445), cqe.user_data);
- try testing.expectEqual(linux.E.NOTCONN, cqe.err());
- }
-}
-
-test "renameat" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const old_path = "test_io_uring_renameat_old";
- const new_path = "test_io_uring_renameat_new";
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- // Write old file with data
-
- const old_file = try tmp.dir.createFile(old_path, .{ .truncate = true, .mode = 0o666 });
- defer old_file.close();
- try old_file.writeAll("hello");
-
- // Submit renameat
-
- const sqe = try ring.renameat(
- 0x12121212,
- tmp.dir.fd,
- old_path,
- tmp.dir.fd,
- new_path,
- 0,
- );
- try testing.expectEqual(linux.IORING_OP.RENAMEAT, sqe.opcode);
- try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
- try testing.expectEqual(@as(i32, tmp.dir.fd), @as(i32, @bitCast(sqe.len)));
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- // This kernel's io_uring does not yet implement renameat (kernel version < 5.11)
- .BADF, .INVAL => return error.SkipZigTest,
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x12121212,
- .res = 0,
- .flags = 0,
- }, cqe);
-
- // Validate that the old file doesn't exist anymore
- try testing.expectError(error.FileNotFound, tmp.dir.openFile(old_path, .{}));
-
- // Validate that the new file exists with the proper content
- var new_file_data: [16]u8 = undefined;
- try testing.expectEqualStrings("hello", try tmp.dir.readFile(new_path, &new_file_data));
-}
-
-test "unlinkat" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const path = "test_io_uring_unlinkat";
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- // Write old file with data
-
- const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
- defer file.close();
-
- // Submit unlinkat
-
- const sqe = try ring.unlinkat(
- 0x12121212,
- tmp.dir.fd,
- path,
- 0,
- );
- try testing.expectEqual(linux.IORING_OP.UNLINKAT, sqe.opcode);
- try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- // This kernel's io_uring does not yet implement unlinkat (kernel version < 5.11)
- .BADF, .INVAL => return error.SkipZigTest,
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x12121212,
- .res = 0,
- .flags = 0,
- }, cqe);
-
- // Validate that the file doesn't exist anymore
- _ = tmp.dir.openFile(path, .{}) catch |err| switch (err) {
- error.FileNotFound => {},
- else => std.debug.panic("unexpected error: {}", .{err}),
- };
-}
-
-test "mkdirat" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- const path = "test_io_uring_mkdirat";
-
- // Submit mkdirat
-
- const sqe = try ring.mkdirat(
- 0x12121212,
- tmp.dir.fd,
- path,
- 0o0755,
- );
- try testing.expectEqual(linux.IORING_OP.MKDIRAT, sqe.opcode);
- try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- // This kernel's io_uring does not yet implement mkdirat (kernel version < 5.15)
- .BADF, .INVAL => return error.SkipZigTest,
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x12121212,
- .res = 0,
- .flags = 0,
- }, cqe);
-
- // Validate that the directory exist
- _ = try tmp.dir.openDir(path, .{});
-}
-
-test "symlinkat" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- const path = "test_io_uring_symlinkat";
- const link_path = "test_io_uring_symlinkat_link";
-
- const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
- defer file.close();
-
- // Submit symlinkat
-
- const sqe = try ring.symlinkat(
- 0x12121212,
- path,
- tmp.dir.fd,
- link_path,
- );
- try testing.expectEqual(linux.IORING_OP.SYMLINKAT, sqe.opcode);
- try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- // This kernel's io_uring does not yet implement symlinkat (kernel version < 5.15)
- .BADF, .INVAL => return error.SkipZigTest,
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x12121212,
- .res = 0,
- .flags = 0,
- }, cqe);
-
- // Validate that the symlink exist
- _ = try tmp.dir.openFile(link_path, .{});
-}
-
-test "linkat" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- const first_path = "test_io_uring_linkat_first";
- const second_path = "test_io_uring_linkat_second";
-
- // Write file with data
-
- const first_file = try tmp.dir.createFile(first_path, .{ .truncate = true, .mode = 0o666 });
- defer first_file.close();
- try first_file.writeAll("hello");
-
- // Submit linkat
-
- const sqe = try ring.linkat(
- 0x12121212,
- tmp.dir.fd,
- first_path,
- tmp.dir.fd,
- second_path,
- 0,
- );
- try testing.expectEqual(linux.IORING_OP.LINKAT, sqe.opcode);
- try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
- try testing.expectEqual(@as(i32, tmp.dir.fd), @as(i32, @bitCast(sqe.len)));
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- // This kernel's io_uring does not yet implement linkat (kernel version < 5.15)
- .BADF, .INVAL => return error.SkipZigTest,
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0x12121212,
- .res = 0,
- .flags = 0,
- }, cqe);
-
- // Validate the second file
- var second_file_data: [16]u8 = undefined;
- try testing.expectEqualStrings("hello", try tmp.dir.readFile(second_path, &second_file_data));
-}
-
-test "provide_buffers: read" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const fd = try posix.openZ("/dev/zero", .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
- defer posix.close(fd);
-
- const group_id = 1337;
- const buffer_id = 0;
-
- const buffer_len = 128;
-
- var buffers: [4][buffer_len]u8 = undefined;
-
- // Provide 4 buffers
-
- {
- const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
- try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode);
- try testing.expectEqual(@as(i32, buffers.len), sqe.fd);
- try testing.expectEqual(@as(u32, buffers[0].len), sqe.len);
- try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- // Happens when the kernel is < 5.7
- .INVAL, .BADF => return error.SkipZigTest,
- .SUCCESS => {},
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(@as(u64, 0xcccccccc), cqe.user_data);
- }
-
- // Do 4 reads which should consume all buffers
-
- var i: usize = 0;
- while (i < buffers.len) : (i += 1) {
- const sqe = try ring.read(0xdededede, fd, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
- try testing.expectEqual(linux.IORING_OP.READ, sqe.opcode);
- try testing.expectEqual(@as(i32, fd), sqe.fd);
- try testing.expectEqual(@as(u64, 0), sqe.addr);
- try testing.expectEqual(@as(u32, buffer_len), sqe.len);
- try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
-
- try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER);
- const used_buffer_id = cqe.flags >> 16;
- try testing.expect(used_buffer_id >= 0 and used_buffer_id <= 3);
- try testing.expectEqual(@as(i32, buffer_len), cqe.res);
-
- try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data);
- try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
- }
-
- // This read should fail
-
- {
- const sqe = try ring.read(0xdfdfdfdf, fd, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
- try testing.expectEqual(linux.IORING_OP.READ, sqe.opcode);
- try testing.expectEqual(@as(i32, fd), sqe.fd);
- try testing.expectEqual(@as(u64, 0), sqe.addr);
- try testing.expectEqual(@as(u32, buffer_len), sqe.len);
- try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- // Expected
- .NOBUFS => {},
- .SUCCESS => std.debug.panic("unexpected success", .{}),
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
- }
-
- // Provide 1 buffer again
-
- // Deliberately put something we don't expect in the buffers
- @memset(mem.sliceAsBytes(&buffers), 42);
-
- const reprovided_buffer_id = 2;
-
- {
- _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- }
-
- // Final read which should work
-
- {
- const sqe = try ring.read(0xdfdfdfdf, fd, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
- try testing.expectEqual(linux.IORING_OP.READ, sqe.opcode);
- try testing.expectEqual(@as(i32, fd), sqe.fd);
- try testing.expectEqual(@as(u64, 0), sqe.addr);
- try testing.expectEqual(@as(u32, buffer_len), sqe.len);
- try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
-
- try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER);
- const used_buffer_id = cqe.flags >> 16;
- try testing.expectEqual(used_buffer_id, reprovided_buffer_id);
- try testing.expectEqual(@as(i32, buffer_len), cqe.res);
- try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
- try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
- }
-}
-
-test "remove_buffers" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const fd = try posix.openZ("/dev/zero", .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
- defer posix.close(fd);
-
- const group_id = 1337;
- const buffer_id = 0;
-
- const buffer_len = 128;
-
- var buffers: [4][buffer_len]u8 = undefined;
-
- // Provide 4 buffers
-
- {
- _ = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .INVAL, .BADF => return error.SkipZigTest,
- .SUCCESS => {},
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(@as(u64, 0xcccccccc), cqe.user_data);
- }
-
- // Remove 3 buffers
-
- {
- const sqe = try ring.remove_buffers(0xbababababa, 3, group_id);
- try testing.expectEqual(linux.IORING_OP.REMOVE_BUFFERS, sqe.opcode);
- try testing.expectEqual(@as(i32, 3), sqe.fd);
- try testing.expectEqual(@as(u64, 0), sqe.addr);
- try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(@as(u64, 0xbababababa), cqe.user_data);
- }
-
- // This read should work
-
- {
- _ = try ring.read(0xdfdfdfdf, fd, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
-
- try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER);
- const used_buffer_id = cqe.flags >> 16;
- try testing.expect(used_buffer_id >= 0 and used_buffer_id < 4);
- try testing.expectEqual(@as(i32, buffer_len), cqe.res);
- try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
- try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
- }
-
- // Final read should _not_ work
-
- {
- _ = try ring.read(0xdfdfdfdf, fd, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- // Expected
- .NOBUFS => {},
- .SUCCESS => std.debug.panic("unexpected success", .{}),
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- }
-}
-
-test "provide_buffers: accept/connect/send/recv" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(16, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const group_id = 1337;
- const buffer_id = 0;
-
- const buffer_len = 128;
- var buffers: [4][buffer_len]u8 = undefined;
-
- // Provide 4 buffers
-
- {
- const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
- try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode);
- try testing.expectEqual(@as(i32, buffers.len), sqe.fd);
- try testing.expectEqual(@as(u32, buffer_len), sqe.len);
- try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- // Happens when the kernel is < 5.7
- .INVAL => return error.SkipZigTest,
- // Happens on the kernel 5.4
- .BADF => return error.SkipZigTest,
- .SUCCESS => {},
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(@as(u64, 0xcccccccc), cqe.user_data);
- }
-
- const socket_test_harness = try createSocketTestHarness(&ring);
- defer socket_test_harness.close();
-
- // Do 4 send on the socket
-
- {
- var i: usize = 0;
- while (i < buffers.len) : (i += 1) {
- _ = try ring.send(0xdeaddead, socket_test_harness.server, &([_]u8{'z'} ** buffer_len), 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- }
-
- var cqes: [4]linux.io_uring_cqe = undefined;
- try testing.expectEqual(@as(u32, 4), try ring.copy_cqes(&cqes, 4));
- }
-
- // Do 4 recv which should consume all buffers
-
- // Deliberately put something we don't expect in the buffers
- @memset(mem.sliceAsBytes(&buffers), 1);
-
- var i: usize = 0;
- while (i < buffers.len) : (i += 1) {
- const sqe = try ring.recv(0xdededede, socket_test_harness.client, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
- try testing.expectEqual(linux.IORING_OP.RECV, sqe.opcode);
- try testing.expectEqual(@as(i32, socket_test_harness.client), sqe.fd);
- try testing.expectEqual(@as(u64, 0), sqe.addr);
- try testing.expectEqual(@as(u32, buffer_len), sqe.len);
- try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
- try testing.expectEqual(@as(u32, 0), sqe.rw_flags);
- try testing.expectEqual(@as(u32, linux.IOSQE_BUFFER_SELECT), sqe.flags);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
-
- try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER);
- const used_buffer_id = cqe.flags >> 16;
- try testing.expect(used_buffer_id >= 0 and used_buffer_id <= 3);
- try testing.expectEqual(@as(i32, buffer_len), cqe.res);
-
- try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data);
- const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))];
- try testing.expectEqualSlices(u8, &([_]u8{'z'} ** buffer_len), buffer);
- }
-
- // This recv should fail
-
- {
- const sqe = try ring.recv(0xdfdfdfdf, socket_test_harness.client, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
- try testing.expectEqual(linux.IORING_OP.RECV, sqe.opcode);
- try testing.expectEqual(@as(i32, socket_test_harness.client), sqe.fd);
- try testing.expectEqual(@as(u64, 0), sqe.addr);
- try testing.expectEqual(@as(u32, buffer_len), sqe.len);
- try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
- try testing.expectEqual(@as(u32, 0), sqe.rw_flags);
- try testing.expectEqual(@as(u32, linux.IOSQE_BUFFER_SELECT), sqe.flags);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- // Expected
- .NOBUFS => {},
- .SUCCESS => std.debug.panic("unexpected success", .{}),
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
- }
-
- // Provide 1 buffer again
-
- const reprovided_buffer_id = 2;
-
- {
- _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
- }
-
- // Redo 1 send on the server socket
-
- {
- _ = try ring.send(0xdeaddead, socket_test_harness.server, &([_]u8{'w'} ** buffer_len), 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- _ = try ring.copy_cqe();
- }
-
- // Final recv which should work
-
- // Deliberately put something we don't expect in the buffers
- @memset(mem.sliceAsBytes(&buffers), 1);
-
- {
- const sqe = try ring.recv(0xdfdfdfdf, socket_test_harness.client, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
- try testing.expectEqual(linux.IORING_OP.RECV, sqe.opcode);
- try testing.expectEqual(@as(i32, socket_test_harness.client), sqe.fd);
- try testing.expectEqual(@as(u64, 0), sqe.addr);
- try testing.expectEqual(@as(u32, buffer_len), sqe.len);
- try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
- try testing.expectEqual(@as(u32, 0), sqe.rw_flags);
- try testing.expectEqual(@as(u32, linux.IOSQE_BUFFER_SELECT), sqe.flags);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- const cqe = try ring.copy_cqe();
- switch (cqe.err()) {
- .SUCCESS => {},
- else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
- }
-
- try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER);
- const used_buffer_id = cqe.flags >> 16;
- try testing.expectEqual(used_buffer_id, reprovided_buffer_id);
- try testing.expectEqual(@as(i32, buffer_len), cqe.res);
- try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
- const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))];
- try testing.expectEqualSlices(u8, &([_]u8{'w'} ** buffer_len), buffer);
- }
-}
-
-/// Used for testing server/client interactions.
-const SocketTestHarness = struct {
- listener: posix.socket_t,
- server: posix.socket_t,
- client: posix.socket_t,
-
- fn close(self: SocketTestHarness) void {
- posix.close(self.client);
- posix.close(self.listener);
- }
-};
-
-fn createSocketTestHarness(ring: *IoUring) !SocketTestHarness {
- // Create a TCP server socket
- var address: linux.sockaddr.in = .{
- .port = 0,
- .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
- };
- const listener_socket = try createListenerSocket(&address);
- errdefer posix.close(listener_socket);
-
- // Submit 1 accept
- var accept_addr: posix.sockaddr = undefined;
- var accept_addr_len: posix.socklen_t = @sizeOf(@TypeOf(accept_addr));
- _ = try ring.accept(0xaaaaaaaa, listener_socket, &accept_addr, &accept_addr_len, 0);
-
- // Create a TCP client socket
- const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
- errdefer posix.close(client);
- _ = try ring.connect(0xcccccccc, client, addrAny(&address), @sizeOf(linux.sockaddr.in));
-
- try testing.expectEqual(@as(u32, 2), try ring.submit());
-
- var cqe_accept = try ring.copy_cqe();
- if (cqe_accept.err() == .INVAL) return error.SkipZigTest;
- var cqe_connect = try ring.copy_cqe();
- if (cqe_connect.err() == .INVAL) return error.SkipZigTest;
-
- // The accept/connect CQEs may arrive in any order, the connect CQE will sometimes come first:
- if (cqe_accept.user_data == 0xcccccccc and cqe_connect.user_data == 0xaaaaaaaa) {
- const a = cqe_accept;
- const b = cqe_connect;
- cqe_accept = b;
- cqe_connect = a;
- }
-
- try testing.expectEqual(@as(u64, 0xaaaaaaaa), cqe_accept.user_data);
- if (cqe_accept.res <= 0) std.debug.print("\ncqe_accept.res={}\n", .{cqe_accept.res});
- try testing.expect(cqe_accept.res > 0);
- try testing.expectEqual(@as(u32, 0), cqe_accept.flags);
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xcccccccc,
- .res = 0,
- .flags = 0,
- }, cqe_connect);
-
- // All good
-
- return SocketTestHarness{
- .listener = listener_socket,
- .server = cqe_accept.res,
- .client = client,
- };
-}
-
-fn createListenerSocket(address: *linux.sockaddr.in) !posix.socket_t {
- const kernel_backlog = 1;
- const listener_socket = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
- errdefer posix.close(listener_socket);
-
- try posix.setsockopt(listener_socket, posix.SOL.SOCKET, posix.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
- try posix.bind(listener_socket, addrAny(address), @sizeOf(linux.sockaddr.in));
- try posix.listen(listener_socket, kernel_backlog);
-
- // set address to the OS-chosen IP/port.
- var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
- try posix.getsockname(listener_socket, addrAny(address), &slen);
-
- return listener_socket;
-}
-
-test "accept multishot" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(16, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var address: linux.sockaddr.in = .{
- .port = 0,
- .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
- };
- const listener_socket = try createListenerSocket(&address);
- defer posix.close(listener_socket);
-
- // submit multishot accept operation
- var addr: posix.sockaddr = undefined;
- var addr_len: posix.socklen_t = @sizeOf(@TypeOf(addr));
- const userdata: u64 = 0xaaaaaaaa;
- _ = try ring.accept_multishot(userdata, listener_socket, &addr, &addr_len, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- var nr: usize = 4; // number of clients to connect
- while (nr > 0) : (nr -= 1) {
- // connect client
- const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
- errdefer posix.close(client);
- try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
-
- // test accept completion
- var cqe = try ring.copy_cqe();
- if (cqe.err() == .INVAL) return error.SkipZigTest;
- try testing.expect(cqe.res > 0);
- try testing.expect(cqe.user_data == userdata);
- try testing.expect(cqe.flags & linux.IORING_CQE_F_MORE > 0); // more flag is set
-
- posix.close(client);
- }
-}
-
-test "accept/connect/send_zc/recv" {
- try skipKernelLessThan(.{ .major = 6, .minor = 0, .patch = 0 });
-
- var ring = IoUring.init(16, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const socket_test_harness = try createSocketTestHarness(&ring);
- defer socket_test_harness.close();
-
- const buffer_send = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe };
- var buffer_recv = [_]u8{0} ** 10;
-
- // zero-copy send
- const sqe_send = try ring.send_zc(0xeeeeeeee, socket_test_harness.client, buffer_send[0..], 0, 0);
- sqe_send.flags |= linux.IOSQE_IO_LINK;
- _ = try ring.recv(0xffffffff, socket_test_harness.server, .{ .buffer = buffer_recv[0..] }, 0);
- try testing.expectEqual(@as(u32, 2), try ring.submit());
-
- var cqe_send = try ring.copy_cqe();
- // First completion of zero-copy send.
- // IORING_CQE_F_MORE, means that there
- // will be a second completion event / notification for the
- // request, with the user_data field set to the same value.
- // buffer_send must be keep alive until second cqe.
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xeeeeeeee,
- .res = buffer_send.len,
- .flags = linux.IORING_CQE_F_MORE,
- }, cqe_send);
-
- cqe_send, const cqe_recv = brk: {
- const cqe1 = try ring.copy_cqe();
- const cqe2 = try ring.copy_cqe();
- break :brk if (cqe1.user_data == 0xeeeeeeee) .{ cqe1, cqe2 } else .{ cqe2, cqe1 };
- };
-
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xffffffff,
- .res = buffer_recv.len,
- .flags = cqe_recv.flags & linux.IORING_CQE_F_SOCK_NONEMPTY,
- }, cqe_recv);
- try testing.expectEqualSlices(u8, buffer_send[0..buffer_recv.len], buffer_recv[0..]);
-
- // Second completion of zero-copy send.
- // IORING_CQE_F_NOTIF in flags signals that kernel is done with send_buffer
- try testing.expectEqual(linux.io_uring_cqe{
- .user_data = 0xeeeeeeee,
- .res = 0,
- .flags = linux.IORING_CQE_F_NOTIF,
- }, cqe_send);
-}
-
-test "accept_direct" {
- try skipKernelLessThan(.{ .major = 5, .minor = 19, .patch = 0 });
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
- var address: linux.sockaddr.in = .{
- .port = 0,
- .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
- };
-
- // register direct file descriptors
- var registered_fds = [_]linux.fd_t{-1} ** 2;
- try ring.register_files(registered_fds[0..]);
-
- const listener_socket = try createListenerSocket(&address);
- defer posix.close(listener_socket);
-
- const accept_userdata: u64 = 0xaaaaaaaa;
- const read_userdata: u64 = 0xbbbbbbbb;
- const data = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe };
-
- for (0..2) |_| {
- for (registered_fds, 0..) |_, i| {
- var buffer_recv = [_]u8{0} ** 16;
- const buffer_send: []const u8 = data[0 .. data.len - i]; // make it different at each loop
-
- // submit accept, will chose registered fd and return index in cqe
- _ = try ring.accept_direct(accept_userdata, listener_socket, null, null, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- // connect
- const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
- try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
- defer posix.close(client);
-
- // accept completion
- const cqe_accept = try ring.copy_cqe();
- try testing.expectEqual(posix.E.SUCCESS, cqe_accept.err());
- const fd_index = cqe_accept.res;
- try testing.expect(fd_index < registered_fds.len);
- try testing.expect(cqe_accept.user_data == accept_userdata);
-
- // send data
- _ = try posix.send(client, buffer_send, 0);
-
- // Example of how to use registered fd:
- // Submit receive to fixed file returned by accept (fd_index).
- // Fd field is set to registered file index, returned by accept.
- // Flag linux.IOSQE_FIXED_FILE must be set.
- const recv_sqe = try ring.recv(read_userdata, fd_index, .{ .buffer = &buffer_recv }, 0);
- recv_sqe.flags |= linux.IOSQE_FIXED_FILE;
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- // accept receive
- const recv_cqe = try ring.copy_cqe();
- try testing.expect(recv_cqe.user_data == read_userdata);
- try testing.expect(recv_cqe.res == buffer_send.len);
- try testing.expectEqualSlices(u8, buffer_send, buffer_recv[0..buffer_send.len]);
- }
- // no more available fds, accept will get NFILE error
- {
- // submit accept
- _ = try ring.accept_direct(accept_userdata, listener_socket, null, null, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- // connect
- const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
- try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
- defer posix.close(client);
- // completion with error
- const cqe_accept = try ring.copy_cqe();
- try testing.expect(cqe_accept.user_data == accept_userdata);
- try testing.expectEqual(posix.E.NFILE, cqe_accept.err());
- }
- // return file descriptors to kernel
- try ring.register_files_update(0, registered_fds[0..]);
- }
- try ring.unregister_files();
-}
-
-test "accept_multishot_direct" {
- try skipKernelLessThan(.{ .major = 5, .minor = 19, .patch = 0 });
-
- if (builtin.cpu.arch == .riscv64) {
- // https://github.com/ziglang/zig/issues/25734
- return error.SkipZigTest;
- }
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var address: linux.sockaddr.in = .{
- .port = 0,
- .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
- };
-
- var registered_fds = [_]linux.fd_t{-1} ** 2;
- try ring.register_files(registered_fds[0..]);
-
- const listener_socket = try createListenerSocket(&address);
- defer posix.close(listener_socket);
-
- const accept_userdata: u64 = 0xaaaaaaaa;
-
- for (0..2) |_| {
- // submit multishot accept
- // Will chose registered fd and return index of the selected registered file in cqe.
- _ = try ring.accept_multishot_direct(accept_userdata, listener_socket, null, null, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- for (registered_fds) |_| {
- // connect
- const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
- try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
- defer posix.close(client);
-
- // accept completion
- const cqe_accept = try ring.copy_cqe();
- const fd_index = cqe_accept.res;
- try testing.expect(fd_index < registered_fds.len);
- try testing.expect(cqe_accept.user_data == accept_userdata);
- try testing.expect(cqe_accept.flags & linux.IORING_CQE_F_MORE > 0); // has more is set
- }
- // No more available fds, accept will get NFILE error.
- // Multishot is terminated (more flag is not set).
- {
- // connect
- const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
- try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
- defer posix.close(client);
- // completion with error
- const cqe_accept = try ring.copy_cqe();
- try testing.expect(cqe_accept.user_data == accept_userdata);
- try testing.expectEqual(posix.E.NFILE, cqe_accept.err());
- try testing.expect(cqe_accept.flags & linux.IORING_CQE_F_MORE == 0); // has more is not set
- }
- // return file descriptors to kernel
- try ring.register_files_update(0, registered_fds[0..]);
- }
- try ring.unregister_files();
-}
-
-test "socket" {
- try skipKernelLessThan(.{ .major = 5, .minor = 19, .patch = 0 });
-
- var ring = IoUring.init(1, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- // prepare, submit socket operation
- _ = try ring.socket(0, linux.AF.INET, posix.SOCK.STREAM, 0, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- // test completion
- var cqe = try ring.copy_cqe();
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- const fd: linux.fd_t = @intCast(cqe.res);
- try testing.expect(fd > 2);
-
- posix.close(fd);
-}
-
-test "socket_direct/socket_direct_alloc/close_direct" {
- try skipKernelLessThan(.{ .major = 5, .minor = 19, .patch = 0 });
-
- var ring = IoUring.init(2, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var registered_fds = [_]linux.fd_t{-1} ** 3;
- try ring.register_files(registered_fds[0..]);
-
- // create socket in registered file descriptor at index 0 (last param)
- _ = try ring.socket_direct(0, linux.AF.INET, posix.SOCK.STREAM, 0, 0, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- var cqe_socket = try ring.copy_cqe();
- try testing.expectEqual(posix.E.SUCCESS, cqe_socket.err());
- try testing.expect(cqe_socket.res == 0);
-
- // create socket in registered file descriptor at index 1 (last param)
- _ = try ring.socket_direct(0, linux.AF.INET, posix.SOCK.STREAM, 0, 0, 1);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- cqe_socket = try ring.copy_cqe();
- try testing.expectEqual(posix.E.SUCCESS, cqe_socket.err());
- try testing.expect(cqe_socket.res == 0); // res is 0 when index is specified
-
- // create socket in kernel chosen file descriptor index (_alloc version)
- // completion res has index from registered files
- _ = try ring.socket_direct_alloc(0, linux.AF.INET, posix.SOCK.STREAM, 0, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- cqe_socket = try ring.copy_cqe();
- try testing.expectEqual(posix.E.SUCCESS, cqe_socket.err());
- try testing.expect(cqe_socket.res == 2); // returns registered file index
-
- // use sockets from registered_fds in connect operation
- var address: linux.sockaddr.in = .{
- .port = 0,
- .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
- };
- const listener_socket = try createListenerSocket(&address);
- defer posix.close(listener_socket);
- const accept_userdata: u64 = 0xaaaaaaaa;
- const connect_userdata: u64 = 0xbbbbbbbb;
- const close_userdata: u64 = 0xcccccccc;
- for (registered_fds, 0..) |_, fd_index| {
- // prepare accept
- _ = try ring.accept(accept_userdata, listener_socket, null, null, 0);
- // prepare connect with fixed socket
- const connect_sqe = try ring.connect(connect_userdata, @intCast(fd_index), addrAny(&address), @sizeOf(linux.sockaddr.in));
- connect_sqe.flags |= linux.IOSQE_FIXED_FILE; // fd is fixed file index
- // submit both
- try testing.expectEqual(@as(u32, 2), try ring.submit());
- // get completions
- var cqe_connect = try ring.copy_cqe();
- var cqe_accept = try ring.copy_cqe();
- // ignore order
- if (cqe_connect.user_data == accept_userdata and cqe_accept.user_data == connect_userdata) {
- const a = cqe_accept;
- const b = cqe_connect;
- cqe_accept = b;
- cqe_connect = a;
- }
- // test connect completion
- try testing.expect(cqe_connect.user_data == connect_userdata);
- try testing.expectEqual(posix.E.SUCCESS, cqe_connect.err());
- // test accept completion
- try testing.expect(cqe_accept.user_data == accept_userdata);
- try testing.expectEqual(posix.E.SUCCESS, cqe_accept.err());
-
- // submit and test close_direct
- _ = try ring.close_direct(close_userdata, @intCast(fd_index));
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- var cqe_close = try ring.copy_cqe();
- try testing.expect(cqe_close.user_data == close_userdata);
- try testing.expectEqual(posix.E.SUCCESS, cqe_close.err());
- }
-
- try ring.unregister_files();
-}
-
-test "openat_direct/close_direct" {
- try skipKernelLessThan(.{ .major = 5, .minor = 19, .patch = 0 });
-
- var ring = IoUring.init(2, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- var registered_fds = [_]linux.fd_t{-1} ** 3;
- try ring.register_files(registered_fds[0..]);
-
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
- const path = "test_io_uring_close_direct";
- const flags: linux.O = .{ .ACCMODE = .RDWR, .CREAT = true };
- const mode: posix.mode_t = 0o666;
- const user_data: u64 = 0;
-
- // use registered file at index 0 (last param)
- _ = try ring.openat_direct(user_data, tmp.dir.fd, path, flags, mode, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- var cqe = try ring.copy_cqe();
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- try testing.expect(cqe.res == 0);
-
- // use registered file at index 1
- _ = try ring.openat_direct(user_data, tmp.dir.fd, path, flags, mode, 1);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- cqe = try ring.copy_cqe();
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- try testing.expect(cqe.res == 0); // res is 0 when we specify index
-
- // let kernel choose registered file index
- _ = try ring.openat_direct(user_data, tmp.dir.fd, path, flags, mode, linux.IORING_FILE_INDEX_ALLOC);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- cqe = try ring.copy_cqe();
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- try testing.expect(cqe.res == 2); // chosen index is in res
-
- // close all open file descriptors
- for (registered_fds, 0..) |_, fd_index| {
- _ = try ring.close_direct(user_data, @intCast(fd_index));
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- var cqe_close = try ring.copy_cqe();
- try testing.expectEqual(posix.E.SUCCESS, cqe_close.err());
- }
- try ring.unregister_files();
-}
-
-test "waitid" {
- try skipKernelLessThan(.{ .major = 6, .minor = 7, .patch = 0 });
-
- var ring = IoUring.init(16, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const pid = try posix.fork();
- if (pid == 0) {
- posix.exit(7);
- }
-
- var siginfo: posix.siginfo_t = undefined;
- _ = try ring.waitid(0, .PID, pid, &siginfo, posix.W.EXITED, 0);
-
- try testing.expectEqual(1, try ring.submit());
-
- const cqe_waitid = try ring.copy_cqe();
- try testing.expectEqual(0, cqe_waitid.res);
- try testing.expectEqual(pid, siginfo.fields.common.first.piduid.pid);
- try testing.expectEqual(7, siginfo.fields.common.second.sigchld.status);
-}
-
-/// For use in tests. Returns SkipZigTest if kernel version is less than required.
-inline fn skipKernelLessThan(required: std.SemanticVersion) !void {
- if (!is_linux) return error.SkipZigTest;
-
- var uts: linux.utsname = undefined;
- const res = linux.uname(&uts);
- switch (linux.errno(res)) {
- .SUCCESS => {},
- else => |errno| return posix.unexpectedErrno(errno),
- }
-
- const release = mem.sliceTo(&uts.release, 0);
- // Strips potential extra, as kernel version might not be semver compliant, example "6.8.9-300.fc40.x86_64"
- const extra_index = std.mem.findAny(u8, release, "-+");
- const stripped = release[0..(extra_index orelse release.len)];
- // Make sure the input don't rely on the extra we just stripped
- try testing.expect(required.pre == null and required.build == null);
-
- var current = try std.SemanticVersion.parse(stripped);
- current.pre = null; // don't check pre field
- if (required.order(current) == .gt) return error.SkipZigTest;
-}
-
-test BufferGroup {
- if (!is_linux) return error.SkipZigTest;
+ _ = io;
// Init IoUring
var ring = IoUring.init(16, 0) catch |err| switch (err) {
@@ -4167,465 +1919,6 @@ test BufferGroup {
}
}
-test "ring mapped buffers recv" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(16, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- // init buffer group
- const group_id: u16 = 1; // buffers group id
- const buffers_count: u16 = 2; // number of buffers in buffer group
- const buffer_size: usize = 4; // size of each buffer in group
- var buf_grp = BufferGroup.init(
- &ring,
- testing.allocator,
- group_id,
- buffer_size,
- buffers_count,
- ) catch |err| switch (err) {
- // kernel older than 5.19
- error.ArgumentsInvalid => return error.SkipZigTest,
- else => return err,
- };
- defer buf_grp.deinit(testing.allocator);
-
- // create client/server fds
- const fds = try createSocketTestHarness(&ring);
- defer fds.close();
-
- // for random user_data in sqe/cqe
- var Rnd = std.Random.DefaultPrng.init(std.testing.random_seed);
- var rnd = Rnd.random();
-
- var round: usize = 4; // repeat send/recv cycle round times
- while (round > 0) : (round -= 1) {
- // client sends data
- const data = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe };
- {
- const user_data = rnd.int(u64);
- _ = try ring.send(user_data, fds.client, data[0..], 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- const cqe_send = try ring.copy_cqe();
- if (cqe_send.err() == .INVAL) return error.SkipZigTest;
- try testing.expectEqual(linux.io_uring_cqe{ .user_data = user_data, .res = data.len, .flags = 0 }, cqe_send);
- }
- var pos: usize = 0;
-
- // read first chunk
- const cqe1 = try buf_grp_recv_submit_get_cqe(&ring, &buf_grp, fds.server, rnd.int(u64));
- var buf = try buf_grp.get(cqe1);
- try testing.expectEqualSlices(u8, data[pos..][0..buf.len], buf);
- pos += buf.len;
- // second chunk
- const cqe2 = try buf_grp_recv_submit_get_cqe(&ring, &buf_grp, fds.server, rnd.int(u64));
- buf = try buf_grp.get(cqe2);
- try testing.expectEqualSlices(u8, data[pos..][0..buf.len], buf);
- pos += buf.len;
-
- // both buffers provided to the kernel are used so we get error
- // 'no more buffers', until we put buffers to the kernel
- {
- const user_data = rnd.int(u64);
- _ = try buf_grp.recv(user_data, fds.server, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- const cqe = try ring.copy_cqe();
- try testing.expectEqual(user_data, cqe.user_data);
- try testing.expect(cqe.res < 0); // fail
- try testing.expectEqual(posix.E.NOBUFS, cqe.err());
- try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == 0); // IORING_CQE_F_BUFFER flags is set on success only
- try testing.expectError(error.NoBufferSelected, cqe.buffer_id());
- }
-
- // put buffers back to the kernel
- try buf_grp.put(cqe1);
- try buf_grp.put(cqe2);
-
- // read remaining data
- while (pos < data.len) {
- const cqe = try buf_grp_recv_submit_get_cqe(&ring, &buf_grp, fds.server, rnd.int(u64));
- buf = try buf_grp.get(cqe);
- try testing.expectEqualSlices(u8, data[pos..][0..buf.len], buf);
- pos += buf.len;
- try buf_grp.put(cqe);
- }
- }
-}
-
-test "ring mapped buffers multishot recv" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(16, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- // init buffer group
- const group_id: u16 = 1; // buffers group id
- const buffers_count: u16 = 2; // number of buffers in buffer group
- const buffer_size: usize = 4; // size of each buffer in group
- var buf_grp = BufferGroup.init(
- &ring,
- testing.allocator,
- group_id,
- buffer_size,
- buffers_count,
- ) catch |err| switch (err) {
- // kernel older than 5.19
- error.ArgumentsInvalid => return error.SkipZigTest,
- else => return err,
- };
- defer buf_grp.deinit(testing.allocator);
-
- // create client/server fds
- const fds = try createSocketTestHarness(&ring);
- defer fds.close();
-
- // for random user_data in sqe/cqe
- var Rnd = std.Random.DefaultPrng.init(std.testing.random_seed);
- var rnd = Rnd.random();
-
- var round: usize = 4; // repeat send/recv cycle round times
- while (round > 0) : (round -= 1) {
- // client sends data
- const data = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf };
- {
- const user_data = rnd.int(u64);
- _ = try ring.send(user_data, fds.client, data[0..], 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
- const cqe_send = try ring.copy_cqe();
- if (cqe_send.err() == .INVAL) return error.SkipZigTest;
- try testing.expectEqual(linux.io_uring_cqe{ .user_data = user_data, .res = data.len, .flags = 0 }, cqe_send);
- }
-
- // start multishot recv
- var recv_user_data = rnd.int(u64);
- _ = try buf_grp.recv_multishot(recv_user_data, fds.server, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit()); // submit
-
- // server reads data into provided buffers
- // there are 2 buffers of size 4, so each read gets only chunk of data
- // we read four chunks of 4, 4, 4, 4 bytes each
- var chunk: []const u8 = data[0..buffer_size]; // first chunk
- const cqe1 = try expect_buf_grp_cqe(&ring, &buf_grp, recv_user_data, chunk);
- try testing.expect(cqe1.flags & linux.IORING_CQE_F_MORE > 0);
-
- chunk = data[buffer_size .. buffer_size * 2]; // second chunk
- const cqe2 = try expect_buf_grp_cqe(&ring, &buf_grp, recv_user_data, chunk);
- try testing.expect(cqe2.flags & linux.IORING_CQE_F_MORE > 0);
-
- // both buffers provided to the kernel are used so we get error
- // 'no more buffers', until we put buffers to the kernel
- {
- const cqe = try ring.copy_cqe();
- try testing.expectEqual(recv_user_data, cqe.user_data);
- try testing.expect(cqe.res < 0); // fail
- try testing.expectEqual(posix.E.NOBUFS, cqe.err());
- try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == 0); // IORING_CQE_F_BUFFER flags is set on success only
- // has more is not set
- // indicates that multishot is finished
- try testing.expect(cqe.flags & linux.IORING_CQE_F_MORE == 0);
- try testing.expectError(error.NoBufferSelected, cqe.buffer_id());
- }
-
- // put buffers back to the kernel
- try buf_grp.put(cqe1);
- try buf_grp.put(cqe2);
-
- // restart multishot
- recv_user_data = rnd.int(u64);
- _ = try buf_grp.recv_multishot(recv_user_data, fds.server, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit()); // submit
-
- chunk = data[buffer_size * 2 .. buffer_size * 3]; // third chunk
- const cqe3 = try expect_buf_grp_cqe(&ring, &buf_grp, recv_user_data, chunk);
- try testing.expect(cqe3.flags & linux.IORING_CQE_F_MORE > 0);
- try buf_grp.put(cqe3);
-
- chunk = data[buffer_size * 3 ..]; // last chunk
- const cqe4 = try expect_buf_grp_cqe(&ring, &buf_grp, recv_user_data, chunk);
- try testing.expect(cqe4.flags & linux.IORING_CQE_F_MORE > 0);
- try buf_grp.put(cqe4);
-
- // cancel pending multishot recv operation
- {
- const cancel_user_data = rnd.int(u64);
- _ = try ring.cancel(cancel_user_data, recv_user_data, 0);
- try testing.expectEqual(@as(u32, 1), try ring.submit());
-
- // expect completion of cancel operation and completion of recv operation
- var cqe_cancel = try ring.copy_cqe();
- if (cqe_cancel.err() == .INVAL) return error.SkipZigTest;
- var cqe_recv = try ring.copy_cqe();
- if (cqe_recv.err() == .INVAL) return error.SkipZigTest;
-
- // don't depend on order of completions
- if (cqe_cancel.user_data == recv_user_data and cqe_recv.user_data == cancel_user_data) {
- const a = cqe_cancel;
- const b = cqe_recv;
- cqe_cancel = b;
- cqe_recv = a;
- }
-
- // Note on different kernel results:
- // on older kernel (tested with v6.0.16, v6.1.57, v6.2.12, v6.4.16)
- // cqe_cancel.err() == .NOENT
- // cqe_recv.err() == .NOBUFS
- // on kernel (tested with v6.5.0, v6.5.7)
- // cqe_cancel.err() == .SUCCESS
- // cqe_recv.err() == .CANCELED
- // Upstream reference: https://github.com/axboe/liburing/issues/984
-
- // cancel operation is success (or NOENT on older kernels)
- try testing.expectEqual(cancel_user_data, cqe_cancel.user_data);
- try testing.expect(cqe_cancel.err() == .NOENT or cqe_cancel.err() == .SUCCESS);
-
- // recv operation is failed with err CANCELED (or NOBUFS on older kernels)
- try testing.expectEqual(recv_user_data, cqe_recv.user_data);
- try testing.expect(cqe_recv.res < 0);
- try testing.expect(cqe_recv.err() == .NOBUFS or cqe_recv.err() == .CANCELED);
- try testing.expect(cqe_recv.flags & linux.IORING_CQE_F_MORE == 0);
- }
- }
-}
-
-// Prepare, submit recv and get cqe using buffer group.
-fn buf_grp_recv_submit_get_cqe(
- ring: *IoUring,
- buf_grp: *BufferGroup,
- fd: linux.fd_t,
- user_data: u64,
-) !linux.io_uring_cqe {
- // prepare and submit recv
- const sqe = try buf_grp.recv(user_data, fd, 0);
- try testing.expect(sqe.flags & linux.IOSQE_BUFFER_SELECT == linux.IOSQE_BUFFER_SELECT);
- try testing.expect(sqe.buf_index == buf_grp.group_id);
- try testing.expectEqual(@as(u32, 1), try ring.submit()); // submit
- // get cqe, expect success
- const cqe = try ring.copy_cqe();
- try testing.expectEqual(user_data, cqe.user_data);
- try testing.expect(cqe.res >= 0); // success
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER); // IORING_CQE_F_BUFFER flag is set
-
- return cqe;
-}
-
-fn expect_buf_grp_cqe(
- ring: *IoUring,
- buf_grp: *BufferGroup,
- user_data: u64,
- expected: []const u8,
-) !linux.io_uring_cqe {
- // get cqe
- const cqe = try ring.copy_cqe();
- try testing.expectEqual(user_data, cqe.user_data);
- try testing.expect(cqe.res >= 0); // success
- try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER); // IORING_CQE_F_BUFFER flag is set
- try testing.expectEqual(expected.len, @as(usize, @intCast(cqe.res)));
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
-
- // get buffer from pool
- const buffer_id = try cqe.buffer_id();
- const len = @as(usize, @intCast(cqe.res));
- const buf = buf_grp.get_by_id(buffer_id)[0..len];
- try testing.expectEqualSlices(u8, expected, buf);
-
- return cqe;
-}
-
-test "copy_cqes with wrapping sq.cqes buffer" {
- if (!is_linux) return error.SkipZigTest;
-
- var ring = IoUring.init(2, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- try testing.expectEqual(2, ring.sq.sqes.len);
- try testing.expectEqual(4, ring.cq.cqes.len);
-
- // submit 2 entries, receive 2 completions
- var cqes: [8]linux.io_uring_cqe = undefined;
- {
- for (0..2) |_| {
- const sqe = try ring.get_sqe();
- sqe.prep_timeout(&.{ .sec = 0, .nsec = 10000 }, 0, 0);
- try testing.expect(try ring.submit() == 1);
- }
- var cqe_count: u32 = 0;
- while (cqe_count < 2) {
- cqe_count += try ring.copy_cqes(&cqes, 2 - cqe_count);
- }
- }
-
- try testing.expectEqual(2, ring.cq.head.*);
-
- // sq.sqes len is 4, starting at position 2
- // every 4 entries submit wraps completion buffer
- // we are reading ring.cq.cqes at indexes 2,3,0,1
- for (1..1024) |i| {
- for (0..4) |_| {
- const sqe = try ring.get_sqe();
- sqe.prep_timeout(&.{ .sec = 0, .nsec = 10000 }, 0, 0);
- try testing.expect(try ring.submit() == 1);
- }
- var cqe_count: u32 = 0;
- while (cqe_count < 4) {
- cqe_count += try ring.copy_cqes(&cqes, 4 - cqe_count);
- }
- try testing.expectEqual(4, cqe_count);
- try testing.expectEqual(2 + 4 * i, ring.cq.head.*);
- }
-}
-
-test "bind/listen/connect" {
- if (builtin.cpu.arch == .s390x) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/25956
-
- var ring = IoUring.init(4, 0) catch |err| switch (err) {
- error.SystemOutdated => return error.SkipZigTest,
- error.PermissionDenied => return error.SkipZigTest,
- else => return err,
- };
- defer ring.deinit();
-
- const probe = ring.get_probe() catch return error.SkipZigTest;
- // LISTEN is higher required operation
- if (!probe.is_supported(.LISTEN)) return error.SkipZigTest;
-
- var addr: linux.sockaddr.in = .{
- .port = 0,
- .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
- };
- const proto: u32 = if (addr.family == linux.AF.UNIX) 0 else linux.IPPROTO.TCP;
-
- const listen_fd = brk: {
- // Create socket
- _ = try ring.socket(1, addr.family, linux.SOCK.STREAM | linux.SOCK.CLOEXEC, proto, 0);
- try testing.expectEqual(1, try ring.submit());
- var cqe = try ring.copy_cqe();
- try testing.expectEqual(1, cqe.user_data);
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- const listen_fd: linux.fd_t = @intCast(cqe.res);
- try testing.expect(listen_fd > 2);
-
- // Prepare: set socket option * 2, bind, listen
- var optval: u32 = 1;
- (try ring.setsockopt(2, listen_fd, linux.SOL.SOCKET, linux.SO.REUSEADDR, mem.asBytes(&optval))).link_next();
- (try ring.setsockopt(3, listen_fd, linux.SOL.SOCKET, linux.SO.REUSEPORT, mem.asBytes(&optval))).link_next();
- (try ring.bind(4, listen_fd, addrAny(&addr), @sizeOf(linux.sockaddr.in), 0)).link_next();
- _ = try ring.listen(5, listen_fd, 1, 0);
- // Submit 4 operations
- try testing.expectEqual(4, try ring.submit());
- // Expect all to succeed
- for (2..6) |user_data| {
- cqe = try ring.copy_cqe();
- try testing.expectEqual(user_data, cqe.user_data);
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- }
-
- // Check that socket option is set
- optval = 0;
- _ = try ring.getsockopt(5, listen_fd, linux.SOL.SOCKET, linux.SO.REUSEADDR, mem.asBytes(&optval));
- try testing.expectEqual(1, try ring.submit());
- cqe = try ring.copy_cqe();
- try testing.expectEqual(5, cqe.user_data);
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- try testing.expectEqual(1, optval);
-
- // Read system assigned port into addr
- var addr_len: posix.socklen_t = @sizeOf(linux.sockaddr.in);
- try posix.getsockname(listen_fd, addrAny(&addr), &addr_len);
-
- break :brk listen_fd;
- };
-
- const connect_fd = brk: {
- // Create connect socket
- _ = try ring.socket(6, addr.family, linux.SOCK.STREAM | linux.SOCK.CLOEXEC, proto, 0);
- try testing.expectEqual(1, try ring.submit());
- const cqe = try ring.copy_cqe();
- try testing.expectEqual(6, cqe.user_data);
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- // Get connect socket fd
- const connect_fd: linux.fd_t = @intCast(cqe.res);
- try testing.expect(connect_fd > 2 and connect_fd != listen_fd);
- break :brk connect_fd;
- };
-
- // Prepare accept/connect operations
- _ = try ring.accept(7, listen_fd, null, null, 0);
- _ = try ring.connect(8, connect_fd, addrAny(&addr), @sizeOf(linux.sockaddr.in));
- try testing.expectEqual(2, try ring.submit());
- // Get listener accepted socket
- var accept_fd: posix.socket_t = 0;
- for (0..2) |_| {
- const cqe = try ring.copy_cqe();
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- if (cqe.user_data == 7) {
- accept_fd = @intCast(cqe.res);
- } else {
- try testing.expectEqual(8, cqe.user_data);
- }
- }
- try testing.expect(accept_fd > 2 and accept_fd != listen_fd and accept_fd != connect_fd);
-
- // Communicate
- try testSendRecv(&ring, connect_fd, accept_fd);
- try testSendRecv(&ring, accept_fd, connect_fd);
-
- // Shutdown and close all sockets
- for ([_]posix.socket_t{ connect_fd, accept_fd, listen_fd }) |fd| {
- (try ring.shutdown(9, fd, posix.SHUT.RDWR)).link_next();
- _ = try ring.close(10, fd);
- try testing.expectEqual(2, try ring.submit());
- for (0..2) |i| {
- const cqe = try ring.copy_cqe();
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- try testing.expectEqual(9 + i, cqe.user_data);
- }
- }
-}
-
-fn testSendRecv(ring: *IoUring, send_fd: posix.socket_t, recv_fd: posix.socket_t) !void {
- const buffer_send = "0123456789abcdf" ** 10;
- var buffer_recv: [buffer_send.len * 2]u8 = undefined;
-
- // 2 sends
- _ = try ring.send(1, send_fd, buffer_send, linux.MSG.WAITALL);
- _ = try ring.send(2, send_fd, buffer_send, linux.MSG.WAITALL);
- try testing.expectEqual(2, try ring.submit());
- for (0..2) |i| {
- const cqe = try ring.copy_cqe();
- try testing.expectEqual(1 + i, cqe.user_data);
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- try testing.expectEqual(buffer_send.len, @as(usize, @intCast(cqe.res)));
- }
-
- // receive
- var recv_len: usize = 0;
- while (recv_len < buffer_send.len * 2) {
- _ = try ring.recv(3, recv_fd, .{ .buffer = buffer_recv[recv_len..] }, 0);
- try testing.expectEqual(1, try ring.submit());
- const cqe = try ring.copy_cqe();
- try testing.expectEqual(3, cqe.user_data);
- try testing.expectEqual(posix.E.SUCCESS, cqe.err());
- recv_len += @intCast(cqe.res);
- }
-
- // inspect recv buffer
- try testing.expectEqualSlices(u8, buffer_send, buffer_recv[0..buffer_send.len]);
- try testing.expectEqualSlices(u8, buffer_send, buffer_recv[buffer_send.len..]);
-}
-
-fn addrAny(addr: *linux.sockaddr.in) *linux.sockaddr {
- return @ptrCast(addr);
+test {
+ if (is_linux) _ = @import("IoUring/test.zig");
}
diff --git a/lib/std/os/linux/IoUring/test.zig b/lib/std/os/linux/IoUring/test.zig
new file mode 100644
index 0000000000..251ba14a8b
--- /dev/null
+++ b/lib/std/os/linux/IoUring/test.zig
@@ -0,0 +1,2691 @@
+const builtin = @import("builtin");
+
+const std = @import("../../../std.zig");
+const Io = std.Io;
+const mem = std.mem;
+const assert = std.debug.assert;
+const testing = std.testing;
+const linux = std.os.linux;
+
+const IoUring = std.os.linux.IoUring;
+const BufferGroup = IoUring.BufferGroup;
+
+const posix = std.posix;
+const iovec = posix.iovec;
+const iovec_const = posix.iovec_const;
+
+comptime {
+ assert(builtin.os.tag == .linux);
+}
+
+test "structs/offsets/entries" {
+ try testing.expectEqual(@as(usize, 120), @sizeOf(linux.io_uring_params));
+ try testing.expectEqual(@as(usize, 64), @sizeOf(linux.io_uring_sqe));
+ try testing.expectEqual(@as(usize, 16), @sizeOf(linux.io_uring_cqe));
+
+ try testing.expectEqual(0, linux.IORING_OFF_SQ_RING);
+ try testing.expectEqual(0x8000000, linux.IORING_OFF_CQ_RING);
+ try testing.expectEqual(0x10000000, linux.IORING_OFF_SQES);
+
+ try testing.expectError(error.EntriesZero, IoUring.init(0, 0));
+ try testing.expectError(error.EntriesNotPowerOfTwo, IoUring.init(3, 0));
+}
+
+test "nop" {
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer {
+ ring.deinit();
+ testing.expectEqual(@as(linux.fd_t, -1), ring.fd) catch @panic("test failed");
+ }
+
+ const sqe = try ring.nop(0xaaaaaaaa);
+ try testing.expectEqual(linux.io_uring_sqe{
+ .opcode = .NOP,
+ .flags = 0,
+ .ioprio = 0,
+ .fd = 0,
+ .off = 0,
+ .addr = 0,
+ .len = 0,
+ .rw_flags = 0,
+ .user_data = 0xaaaaaaaa,
+ .buf_index = 0,
+ .personality = 0,
+ .splice_fd_in = 0,
+ .addr3 = 0,
+ .resv = 0,
+ }, sqe.*);
+
+ try testing.expectEqual(@as(u32, 0), ring.sq.sqe_head);
+ try testing.expectEqual(@as(u32, 1), ring.sq.sqe_tail);
+ try testing.expectEqual(@as(u32, 0), ring.sq.tail.*);
+ try testing.expectEqual(@as(u32, 0), ring.cq.head.*);
+ try testing.expectEqual(@as(u32, 1), ring.sq_ready());
+ try testing.expectEqual(@as(u32, 0), ring.cq_ready());
+
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ try testing.expectEqual(@as(u32, 1), ring.sq.sqe_head);
+ try testing.expectEqual(@as(u32, 1), ring.sq.sqe_tail);
+ try testing.expectEqual(@as(u32, 1), ring.sq.tail.*);
+ try testing.expectEqual(@as(u32, 0), ring.cq.head.*);
+ try testing.expectEqual(@as(u32, 0), ring.sq_ready());
+
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xaaaaaaaa,
+ .res = 0,
+ .flags = 0,
+ }, try ring.copy_cqe());
+ try testing.expectEqual(@as(u32, 1), ring.cq.head.*);
+ try testing.expectEqual(@as(u32, 0), ring.cq_ready());
+
+ const sqe_barrier = try ring.nop(0xbbbbbbbb);
+ sqe_barrier.flags |= linux.IOSQE_IO_DRAIN;
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xbbbbbbbb,
+ .res = 0,
+ .flags = 0,
+ }, try ring.copy_cqe());
+ try testing.expectEqual(@as(u32, 2), ring.sq.sqe_head);
+ try testing.expectEqual(@as(u32, 2), ring.sq.sqe_tail);
+ try testing.expectEqual(@as(u32, 2), ring.sq.tail.*);
+ try testing.expectEqual(@as(u32, 2), ring.cq.head.*);
+}
+
+test "readv" {
+ const io = testing.io;
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const file = try Io.Dir.openFileAbsolute(io, "/dev/zero", .{});
+ defer file.close(io);
+
+ // Linux Kernel 5.4 supports IORING_REGISTER_FILES but not sparse fd sets (i.e. an fd of -1).
+ // Linux Kernel 5.5 adds support for sparse fd sets.
+ // Compare:
+ // https://github.com/torvalds/linux/blob/v5.4/fs/io_uring.c#L3119-L3124 vs
+ // https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L6687-L6691
+ // We therefore avoid stressing sparse fd sets here:
+ var registered_fds = [_]linux.fd_t{0} ** 1;
+ const fd_index = 0;
+ registered_fds[fd_index] = file.handle;
+ try ring.register_files(registered_fds[0..]);
+
+ var buffer = [_]u8{42} ** 128;
+ var iovecs = [_]iovec{iovec{ .base = &buffer, .len = buffer.len }};
+ const sqe = try ring.read(0xcccccccc, fd_index, .{ .iovecs = iovecs[0..] }, 0);
+ try testing.expectEqual(linux.IORING_OP.READV, sqe.opcode);
+ sqe.flags |= linux.IOSQE_FIXED_FILE;
+
+ try testing.expectError(error.SubmissionQueueFull, ring.nop(0));
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xcccccccc,
+ .res = buffer.len,
+ .flags = 0,
+ }, try ring.copy_cqe());
+ try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer.len), buffer[0..]);
+
+ try ring.unregister_files();
+}
+
+test "writev/fsync/readv" {
+ const io = testing.io;
+
+ var ring = IoUring.init(4, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
+ const path = "test_io_uring_writev_fsync_readv";
+ const file = try tmp.dir.createFile(io, path, .{ .read = true });
+ defer file.close(io);
+ const fd = file.handle;
+
+ const buffer_write = [_]u8{42} ** 128;
+ const iovecs_write = [_]iovec_const{
+ iovec_const{ .base = &buffer_write, .len = buffer_write.len },
+ };
+ var buffer_read = [_]u8{0} ** 128;
+ var iovecs_read = [_]iovec{
+ iovec{ .base = &buffer_read, .len = buffer_read.len },
+ };
+
+ const sqe_writev = try ring.writev(0xdddddddd, fd, iovecs_write[0..], 17);
+ try testing.expectEqual(linux.IORING_OP.WRITEV, sqe_writev.opcode);
+ try testing.expectEqual(@as(u64, 17), sqe_writev.off);
+ sqe_writev.flags |= linux.IOSQE_IO_LINK;
+
+ const sqe_fsync = try ring.fsync(0xeeeeeeee, fd, 0);
+ try testing.expectEqual(linux.IORING_OP.FSYNC, sqe_fsync.opcode);
+ try testing.expectEqual(fd, sqe_fsync.fd);
+ sqe_fsync.flags |= linux.IOSQE_IO_LINK;
+
+ const sqe_readv = try ring.read(0xffffffff, fd, .{ .iovecs = iovecs_read[0..] }, 17);
+ try testing.expectEqual(linux.IORING_OP.READV, sqe_readv.opcode);
+ try testing.expectEqual(@as(u64, 17), sqe_readv.off);
+
+ try testing.expectEqual(@as(u32, 3), ring.sq_ready());
+ try testing.expectEqual(@as(u32, 3), try ring.submit_and_wait(3));
+ try testing.expectEqual(@as(u32, 0), ring.sq_ready());
+ try testing.expectEqual(@as(u32, 3), ring.cq_ready());
+
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xdddddddd,
+ .res = buffer_write.len,
+ .flags = 0,
+ }, try ring.copy_cqe());
+ try testing.expectEqual(@as(u32, 2), ring.cq_ready());
+
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xeeeeeeee,
+ .res = 0,
+ .flags = 0,
+ }, try ring.copy_cqe());
+ try testing.expectEqual(@as(u32, 1), ring.cq_ready());
+
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xffffffff,
+ .res = buffer_read.len,
+ .flags = 0,
+ }, try ring.copy_cqe());
+ try testing.expectEqual(@as(u32, 0), ring.cq_ready());
+
+ try testing.expectEqualSlices(u8, buffer_write[0..], buffer_read[0..]);
+}
+
+test "write/read" {
+ const io = testing.io;
+
+ var ring = IoUring.init(2, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+ const path = "test_io_uring_write_read";
+ const file = try tmp.dir.createFile(io, path, .{ .read = true });
+ defer file.close(io);
+ const fd = file.handle;
+
+ const buffer_write = [_]u8{97} ** 20;
+ var buffer_read = [_]u8{98} ** 20;
+ const sqe_write = try ring.write(0x11111111, fd, buffer_write[0..], 10);
+ try testing.expectEqual(linux.IORING_OP.WRITE, sqe_write.opcode);
+ try testing.expectEqual(@as(u64, 10), sqe_write.off);
+ sqe_write.flags |= linux.IOSQE_IO_LINK;
+ const sqe_read = try ring.read(0x22222222, fd, .{ .buffer = buffer_read[0..] }, 10);
+ try testing.expectEqual(linux.IORING_OP.READ, sqe_read.opcode);
+ try testing.expectEqual(@as(u64, 10), sqe_read.off);
+ try testing.expectEqual(@as(u32, 2), try ring.submit());
+
+ const cqe_write = try ring.copy_cqe();
+ const cqe_read = try ring.copy_cqe();
+ // Prior to Linux Kernel 5.6 this is the only way to test for read/write support:
+ // https://lwn.net/Articles/809820/
+ if (cqe_write.err() == .INVAL) return error.SkipZigTest;
+ if (cqe_read.err() == .INVAL) return error.SkipZigTest;
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x11111111,
+ .res = buffer_write.len,
+ .flags = 0,
+ }, cqe_write);
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x22222222,
+ .res = buffer_read.len,
+ .flags = 0,
+ }, cqe_read);
+ try testing.expectEqualSlices(u8, buffer_write[0..], buffer_read[0..]);
+}
+
+test "splice/read" {
+ const io = testing.io;
+
+ var ring = IoUring.init(4, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var tmp = std.testing.tmpDir(.{});
+ const path_src = "test_io_uring_splice_src";
+ const file_src = try tmp.dir.createFile(io, path_src, .{ .read = true });
+ defer file_src.close(io);
+ const fd_src = file_src.handle;
+
+ const path_dst = "test_io_uring_splice_dst";
+ const file_dst = try tmp.dir.createFile(io, path_dst, .{ .read = true });
+ defer file_dst.close(io);
+ const fd_dst = file_dst.handle;
+
+ const buffer_write = [_]u8{97} ** 20;
+ var buffer_read = [_]u8{98} ** 20;
+ try file_src.writeStreamingAll(io, &buffer_write);
+
+ const fds = try posix.pipe();
+ const pipe_offset: u64 = std.math.maxInt(u64);
+
+ const sqe_splice_to_pipe = try ring.splice(0x11111111, fd_src, 0, fds[1], pipe_offset, buffer_write.len);
+ try testing.expectEqual(linux.IORING_OP.SPLICE, sqe_splice_to_pipe.opcode);
+ try testing.expectEqual(@as(u64, 0), sqe_splice_to_pipe.addr);
+ try testing.expectEqual(pipe_offset, sqe_splice_to_pipe.off);
+ sqe_splice_to_pipe.flags |= linux.IOSQE_IO_LINK;
+
+ const sqe_splice_from_pipe = try ring.splice(0x22222222, fds[0], pipe_offset, fd_dst, 10, buffer_write.len);
+ try testing.expectEqual(linux.IORING_OP.SPLICE, sqe_splice_from_pipe.opcode);
+ try testing.expectEqual(pipe_offset, sqe_splice_from_pipe.addr);
+ try testing.expectEqual(@as(u64, 10), sqe_splice_from_pipe.off);
+ sqe_splice_from_pipe.flags |= linux.IOSQE_IO_LINK;
+
+ const sqe_read = try ring.read(0x33333333, fd_dst, .{ .buffer = buffer_read[0..] }, 10);
+ try testing.expectEqual(linux.IORING_OP.READ, sqe_read.opcode);
+ try testing.expectEqual(@as(u64, 10), sqe_read.off);
+ try testing.expectEqual(@as(u32, 3), try ring.submit());
+
+ const cqe_splice_to_pipe = try ring.copy_cqe();
+ const cqe_splice_from_pipe = try ring.copy_cqe();
+ const cqe_read = try ring.copy_cqe();
+ // Prior to Linux Kernel 5.6 this is the only way to test for splice/read support:
+ // https://lwn.net/Articles/809820/
+ if (cqe_splice_to_pipe.err() == .INVAL) return error.SkipZigTest;
+ if (cqe_splice_from_pipe.err() == .INVAL) return error.SkipZigTest;
+ if (cqe_read.err() == .INVAL) return error.SkipZigTest;
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x11111111,
+ .res = buffer_write.len,
+ .flags = 0,
+ }, cqe_splice_to_pipe);
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x22222222,
+ .res = buffer_write.len,
+ .flags = 0,
+ }, cqe_splice_from_pipe);
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x33333333,
+ .res = buffer_read.len,
+ .flags = 0,
+ }, cqe_read);
+ try testing.expectEqualSlices(u8, buffer_write[0..], buffer_read[0..]);
+}
+
+test "write_fixed/read_fixed" {
+ const io = testing.io;
+
+ var ring = IoUring.init(2, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
+ const path = "test_io_uring_write_read_fixed";
+ const file = try tmp.dir.createFile(io, path, .{ .read = true });
+ defer file.close(io);
+ const fd = file.handle;
+
+ var raw_buffers: [2][11]u8 = undefined;
+ // First buffer will be written to the file.
+ @memset(&raw_buffers[0], 'z');
+ raw_buffers[0][0.."foobar".len].* = "foobar".*;
+
+ var buffers = [2]iovec{
+ .{ .base = &raw_buffers[0], .len = raw_buffers[0].len },
+ .{ .base = &raw_buffers[1], .len = raw_buffers[1].len },
+ };
+ ring.register_buffers(&buffers) catch |err| switch (err) {
+ error.SystemResources => {
+ // See https://github.com/ziglang/zig/issues/15362
+ return error.SkipZigTest;
+ },
+ else => |e| return e,
+ };
+
+ const sqe_write = try ring.write_fixed(0x45454545, fd, &buffers[0], 3, 0);
+ try testing.expectEqual(linux.IORING_OP.WRITE_FIXED, sqe_write.opcode);
+ try testing.expectEqual(@as(u64, 3), sqe_write.off);
+ sqe_write.flags |= linux.IOSQE_IO_LINK;
+
+ const sqe_read = try ring.read_fixed(0x12121212, fd, &buffers[1], 0, 1);
+ try testing.expectEqual(linux.IORING_OP.READ_FIXED, sqe_read.opcode);
+ try testing.expectEqual(@as(u64, 0), sqe_read.off);
+
+ try testing.expectEqual(@as(u32, 2), try ring.submit());
+
+ const cqe_write = try ring.copy_cqe();
+ const cqe_read = try ring.copy_cqe();
+
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x45454545,
+ .res = @as(i32, @intCast(buffers[0].len)),
+ .flags = 0,
+ }, cqe_write);
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x12121212,
+ .res = @as(i32, @intCast(buffers[1].len)),
+ .flags = 0,
+ }, cqe_read);
+
+ try testing.expectEqualSlices(u8, "\x00\x00\x00", buffers[1].base[0..3]);
+ try testing.expectEqualSlices(u8, "foobar", buffers[1].base[3..9]);
+ try testing.expectEqualSlices(u8, "zz", buffers[1].base[9..11]);
+}
+
+test "openat" {
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
+ const path = "test_io_uring_openat";
+
+ // Workaround for LLVM bug: https://github.com/ziglang/zig/issues/12014
+ const path_addr = if (builtin.zig_backend == .stage2_llvm) p: {
+ var workaround = path;
+ _ = &workaround;
+ break :p @intFromPtr(workaround);
+ } else @intFromPtr(path);
+
+ const flags: linux.O = .{ .CLOEXEC = true, .ACCMODE = .RDWR, .CREAT = true };
+ const mode: posix.mode_t = 0o666;
+ const sqe_openat = try ring.openat(0x33333333, tmp.dir.handle, path, flags, mode);
+ try testing.expectEqual(linux.io_uring_sqe{
+ .opcode = .OPENAT,
+ .flags = 0,
+ .ioprio = 0,
+ .fd = tmp.dir.handle,
+ .off = 0,
+ .addr = path_addr,
+ .len = mode,
+ .rw_flags = @bitCast(flags),
+ .user_data = 0x33333333,
+ .buf_index = 0,
+ .personality = 0,
+ .splice_fd_in = 0,
+ .addr3 = 0,
+ .resv = 0,
+ }, sqe_openat.*);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe_openat = try ring.copy_cqe();
+ try testing.expectEqual(@as(u64, 0x33333333), cqe_openat.user_data);
+ if (cqe_openat.err() == .INVAL) return error.SkipZigTest;
+ if (cqe_openat.err() == .BADF) return error.SkipZigTest;
+ if (cqe_openat.res <= 0) std.debug.print("\ncqe_openat.res={}\n", .{cqe_openat.res});
+ try testing.expect(cqe_openat.res > 0);
+ try testing.expectEqual(@as(u32, 0), cqe_openat.flags);
+
+ posix.close(cqe_openat.res);
+}
+
+test "close" {
+ const io = testing.io;
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
+ const path = "test_io_uring_close";
+ const file = try tmp.dir.createFile(io, path, .{});
+ errdefer file.close(io);
+
+ const sqe_close = try ring.close(0x44444444, file.handle);
+ try testing.expectEqual(linux.IORING_OP.CLOSE, sqe_close.opcode);
+ try testing.expectEqual(file.handle, sqe_close.fd);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe_close = try ring.copy_cqe();
+ if (cqe_close.err() == .INVAL) return error.SkipZigTest;
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x44444444,
+ .res = 0,
+ .flags = 0,
+ }, cqe_close);
+}
+
+test "accept/connect/send/recv" {
+ const io = testing.io;
+ _ = io;
+
+ var ring = IoUring.init(16, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const socket_test_harness = try createSocketTestHarness(&ring);
+ defer socket_test_harness.close();
+
+ const buffer_send = [_]u8{ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 };
+ var buffer_recv = [_]u8{ 0, 1, 0, 1, 0 };
+
+ const sqe_send = try ring.send(0xeeeeeeee, socket_test_harness.client, buffer_send[0..], 0);
+ sqe_send.flags |= linux.IOSQE_IO_LINK;
+ _ = try ring.recv(0xffffffff, socket_test_harness.server, .{ .buffer = buffer_recv[0..] }, 0);
+ try testing.expectEqual(@as(u32, 2), try ring.submit());
+
+ const cqe_send = try ring.copy_cqe();
+ if (cqe_send.err() == .INVAL) return error.SkipZigTest;
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xeeeeeeee,
+ .res = buffer_send.len,
+ .flags = 0,
+ }, cqe_send);
+
+ const cqe_recv = try ring.copy_cqe();
+ if (cqe_recv.err() == .INVAL) return error.SkipZigTest;
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xffffffff,
+ .res = buffer_recv.len,
+ // ignore IORING_CQE_F_SOCK_NONEMPTY since it is only set on some systems
+ .flags = cqe_recv.flags & linux.IORING_CQE_F_SOCK_NONEMPTY,
+ }, cqe_recv);
+
+ try testing.expectEqualSlices(u8, buffer_send[0..buffer_recv.len], buffer_recv[0..]);
+}
+
+test "sendmsg/recvmsg" {
+ var ring = IoUring.init(2, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var address_server: linux.sockaddr.in = .{
+ .port = 0,
+ .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+ };
+
+ const server = try posix.socket(address_server.family, posix.SOCK.DGRAM, 0);
+ defer posix.close(server);
+ try posix.setsockopt(server, posix.SOL.SOCKET, posix.SO.REUSEPORT, &mem.toBytes(@as(c_int, 1)));
+ try posix.setsockopt(server, posix.SOL.SOCKET, posix.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
+ try posix.bind(server, addrAny(&address_server), @sizeOf(linux.sockaddr.in));
+
+ // set address_server to the OS-chosen IP/port.
+ var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
+ try posix.getsockname(server, addrAny(&address_server), &slen);
+
+ const client = try posix.socket(address_server.family, posix.SOCK.DGRAM, 0);
+ defer posix.close(client);
+
+ const buffer_send = [_]u8{42} ** 128;
+ const iovecs_send = [_]iovec_const{
+ iovec_const{ .base = &buffer_send, .len = buffer_send.len },
+ };
+ const msg_send: linux.msghdr_const = .{
+ .name = addrAny(&address_server),
+ .namelen = @sizeOf(linux.sockaddr.in),
+ .iov = &iovecs_send,
+ .iovlen = 1,
+ .control = null,
+ .controllen = 0,
+ .flags = 0,
+ };
+ const sqe_sendmsg = try ring.sendmsg(0x11111111, client, &msg_send, 0);
+ sqe_sendmsg.flags |= linux.IOSQE_IO_LINK;
+ try testing.expectEqual(linux.IORING_OP.SENDMSG, sqe_sendmsg.opcode);
+ try testing.expectEqual(client, sqe_sendmsg.fd);
+
+ var buffer_recv = [_]u8{0} ** 128;
+ var iovecs_recv = [_]iovec{
+ iovec{ .base = &buffer_recv, .len = buffer_recv.len },
+ };
+ var address_recv: linux.sockaddr.in = .{
+ .port = 0,
+ .addr = 0,
+ };
+ var msg_recv: linux.msghdr = .{
+ .name = addrAny(&address_recv),
+ .namelen = @sizeOf(linux.sockaddr.in),
+ .iov = &iovecs_recv,
+ .iovlen = 1,
+ .control = null,
+ .controllen = 0,
+ .flags = 0,
+ };
+ const sqe_recvmsg = try ring.recvmsg(0x22222222, server, &msg_recv, 0);
+ try testing.expectEqual(linux.IORING_OP.RECVMSG, sqe_recvmsg.opcode);
+ try testing.expectEqual(server, sqe_recvmsg.fd);
+
+ try testing.expectEqual(@as(u32, 2), ring.sq_ready());
+ try testing.expectEqual(@as(u32, 2), try ring.submit_and_wait(2));
+ try testing.expectEqual(@as(u32, 0), ring.sq_ready());
+ try testing.expectEqual(@as(u32, 2), ring.cq_ready());
+
+ const cqe_sendmsg = try ring.copy_cqe();
+ if (cqe_sendmsg.res == -@as(i32, @intFromEnum(linux.E.INVAL))) return error.SkipZigTest;
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x11111111,
+ .res = buffer_send.len,
+ .flags = 0,
+ }, cqe_sendmsg);
+
+ const cqe_recvmsg = try ring.copy_cqe();
+ if (cqe_recvmsg.res == -@as(i32, @intFromEnum(linux.E.INVAL))) return error.SkipZigTest;
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x22222222,
+ .res = buffer_recv.len,
+ // ignore IORING_CQE_F_SOCK_NONEMPTY since it is set non-deterministically
+ .flags = cqe_recvmsg.flags & linux.IORING_CQE_F_SOCK_NONEMPTY,
+ }, cqe_recvmsg);
+
+ try testing.expectEqualSlices(u8, buffer_send[0..buffer_recv.len], buffer_recv[0..]);
+}
+
+test "timeout (after a relative time)" {
+ const io = testing.io;
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const ms = 10;
+ const margin = 5;
+ const ts: linux.kernel_timespec = .{ .sec = 0, .nsec = ms * 1000000 };
+
+ const started = try std.Io.Clock.awake.now(io);
+ const sqe = try ring.timeout(0x55555555, &ts, 0, 0);
+ try testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe.opcode);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ const cqe = try ring.copy_cqe();
+ const stopped = try std.Io.Clock.awake.now(io);
+
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x55555555,
+ .res = -@as(i32, @intFromEnum(linux.E.TIME)),
+ .flags = 0,
+ }, cqe);
+
+ // Tests should not depend on timings: skip test if outside margin.
+ const ms_elapsed = started.durationTo(stopped).toMilliseconds();
+ if (ms_elapsed > margin) return error.SkipZigTest;
+}
+
+test "timeout (after a number of completions)" {
+ var ring = IoUring.init(2, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const ts: linux.kernel_timespec = .{ .sec = 3, .nsec = 0 };
+ const count_completions: u64 = 1;
+ const sqe_timeout = try ring.timeout(0x66666666, &ts, count_completions, 0);
+ try testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe_timeout.opcode);
+ try testing.expectEqual(count_completions, sqe_timeout.off);
+ _ = try ring.nop(0x77777777);
+ try testing.expectEqual(@as(u32, 2), try ring.submit());
+
+ const cqe_nop = try ring.copy_cqe();
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x77777777,
+ .res = 0,
+ .flags = 0,
+ }, cqe_nop);
+
+ const cqe_timeout = try ring.copy_cqe();
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x66666666,
+ .res = 0,
+ .flags = 0,
+ }, cqe_timeout);
+}
+
+test "timeout_remove" {
+ var ring = IoUring.init(2, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const ts: linux.kernel_timespec = .{ .sec = 3, .nsec = 0 };
+ const sqe_timeout = try ring.timeout(0x88888888, &ts, 0, 0);
+ try testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe_timeout.opcode);
+ try testing.expectEqual(@as(u64, 0x88888888), sqe_timeout.user_data);
+
+ const sqe_timeout_remove = try ring.timeout_remove(0x99999999, 0x88888888, 0);
+ try testing.expectEqual(linux.IORING_OP.TIMEOUT_REMOVE, sqe_timeout_remove.opcode);
+ try testing.expectEqual(@as(u64, 0x88888888), sqe_timeout_remove.addr);
+ try testing.expectEqual(@as(u64, 0x99999999), sqe_timeout_remove.user_data);
+
+ try testing.expectEqual(@as(u32, 2), try ring.submit());
+
+ // The order in which the CQE arrive is not clearly documented and it changed with kernel 5.18:
+ // * kernel 5.10 gives user data 0x88888888 first, 0x99999999 second
+ // * kernel 5.18 gives user data 0x99999999 first, 0x88888888 second
+
+ var cqes: [2]linux.io_uring_cqe = undefined;
+ cqes[0] = try ring.copy_cqe();
+ cqes[1] = try ring.copy_cqe();
+
+ for (cqes) |cqe| {
+ // IORING_OP_TIMEOUT_REMOVE is not supported by this kernel version:
+ // Timeout remove operations set the fd to -1, which results in EBADF before EINVAL.
+ // We use IORING_FEAT_RW_CUR_POS as a safety check here to make sure we are at least pre-5.6.
+ // We don't want to skip this test for newer kernels.
+ if (cqe.user_data == 0x99999999 and
+ cqe.err() == .BADF and
+ (ring.features & linux.IORING_FEAT_RW_CUR_POS) == 0)
+ {
+ return error.SkipZigTest;
+ }
+
+ try testing.expect(cqe.user_data == 0x88888888 or cqe.user_data == 0x99999999);
+
+ if (cqe.user_data == 0x88888888) {
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x88888888,
+ .res = -@as(i32, @intFromEnum(linux.E.CANCELED)),
+ .flags = 0,
+ }, cqe);
+ } else if (cqe.user_data == 0x99999999) {
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x99999999,
+ .res = 0,
+ .flags = 0,
+ }, cqe);
+ }
+ }
+}
+
+test "accept/connect/recv/link_timeout" {
+ const io = testing.io;
+ _ = io;
+
+ var ring = IoUring.init(16, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const socket_test_harness = try createSocketTestHarness(&ring);
+ defer socket_test_harness.close();
+
+ var buffer_recv = [_]u8{ 0, 1, 0, 1, 0 };
+
+ const sqe_recv = try ring.recv(0xffffffff, socket_test_harness.server, .{ .buffer = buffer_recv[0..] }, 0);
+ sqe_recv.flags |= linux.IOSQE_IO_LINK;
+
+ const ts = linux.kernel_timespec{ .sec = 0, .nsec = 1000000 };
+ _ = try ring.link_timeout(0x22222222, &ts, 0);
+
+ const nr_wait = try ring.submit();
+ try testing.expectEqual(@as(u32, 2), nr_wait);
+
+ var i: usize = 0;
+ while (i < nr_wait) : (i += 1) {
+ const cqe = try ring.copy_cqe();
+ switch (cqe.user_data) {
+ 0xffffffff => {
+ if (cqe.res != -@as(i32, @intFromEnum(linux.E.INTR)) and
+ cqe.res != -@as(i32, @intFromEnum(linux.E.CANCELED)))
+ {
+ std.debug.print("Req 0x{x} got {d}\n", .{ cqe.user_data, cqe.res });
+ try testing.expect(false);
+ }
+ },
+ 0x22222222 => {
+ if (cqe.res != -@as(i32, @intFromEnum(linux.E.ALREADY)) and
+ cqe.res != -@as(i32, @intFromEnum(linux.E.TIME)))
+ {
+ std.debug.print("Req 0x{x} got {d}\n", .{ cqe.user_data, cqe.res });
+ try testing.expect(false);
+ }
+ },
+ else => @panic("should not happen"),
+ }
+ }
+}
+
+test "fallocate" {
+ const io = testing.io;
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
+ const path = "test_io_uring_fallocate";
+ const file = try tmp.dir.createFile(io, path, .{});
+ defer file.close(io);
+
+ try testing.expectEqual(@as(u64, 0), (try file.stat(io)).size);
+
+ const len: u64 = 65536;
+ const sqe = try ring.fallocate(0xaaaaaaaa, file.handle, 0, 0, len);
+ try testing.expectEqual(linux.IORING_OP.FALLOCATE, sqe.opcode);
+ try testing.expectEqual(file.handle, sqe.fd);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ // This kernel's io_uring does not yet implement fallocate():
+ .INVAL => return error.SkipZigTest,
+ // This kernel does not implement fallocate():
+ .NOSYS => return error.SkipZigTest,
+ // The filesystem containing the file referred to by fd does not support this operation;
+ // or the mode is not supported by the filesystem containing the file referred to by fd:
+ .OPNOTSUPP => return error.SkipZigTest,
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xaaaaaaaa,
+ .res = 0,
+ .flags = 0,
+ }, cqe);
+
+ try testing.expectEqual(len, (try file.stat(io)).size);
+}
+
+test "statx" {
+ const io = testing.io;
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+ const path = "test_io_uring_statx";
+ const file = try tmp.dir.createFile(io, path, .{});
+ defer file.close(io);
+
+ try testing.expectEqual(@as(u64, 0), (try file.stat(io)).size);
+
+ try file.writeStreamingAll(io, "foobar");
+
+ var buf: linux.Statx = undefined;
+ const sqe = try ring.statx(
+ 0xaaaaaaaa,
+ tmp.dir.handle,
+ path,
+ 0,
+ .{ .SIZE = true },
+ &buf,
+ );
+ try testing.expectEqual(linux.IORING_OP.STATX, sqe.opcode);
+ try testing.expectEqual(@as(i32, tmp.dir.handle), sqe.fd);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ // This kernel's io_uring does not yet implement statx():
+ .INVAL => return error.SkipZigTest,
+ // This kernel does not implement statx():
+ .NOSYS => return error.SkipZigTest,
+ // The filesystem containing the file referred to by fd does not support this operation;
+ // or the mode is not supported by the filesystem containing the file referred to by fd:
+ .OPNOTSUPP => return error.SkipZigTest,
+ // not supported on older kernels (5.4)
+ .BADF => return error.SkipZigTest,
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xaaaaaaaa,
+ .res = 0,
+ .flags = 0,
+ }, cqe);
+
+ try testing.expect(buf.mask.SIZE);
+ try testing.expectEqual(@as(u64, 6), buf.size);
+}
+
+test "accept/connect/recv/cancel" {
+ const io = testing.io;
+ _ = io;
+
+ var ring = IoUring.init(16, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const socket_test_harness = try createSocketTestHarness(&ring);
+ defer socket_test_harness.close();
+
+ var buffer_recv = [_]u8{ 0, 1, 0, 1, 0 };
+
+ _ = try ring.recv(0xffffffff, socket_test_harness.server, .{ .buffer = buffer_recv[0..] }, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const sqe_cancel = try ring.cancel(0x99999999, 0xffffffff, 0);
+ try testing.expectEqual(linux.IORING_OP.ASYNC_CANCEL, sqe_cancel.opcode);
+ try testing.expectEqual(@as(u64, 0xffffffff), sqe_cancel.addr);
+ try testing.expectEqual(@as(u64, 0x99999999), sqe_cancel.user_data);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ var cqe_recv = try ring.copy_cqe();
+ if (cqe_recv.err() == .INVAL) return error.SkipZigTest;
+ var cqe_cancel = try ring.copy_cqe();
+ if (cqe_cancel.err() == .INVAL) return error.SkipZigTest;
+
+ // The recv/cancel CQEs may arrive in any order, the recv CQE will sometimes come first:
+ if (cqe_recv.user_data == 0x99999999 and cqe_cancel.user_data == 0xffffffff) {
+ const a = cqe_recv;
+ const b = cqe_cancel;
+ cqe_recv = b;
+ cqe_cancel = a;
+ }
+
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xffffffff,
+ .res = -@as(i32, @intFromEnum(linux.E.CANCELED)),
+ .flags = 0,
+ }, cqe_recv);
+
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x99999999,
+ .res = 0,
+ .flags = 0,
+ }, cqe_cancel);
+}
+
+test "register_files_update" {
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const fd = try posix.openZ("/dev/zero", .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
+ defer posix.close(fd);
+
+ var registered_fds = [_]linux.fd_t{0} ** 2;
+ const fd_index = 0;
+ const fd_index2 = 1;
+ registered_fds[fd_index] = fd;
+ registered_fds[fd_index2] = -1;
+
+ ring.register_files(registered_fds[0..]) catch |err| switch (err) {
+ // Happens when the kernel doesn't support sparse entry (-1) in the file descriptors array.
+ error.FileDescriptorInvalid => return error.SkipZigTest,
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ };
+
+ // Test IORING_REGISTER_FILES_UPDATE
+ // Only available since Linux 5.5
+
+ const fd2 = try posix.openZ("/dev/zero", .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
+ defer posix.close(fd2);
+
+ registered_fds[fd_index] = fd2;
+ registered_fds[fd_index2] = -1;
+ try ring.register_files_update(0, registered_fds[0..]);
+
+ var buffer = [_]u8{42} ** 128;
+ {
+ const sqe = try ring.read(0xcccccccc, fd_index, .{ .buffer = &buffer }, 0);
+ try testing.expectEqual(linux.IORING_OP.READ, sqe.opcode);
+ sqe.flags |= linux.IOSQE_FIXED_FILE;
+
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xcccccccc,
+ .res = buffer.len,
+ .flags = 0,
+ }, try ring.copy_cqe());
+ try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer.len), buffer[0..]);
+ }
+
+ // Test with a non-zero offset
+
+ registered_fds[fd_index] = -1;
+ registered_fds[fd_index2] = -1;
+ try ring.register_files_update(1, registered_fds[1..]);
+
+ {
+ // Next read should still work since fd_index in the registered file descriptors hasn't been updated yet.
+ const sqe = try ring.read(0xcccccccc, fd_index, .{ .buffer = &buffer }, 0);
+ try testing.expectEqual(linux.IORING_OP.READ, sqe.opcode);
+ sqe.flags |= linux.IOSQE_FIXED_FILE;
+
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xcccccccc,
+ .res = buffer.len,
+ .flags = 0,
+ }, try ring.copy_cqe());
+ try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer.len), buffer[0..]);
+ }
+
+ try ring.register_files_update(0, registered_fds[0..]);
+
+ {
+ // Now this should fail since both fds are sparse (-1)
+ const sqe = try ring.read(0xcccccccc, fd_index, .{ .buffer = &buffer }, 0);
+ try testing.expectEqual(linux.IORING_OP.READ, sqe.opcode);
+ sqe.flags |= linux.IOSQE_FIXED_FILE;
+
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ const cqe = try ring.copy_cqe();
+ try testing.expectEqual(linux.E.BADF, cqe.err());
+ }
+
+ try ring.unregister_files();
+}
+
+test "shutdown" {
+ var ring = IoUring.init(16, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var address: linux.sockaddr.in = .{
+ .port = 0,
+ .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+ };
+
+ // Socket bound, expect shutdown to work
+ {
+ const server = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+ defer posix.close(server);
+ try posix.setsockopt(server, posix.SOL.SOCKET, posix.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
+ try posix.bind(server, addrAny(&address), @sizeOf(linux.sockaddr.in));
+ try posix.listen(server, 1);
+
+ // set address to the OS-chosen IP/port.
+ var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
+ try posix.getsockname(server, addrAny(&address), &slen);
+
+ const shutdown_sqe = try ring.shutdown(0x445445445, server, linux.SHUT.RD);
+ try testing.expectEqual(linux.IORING_OP.SHUTDOWN, shutdown_sqe.opcode);
+ try testing.expectEqual(@as(i32, server), shutdown_sqe.fd);
+
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ // This kernel's io_uring does not yet implement shutdown (kernel version < 5.11)
+ .INVAL => return error.SkipZigTest,
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x445445445,
+ .res = 0,
+ .flags = 0,
+ }, cqe);
+ }
+
+ // Socket not bound, expect to fail with ENOTCONN
+ {
+ const server = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+ defer posix.close(server);
+
+ const shutdown_sqe = ring.shutdown(0x445445445, server, linux.SHUT.RD) catch |err| switch (err) {
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ };
+ try testing.expectEqual(linux.IORING_OP.SHUTDOWN, shutdown_sqe.opcode);
+ try testing.expectEqual(@as(i32, server), shutdown_sqe.fd);
+
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ try testing.expectEqual(@as(u64, 0x445445445), cqe.user_data);
+ try testing.expectEqual(linux.E.NOTCONN, cqe.err());
+ }
+}
+
+test "renameat" {
+ const io = testing.io;
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const old_path = "test_io_uring_renameat_old";
+ const new_path = "test_io_uring_renameat_new";
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
+ // Write old file with data
+
+ const old_file = try tmp.dir.createFile(io, old_path, .{});
+ defer old_file.close(io);
+ try old_file.writeStreamingAll(io, "hello");
+
+ // Submit renameat
+
+ const sqe = try ring.renameat(
+ 0x12121212,
+ tmp.dir.handle,
+ old_path,
+ tmp.dir.handle,
+ new_path,
+ 0,
+ );
+ try testing.expectEqual(linux.IORING_OP.RENAMEAT, sqe.opcode);
+ try testing.expectEqual(@as(i32, tmp.dir.handle), sqe.fd);
+ try testing.expectEqual(@as(i32, tmp.dir.handle), @as(i32, @bitCast(sqe.len)));
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ // This kernel's io_uring does not yet implement renameat (kernel version < 5.11)
+ .BADF, .INVAL => return error.SkipZigTest,
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x12121212,
+ .res = 0,
+ .flags = 0,
+ }, cqe);
+
+ // Validate that the old file doesn't exist anymore
+ try testing.expectError(error.FileNotFound, tmp.dir.openFile(io, old_path, .{}));
+
+ // Validate that the new file exists with the proper content
+ var new_file_data: [16]u8 = undefined;
+ try testing.expectEqualStrings("hello", try tmp.dir.readFile(io, new_path, &new_file_data));
+}
+
+test "unlinkat" {
+ const io = testing.io;
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const path = "test_io_uring_unlinkat";
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
+ // Write old file with data
+
+ const file = try tmp.dir.createFile(io, path, .{});
+ defer file.close(io);
+
+ // Submit unlinkat
+
+ const sqe = try ring.unlinkat(
+ 0x12121212,
+ tmp.dir.handle,
+ path,
+ 0,
+ );
+ try testing.expectEqual(linux.IORING_OP.UNLINKAT, sqe.opcode);
+ try testing.expectEqual(@as(i32, tmp.dir.handle), sqe.fd);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ // This kernel's io_uring does not yet implement unlinkat (kernel version < 5.11)
+ .BADF, .INVAL => return error.SkipZigTest,
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x12121212,
+ .res = 0,
+ .flags = 0,
+ }, cqe);
+
+ // Validate that the file doesn't exist anymore
+ _ = tmp.dir.openFile(io, path, .{}) catch |err| switch (err) {
+ error.FileNotFound => {},
+ else => std.debug.panic("unexpected error: {}", .{err}),
+ };
+}
+
+test "mkdirat" {
+ const io = testing.io;
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
+ const path = "test_io_uring_mkdirat";
+
+ // Submit mkdirat
+
+ const sqe = try ring.mkdirat(
+ 0x12121212,
+ tmp.dir.handle,
+ path,
+ 0o0755,
+ );
+ try testing.expectEqual(linux.IORING_OP.MKDIRAT, sqe.opcode);
+ try testing.expectEqual(@as(i32, tmp.dir.handle), sqe.fd);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ // This kernel's io_uring does not yet implement mkdirat (kernel version < 5.15)
+ .BADF, .INVAL => return error.SkipZigTest,
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x12121212,
+ .res = 0,
+ .flags = 0,
+ }, cqe);
+
+ // Validate that the directory exist
+ _ = try tmp.dir.openDir(io, path, .{});
+}
+
+test "symlinkat" {
+ const io = testing.io;
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
+ const path = "test_io_uring_symlinkat";
+ const link_path = "test_io_uring_symlinkat_link";
+
+ const file = try tmp.dir.createFile(io, path, .{});
+ defer file.close(io);
+
+ // Submit symlinkat
+
+ const sqe = try ring.symlinkat(
+ 0x12121212,
+ path,
+ tmp.dir.handle,
+ link_path,
+ );
+ try testing.expectEqual(linux.IORING_OP.SYMLINKAT, sqe.opcode);
+ try testing.expectEqual(@as(i32, tmp.dir.handle), sqe.fd);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ // This kernel's io_uring does not yet implement symlinkat (kernel version < 5.15)
+ .BADF, .INVAL => return error.SkipZigTest,
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x12121212,
+ .res = 0,
+ .flags = 0,
+ }, cqe);
+
+ // Validate that the symlink exist
+ _ = try tmp.dir.openFile(io, link_path, .{});
+}
+
+test "linkat" {
+ const io = testing.io;
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+
+ const first_path = "test_io_uring_linkat_first";
+ const second_path = "test_io_uring_linkat_second";
+
+ // Write file with data
+
+ const first_file = try tmp.dir.createFile(io, first_path, .{});
+ defer first_file.close(io);
+ try first_file.writeStreamingAll(io, "hello");
+
+ // Submit linkat
+
+ const sqe = try ring.linkat(
+ 0x12121212,
+ tmp.dir.handle,
+ first_path,
+ tmp.dir.handle,
+ second_path,
+ 0,
+ );
+ try testing.expectEqual(linux.IORING_OP.LINKAT, sqe.opcode);
+ try testing.expectEqual(@as(i32, tmp.dir.handle), sqe.fd);
+ try testing.expectEqual(@as(i32, tmp.dir.handle), @as(i32, @bitCast(sqe.len)));
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ // This kernel's io_uring does not yet implement linkat (kernel version < 5.15)
+ .BADF, .INVAL => return error.SkipZigTest,
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0x12121212,
+ .res = 0,
+ .flags = 0,
+ }, cqe);
+
+ // Validate the second file
+ var second_file_data: [16]u8 = undefined;
+ try testing.expectEqualStrings("hello", try tmp.dir.readFile(io, second_path, &second_file_data));
+}
+
+test "provide_buffers: read" {
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const fd = try posix.openZ("/dev/zero", .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
+ defer posix.close(fd);
+
+ const group_id = 1337;
+ const buffer_id = 0;
+
+ const buffer_len = 128;
+
+ var buffers: [4][buffer_len]u8 = undefined;
+
+ // Provide 4 buffers
+
+ {
+ const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
+ try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode);
+ try testing.expectEqual(@as(i32, buffers.len), sqe.fd);
+ try testing.expectEqual(@as(u32, buffers[0].len), sqe.len);
+ try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ // Happens when the kernel is < 5.7
+ .INVAL, .BADF => return error.SkipZigTest,
+ .SUCCESS => {},
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(@as(u64, 0xcccccccc), cqe.user_data);
+ }
+
+ // Do 4 reads which should consume all buffers
+
+ var i: usize = 0;
+ while (i < buffers.len) : (i += 1) {
+ const sqe = try ring.read(0xdededede, fd, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
+ try testing.expectEqual(linux.IORING_OP.READ, sqe.opcode);
+ try testing.expectEqual(@as(i32, fd), sqe.fd);
+ try testing.expectEqual(@as(u64, 0), sqe.addr);
+ try testing.expectEqual(@as(u32, buffer_len), sqe.len);
+ try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+
+ try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER);
+ const used_buffer_id = cqe.flags >> 16;
+ try testing.expect(used_buffer_id >= 0 and used_buffer_id <= 3);
+ try testing.expectEqual(@as(i32, buffer_len), cqe.res);
+
+ try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data);
+ try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
+ }
+
+ // This read should fail
+
+ {
+ const sqe = try ring.read(0xdfdfdfdf, fd, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
+ try testing.expectEqual(linux.IORING_OP.READ, sqe.opcode);
+ try testing.expectEqual(@as(i32, fd), sqe.fd);
+ try testing.expectEqual(@as(u64, 0), sqe.addr);
+ try testing.expectEqual(@as(u32, buffer_len), sqe.len);
+ try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ // Expected
+ .NOBUFS => {},
+ .SUCCESS => std.debug.panic("unexpected success", .{}),
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
+ }
+
+ // Provide 1 buffer again
+
+ // Deliberately put something we don't expect in the buffers
+ @memset(mem.sliceAsBytes(&buffers), 42);
+
+ const reprovided_buffer_id = 2;
+
+ {
+ _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ }
+
+ // Final read which should work
+
+ {
+ const sqe = try ring.read(0xdfdfdfdf, fd, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
+ try testing.expectEqual(linux.IORING_OP.READ, sqe.opcode);
+ try testing.expectEqual(@as(i32, fd), sqe.fd);
+ try testing.expectEqual(@as(u64, 0), sqe.addr);
+ try testing.expectEqual(@as(u32, buffer_len), sqe.len);
+ try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+
+ try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER);
+ const used_buffer_id = cqe.flags >> 16;
+ try testing.expectEqual(used_buffer_id, reprovided_buffer_id);
+ try testing.expectEqual(@as(i32, buffer_len), cqe.res);
+ try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
+ try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
+ }
+}
+
+test "remove_buffers" {
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const fd = try posix.openZ("/dev/zero", .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
+ defer posix.close(fd);
+
+ const group_id = 1337;
+ const buffer_id = 0;
+
+ const buffer_len = 128;
+
+ var buffers: [4][buffer_len]u8 = undefined;
+
+ // Provide 4 buffers
+
+ {
+ _ = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .INVAL, .BADF => return error.SkipZigTest,
+ .SUCCESS => {},
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(@as(u64, 0xcccccccc), cqe.user_data);
+ }
+
+ // Remove 3 buffers
+
+ {
+ const sqe = try ring.remove_buffers(0xbababababa, 3, group_id);
+ try testing.expectEqual(linux.IORING_OP.REMOVE_BUFFERS, sqe.opcode);
+ try testing.expectEqual(@as(i32, 3), sqe.fd);
+ try testing.expectEqual(@as(u64, 0), sqe.addr);
+ try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(@as(u64, 0xbababababa), cqe.user_data);
+ }
+
+ // This read should work
+
+ {
+ _ = try ring.read(0xdfdfdfdf, fd, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+
+ try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER);
+ const used_buffer_id = cqe.flags >> 16;
+ try testing.expect(used_buffer_id >= 0 and used_buffer_id < 4);
+ try testing.expectEqual(@as(i32, buffer_len), cqe.res);
+ try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
+ try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
+ }
+
+ // Final read should _not_ work
+
+ {
+ _ = try ring.read(0xdfdfdfdf, fd, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ // Expected
+ .NOBUFS => {},
+ .SUCCESS => std.debug.panic("unexpected success", .{}),
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ }
+}
+
+test "provide_buffers: accept/connect/send/recv" {
+ const io = testing.io;
+ _ = io;
+
+ var ring = IoUring.init(16, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const group_id = 1337;
+ const buffer_id = 0;
+
+ const buffer_len = 128;
+ var buffers: [4][buffer_len]u8 = undefined;
+
+ // Provide 4 buffers
+
+ {
+ const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
+ try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode);
+ try testing.expectEqual(@as(i32, buffers.len), sqe.fd);
+ try testing.expectEqual(@as(u32, buffer_len), sqe.len);
+ try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ // Happens when the kernel is < 5.7
+ .INVAL => return error.SkipZigTest,
+ // Happens on the kernel 5.4
+ .BADF => return error.SkipZigTest,
+ .SUCCESS => {},
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(@as(u64, 0xcccccccc), cqe.user_data);
+ }
+
+ const socket_test_harness = try createSocketTestHarness(&ring);
+ defer socket_test_harness.close();
+
+ // Do 4 send on the socket
+
+ {
+ var i: usize = 0;
+ while (i < buffers.len) : (i += 1) {
+ _ = try ring.send(0xdeaddead, socket_test_harness.server, &([_]u8{'z'} ** buffer_len), 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ }
+
+ var cqes: [4]linux.io_uring_cqe = undefined;
+ try testing.expectEqual(@as(u32, 4), try ring.copy_cqes(&cqes, 4));
+ }
+
+ // Do 4 recv which should consume all buffers
+
+ // Deliberately put something we don't expect in the buffers
+ @memset(mem.sliceAsBytes(&buffers), 1);
+
+ var i: usize = 0;
+ while (i < buffers.len) : (i += 1) {
+ const sqe = try ring.recv(0xdededede, socket_test_harness.client, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
+ try testing.expectEqual(linux.IORING_OP.RECV, sqe.opcode);
+ try testing.expectEqual(@as(i32, socket_test_harness.client), sqe.fd);
+ try testing.expectEqual(@as(u64, 0), sqe.addr);
+ try testing.expectEqual(@as(u32, buffer_len), sqe.len);
+ try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
+ try testing.expectEqual(@as(u32, 0), sqe.rw_flags);
+ try testing.expectEqual(@as(u32, linux.IOSQE_BUFFER_SELECT), sqe.flags);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+
+ try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER);
+ const used_buffer_id = cqe.flags >> 16;
+ try testing.expect(used_buffer_id >= 0 and used_buffer_id <= 3);
+ try testing.expectEqual(@as(i32, buffer_len), cqe.res);
+
+ try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data);
+ const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))];
+ try testing.expectEqualSlices(u8, &([_]u8{'z'} ** buffer_len), buffer);
+ }
+
+ // This recv should fail
+
+ {
+ const sqe = try ring.recv(0xdfdfdfdf, socket_test_harness.client, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
+ try testing.expectEqual(linux.IORING_OP.RECV, sqe.opcode);
+ try testing.expectEqual(@as(i32, socket_test_harness.client), sqe.fd);
+ try testing.expectEqual(@as(u64, 0), sqe.addr);
+ try testing.expectEqual(@as(u32, buffer_len), sqe.len);
+ try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
+ try testing.expectEqual(@as(u32, 0), sqe.rw_flags);
+ try testing.expectEqual(@as(u32, linux.IOSQE_BUFFER_SELECT), sqe.flags);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ // Expected
+ .NOBUFS => {},
+ .SUCCESS => std.debug.panic("unexpected success", .{}),
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
+ }
+
+ // Provide 1 buffer again
+
+ const reprovided_buffer_id = 2;
+
+ {
+ _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+ }
+
+ // Redo 1 send on the server socket
+
+ {
+ _ = try ring.send(0xdeaddead, socket_test_harness.server, &([_]u8{'w'} ** buffer_len), 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ _ = try ring.copy_cqe();
+ }
+
+ // Final recv which should work
+
+ // Deliberately put something we don't expect in the buffers
+ @memset(mem.sliceAsBytes(&buffers), 1);
+
+ {
+ const sqe = try ring.recv(0xdfdfdfdf, socket_test_harness.client, .{ .buffer_selection = .{ .group_id = group_id, .len = buffer_len } }, 0);
+ try testing.expectEqual(linux.IORING_OP.RECV, sqe.opcode);
+ try testing.expectEqual(@as(i32, socket_test_harness.client), sqe.fd);
+ try testing.expectEqual(@as(u64, 0), sqe.addr);
+ try testing.expectEqual(@as(u32, buffer_len), sqe.len);
+ try testing.expectEqual(@as(u16, group_id), sqe.buf_index);
+ try testing.expectEqual(@as(u32, 0), sqe.rw_flags);
+ try testing.expectEqual(@as(u32, linux.IOSQE_BUFFER_SELECT), sqe.flags);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ const cqe = try ring.copy_cqe();
+ switch (cqe.err()) {
+ .SUCCESS => {},
+ else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
+ }
+
+ try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER);
+ const used_buffer_id = cqe.flags >> 16;
+ try testing.expectEqual(used_buffer_id, reprovided_buffer_id);
+ try testing.expectEqual(@as(i32, buffer_len), cqe.res);
+ try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
+ const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))];
+ try testing.expectEqualSlices(u8, &([_]u8{'w'} ** buffer_len), buffer);
+ }
+}
+
+test "accept multishot" {
+ var ring = IoUring.init(16, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var address: linux.sockaddr.in = .{
+ .port = 0,
+ .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+ };
+ const listener_socket = try createListenerSocket(&address);
+ defer posix.close(listener_socket);
+
+ // submit multishot accept operation
+ var addr: posix.sockaddr = undefined;
+ var addr_len: posix.socklen_t = @sizeOf(@TypeOf(addr));
+ const userdata: u64 = 0xaaaaaaaa;
+ _ = try ring.accept_multishot(userdata, listener_socket, &addr, &addr_len, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ var nr: usize = 4; // number of clients to connect
+ while (nr > 0) : (nr -= 1) {
+ // connect client
+ const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+ errdefer posix.close(client);
+ try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
+
+ // test accept completion
+ var cqe = try ring.copy_cqe();
+ if (cqe.err() == .INVAL) return error.SkipZigTest;
+ try testing.expect(cqe.res > 0);
+ try testing.expect(cqe.user_data == userdata);
+ try testing.expect(cqe.flags & linux.IORING_CQE_F_MORE > 0); // more flag is set
+
+ posix.close(client);
+ }
+}
+
+test "accept/connect/send_zc/recv" {
+ try skipKernelLessThan(.{ .major = 6, .minor = 0, .patch = 0 });
+
+ const io = testing.io;
+ _ = io;
+
+ var ring = IoUring.init(16, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const socket_test_harness = try createSocketTestHarness(&ring);
+ defer socket_test_harness.close();
+
+ const buffer_send = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe };
+ var buffer_recv = [_]u8{0} ** 10;
+
+ // zero-copy send
+ const sqe_send = try ring.send_zc(0xeeeeeeee, socket_test_harness.client, buffer_send[0..], 0, 0);
+ sqe_send.flags |= linux.IOSQE_IO_LINK;
+ _ = try ring.recv(0xffffffff, socket_test_harness.server, .{ .buffer = buffer_recv[0..] }, 0);
+ try testing.expectEqual(@as(u32, 2), try ring.submit());
+
+ var cqe_send = try ring.copy_cqe();
+ // First completion of zero-copy send.
+ // IORING_CQE_F_MORE, means that there
+ // will be a second completion event / notification for the
+ // request, with the user_data field set to the same value.
+ // buffer_send must be keep alive until second cqe.
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xeeeeeeee,
+ .res = buffer_send.len,
+ .flags = linux.IORING_CQE_F_MORE,
+ }, cqe_send);
+
+ cqe_send, const cqe_recv = brk: {
+ const cqe1 = try ring.copy_cqe();
+ const cqe2 = try ring.copy_cqe();
+ break :brk if (cqe1.user_data == 0xeeeeeeee) .{ cqe1, cqe2 } else .{ cqe2, cqe1 };
+ };
+
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xffffffff,
+ .res = buffer_recv.len,
+ .flags = cqe_recv.flags & linux.IORING_CQE_F_SOCK_NONEMPTY,
+ }, cqe_recv);
+ try testing.expectEqualSlices(u8, buffer_send[0..buffer_recv.len], buffer_recv[0..]);
+
+ // Second completion of zero-copy send.
+ // IORING_CQE_F_NOTIF in flags signals that kernel is done with send_buffer
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xeeeeeeee,
+ .res = 0,
+ .flags = linux.IORING_CQE_F_NOTIF,
+ }, cqe_send);
+}
+
+test "accept_direct" {
+ try skipKernelLessThan(.{ .major = 5, .minor = 19, .patch = 0 });
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+ var address: linux.sockaddr.in = .{
+ .port = 0,
+ .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+ };
+
+ // register direct file descriptors
+ var registered_fds = [_]linux.fd_t{-1} ** 2;
+ try ring.register_files(registered_fds[0..]);
+
+ const listener_socket = try createListenerSocket(&address);
+ defer posix.close(listener_socket);
+
+ const accept_userdata: u64 = 0xaaaaaaaa;
+ const read_userdata: u64 = 0xbbbbbbbb;
+ const data = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe };
+
+ for (0..2) |_| {
+ for (registered_fds, 0..) |_, i| {
+ var buffer_recv = [_]u8{0} ** 16;
+ const buffer_send: []const u8 = data[0 .. data.len - i]; // make it different at each loop
+
+ // submit accept, will chose registered fd and return index in cqe
+ _ = try ring.accept_direct(accept_userdata, listener_socket, null, null, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ // connect
+ const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+ try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
+ defer posix.close(client);
+
+ // accept completion
+ const cqe_accept = try ring.copy_cqe();
+ try testing.expectEqual(posix.E.SUCCESS, cqe_accept.err());
+ const fd_index = cqe_accept.res;
+ try testing.expect(fd_index < registered_fds.len);
+ try testing.expect(cqe_accept.user_data == accept_userdata);
+
+ // send data
+ _ = try posix.send(client, buffer_send, 0);
+
+ // Example of how to use registered fd:
+ // Submit receive to fixed file returned by accept (fd_index).
+ // Fd field is set to registered file index, returned by accept.
+ // Flag linux.IOSQE_FIXED_FILE must be set.
+ const recv_sqe = try ring.recv(read_userdata, fd_index, .{ .buffer = &buffer_recv }, 0);
+ recv_sqe.flags |= linux.IOSQE_FIXED_FILE;
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ // accept receive
+ const recv_cqe = try ring.copy_cqe();
+ try testing.expect(recv_cqe.user_data == read_userdata);
+ try testing.expect(recv_cqe.res == buffer_send.len);
+ try testing.expectEqualSlices(u8, buffer_send, buffer_recv[0..buffer_send.len]);
+ }
+ // no more available fds, accept will get NFILE error
+ {
+ // submit accept
+ _ = try ring.accept_direct(accept_userdata, listener_socket, null, null, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ // connect
+ const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+ try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
+ defer posix.close(client);
+ // completion with error
+ const cqe_accept = try ring.copy_cqe();
+ try testing.expect(cqe_accept.user_data == accept_userdata);
+ try testing.expectEqual(posix.E.NFILE, cqe_accept.err());
+ }
+ // return file descriptors to kernel
+ try ring.register_files_update(0, registered_fds[0..]);
+ }
+ try ring.unregister_files();
+}
+
+test "accept_multishot_direct" {
+ try skipKernelLessThan(.{ .major = 5, .minor = 19, .patch = 0 });
+
+ if (builtin.cpu.arch == .riscv64) {
+ // https://github.com/ziglang/zig/issues/25734
+ return error.SkipZigTest;
+ }
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var address: linux.sockaddr.in = .{
+ .port = 0,
+ .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+ };
+
+ var registered_fds = [_]linux.fd_t{-1} ** 2;
+ try ring.register_files(registered_fds[0..]);
+
+ const listener_socket = try createListenerSocket(&address);
+ defer posix.close(listener_socket);
+
+ const accept_userdata: u64 = 0xaaaaaaaa;
+
+ for (0..2) |_| {
+ // submit multishot accept
+ // Will chose registered fd and return index of the selected registered file in cqe.
+ _ = try ring.accept_multishot_direct(accept_userdata, listener_socket, null, null, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ for (registered_fds) |_| {
+ // connect
+ const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+ try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
+ defer posix.close(client);
+
+ // accept completion
+ const cqe_accept = try ring.copy_cqe();
+ const fd_index = cqe_accept.res;
+ try testing.expect(fd_index < registered_fds.len);
+ try testing.expect(cqe_accept.user_data == accept_userdata);
+ try testing.expect(cqe_accept.flags & linux.IORING_CQE_F_MORE > 0); // has more is set
+ }
+ // No more available fds, accept will get NFILE error.
+ // Multishot is terminated (more flag is not set).
+ {
+ // connect
+ const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+ try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
+ defer posix.close(client);
+ // completion with error
+ const cqe_accept = try ring.copy_cqe();
+ try testing.expect(cqe_accept.user_data == accept_userdata);
+ try testing.expectEqual(posix.E.NFILE, cqe_accept.err());
+ try testing.expect(cqe_accept.flags & linux.IORING_CQE_F_MORE == 0); // has more is not set
+ }
+ // return file descriptors to kernel
+ try ring.register_files_update(0, registered_fds[0..]);
+ }
+ try ring.unregister_files();
+}
+
+test "socket" {
+ try skipKernelLessThan(.{ .major = 5, .minor = 19, .patch = 0 });
+
+ var ring = IoUring.init(1, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ // prepare, submit socket operation
+ _ = try ring.socket(0, linux.AF.INET, posix.SOCK.STREAM, 0, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ // test completion
+ var cqe = try ring.copy_cqe();
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ const fd: linux.fd_t = @intCast(cqe.res);
+ try testing.expect(fd > 2);
+
+ posix.close(fd);
+}
+
+test "socket_direct/socket_direct_alloc/close_direct" {
+ try skipKernelLessThan(.{ .major = 5, .minor = 19, .patch = 0 });
+
+ var ring = IoUring.init(2, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var registered_fds = [_]linux.fd_t{-1} ** 3;
+ try ring.register_files(registered_fds[0..]);
+
+ // create socket in registered file descriptor at index 0 (last param)
+ _ = try ring.socket_direct(0, linux.AF.INET, posix.SOCK.STREAM, 0, 0, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ var cqe_socket = try ring.copy_cqe();
+ try testing.expectEqual(posix.E.SUCCESS, cqe_socket.err());
+ try testing.expect(cqe_socket.res == 0);
+
+ // create socket in registered file descriptor at index 1 (last param)
+ _ = try ring.socket_direct(0, linux.AF.INET, posix.SOCK.STREAM, 0, 0, 1);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ cqe_socket = try ring.copy_cqe();
+ try testing.expectEqual(posix.E.SUCCESS, cqe_socket.err());
+ try testing.expect(cqe_socket.res == 0); // res is 0 when index is specified
+
+ // create socket in kernel chosen file descriptor index (_alloc version)
+ // completion res has index from registered files
+ _ = try ring.socket_direct_alloc(0, linux.AF.INET, posix.SOCK.STREAM, 0, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ cqe_socket = try ring.copy_cqe();
+ try testing.expectEqual(posix.E.SUCCESS, cqe_socket.err());
+ try testing.expect(cqe_socket.res == 2); // returns registered file index
+
+ // use sockets from registered_fds in connect operation
+ var address: linux.sockaddr.in = .{
+ .port = 0,
+ .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+ };
+ const listener_socket = try createListenerSocket(&address);
+ defer posix.close(listener_socket);
+ const accept_userdata: u64 = 0xaaaaaaaa;
+ const connect_userdata: u64 = 0xbbbbbbbb;
+ const close_userdata: u64 = 0xcccccccc;
+ for (registered_fds, 0..) |_, fd_index| {
+ // prepare accept
+ _ = try ring.accept(accept_userdata, listener_socket, null, null, 0);
+ // prepare connect with fixed socket
+ const connect_sqe = try ring.connect(connect_userdata, @intCast(fd_index), addrAny(&address), @sizeOf(linux.sockaddr.in));
+ connect_sqe.flags |= linux.IOSQE_FIXED_FILE; // fd is fixed file index
+ // submit both
+ try testing.expectEqual(@as(u32, 2), try ring.submit());
+ // get completions
+ var cqe_connect = try ring.copy_cqe();
+ var cqe_accept = try ring.copy_cqe();
+ // ignore order
+ if (cqe_connect.user_data == accept_userdata and cqe_accept.user_data == connect_userdata) {
+ const a = cqe_accept;
+ const b = cqe_connect;
+ cqe_accept = b;
+ cqe_connect = a;
+ }
+ // test connect completion
+ try testing.expect(cqe_connect.user_data == connect_userdata);
+ try testing.expectEqual(posix.E.SUCCESS, cqe_connect.err());
+ // test accept completion
+ try testing.expect(cqe_accept.user_data == accept_userdata);
+ try testing.expectEqual(posix.E.SUCCESS, cqe_accept.err());
+
+ // submit and test close_direct
+ _ = try ring.close_direct(close_userdata, @intCast(fd_index));
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ var cqe_close = try ring.copy_cqe();
+ try testing.expect(cqe_close.user_data == close_userdata);
+ try testing.expectEqual(posix.E.SUCCESS, cqe_close.err());
+ }
+
+ try ring.unregister_files();
+}
+
+test "openat_direct/close_direct" {
+ try skipKernelLessThan(.{ .major = 5, .minor = 19, .patch = 0 });
+
+ var ring = IoUring.init(2, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ var registered_fds = [_]linux.fd_t{-1} ** 3;
+ try ring.register_files(registered_fds[0..]);
+
+ var tmp = std.testing.tmpDir(.{});
+ defer tmp.cleanup();
+ const path = "test_io_uring_close_direct";
+ const flags: linux.O = .{ .ACCMODE = .RDWR, .CREAT = true };
+ const mode: posix.mode_t = 0o666;
+ const user_data: u64 = 0;
+
+ // use registered file at index 0 (last param)
+ _ = try ring.openat_direct(user_data, tmp.dir.handle, path, flags, mode, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ var cqe = try ring.copy_cqe();
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ try testing.expect(cqe.res == 0);
+
+ // use registered file at index 1
+ _ = try ring.openat_direct(user_data, tmp.dir.handle, path, flags, mode, 1);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ cqe = try ring.copy_cqe();
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ try testing.expect(cqe.res == 0); // res is 0 when we specify index
+
+ // let kernel choose registered file index
+ _ = try ring.openat_direct(user_data, tmp.dir.handle, path, flags, mode, linux.IORING_FILE_INDEX_ALLOC);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ cqe = try ring.copy_cqe();
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ try testing.expect(cqe.res == 2); // chosen index is in res
+
+ // close all open file descriptors
+ for (registered_fds, 0..) |_, fd_index| {
+ _ = try ring.close_direct(user_data, @intCast(fd_index));
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ var cqe_close = try ring.copy_cqe();
+ try testing.expectEqual(posix.E.SUCCESS, cqe_close.err());
+ }
+ try ring.unregister_files();
+}
+
+test "ring mapped buffers recv" {
+ const io = testing.io;
+ _ = io;
+
+ var ring = IoUring.init(16, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ // init buffer group
+ const group_id: u16 = 1; // buffers group id
+ const buffers_count: u16 = 2; // number of buffers in buffer group
+ const buffer_size: usize = 4; // size of each buffer in group
+ var buf_grp = BufferGroup.init(
+ &ring,
+ testing.allocator,
+ group_id,
+ buffer_size,
+ buffers_count,
+ ) catch |err| switch (err) {
+ // kernel older than 5.19
+ error.ArgumentsInvalid => return error.SkipZigTest,
+ else => return err,
+ };
+ defer buf_grp.deinit(testing.allocator);
+
+ // create client/server fds
+ const fds = try createSocketTestHarness(&ring);
+ defer fds.close();
+
+ // for random user_data in sqe/cqe
+ var Rnd = std.Random.DefaultPrng.init(std.testing.random_seed);
+ var rnd = Rnd.random();
+
+ var round: usize = 4; // repeat send/recv cycle round times
+ while (round > 0) : (round -= 1) {
+ // client sends data
+ const data = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe };
+ {
+ const user_data = rnd.int(u64);
+ _ = try ring.send(user_data, fds.client, data[0..], 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ const cqe_send = try ring.copy_cqe();
+ if (cqe_send.err() == .INVAL) return error.SkipZigTest;
+ try testing.expectEqual(linux.io_uring_cqe{ .user_data = user_data, .res = data.len, .flags = 0 }, cqe_send);
+ }
+ var pos: usize = 0;
+
+ // read first chunk
+ const cqe1 = try buf_grp_recv_submit_get_cqe(&ring, &buf_grp, fds.server, rnd.int(u64));
+ var buf = try buf_grp.get(cqe1);
+ try testing.expectEqualSlices(u8, data[pos..][0..buf.len], buf);
+ pos += buf.len;
+ // second chunk
+ const cqe2 = try buf_grp_recv_submit_get_cqe(&ring, &buf_grp, fds.server, rnd.int(u64));
+ buf = try buf_grp.get(cqe2);
+ try testing.expectEqualSlices(u8, data[pos..][0..buf.len], buf);
+ pos += buf.len;
+
+ // both buffers provided to the kernel are used so we get error
+ // 'no more buffers', until we put buffers to the kernel
+ {
+ const user_data = rnd.int(u64);
+ _ = try buf_grp.recv(user_data, fds.server, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ const cqe = try ring.copy_cqe();
+ try testing.expectEqual(user_data, cqe.user_data);
+ try testing.expect(cqe.res < 0); // fail
+ try testing.expectEqual(posix.E.NOBUFS, cqe.err());
+ try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == 0); // IORING_CQE_F_BUFFER flags is set on success only
+ try testing.expectError(error.NoBufferSelected, cqe.buffer_id());
+ }
+
+ // put buffers back to the kernel
+ try buf_grp.put(cqe1);
+ try buf_grp.put(cqe2);
+
+ // read remaining data
+ while (pos < data.len) {
+ const cqe = try buf_grp_recv_submit_get_cqe(&ring, &buf_grp, fds.server, rnd.int(u64));
+ buf = try buf_grp.get(cqe);
+ try testing.expectEqualSlices(u8, data[pos..][0..buf.len], buf);
+ pos += buf.len;
+ try buf_grp.put(cqe);
+ }
+ }
+}
+
+test "ring mapped buffers multishot recv" {
+ const io = testing.io;
+ _ = io;
+
+ var ring = IoUring.init(16, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ // init buffer group
+ const group_id: u16 = 1; // buffers group id
+ const buffers_count: u16 = 2; // number of buffers in buffer group
+ const buffer_size: usize = 4; // size of each buffer in group
+ var buf_grp = BufferGroup.init(
+ &ring,
+ testing.allocator,
+ group_id,
+ buffer_size,
+ buffers_count,
+ ) catch |err| switch (err) {
+ // kernel older than 5.19
+ error.ArgumentsInvalid => return error.SkipZigTest,
+ else => return err,
+ };
+ defer buf_grp.deinit(testing.allocator);
+
+ // create client/server fds
+ const fds = try createSocketTestHarness(&ring);
+ defer fds.close();
+
+ // for random user_data in sqe/cqe
+ var Rnd = std.Random.DefaultPrng.init(std.testing.random_seed);
+ var rnd = Rnd.random();
+
+ var round: usize = 4; // repeat send/recv cycle round times
+ while (round > 0) : (round -= 1) {
+ // client sends data
+ const data = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf };
+ {
+ const user_data = rnd.int(u64);
+ _ = try ring.send(user_data, fds.client, data[0..], 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+ const cqe_send = try ring.copy_cqe();
+ if (cqe_send.err() == .INVAL) return error.SkipZigTest;
+ try testing.expectEqual(linux.io_uring_cqe{ .user_data = user_data, .res = data.len, .flags = 0 }, cqe_send);
+ }
+
+ // start multishot recv
+ var recv_user_data = rnd.int(u64);
+ _ = try buf_grp.recv_multishot(recv_user_data, fds.server, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit()); // submit
+
+ // server reads data into provided buffers
+ // there are 2 buffers of size 4, so each read gets only chunk of data
+ // we read four chunks of 4, 4, 4, 4 bytes each
+ var chunk: []const u8 = data[0..buffer_size]; // first chunk
+ const cqe1 = try expect_buf_grp_cqe(&ring, &buf_grp, recv_user_data, chunk);
+ try testing.expect(cqe1.flags & linux.IORING_CQE_F_MORE > 0);
+
+ chunk = data[buffer_size .. buffer_size * 2]; // second chunk
+ const cqe2 = try expect_buf_grp_cqe(&ring, &buf_grp, recv_user_data, chunk);
+ try testing.expect(cqe2.flags & linux.IORING_CQE_F_MORE > 0);
+
+ // both buffers provided to the kernel are used so we get error
+ // 'no more buffers', until we put buffers to the kernel
+ {
+ const cqe = try ring.copy_cqe();
+ try testing.expectEqual(recv_user_data, cqe.user_data);
+ try testing.expect(cqe.res < 0); // fail
+ try testing.expectEqual(posix.E.NOBUFS, cqe.err());
+ try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == 0); // IORING_CQE_F_BUFFER flags is set on success only
+ // has more is not set
+ // indicates that multishot is finished
+ try testing.expect(cqe.flags & linux.IORING_CQE_F_MORE == 0);
+ try testing.expectError(error.NoBufferSelected, cqe.buffer_id());
+ }
+
+ // put buffers back to the kernel
+ try buf_grp.put(cqe1);
+ try buf_grp.put(cqe2);
+
+ // restart multishot
+ recv_user_data = rnd.int(u64);
+ _ = try buf_grp.recv_multishot(recv_user_data, fds.server, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit()); // submit
+
+ chunk = data[buffer_size * 2 .. buffer_size * 3]; // third chunk
+ const cqe3 = try expect_buf_grp_cqe(&ring, &buf_grp, recv_user_data, chunk);
+ try testing.expect(cqe3.flags & linux.IORING_CQE_F_MORE > 0);
+ try buf_grp.put(cqe3);
+
+ chunk = data[buffer_size * 3 ..]; // last chunk
+ const cqe4 = try expect_buf_grp_cqe(&ring, &buf_grp, recv_user_data, chunk);
+ try testing.expect(cqe4.flags & linux.IORING_CQE_F_MORE > 0);
+ try buf_grp.put(cqe4);
+
+ // cancel pending multishot recv operation
+ {
+ const cancel_user_data = rnd.int(u64);
+ _ = try ring.cancel(cancel_user_data, recv_user_data, 0);
+ try testing.expectEqual(@as(u32, 1), try ring.submit());
+
+ // expect completion of cancel operation and completion of recv operation
+ var cqe_cancel = try ring.copy_cqe();
+ if (cqe_cancel.err() == .INVAL) return error.SkipZigTest;
+ var cqe_recv = try ring.copy_cqe();
+ if (cqe_recv.err() == .INVAL) return error.SkipZigTest;
+
+ // don't depend on order of completions
+ if (cqe_cancel.user_data == recv_user_data and cqe_recv.user_data == cancel_user_data) {
+ const a = cqe_cancel;
+ const b = cqe_recv;
+ cqe_cancel = b;
+ cqe_recv = a;
+ }
+
+ // Note on different kernel results:
+ // on older kernel (tested with v6.0.16, v6.1.57, v6.2.12, v6.4.16)
+ // cqe_cancel.err() == .NOENT
+ // cqe_recv.err() == .NOBUFS
+ // on kernel (tested with v6.5.0, v6.5.7)
+ // cqe_cancel.err() == .SUCCESS
+ // cqe_recv.err() == .CANCELED
+ // Upstream reference: https://github.com/axboe/liburing/issues/984
+
+ // cancel operation is success (or NOENT on older kernels)
+ try testing.expectEqual(cancel_user_data, cqe_cancel.user_data);
+ try testing.expect(cqe_cancel.err() == .NOENT or cqe_cancel.err() == .SUCCESS);
+
+ // recv operation is failed with err CANCELED (or NOBUFS on older kernels)
+ try testing.expectEqual(recv_user_data, cqe_recv.user_data);
+ try testing.expect(cqe_recv.res < 0);
+ try testing.expect(cqe_recv.err() == .NOBUFS or cqe_recv.err() == .CANCELED);
+ try testing.expect(cqe_recv.flags & linux.IORING_CQE_F_MORE == 0);
+ }
+ }
+}
+
+test "copy_cqes with wrapping sq.cqes buffer" {
+ var ring = IoUring.init(2, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ try testing.expectEqual(2, ring.sq.sqes.len);
+ try testing.expectEqual(4, ring.cq.cqes.len);
+
+ // submit 2 entries, receive 2 completions
+ var cqes: [8]linux.io_uring_cqe = undefined;
+ {
+ for (0..2) |_| {
+ const sqe = try ring.get_sqe();
+ sqe.prep_timeout(&.{ .sec = 0, .nsec = 10000 }, 0, 0);
+ try testing.expect(try ring.submit() == 1);
+ }
+ var cqe_count: u32 = 0;
+ while (cqe_count < 2) {
+ cqe_count += try ring.copy_cqes(&cqes, 2 - cqe_count);
+ }
+ }
+
+ try testing.expectEqual(2, ring.cq.head.*);
+
+ // sq.sqes len is 4, starting at position 2
+ // every 4 entries submit wraps completion buffer
+ // we are reading ring.cq.cqes at indexes 2,3,0,1
+ for (1..1024) |i| {
+ for (0..4) |_| {
+ const sqe = try ring.get_sqe();
+ sqe.prep_timeout(&.{ .sec = 0, .nsec = 10000 }, 0, 0);
+ try testing.expect(try ring.submit() == 1);
+ }
+ var cqe_count: u32 = 0;
+ while (cqe_count < 4) {
+ cqe_count += try ring.copy_cqes(&cqes, 4 - cqe_count);
+ }
+ try testing.expectEqual(4, cqe_count);
+ try testing.expectEqual(2 + 4 * i, ring.cq.head.*);
+ }
+}
+
+test "bind/listen/connect" {
+ if (builtin.cpu.arch == .s390x) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/25956
+
+ var ring = IoUring.init(4, 0) catch |err| switch (err) {
+ error.SystemOutdated => return error.SkipZigTest,
+ error.PermissionDenied => return error.SkipZigTest,
+ else => return err,
+ };
+ defer ring.deinit();
+
+ const probe = ring.get_probe() catch return error.SkipZigTest;
+ // LISTEN is higher required operation
+ if (!probe.is_supported(.LISTEN)) return error.SkipZigTest;
+
+ var addr: linux.sockaddr.in = .{
+ .port = 0,
+ .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+ };
+ const proto: u32 = if (addr.family == linux.AF.UNIX) 0 else linux.IPPROTO.TCP;
+
+ const listen_fd = brk: {
+ // Create socket
+ _ = try ring.socket(1, addr.family, linux.SOCK.STREAM | linux.SOCK.CLOEXEC, proto, 0);
+ try testing.expectEqual(1, try ring.submit());
+ var cqe = try ring.copy_cqe();
+ try testing.expectEqual(1, cqe.user_data);
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ const listen_fd: linux.fd_t = @intCast(cqe.res);
+ try testing.expect(listen_fd > 2);
+
+ // Prepare: set socket option * 2, bind, listen
+ var optval: u32 = 1;
+ (try ring.setsockopt(2, listen_fd, linux.SOL.SOCKET, linux.SO.REUSEADDR, mem.asBytes(&optval))).link_next();
+ (try ring.setsockopt(3, listen_fd, linux.SOL.SOCKET, linux.SO.REUSEPORT, mem.asBytes(&optval))).link_next();
+ (try ring.bind(4, listen_fd, addrAny(&addr), @sizeOf(linux.sockaddr.in), 0)).link_next();
+ _ = try ring.listen(5, listen_fd, 1, 0);
+ // Submit 4 operations
+ try testing.expectEqual(4, try ring.submit());
+ // Expect all to succeed
+ for (2..6) |user_data| {
+ cqe = try ring.copy_cqe();
+ try testing.expectEqual(user_data, cqe.user_data);
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ }
+
+ // Check that socket option is set
+ optval = 0;
+ _ = try ring.getsockopt(5, listen_fd, linux.SOL.SOCKET, linux.SO.REUSEADDR, mem.asBytes(&optval));
+ try testing.expectEqual(1, try ring.submit());
+ cqe = try ring.copy_cqe();
+ try testing.expectEqual(5, cqe.user_data);
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ try testing.expectEqual(1, optval);
+
+ // Read system assigned port into addr
+ var addr_len: posix.socklen_t = @sizeOf(linux.sockaddr.in);
+ try posix.getsockname(listen_fd, addrAny(&addr), &addr_len);
+
+ break :brk listen_fd;
+ };
+
+ const connect_fd = brk: {
+ // Create connect socket
+ _ = try ring.socket(6, addr.family, linux.SOCK.STREAM | linux.SOCK.CLOEXEC, proto, 0);
+ try testing.expectEqual(1, try ring.submit());
+ const cqe = try ring.copy_cqe();
+ try testing.expectEqual(6, cqe.user_data);
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ // Get connect socket fd
+ const connect_fd: linux.fd_t = @intCast(cqe.res);
+ try testing.expect(connect_fd > 2 and connect_fd != listen_fd);
+ break :brk connect_fd;
+ };
+
+ // Prepare accept/connect operations
+ _ = try ring.accept(7, listen_fd, null, null, 0);
+ _ = try ring.connect(8, connect_fd, addrAny(&addr), @sizeOf(linux.sockaddr.in));
+ try testing.expectEqual(2, try ring.submit());
+ // Get listener accepted socket
+ var accept_fd: posix.socket_t = 0;
+ for (0..2) |_| {
+ const cqe = try ring.copy_cqe();
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ if (cqe.user_data == 7) {
+ accept_fd = @intCast(cqe.res);
+ } else {
+ try testing.expectEqual(8, cqe.user_data);
+ }
+ }
+ try testing.expect(accept_fd > 2 and accept_fd != listen_fd and accept_fd != connect_fd);
+
+ // Communicate
+ try testSendRecv(&ring, connect_fd, accept_fd);
+ try testSendRecv(&ring, accept_fd, connect_fd);
+
+ // Shutdown and close all sockets
+ for ([_]posix.socket_t{ connect_fd, accept_fd, listen_fd }) |fd| {
+ (try ring.shutdown(9, fd, posix.SHUT.RDWR)).link_next();
+ _ = try ring.close(10, fd);
+ try testing.expectEqual(2, try ring.submit());
+ for (0..2) |i| {
+ const cqe = try ring.copy_cqe();
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ try testing.expectEqual(9 + i, cqe.user_data);
+ }
+ }
+}
+
+// Prepare, submit recv and get cqe using buffer group.
+fn buf_grp_recv_submit_get_cqe(
+ ring: *IoUring,
+ buf_grp: *BufferGroup,
+ fd: linux.fd_t,
+ user_data: u64,
+) !linux.io_uring_cqe {
+ // prepare and submit recv
+ const sqe = try buf_grp.recv(user_data, fd, 0);
+ try testing.expect(sqe.flags & linux.IOSQE_BUFFER_SELECT == linux.IOSQE_BUFFER_SELECT);
+ try testing.expect(sqe.buf_index == buf_grp.group_id);
+ try testing.expectEqual(@as(u32, 1), try ring.submit()); // submit
+ // get cqe, expect success
+ const cqe = try ring.copy_cqe();
+ try testing.expectEqual(user_data, cqe.user_data);
+ try testing.expect(cqe.res >= 0); // success
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER); // IORING_CQE_F_BUFFER flag is set
+
+ return cqe;
+}
+
+fn expect_buf_grp_cqe(
+ ring: *IoUring,
+ buf_grp: *BufferGroup,
+ user_data: u64,
+ expected: []const u8,
+) !linux.io_uring_cqe {
+ // get cqe
+ const cqe = try ring.copy_cqe();
+ try testing.expectEqual(user_data, cqe.user_data);
+ try testing.expect(cqe.res >= 0); // success
+ try testing.expect(cqe.flags & linux.IORING_CQE_F_BUFFER == linux.IORING_CQE_F_BUFFER); // IORING_CQE_F_BUFFER flag is set
+ try testing.expectEqual(expected.len, @as(usize, @intCast(cqe.res)));
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+
+ // get buffer from pool
+ const buffer_id = try cqe.buffer_id();
+ const len = @as(usize, @intCast(cqe.res));
+ const buf = buf_grp.get_by_id(buffer_id)[0..len];
+ try testing.expectEqualSlices(u8, expected, buf);
+
+ return cqe;
+}
+
+fn testSendRecv(ring: *IoUring, send_fd: posix.socket_t, recv_fd: posix.socket_t) !void {
+ const buffer_send = "0123456789abcdf" ** 10;
+ var buffer_recv: [buffer_send.len * 2]u8 = undefined;
+
+ // 2 sends
+ _ = try ring.send(1, send_fd, buffer_send, linux.MSG.WAITALL);
+ _ = try ring.send(2, send_fd, buffer_send, linux.MSG.WAITALL);
+ try testing.expectEqual(2, try ring.submit());
+ for (0..2) |i| {
+ const cqe = try ring.copy_cqe();
+ try testing.expectEqual(1 + i, cqe.user_data);
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ try testing.expectEqual(buffer_send.len, @as(usize, @intCast(cqe.res)));
+ }
+
+ // receive
+ var recv_len: usize = 0;
+ while (recv_len < buffer_send.len * 2) {
+ _ = try ring.recv(3, recv_fd, .{ .buffer = buffer_recv[recv_len..] }, 0);
+ try testing.expectEqual(1, try ring.submit());
+ const cqe = try ring.copy_cqe();
+ try testing.expectEqual(3, cqe.user_data);
+ try testing.expectEqual(posix.E.SUCCESS, cqe.err());
+ recv_len += @intCast(cqe.res);
+ }
+
+ // inspect recv buffer
+ try testing.expectEqualSlices(u8, buffer_send, buffer_recv[0..buffer_send.len]);
+ try testing.expectEqualSlices(u8, buffer_send, buffer_recv[buffer_send.len..]);
+}
+
+/// Used for testing server/client interactions.
+pub const SocketTestHarness = struct {
+ listener: posix.socket_t,
+ server: posix.socket_t,
+ client: posix.socket_t,
+
+ pub fn close(self: SocketTestHarness) void {
+ posix.close(self.client);
+ posix.close(self.listener);
+ }
+};
+
+pub fn createSocketTestHarness(ring: *IoUring) !SocketTestHarness {
+ // Create a TCP server socket
+ var address: linux.sockaddr.in = .{
+ .port = 0,
+ .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+ };
+ const listener_socket = try createListenerSocket(&address);
+ errdefer posix.close(listener_socket);
+
+ // Submit 1 accept
+ var accept_addr: posix.sockaddr = undefined;
+ var accept_addr_len: posix.socklen_t = @sizeOf(@TypeOf(accept_addr));
+ _ = try ring.accept(0xaaaaaaaa, listener_socket, &accept_addr, &accept_addr_len, 0);
+
+ // Create a TCP client socket
+ const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+ errdefer posix.close(client);
+ _ = try ring.connect(0xcccccccc, client, addrAny(&address), @sizeOf(linux.sockaddr.in));
+
+ try testing.expectEqual(@as(u32, 2), try ring.submit());
+
+ var cqe_accept = try ring.copy_cqe();
+ if (cqe_accept.err() == .INVAL) return error.SkipZigTest;
+ var cqe_connect = try ring.copy_cqe();
+ if (cqe_connect.err() == .INVAL) return error.SkipZigTest;
+
+ // The accept/connect CQEs may arrive in any order, the connect CQE will sometimes come first:
+ if (cqe_accept.user_data == 0xcccccccc and cqe_connect.user_data == 0xaaaaaaaa) {
+ const a = cqe_accept;
+ const b = cqe_connect;
+ cqe_accept = b;
+ cqe_connect = a;
+ }
+
+ try testing.expectEqual(@as(u64, 0xaaaaaaaa), cqe_accept.user_data);
+ if (cqe_accept.res <= 0) std.debug.print("\ncqe_accept.res={}\n", .{cqe_accept.res});
+ try testing.expect(cqe_accept.res > 0);
+ try testing.expectEqual(@as(u32, 0), cqe_accept.flags);
+ try testing.expectEqual(linux.io_uring_cqe{
+ .user_data = 0xcccccccc,
+ .res = 0,
+ .flags = 0,
+ }, cqe_connect);
+
+ // All good
+
+ return SocketTestHarness{
+ .listener = listener_socket,
+ .server = cqe_accept.res,
+ .client = client,
+ };
+}
+
+fn createListenerSocket(address: *linux.sockaddr.in) !posix.socket_t {
+ const kernel_backlog = 1;
+ const listener_socket = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+ errdefer posix.close(listener_socket);
+
+ try posix.setsockopt(listener_socket, posix.SOL.SOCKET, posix.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
+ try posix.bind(listener_socket, addrAny(address), @sizeOf(linux.sockaddr.in));
+ try posix.listen(listener_socket, kernel_backlog);
+
+ // set address to the OS-chosen IP/port.
+ var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
+ try posix.getsockname(listener_socket, addrAny(address), &slen);
+
+ return listener_socket;
+}
+
+/// For use in tests. Returns SkipZigTest if kernel version is less than required.
+inline fn skipKernelLessThan(required: std.SemanticVersion) !void {
+ var uts: linux.utsname = undefined;
+ const res = linux.uname(&uts);
+ switch (linux.errno(res)) {
+ .SUCCESS => {},
+ else => |errno| return posix.unexpectedErrno(errno),
+ }
+
+ const release = mem.sliceTo(&uts.release, 0);
+ // Strips potential extra, as kernel version might not be semver compliant, example "6.8.9-300.fc40.x86_64"
+ const extra_index = std.mem.indexOfAny(u8, release, "-+");
+ const stripped = release[0..(extra_index orelse release.len)];
+ // Make sure the input don't rely on the extra we just stripped
+ try testing.expect(required.pre == null and required.build == null);
+
+ var current = try std.SemanticVersion.parse(stripped);
+ current.pre = null; // don't check pre field
+ if (required.order(current) == .gt) return error.SkipZigTest;
+}
+
+fn addrAny(addr: *linux.sockaddr.in) *linux.sockaddr {
+ return @ptrCast(addr);
+}
diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig
index 500c3f0bae..974ce0a25c 100644
--- a/lib/std/os/linux/test.zig
+++ b/lib/std/os/linux/test.zig
@@ -12,14 +12,16 @@ const fs = std.fs;
test "fallocate" {
if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://codeberg.org/ziglang/zig/issues/30220
+ const io = std.testing.io;
+
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
const path = "test_fallocate";
- const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
- defer file.close();
+ const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .permissions = .fromMode(0o666) });
+ defer file.close(io);
- try expect((try file.stat()).size == 0);
+ try expect((try file.stat(io)).size == 0);
const len: i64 = 65536;
switch (linux.errno(linux.fallocate(file.handle, 0, 0, len))) {
@@ -29,7 +31,7 @@ test "fallocate" {
else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
}
- try expect((try file.stat()).size == len);
+ try expect((try file.stat(io)).size == len);
}
test "getpid" {
@@ -77,12 +79,14 @@ test "timer" {
}
test "statx" {
+ const io = std.testing.io;
+
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
const tmp_file_name = "just_a_temporary_file.txt";
- var file = try tmp.dir.createFile(tmp_file_name, .{});
- defer file.close();
+ var file = try tmp.dir.createFile(io, tmp_file_name, .{});
+ defer file.close(io);
var buf: linux.Statx = undefined;
switch (linux.errno(linux.statx(file.handle, "", linux.AT.EMPTY_PATH, .BASIC_STATS, &buf))) {
@@ -111,15 +115,17 @@ test "user and group ids" {
}
test "fadvise" {
+ const io = std.testing.io;
+
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
const tmp_file_name = "temp_posix_fadvise.txt";
- var file = try tmp.dir.createFile(tmp_file_name, .{});
- defer file.close();
+ var file = try tmp.dir.createFile(io, tmp_file_name, .{});
+ defer file.close(io);
var buf: [2048]u8 = undefined;
- try file.writeAll(&buf);
+ try file.writeStreamingAll(io, &buf);
const ret = linux.fadvise(file.handle, 0, 0, linux.POSIX_FADV.SEQUENTIAL);
try expectEqual(@as(usize, 0), ret);
@@ -401,14 +407,6 @@ test "futex2_requeue" {
try expectEqual(0, rc);
}
-test "copy_file_range error" {
- const fds = try std.posix.pipe();
- defer std.posix.close(fds[0]);
- defer std.posix.close(fds[1]);
-
- try std.testing.expectError(error.InvalidArguments, linux.wrapped.copy_file_range(fds[0], null, fds[1], null, 1, 0));
-}
-
test {
_ = linux.IoUring;
}
diff --git a/lib/std/os/uefi/protocol/file.zig b/lib/std/os/uefi/protocol/file.zig
index 9e3e7cc081..9b371916a7 100644
--- a/lib/std/os/uefi/protocol/file.zig
+++ b/lib/std/os/uefi/protocol/file.zig
@@ -163,15 +163,6 @@ pub const File = extern struct {
}
}
- fn getEndPos(self: *File) SeekError!u64 {
- const start_pos = try self.getPosition();
- // ignore error
- defer self.setPosition(start_pos) catch {};
-
- try self.setPosition(end_of_file);
- return self.getPosition();
- }
-
pub fn setPosition(self: *File, position: u64) SeekError!void {
switch (self._set_position(self, position)) {
.success => {},
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index dd41879b3b..371883e9a4 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -215,6 +215,10 @@ pub const FILE = struct {
AccessFlags: ACCESS_MASK,
};
+ /// This is not separated into RENAME_INFORMATION and RENAME_INFORMATION_EX because
+ /// the only difference is the `Flags` type (BOOLEAN before _EX, ULONG in the _EX),
+ /// which doesn't affect the struct layout--the offset of RootDirectory is the same
+ /// regardless.
pub const RENAME_INFORMATION = extern struct {
Flags: FLAGS,
RootDirectory: ?HANDLE,
@@ -2310,17 +2314,15 @@ pub const OpenFileOptions = struct {
sa: ?*SECURITY_ATTRIBUTES = null,
share_access: FILE.SHARE = .VALID_FLAGS,
creation: FILE.CREATE_DISPOSITION,
- /// If true, tries to open path as a directory.
- /// Defaults to false.
- filter: Filter = .file_only,
+ filter: Filter = .non_directory_only,
/// If false, tries to open path as a reparse point without dereferencing it.
/// Defaults to true.
follow_symlinks: bool = true,
pub const Filter = enum {
/// Causes `OpenFile` to return `error.IsDir` if the opened handle would be a directory.
- file_only,
- /// Causes `OpenFile` to return `error.NotDir` if the opened handle would be a file.
+ non_directory_only,
+ /// Causes `OpenFile` to return `error.NotDir` if the opened handle is not a directory.
dir_only,
/// `OpenFile` does not discriminate between opening files and directories.
any,
@@ -2328,10 +2330,10 @@ pub const OpenFileOptions = struct {
};
pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HANDLE {
- if (mem.eql(u16, sub_path_w, &[_]u16{'.'}) and options.filter == .file_only) {
+ if (mem.eql(u16, sub_path_w, &[_]u16{'.'}) and options.filter == .non_directory_only) {
return error.IsDir;
}
- if (mem.eql(u16, sub_path_w, &[_]u16{ '.', '.' }) and options.filter == .file_only) {
+ if (mem.eql(u16, sub_path_w, &[_]u16{ '.', '.' }) and options.filter == .non_directory_only) {
return error.IsDir;
}
@@ -2366,7 +2368,7 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
options.creation,
.{
.DIRECTORY_FILE = options.filter == .dir_only,
- .NON_DIRECTORY_FILE = options.filter == .file_only,
+ .NON_DIRECTORY_FILE = options.filter == .non_directory_only,
.IO = if (options.follow_symlinks) .SYNCHRONOUS_NONALERT else .ASYNCHRONOUS,
.OPEN_REPARSE_POINT = !options.follow_symlinks,
},
@@ -2828,149 +2830,6 @@ pub fn CloseHandle(hObject: HANDLE) void {
assert(ntdll.NtClose(hObject) == .SUCCESS);
}
-pub const ReadFileError = error{
- BrokenPipe,
- /// The specified network name is no longer available.
- ConnectionResetByPeer,
- Canceled,
- /// Unable to read file due to lock.
- LockViolation,
- /// Known to be possible when:
- /// - Unable to read from disconnected virtual com port (Windows)
- AccessDenied,
- NotOpenForReading,
- Unexpected,
-};
-
-/// If buffer's length exceeds what a Windows DWORD integer can hold, it will be broken into
-/// multiple non-atomic reads.
-pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64) ReadFileError!usize {
- while (true) {
- const want_read_count: DWORD = @min(@as(DWORD, maxInt(DWORD)), buffer.len);
- var amt_read: DWORD = undefined;
- var overlapped_data: OVERLAPPED = undefined;
- const overlapped: ?*OVERLAPPED = if (offset) |off| blk: {
- overlapped_data = .{
- .Internal = 0,
- .InternalHigh = 0,
- .DUMMYUNIONNAME = .{
- .DUMMYSTRUCTNAME = .{
- .Offset = @as(u32, @truncate(off)),
- .OffsetHigh = @as(u32, @truncate(off >> 32)),
- },
- },
- .hEvent = null,
- };
- break :blk &overlapped_data;
- } else null;
- if (kernel32.ReadFile(in_hFile, buffer.ptr, want_read_count, &amt_read, overlapped) == 0) {
- switch (GetLastError()) {
- .IO_PENDING => unreachable,
- .OPERATION_ABORTED => continue,
- .BROKEN_PIPE => return 0,
- .HANDLE_EOF => return 0,
- .NETNAME_DELETED => return error.ConnectionResetByPeer,
- .LOCK_VIOLATION => return error.LockViolation,
- .ACCESS_DENIED => return error.AccessDenied,
- .INVALID_HANDLE => return error.NotOpenForReading,
- else => |err| return unexpectedError(err),
- }
- }
- return amt_read;
- }
-}
-
-pub const WriteFileError = error{
- SystemResources,
- Canceled,
- BrokenPipe,
- NotOpenForWriting,
- /// The process cannot access the file because another process has locked
- /// a portion of the file.
- LockViolation,
- /// The specified network name is no longer available.
- ConnectionResetByPeer,
- /// Known to be possible when:
- /// - Unable to write to disconnected virtual com port (Windows)
- AccessDenied,
- Unexpected,
-};
-
-pub fn WriteFile(
- handle: HANDLE,
- bytes: []const u8,
- offset: ?u64,
-) WriteFileError!usize {
- var bytes_written: DWORD = undefined;
- var overlapped_data: OVERLAPPED = undefined;
- const overlapped: ?*OVERLAPPED = if (offset) |off| blk: {
- overlapped_data = .{
- .Internal = 0,
- .InternalHigh = 0,
- .DUMMYUNIONNAME = .{
- .DUMMYSTRUCTNAME = .{
- .Offset = @truncate(off),
- .OffsetHigh = @truncate(off >> 32),
- },
- },
- .hEvent = null,
- };
- break :blk &overlapped_data;
- } else null;
- const adjusted_len = math.cast(u32, bytes.len) orelse maxInt(u32);
- if (kernel32.WriteFile(handle, bytes.ptr, adjusted_len, &bytes_written, overlapped) == 0) {
- switch (GetLastError()) {
- .INVALID_USER_BUFFER => return error.SystemResources,
- .NOT_ENOUGH_MEMORY => return error.SystemResources,
- .OPERATION_ABORTED => return error.Canceled,
- .NOT_ENOUGH_QUOTA => return error.SystemResources,
- .IO_PENDING => unreachable,
- .NO_DATA => return error.BrokenPipe,
- .INVALID_HANDLE => return error.NotOpenForWriting,
- .LOCK_VIOLATION => return error.LockViolation,
- .NETNAME_DELETED => return error.ConnectionResetByPeer,
- .ACCESS_DENIED => return error.AccessDenied,
- .WORKING_SET_QUOTA => return error.SystemResources,
- else => |err| return unexpectedError(err),
- }
- }
- return bytes_written;
-}
-
-pub const SetCurrentDirectoryError = error{
- NameTooLong,
- FileNotFound,
- NotDir,
- AccessDenied,
- NoDevice,
- BadPathName,
- Unexpected,
-};
-
-pub fn SetCurrentDirectory(path_name: []const u16) SetCurrentDirectoryError!void {
- const path_len_bytes = math.cast(u16, path_name.len * 2) orelse return error.NameTooLong;
-
- var nt_name: UNICODE_STRING = .{
- .Length = path_len_bytes,
- .MaximumLength = path_len_bytes,
- .Buffer = @constCast(path_name.ptr),
- };
-
- const rc = ntdll.RtlSetCurrentDirectory_U(&nt_name);
- switch (rc) {
- .SUCCESS => {},
- .OBJECT_NAME_INVALID => return error.BadPathName,
- .OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
- .OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
- .NO_MEDIA_IN_DEVICE => return error.NoDevice,
- .INVALID_PARAMETER => unreachable,
- .ACCESS_DENIED => return error.AccessDenied,
- .OBJECT_PATH_SYNTAX_BAD => unreachable,
- .NOT_A_DIRECTORY => return error.NotDir,
- else => return unexpectedStatus(rc),
- }
-}
-
pub const GetCurrentDirectoryError = error{
NameTooLong,
Unexpected,
@@ -3040,7 +2899,7 @@ pub fn CreateSymbolicLink(
},
.dir = dir,
.creation = .CREATE,
- .filter = if (is_directory) .dir_only else .file_only,
+ .filter = if (is_directory) .dir_only else .non_directory_only,
}) catch |err| switch (err) {
error.IsDir => return error.PathAlreadyExists,
error.NotDir => return error.Unexpected,
@@ -3584,7 +3443,7 @@ test QueryObjectName {
//any file will do; canonicalization works on NTFS junctions and symlinks, hardlinks remain separate paths.
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
- const handle = tmp.dir.fd;
+ const handle = tmp.dir.handle;
var out_buffer: [PATH_MAX_WIDE]u16 = undefined;
const result_path = try QueryObjectName(handle, &out_buffer);
@@ -3597,7 +3456,6 @@ test QueryObjectName {
pub const GetFinalPathNameByHandleError = error{
AccessDenied,
- BadPathName,
FileNotFound,
NameTooLong,
/// The volume does not contain a recognized file system. File system
@@ -3622,6 +3480,8 @@ pub const GetFinalPathNameByHandleFormat = struct {
/// NT or DOS volume name (e.g., `\Device\HarddiskVolume0\foo.txt` versus `C:\foo.txt`).
/// If DOS volume name format is selected, note that this function does *not* prepend
/// `\\?\` prefix to the resultant path.
+///
+/// TODO move this function into std.Io.Threaded and add cancelation checks
pub fn GetFinalPathNameByHandle(
hFile: HANDLE,
fmt: GetFinalPathNameByHandleFormat,
@@ -3701,6 +3561,7 @@ pub fn GetFinalPathNameByHandle(
error.WouldBlock => return error.Unexpected,
error.NetworkNotFound => return error.Unexpected,
error.AntivirusInterference => return error.Unexpected,
+ error.BadPathName => return error.Unexpected,
else => |e| return e,
};
defer CloseHandle(mgmt_handle);
@@ -3746,9 +3607,7 @@ pub fn GetFinalPathNameByHandle(
const total_len = drive_letter.len + file_name_u16.len;
// Validate that DOS does not contain any spurious nul bytes.
- if (mem.findScalar(u16, out_buffer[0..total_len], 0)) |_| {
- return error.BadPathName;
- }
+ assert(mem.findScalar(u16, out_buffer[0..total_len], 0) == null);
return out_buffer[0..total_len];
} else if (mountmgrIsVolumeName(symlink)) {
@@ -3798,9 +3657,7 @@ pub fn GetFinalPathNameByHandle(
const total_len = volume_path.len + file_name_u16.len;
// Validate that DOS does not contain any spurious nul bytes.
- if (mem.findScalar(u16, out_buffer[0..total_len], 0)) |_| {
- return error.BadPathName;
- }
+ assert(mem.findScalar(u16, out_buffer[0..total_len], 0) == null);
return out_buffer[0..total_len];
}
@@ -3847,7 +3704,7 @@ test GetFinalPathNameByHandle {
//any file will do
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
- const handle = tmp.dir.fd;
+ const handle = tmp.dir.handle;
var buffer: [PATH_MAX_WIDE]u16 = undefined;
//check with sufficient size
@@ -4248,80 +4105,6 @@ pub fn InitOnceExecuteOnce(InitOnce: *INIT_ONCE, InitFn: INIT_ONCE_FN, Parameter
assert(kernel32.InitOnceExecuteOnce(InitOnce, InitFn, Parameter, Context) != 0);
}
-pub const SetFileTimeError = error{Unexpected};
-
-pub fn SetFileTime(
- hFile: HANDLE,
- lpCreationTime: ?*const FILETIME,
- lpLastAccessTime: ?*const FILETIME,
- lpLastWriteTime: ?*const FILETIME,
-) SetFileTimeError!void {
- const rc = kernel32.SetFileTime(hFile, lpCreationTime, lpLastAccessTime, lpLastWriteTime);
- if (rc == 0) {
- switch (GetLastError()) {
- else => |err| return unexpectedError(err),
- }
- }
-}
-
-pub const LockFileError = error{
- SystemResources,
- WouldBlock,
-} || UnexpectedError;
-
-pub fn LockFile(
- FileHandle: HANDLE,
- Event: ?HANDLE,
- ApcRoutine: ?*const IO_APC_ROUTINE,
- ApcContext: ?*anyopaque,
- IoStatusBlock: *IO_STATUS_BLOCK,
- ByteOffset: *const LARGE_INTEGER,
- Length: *const LARGE_INTEGER,
- Key: ?*ULONG,
- FailImmediately: BOOLEAN,
- ExclusiveLock: BOOLEAN,
-) !void {
- const rc = ntdll.NtLockFile(
- FileHandle,
- Event,
- ApcRoutine,
- ApcContext,
- IoStatusBlock,
- ByteOffset,
- Length,
- Key,
- FailImmediately,
- ExclusiveLock,
- );
- switch (rc) {
- .SUCCESS => return,
- .INSUFFICIENT_RESOURCES => return error.SystemResources,
- .LOCK_NOT_GRANTED => return error.WouldBlock,
- .ACCESS_VIOLATION => unreachable, // bad io_status_block pointer
- else => return unexpectedStatus(rc),
- }
-}
-
-pub const UnlockFileError = error{
- RangeNotLocked,
-} || UnexpectedError;
-
-pub fn UnlockFile(
- FileHandle: HANDLE,
- IoStatusBlock: *IO_STATUS_BLOCK,
- ByteOffset: *const LARGE_INTEGER,
- Length: *const LARGE_INTEGER,
- Key: ULONG,
-) !void {
- const rc = ntdll.NtUnlockFile(FileHandle, IoStatusBlock, ByteOffset, Length, Key);
- switch (rc) {
- .SUCCESS => return,
- .RANGE_NOT_LOCKED => return error.RangeNotLocked,
- .ACCESS_VIOLATION => unreachable, // bad io_status_block pointer
- else => return unexpectedStatus(rc),
- }
-}
-
/// This is a workaround for the C backend until zig has the ability to put
/// C code in inline assembly.
extern fn zig_thumb_windows_teb() callconv(.c) *anyopaque;
@@ -4713,8 +4496,8 @@ pub fn wToPrefixedFileW(dir: ?HANDLE, path: [:0]const u16) Wtf16ToPrefixedFileWE
break :path_to_get path;
}
// We can also skip GetFinalPathNameByHandle if the handle matches
- // the handle returned by fs.cwd()
- if (dir.? == std.fs.cwd().fd) {
+ // the handle returned by Io.Dir.cwd()
+ if (dir.? == Io.Dir.cwd().handle) {
break :path_to_get path;
}
// At this point, we know we have a relative path that had too many
diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig
index 36b0e04e5c..7e479de8d4 100644
--- a/lib/std/pdb.zig
+++ b/lib/std/pdb.zig
@@ -12,7 +12,7 @@ const math = std.math;
const mem = std.mem;
const coff = std.coff;
const fs = std.fs;
-const File = std.fs.File;
+const File = std.Io.File;
const debug = std.debug;
const ArrayList = std.ArrayList;
diff --git a/lib/std/posix.zig b/lib/std/posix.zig
index feeeeb9220..52bc5f83e8 100644
--- a/lib/std/posix.zig
+++ b/lib/std/posix.zig
@@ -15,15 +15,16 @@
//! deal with the exception.
const builtin = @import("builtin");
-const root = @import("root");
+const native_os = builtin.os.tag;
+
const std = @import("std.zig");
+const Io = std.Io;
const mem = std.mem;
const fs = std.fs;
-const max_path_bytes = fs.max_path_bytes;
+const max_path_bytes = std.fs.max_path_bytes;
const maxInt = std.math.maxInt;
const cast = std.math.cast;
const assert = std.debug.assert;
-const native_os = builtin.os.tag;
const page_size_min = std.heap.page_size_min;
test {
@@ -53,6 +54,7 @@ else switch (native_os) {
pub const uid_t = void;
pub const gid_t = void;
pub const mode_t = u0;
+ pub const nlink_t = u0;
pub const ino_t = void;
pub const IFNAMESIZE = {};
pub const SIG = void;
@@ -309,274 +311,6 @@ pub fn close(fd: fd_t) void {
}
}
-pub const FChmodError = error{
- AccessDenied,
- PermissionDenied,
- InputOutput,
- SymLinkLoop,
- FileNotFound,
- SystemResources,
- ReadOnlyFileSystem,
-} || UnexpectedError;
-
-/// Changes the mode of the file referred to by the file descriptor.
-///
-/// The process must have the correct privileges in order to do this
-/// successfully, or must have the effective user ID matching the owner
-/// of the file.
-pub fn fchmod(fd: fd_t, mode: mode_t) FChmodError!void {
- if (!fs.has_executable_bit) @compileError("fchmod unsupported by target OS");
-
- while (true) {
- const res = system.fchmod(fd, mode);
- switch (errno(res)) {
- .SUCCESS => return,
- .INTR => continue,
- .BADF => unreachable,
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ACCES => return error.AccessDenied,
- .IO => return error.InputOutput,
- .LOOP => return error.SymLinkLoop,
- .NOENT => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- .NOTDIR => return error.FileNotFound,
- .PERM => return error.PermissionDenied,
- .ROFS => return error.ReadOnlyFileSystem,
- else => |err| return unexpectedErrno(err),
- }
- }
-}
-
-pub const FChmodAtError = FChmodError || error{
- /// A component of `path` exceeded `NAME_MAX`, or the entire path exceeded
- /// `PATH_MAX`.
- NameTooLong,
- /// `path` resolves to a symbolic link, and `AT.SYMLINK_NOFOLLOW` was set
- /// in `flags`. This error only occurs on Linux, where changing the mode of
- /// a symbolic link has no meaning and can cause undefined behaviour on
- /// certain filesystems.
- ///
- /// The procfs fallback was used but procfs was not mounted.
- OperationNotSupported,
- /// The procfs fallback was used but the process exceeded its open file
- /// limit.
- ProcessFdQuotaExceeded,
- /// The procfs fallback was used but the system exceeded it open file limit.
- SystemFdQuotaExceeded,
- Canceled,
-};
-
-/// Changes the `mode` of `path` relative to the directory referred to by
-/// `dirfd`. The process must have the correct privileges in order to do this
-/// successfully, or must have the effective user ID matching the owner of the
-/// file.
-///
-/// On Linux the `fchmodat2` syscall will be used if available, otherwise a
-/// workaround using procfs will be employed. Changing the mode of a symbolic
-/// link with `AT.SYMLINK_NOFOLLOW` set will also return
-/// `OperationNotSupported`, as:
-///
-/// 1. Permissions on the link are ignored when resolving its target.
-/// 2. This operation has been known to invoke undefined behaviour across
-/// different filesystems[1].
-///
-/// [1]: https://sourceware.org/legacy-ml/libc-alpha/2020-02/msg00467.html.
-pub inline fn fchmodat(dirfd: fd_t, path: []const u8, mode: mode_t, flags: u32) FChmodAtError!void {
- if (!fs.has_executable_bit) @compileError("fchmodat unsupported by target OS");
-
- // No special handling for linux is needed if we can use the libc fallback
- // or `flags` is empty. Glibc only added the fallback in 2.32.
- const skip_fchmodat_fallback = native_os != .linux or
- (!builtin.abi.isAndroid() and std.c.versionCheck(.{ .major = 2, .minor = 32, .patch = 0 })) or
- flags == 0;
-
- // This function is marked inline so that when flags is comptime-known,
- // skip_fchmodat_fallback will be comptime-known true.
- if (skip_fchmodat_fallback)
- return fchmodat1(dirfd, path, mode, flags);
-
- return fchmodat2(dirfd, path, mode, flags);
-}
-
-fn fchmodat1(dirfd: fd_t, path: []const u8, mode: mode_t, flags: u32) FChmodAtError!void {
- const path_c = try toPosixPath(path);
- while (true) {
- const res = system.fchmodat(dirfd, &path_c, mode, flags);
- switch (errno(res)) {
- .SUCCESS => return,
- .INTR => continue,
- .BADF => unreachable,
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ACCES => return error.AccessDenied,
- .IO => return error.InputOutput,
- .LOOP => return error.SymLinkLoop,
- .MFILE => return error.ProcessFdQuotaExceeded,
- .NAMETOOLONG => return error.NameTooLong,
- .NFILE => return error.SystemFdQuotaExceeded,
- .NOENT => return error.FileNotFound,
- .NOTDIR => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- .OPNOTSUPP => return error.OperationNotSupported,
- .PERM => return error.PermissionDenied,
- .ROFS => return error.ReadOnlyFileSystem,
- else => |err| return unexpectedErrno(err),
- }
- }
-}
-
-fn fchmodat2(dirfd: fd_t, path: []const u8, mode: mode_t, flags: u32) FChmodAtError!void {
- const global = struct {
- var has_fchmodat2: bool = true;
- };
- const path_c = try toPosixPath(path);
- const use_fchmodat2 = (builtin.os.isAtLeast(.linux, .{ .major = 6, .minor = 6, .patch = 0 }) orelse false) and
- @atomicLoad(bool, &global.has_fchmodat2, .monotonic);
- while (use_fchmodat2) {
- // Later on this should be changed to `system.fchmodat2`
- // when the musl/glibc add a wrapper.
- const res = linux.fchmodat2(dirfd, &path_c, mode, flags);
- switch (linux.errno(res)) {
- .SUCCESS => return,
- .INTR => continue,
- .BADF => unreachable,
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ACCES => return error.AccessDenied,
- .IO => return error.InputOutput,
- .LOOP => return error.SymLinkLoop,
- .NOENT => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- .NOTDIR => return error.FileNotFound,
- .OPNOTSUPP => return error.OperationNotSupported,
- .PERM => return error.PermissionDenied,
- .ROFS => return error.ReadOnlyFileSystem,
-
- .NOSYS => {
- @atomicStore(bool, &global.has_fchmodat2, false, .monotonic);
- break;
- },
- else => |err| return unexpectedErrno(err),
- }
- }
-
- // Fallback to changing permissions using procfs:
- //
- // 1. Open `path` as a `PATH` descriptor.
- // 2. Stat the fd and check if it isn't a symbolic link.
- // 3. Generate the procfs reference to the fd via `/proc/self/fd/{fd}`.
- // 4. Pass the procfs path to `chmod` with the `mode`.
- var pathfd: fd_t = undefined;
- while (true) {
- const rc = system.openat(dirfd, &path_c, .{ .PATH = true, .NOFOLLOW = true, .CLOEXEC = true }, @as(mode_t, 0));
- switch (errno(rc)) {
- .SUCCESS => {
- pathfd = @intCast(rc);
- break;
- },
- .INTR => continue,
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .LOOP => return error.SymLinkLoop,
- .MFILE => return error.ProcessFdQuotaExceeded,
- .NAMETOOLONG => return error.NameTooLong,
- .NFILE => return error.SystemFdQuotaExceeded,
- .NOENT => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- else => |err| return unexpectedErrno(err),
- }
- }
- defer close(pathfd);
-
- const path_mode = if (linux.wrapped.statx(
- pathfd,
- "",
- AT.EMPTY_PATH,
- .{ .TYPE = true },
- )) |stx| blk: {
- assert(stx.mask.TYPE);
- break :blk stx.mode;
- } else |err| switch (err) {
- error.NameTooLong => unreachable,
- error.FileNotFound => unreachable,
- else => |e| return e,
- };
- // Even though we only wanted TYPE, the kernel can still fill in the additional bits.
- if ((path_mode & S.IFMT) == S.IFLNK)
- return error.OperationNotSupported;
-
- var procfs_buf: ["/proc/self/fd/-2147483648\x00".len]u8 = undefined;
- const proc_path = std.fmt.bufPrintSentinel(procfs_buf[0..], "/proc/self/fd/{d}", .{pathfd}, 0) catch unreachable;
- while (true) {
- const res = system.chmod(proc_path, mode);
- switch (errno(res)) {
- // Getting NOENT here means that procfs isn't mounted.
- .NOENT => return error.OperationNotSupported,
-
- .SUCCESS => return,
- .INTR => continue,
- .BADF => unreachable,
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ACCES => return error.AccessDenied,
- .IO => return error.InputOutput,
- .LOOP => return error.SymLinkLoop,
- .NOMEM => return error.SystemResources,
- .NOTDIR => return error.FileNotFound,
- .PERM => return error.PermissionDenied,
- .ROFS => return error.ReadOnlyFileSystem,
- else => |err| return unexpectedErrno(err),
- }
- }
-}
-
-pub const FChownError = error{
- AccessDenied,
- PermissionDenied,
- InputOutput,
- SymLinkLoop,
- FileNotFound,
- SystemResources,
- ReadOnlyFileSystem,
-} || UnexpectedError;
-
-/// Changes the owner and group of the file referred to by the file descriptor.
-/// The process must have the correct privileges in order to do this
-/// successfully. The group may be changed by the owner of the directory to
-/// any group of which the owner is a member. If the owner or group is
-/// specified as `null`, the ID is not changed.
-pub fn fchown(fd: fd_t, owner: ?uid_t, group: ?gid_t) FChownError!void {
- switch (native_os) {
- .windows, .wasi => @compileError("Unsupported OS"),
- else => {},
- }
-
- while (true) {
- const res = system.fchown(fd, owner orelse ~@as(uid_t, 0), group orelse ~@as(gid_t, 0));
-
- switch (errno(res)) {
- .SUCCESS => return,
- .INTR => continue,
- .BADF => unreachable, // Can be reached if the fd refers to a directory opened without `Dir.OpenOptions{ .iterate = true }`
-
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ACCES => return error.AccessDenied,
- .IO => return error.InputOutput,
- .LOOP => return error.SymLinkLoop,
- .NOENT => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- .NOTDIR => return error.FileNotFound,
- .PERM => return error.PermissionDenied,
- .ROFS => return error.ReadOnlyFileSystem,
- else => |err| return unexpectedErrno(err),
- }
- }
-}
-
pub const RebootError = error{
PermissionDenied,
} || UnexpectedError;
@@ -698,66 +432,6 @@ fn getRandomBytesDevURandom(buf: []u8) GetRandomError!void {
}
}
-/// Causes abnormal process termination.
-/// If linking against libc, this calls the abort() libc function. Otherwise
-/// it raises SIGABRT followed by SIGKILL and finally lo
-/// Invokes the current signal handler for SIGABRT, if any.
-pub fn abort() noreturn {
- @branchHint(.cold);
- // MSVCRT abort() sometimes opens a popup window which is undesirable, so
- // even when linking libc on Windows we use our own abort implementation.
- // See https://github.com/ziglang/zig/issues/2071 for more details.
- if (native_os == .windows) {
- if (builtin.mode == .Debug and windows.peb().BeingDebugged != 0) {
- @breakpoint();
- }
- windows.ntdll.RtlExitUserProcess(3);
- }
- if (!builtin.link_libc and native_os == .linux) {
- // The Linux man page says that the libc abort() function
- // "first unblocks the SIGABRT signal", but this is a footgun
- // for user-defined signal handlers that want to restore some state in
- // some program sections and crash in others.
- // So, the user-installed SIGABRT handler is run, if present.
- raise(.ABRT) catch {};
-
- // Disable all signal handlers.
- const filledset = linux.sigfillset();
- sigprocmask(SIG.BLOCK, &filledset, null);
-
- // Only one thread may proceed to the rest of abort().
- if (!builtin.single_threaded) {
- const global = struct {
- var abort_entered: bool = false;
- };
- while (@cmpxchgWeak(bool, &global.abort_entered, false, true, .seq_cst, .seq_cst)) |_| {}
- }
-
- // Install default handler so that the tkill below will terminate.
- const sigact = Sigaction{
- .handler = .{ .handler = SIG.DFL },
- .mask = sigemptyset(),
- .flags = 0,
- };
- sigaction(.ABRT, &sigact, null);
-
- _ = linux.tkill(linux.gettid(), .ABRT);
-
- var sigabrtmask = sigemptyset();
- sigaddset(&sigabrtmask, .ABRT);
- sigprocmask(SIG.UNBLOCK, &sigabrtmask, null);
-
- // Beyond this point should be unreachable.
- @as(*allowzero volatile u8, @ptrFromInt(0)).* = 0;
- raise(.KILL) catch {};
- exit(127); // Pid 1 might not be signalled in some containers.
- }
- switch (native_os) {
- .uefi, .wasi, .emscripten, .cuda, .amdhsa => @trap(),
- else => system.abort(),
- }
-}
-
pub const RaiseError = UnexpectedError;
pub fn raise(sig: SIG) RaiseError!void {
@@ -798,33 +472,6 @@ pub fn kill(pid: pid_t, sig: SIG) KillError!void {
}
}
-/// Exits all threads of the program with the specified status code.
-pub fn exit(status: u8) noreturn {
- if (builtin.link_libc) {
- std.c.exit(status);
- }
- if (native_os == .windows) {
- windows.ntdll.RtlExitUserProcess(status);
- }
- if (native_os == .wasi) {
- wasi.proc_exit(status);
- }
- if (native_os == .linux and !builtin.single_threaded) {
- linux.exit_group(status);
- }
- if (native_os == .uefi) {
- const uefi = std.os.uefi;
- // exit() is only available if exitBootServices() has not been called yet.
- // This call to exit should not fail, so we catch-ignore errors.
- if (uefi.system_table.boot_services) |bs| {
- bs.exit(uefi.handle, @enumFromInt(status), null) catch {};
- }
- // If we can't exit, reboot the system instead.
- uefi.system_table.runtime_services.resetSystem(.cold, @enumFromInt(status), null);
- }
- system.exit(status);
-}
-
pub const ReadError = std.Io.File.Reader.Error;
/// Returns the number of bytes that were read, which can be less than
@@ -839,34 +486,8 @@ pub const ReadError = std.Io.File.Reader.Error;
/// The corresponding POSIX limit is `maxInt(isize)`.
pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
if (buf.len == 0) return 0;
- if (native_os == .windows) {
- return windows.ReadFile(fd, buf, null);
- }
- if (native_os == .wasi and !builtin.link_libc) {
- const iovs = [1]iovec{iovec{
- .base = buf.ptr,
- .len = buf.len,
- }};
-
- var nread: usize = undefined;
- switch (wasi.fd_read(fd, &iovs, iovs.len, &nread)) {
- .SUCCESS => return nread,
- .INTR => unreachable,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .AGAIN => unreachable,
- .BADF => return error.NotOpenForReading, // Can be a race condition.
- .IO => return error.InputOutput,
- .ISDIR => return error.IsDir,
- .NOBUFS => return error.SystemResources,
- .NOMEM => return error.SystemResources,
- .NOTCONN => return error.SocketUnconnected,
- .CONNRESET => return error.ConnectionResetByPeer,
- .TIMEDOUT => return error.Timeout,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
+ if (native_os == .windows) @compileError("unsupported OS");
+ if (native_os == .wasi) @compileError("unsupported OS");
// Prevents EINVAL.
const max_count = switch (native_os) {
@@ -881,7 +502,6 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
.INTR => continue,
.INVAL => unreachable,
.FAULT => unreachable,
- .SRCH => return error.ProcessNotFound,
.AGAIN => return error.WouldBlock,
.CANCELED => return error.Canceled,
.BADF => return error.NotOpenForReading, // Can be a race condition.
@@ -897,292 +517,6 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
}
}
-/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
-///
-/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
-/// return error.WouldBlock when EAGAIN is received.
-/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
-/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
-///
-/// This operation is non-atomic on the following systems:
-/// * Windows
-/// On these systems, the read races with concurrent writes to the same file descriptor.
-///
-/// This function assumes that all vectors, including zero-length vectors, have
-/// a pointer within the address space of the application.
-pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
- if (native_os == .windows) {
- if (iov.len == 0) return 0;
- const first = iov[0];
- return read(fd, first.base[0..first.len]);
- }
- if (native_os == .wasi and !builtin.link_libc) {
- var nread: usize = undefined;
- switch (wasi.fd_read(fd, iov.ptr, iov.len, &nread)) {
- .SUCCESS => return nread,
- .INTR => unreachable,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .AGAIN => unreachable, // currently not support in WASI
- .BADF => return error.NotOpenForReading, // can be a race condition
- .IO => return error.InputOutput,
- .ISDIR => return error.IsDir,
- .NOBUFS => return error.SystemResources,
- .NOMEM => return error.SystemResources,
- .NOTCONN => return error.SocketUnconnected,
- .CONNRESET => return error.ConnectionResetByPeer,
- .TIMEDOUT => return error.Timeout,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- while (true) {
- const rc = system.readv(fd, iov.ptr, @min(iov.len, IOV_MAX));
- switch (errno(rc)) {
- .SUCCESS => return @intCast(rc),
- .INTR => continue,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .SRCH => return error.ProcessNotFound,
- .AGAIN => return error.WouldBlock,
- .BADF => return error.NotOpenForReading, // can be a race condition
- .IO => return error.InputOutput,
- .ISDIR => return error.IsDir,
- .NOBUFS => return error.SystemResources,
- .NOMEM => return error.SystemResources,
- .NOTCONN => return error.SocketUnconnected,
- .CONNRESET => return error.ConnectionResetByPeer,
- .TIMEDOUT => return error.Timeout,
- else => |err| return unexpectedErrno(err),
- }
- }
-}
-
-pub const PReadError = std.Io.File.ReadPositionalError;
-
-/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
-///
-/// Retries when interrupted by a signal.
-///
-/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
-/// return error.WouldBlock when EAGAIN is received.
-/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
-/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
-///
-/// Linux has a limit on how many bytes may be transferred in one `pread` call, which is `0x7ffff000`
-/// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
-/// well as stuffing the errno codes into the last `4096` values. This is noted on the `read` man page.
-/// The limit on Darwin is `0x7fffffff`, trying to read more than that returns EINVAL.
-/// The corresponding POSIX limit is `maxInt(isize)`.
-pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize {
- if (buf.len == 0) return 0;
- if (native_os == .windows) {
- return windows.ReadFile(fd, buf, offset);
- }
- if (native_os == .wasi and !builtin.link_libc) {
- const iovs = [1]iovec{iovec{
- .base = buf.ptr,
- .len = buf.len,
- }};
-
- var nread: usize = undefined;
- switch (wasi.fd_pread(fd, &iovs, iovs.len, offset, &nread)) {
- .SUCCESS => return nread,
- .INTR => unreachable,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .AGAIN => unreachable,
- .BADF => return error.NotOpenForReading, // Can be a race condition.
- .IO => return error.InputOutput,
- .ISDIR => return error.IsDir,
- .NOBUFS => return error.SystemResources,
- .NOMEM => return error.SystemResources,
- .NOTCONN => return error.SocketUnconnected,
- .CONNRESET => return error.ConnectionResetByPeer,
- .TIMEDOUT => return error.Timeout,
- .NXIO => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- // Prevent EINVAL.
- const max_count = switch (native_os) {
- .linux => 0x7ffff000,
- .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => maxInt(i32),
- else => maxInt(isize),
- };
-
- const pread_sym = if (lfs64_abi) system.pread64 else system.pread;
- while (true) {
- const rc = pread_sym(fd, buf.ptr, @min(buf.len, max_count), @bitCast(offset));
- switch (errno(rc)) {
- .SUCCESS => return @intCast(rc),
- .INTR => continue,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .SRCH => return error.ProcessNotFound,
- .AGAIN => return error.WouldBlock,
- .BADF => return error.NotOpenForReading, // Can be a race condition.
- .IO => return error.InputOutput,
- .ISDIR => return error.IsDir,
- .NOBUFS => return error.SystemResources,
- .NOMEM => return error.SystemResources,
- .NOTCONN => return error.SocketUnconnected,
- .CONNRESET => return error.ConnectionResetByPeer,
- .TIMEDOUT => return error.Timeout,
- .NXIO => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- else => |err| return unexpectedErrno(err),
- }
- }
-}
-
-pub const TruncateError = error{
- FileTooBig,
- InputOutput,
- FileBusy,
- AccessDenied,
- PermissionDenied,
- NonResizable,
-} || UnexpectedError;
-
-/// Length must be positive when treated as an i64.
-pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
- const signed_len: i64 = @bitCast(length);
- if (signed_len < 0) return error.FileTooBig; // avoid ambiguous EINVAL errors
-
- if (native_os == .windows) {
- var io_status_block: windows.IO_STATUS_BLOCK = undefined;
- const eof_info: windows.FILE.END_OF_FILE_INFORMATION = .{
- .EndOfFile = signed_len,
- };
- const rc = windows.ntdll.NtSetInformationFile(
- fd,
- &io_status_block,
- &eof_info,
- @sizeOf(windows.FILE.END_OF_FILE_INFORMATION),
- .EndOfFile,
- );
- switch (rc) {
- .SUCCESS => return,
- .INVALID_HANDLE => unreachable, // Handle not open for writing
- .ACCESS_DENIED => return error.AccessDenied,
- .USER_MAPPED_FILE => return error.AccessDenied,
- .INVALID_PARAMETER => return error.FileTooBig,
- else => return windows.unexpectedStatus(rc),
- }
- }
- if (native_os == .wasi and !builtin.link_libc) {
- switch (wasi.fd_filestat_set_size(fd, length)) {
- .SUCCESS => return,
- .INTR => unreachable,
- .FBIG => return error.FileTooBig,
- .IO => return error.InputOutput,
- .PERM => return error.PermissionDenied,
- .TXTBSY => return error.FileBusy,
- .BADF => unreachable, // Handle not open for writing
- .INVAL => return error.NonResizable,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- const ftruncate_sym = if (lfs64_abi) system.ftruncate64 else system.ftruncate;
- while (true) {
- switch (errno(ftruncate_sym(fd, signed_len))) {
- .SUCCESS => return,
- .INTR => continue,
- .FBIG => return error.FileTooBig,
- .IO => return error.InputOutput,
- .PERM => return error.PermissionDenied,
- .TXTBSY => return error.FileBusy,
- .BADF => unreachable, // Handle not open for writing
- .INVAL => return error.NonResizable, // This is returned for /dev/null for example.
- else => |err| return unexpectedErrno(err),
- }
- }
-}
-
-/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
-///
-/// Retries when interrupted by a signal.
-///
-/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
-/// return error.WouldBlock when EAGAIN is received.
-/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
-/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
-///
-/// This operation is non-atomic on the following systems:
-/// * Darwin
-/// * Windows
-/// On these systems, the read races with concurrent writes to the same file descriptor.
-pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize {
- const have_pread_but_not_preadv = switch (native_os) {
- .windows, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .haiku => true,
- else => false,
- };
- if (have_pread_but_not_preadv) {
- // We could loop here; but proper usage of `preadv` must handle partial reads anyway.
- // So we simply read into the first vector only.
- if (iov.len == 0) return 0;
- const first = iov[0];
- return pread(fd, first.base[0..first.len], offset);
- }
- if (native_os == .wasi and !builtin.link_libc) {
- var nread: usize = undefined;
- switch (wasi.fd_pread(fd, iov.ptr, iov.len, offset, &nread)) {
- .SUCCESS => return nread,
- .INTR => unreachable,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .AGAIN => unreachable,
- .BADF => return error.NotOpenForReading, // can be a race condition
- .IO => return error.InputOutput,
- .ISDIR => return error.IsDir,
- .NOBUFS => return error.SystemResources,
- .NOMEM => return error.SystemResources,
- .NOTCONN => return error.SocketUnconnected,
- .CONNRESET => return error.ConnectionResetByPeer,
- .TIMEDOUT => return error.Timeout,
- .NXIO => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- const preadv_sym = if (lfs64_abi) system.preadv64 else system.preadv;
- while (true) {
- const rc = preadv_sym(fd, iov.ptr, @min(iov.len, IOV_MAX), @bitCast(offset));
- switch (errno(rc)) {
- .SUCCESS => return @bitCast(rc),
- .INTR => continue,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .SRCH => return error.ProcessNotFound,
- .AGAIN => return error.WouldBlock,
- .BADF => return error.NotOpenForReading, // can be a race condition
- .IO => return error.InputOutput,
- .ISDIR => return error.IsDir,
- .NOBUFS => return error.SystemResources,
- .NOMEM => return error.SystemResources,
- .NOTCONN => return error.SocketUnconnected,
- .CONNRESET => return error.ConnectionResetByPeer,
- .TIMEDOUT => return error.Timeout,
- .NXIO => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- else => |err| return unexpectedErrno(err),
- }
- }
-}
-
pub const WriteError = error{
DiskQuota,
FileTooBig,
@@ -1246,34 +580,8 @@ pub const WriteError = error{
/// The corresponding POSIX limit is `maxInt(isize)`.
pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
if (bytes.len == 0) return 0;
- if (native_os == .windows) {
- return windows.WriteFile(fd, bytes, null);
- }
-
- if (native_os == .wasi and !builtin.link_libc) {
- const ciovs = [_]iovec_const{iovec_const{
- .base = bytes.ptr,
- .len = bytes.len,
- }};
- var nwritten: usize = undefined;
- switch (wasi.fd_write(fd, &ciovs, ciovs.len, &nwritten)) {
- .SUCCESS => return nwritten,
- .INTR => unreachable,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .AGAIN => unreachable,
- .BADF => return error.NotOpenForWriting, // can be a race condition.
- .DESTADDRREQ => unreachable, // `connect` was never called.
- .DQUOT => return error.DiskQuota,
- .FBIG => return error.FileTooBig,
- .IO => return error.InputOutput,
- .NOSPC => return error.NoSpaceLeft,
- .PERM => return error.PermissionDenied,
- .PIPE => return error.BrokenPipe,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
+ if (native_os == .windows) @compileError("unsupported OS");
+ if (native_os == .wasi) @compileError("unsupported OS");
const max_count = switch (native_os) {
.linux => 0x7ffff000,
@@ -1287,7 +595,6 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
.INTR => continue,
.INVAL => return error.InvalidArgument,
.FAULT => unreachable,
- .SRCH => return error.ProcessNotFound,
.AGAIN => return error.WouldBlock,
.BADF => return error.NotOpenForWriting, // can be a race condition.
.DESTADDRREQ => unreachable, // `connect` was never called.
@@ -1307,257 +614,6 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
}
}
-/// Write multiple buffers to a file descriptor.
-/// Retries when interrupted by a signal.
-/// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero.
-///
-/// Note that a successful write() may transfer fewer bytes than supplied. Such partial writes can
-/// occur for various reasons; for example, because there was insufficient space on the disk
-/// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or
-/// similar was interrupted by a signal handler after it had transferred some, but before it had
-/// transferred all of the requested bytes. In the event of a partial write, the caller can make
-/// another write() call to transfer the remaining bytes. The subsequent call will either
-/// transfer further bytes or may result in an error (e.g., if the disk is now full).
-///
-/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
-/// return error.WouldBlock when EAGAIN is received.
-/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
-/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
-///
-/// If `iov.len` is larger than `IOV_MAX`, a partial write will occur.
-///
-/// This function assumes that all vectors, including zero-length vectors, have
-/// a pointer within the address space of the application.
-pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
- if (native_os == .windows) {
- // TODO improve this to use WriteFileScatter
- if (iov.len == 0) return 0;
- const first = iov[0];
- return write(fd, first.base[0..first.len]);
- }
- if (native_os == .wasi and !builtin.link_libc) {
- var nwritten: usize = undefined;
- switch (wasi.fd_write(fd, iov.ptr, iov.len, &nwritten)) {
- .SUCCESS => return nwritten,
- .INTR => unreachable,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .AGAIN => unreachable,
- .BADF => return error.NotOpenForWriting, // can be a race condition.
- .DESTADDRREQ => unreachable, // `connect` was never called.
- .DQUOT => return error.DiskQuota,
- .FBIG => return error.FileTooBig,
- .IO => return error.InputOutput,
- .NOSPC => return error.NoSpaceLeft,
- .PERM => return error.PermissionDenied,
- .PIPE => return error.BrokenPipe,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- while (true) {
- const rc = system.writev(fd, iov.ptr, @min(iov.len, IOV_MAX));
- switch (errno(rc)) {
- .SUCCESS => return @intCast(rc),
- .INTR => continue,
- .INVAL => return error.InvalidArgument,
- .FAULT => unreachable,
- .SRCH => return error.ProcessNotFound,
- .AGAIN => return error.WouldBlock,
- .BADF => return error.NotOpenForWriting, // Can be a race condition.
- .DESTADDRREQ => unreachable, // `connect` was never called.
- .DQUOT => return error.DiskQuota,
- .FBIG => return error.FileTooBig,
- .IO => return error.InputOutput,
- .NOSPC => return error.NoSpaceLeft,
- .PERM => return error.PermissionDenied,
- .PIPE => return error.BrokenPipe,
- .CONNRESET => return error.ConnectionResetByPeer,
- .BUSY => return error.DeviceBusy,
- .CANCELED => return error.Canceled,
- else => |err| return unexpectedErrno(err),
- }
- }
-}
-
-pub const PWriteError = WriteError || error{Unseekable};
-
-/// Write to a file descriptor, with a position offset.
-/// Retries when interrupted by a signal.
-/// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero.
-///
-/// Note that a successful write() may transfer fewer bytes than supplied. Such partial writes can
-/// occur for various reasons; for example, because there was insufficient space on the disk
-/// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or
-/// similar was interrupted by a signal handler after it had transferred some, but before it had
-/// transferred all of the requested bytes. In the event of a partial write, the caller can make
-/// another write() call to transfer the remaining bytes. The subsequent call will either
-/// transfer further bytes or may result in an error (e.g., if the disk is now full).
-///
-/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
-/// return error.WouldBlock when EAGAIN is received.
-/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
-/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
-///
-/// Linux has a limit on how many bytes may be transferred in one `pwrite` call, which is `0x7ffff000`
-/// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
-/// well as stuffing the errno codes into the last `4096` values. This is noted on the `write` man page.
-/// The limit on Darwin is `0x7fffffff`, trying to write more than that returns EINVAL.
-/// The corresponding POSIX limit is `maxInt(isize)`.
-pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
- if (bytes.len == 0) return 0;
- if (native_os == .windows) {
- return windows.WriteFile(fd, bytes, offset);
- }
- if (native_os == .wasi and !builtin.link_libc) {
- const ciovs = [1]iovec_const{iovec_const{
- .base = bytes.ptr,
- .len = bytes.len,
- }};
-
- var nwritten: usize = undefined;
- switch (wasi.fd_pwrite(fd, &ciovs, ciovs.len, offset, &nwritten)) {
- .SUCCESS => return nwritten,
- .INTR => unreachable,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .AGAIN => unreachable,
- .BADF => return error.NotOpenForWriting, // can be a race condition.
- .DESTADDRREQ => unreachable, // `connect` was never called.
- .DQUOT => return error.DiskQuota,
- .FBIG => return error.FileTooBig,
- .IO => return error.InputOutput,
- .NOSPC => return error.NoSpaceLeft,
- .PERM => return error.PermissionDenied,
- .PIPE => return error.BrokenPipe,
- .NXIO => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- // Prevent EINVAL.
- const max_count = switch (native_os) {
- .linux => 0x7ffff000,
- .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => maxInt(i32),
- else => maxInt(isize),
- };
-
- const pwrite_sym = if (lfs64_abi) system.pwrite64 else system.pwrite;
- while (true) {
- const rc = pwrite_sym(fd, bytes.ptr, @min(bytes.len, max_count), @bitCast(offset));
- switch (errno(rc)) {
- .SUCCESS => return @intCast(rc),
- .INTR => continue,
- .INVAL => return error.InvalidArgument,
- .FAULT => unreachable,
- .SRCH => return error.ProcessNotFound,
- .AGAIN => return error.WouldBlock,
- .BADF => return error.NotOpenForWriting, // Can be a race condition.
- .DESTADDRREQ => unreachable, // `connect` was never called.
- .DQUOT => return error.DiskQuota,
- .FBIG => return error.FileTooBig,
- .IO => return error.InputOutput,
- .NOSPC => return error.NoSpaceLeft,
- .PERM => return error.PermissionDenied,
- .PIPE => return error.BrokenPipe,
- .NXIO => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .BUSY => return error.DeviceBusy,
- else => |err| return unexpectedErrno(err),
- }
- }
-}
-
-/// Write multiple buffers to a file descriptor, with a position offset.
-/// Retries when interrupted by a signal.
-/// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero.
-///
-/// Note that a successful write() may transfer fewer than count bytes. Such partial writes can
-/// occur for various reasons; for example, because there was insufficient space on the disk
-/// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or
-/// similar was interrupted by a signal handler after it had transferred some, but before it had
-/// transferred all of the requested bytes. In the event of a partial write, the caller can make
-/// another write() call to transfer the remaining bytes. The subsequent call will either
-/// transfer further bytes or may result in an error (e.g., if the disk is now full).
-///
-/// If `fd` is opened in non blocking mode, the function will
-/// return error.WouldBlock when EAGAIN is received.
-///
-/// The following systems do not have this syscall, and will return partial writes if more than one
-/// vector is provided:
-/// * Darwin
-/// * Windows
-///
-/// If `iov.len` is larger than `IOV_MAX`, a partial write will occur.
-pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usize {
- const have_pwrite_but_not_pwritev = switch (native_os) {
- .windows, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .haiku => true,
- else => false,
- };
-
- if (have_pwrite_but_not_pwritev) {
- // We could loop here; but proper usage of `pwritev` must handle partial writes anyway.
- // So we simply write the first vector only.
- if (iov.len == 0) return 0;
- const first = iov[0];
- return pwrite(fd, first.base[0..first.len], offset);
- }
- if (native_os == .wasi and !builtin.link_libc) {
- var nwritten: usize = undefined;
- switch (wasi.fd_pwrite(fd, iov.ptr, iov.len, offset, &nwritten)) {
- .SUCCESS => return nwritten,
- .INTR => unreachable,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .AGAIN => unreachable,
- .BADF => return error.NotOpenForWriting, // Can be a race condition.
- .DESTADDRREQ => unreachable, // `connect` was never called.
- .DQUOT => return error.DiskQuota,
- .FBIG => return error.FileTooBig,
- .IO => return error.InputOutput,
- .NOSPC => return error.NoSpaceLeft,
- .PERM => return error.PermissionDenied,
- .PIPE => return error.BrokenPipe,
- .NXIO => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- const pwritev_sym = if (lfs64_abi) system.pwritev64 else system.pwritev;
- while (true) {
- const rc = pwritev_sym(fd, iov.ptr, @min(iov.len, IOV_MAX), @bitCast(offset));
- switch (errno(rc)) {
- .SUCCESS => return @intCast(rc),
- .INTR => continue,
- .INVAL => return error.InvalidArgument,
- .FAULT => unreachable,
- .SRCH => return error.ProcessNotFound,
- .AGAIN => return error.WouldBlock,
- .BADF => return error.NotOpenForWriting, // Can be a race condition.
- .DESTADDRREQ => unreachable, // `connect` was never called.
- .DQUOT => return error.DiskQuota,
- .FBIG => return error.FileTooBig,
- .IO => return error.InputOutput,
- .NOSPC => return error.NoSpaceLeft,
- .PERM => return error.PermissionDenied,
- .PIPE => return error.BrokenPipe,
- .NXIO => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .BUSY => return error.DeviceBusy,
- else => |err| return unexpectedErrno(err),
- }
- }
-}
-
pub const OpenError = std.Io.File.OpenError || error{WouldBlock};
/// Open and possibly create a file. Keeps trying if it gets interrupted.
@@ -1606,7 +662,8 @@ pub fn openZ(file_path: [*:0]const u8, flags: O, perm: mode_t) OpenError!fd_t {
.NFILE => return error.SystemFdQuotaExceeded,
.NODEV => return error.NoDevice,
.NOENT => return error.FileNotFound,
- .SRCH => return error.ProcessNotFound,
+ // Can happen on Linux when opening procfs files.
+ .SRCH => return error.FileNotFound,
.NOMEM => return error.SystemResources,
.NOSPC => return error.NoSpaceLeft,
.NOTDIR => return error.NotDir,
@@ -1668,14 +725,14 @@ pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: O, mode: mode_t) O
.NFILE => return error.SystemFdQuotaExceeded,
.NODEV => return error.NoDevice,
.NOENT => return error.FileNotFound,
- .SRCH => return error.ProcessNotFound,
+ .SRCH => return error.FileNotFound,
.NOMEM => return error.SystemResources,
.NOSPC => return error.NoSpaceLeft,
.NOTDIR => return error.NotDir,
.PERM => return error.PermissionDenied,
.EXIST => return error.PathAlreadyExists,
.BUSY => return error.DeviceBusy,
- .OPNOTSUPP => return error.FileLocksNotSupported,
+ .OPNOTSUPP => return error.FileLocksUnsupported,
.AGAIN => return error.WouldBlock,
.TXTBSY => return error.FileBusy,
.NXIO => return error.NoDevice,
@@ -1926,687 +983,6 @@ pub fn getcwd(out_buffer: []u8) GetCwdError![]u8 {
}
}
-pub const SymLinkError = error{
- /// In WASI, this error may occur when the file descriptor does
- /// not hold the required rights to create a new symbolic link relative to it.
- AccessDenied,
- PermissionDenied,
- DiskQuota,
- PathAlreadyExists,
- FileSystem,
- SymLinkLoop,
- FileNotFound,
- SystemResources,
- NoSpaceLeft,
- ReadOnlyFileSystem,
- NotDir,
- NameTooLong,
- /// WASI: file paths must be valid UTF-8.
- /// Windows: file paths provided by the user must be valid WTF-8.
- /// https://wtf-8.codeberg.page/
- BadPathName,
-} || UnexpectedError;
-
-/// Creates a symbolic link named `sym_link_path` which contains the string `target_path`.
-/// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent
-/// one; the latter case is known as a dangling link.
-/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-/// If `sym_link_path` exists, it will not be overwritten.
-/// See also `symlinkZ.
-pub fn symlink(target_path: []const u8, sym_link_path: []const u8) SymLinkError!void {
- if (native_os == .windows) {
- @compileError("symlink is not supported on Windows; use std.os.windows.CreateSymbolicLink instead");
- } else if (native_os == .wasi and !builtin.link_libc) {
- return symlinkat(target_path, AT.FDCWD, sym_link_path);
- }
- const target_path_c = try toPosixPath(target_path);
- const sym_link_path_c = try toPosixPath(sym_link_path);
- return symlinkZ(&target_path_c, &sym_link_path_c);
-}
-
-/// This is the same as `symlink` except the parameters are null-terminated pointers.
-/// See also `symlink`.
-pub fn symlinkZ(target_path: [*:0]const u8, sym_link_path: [*:0]const u8) SymLinkError!void {
- if (native_os == .windows) {
- @compileError("symlink is not supported on Windows; use std.os.windows.CreateSymbolicLink instead");
- } else if (native_os == .wasi and !builtin.link_libc) {
- return symlinkatZ(target_path, fs.cwd().fd, sym_link_path);
- }
- switch (errno(system.symlink(target_path, sym_link_path))) {
- .SUCCESS => return,
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .DQUOT => return error.DiskQuota,
- .EXIST => return error.PathAlreadyExists,
- .IO => return error.FileSystem,
- .LOOP => return error.SymLinkLoop,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOTDIR => return error.NotDir,
- .NOMEM => return error.SystemResources,
- .NOSPC => return error.NoSpaceLeft,
- .ROFS => return error.ReadOnlyFileSystem,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// Similar to `symlink`, however, creates a symbolic link named `sym_link_path` which contains the string
-/// `target_path` **relative** to `newdirfd` directory handle.
-/// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent
-/// one; the latter case is known as a dangling link.
-/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-/// If `sym_link_path` exists, it will not be overwritten.
-/// See also `symlinkatWasi`, `symlinkatZ` and `symlinkatW`.
-pub fn symlinkat(target_path: []const u8, newdirfd: fd_t, sym_link_path: []const u8) SymLinkError!void {
- if (native_os == .windows) {
- @compileError("symlinkat is not supported on Windows; use std.os.windows.CreateSymbolicLink instead");
- } else if (native_os == .wasi and !builtin.link_libc) {
- return symlinkatWasi(target_path, newdirfd, sym_link_path);
- }
- const target_path_c = try toPosixPath(target_path);
- const sym_link_path_c = try toPosixPath(sym_link_path);
- return symlinkatZ(&target_path_c, newdirfd, &sym_link_path_c);
-}
-
-/// WASI-only. The same as `symlinkat` but targeting WASI.
-/// See also `symlinkat`.
-pub fn symlinkatWasi(target_path: []const u8, newdirfd: fd_t, sym_link_path: []const u8) SymLinkError!void {
- switch (wasi.path_symlink(target_path.ptr, target_path.len, newdirfd, sym_link_path.ptr, sym_link_path.len)) {
- .SUCCESS => {},
- .FAULT => unreachable,
- .INVAL => unreachable,
- .BADF => unreachable,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .DQUOT => return error.DiskQuota,
- .EXIST => return error.PathAlreadyExists,
- .IO => return error.FileSystem,
- .LOOP => return error.SymLinkLoop,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOTDIR => return error.NotDir,
- .NOMEM => return error.SystemResources,
- .NOSPC => return error.NoSpaceLeft,
- .ROFS => return error.ReadOnlyFileSystem,
- .NOTCAPABLE => return error.AccessDenied,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// The same as `symlinkat` except the parameters are null-terminated pointers.
-/// See also `symlinkat`.
-pub fn symlinkatZ(target_path: [*:0]const u8, newdirfd: fd_t, sym_link_path: [*:0]const u8) SymLinkError!void {
- if (native_os == .windows) {
- @compileError("symlinkat is not supported on Windows; use std.os.windows.CreateSymbolicLink instead");
- } else if (native_os == .wasi and !builtin.link_libc) {
- return symlinkat(mem.sliceTo(target_path, 0), newdirfd, mem.sliceTo(sym_link_path, 0));
- }
- switch (errno(system.symlinkat(target_path, newdirfd, sym_link_path))) {
- .SUCCESS => return,
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .DQUOT => return error.DiskQuota,
- .EXIST => return error.PathAlreadyExists,
- .IO => return error.FileSystem,
- .LOOP => return error.SymLinkLoop,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOTDIR => return error.NotDir,
- .NOMEM => return error.SystemResources,
- .NOSPC => return error.NoSpaceLeft,
- .ROFS => return error.ReadOnlyFileSystem,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-pub const LinkError = UnexpectedError || error{
- AccessDenied,
- PermissionDenied,
- DiskQuota,
- PathAlreadyExists,
- FileSystem,
- SymLinkLoop,
- LinkQuotaExceeded,
- NameTooLong,
- FileNotFound,
- SystemResources,
- NoSpaceLeft,
- ReadOnlyFileSystem,
- NotSameFileSystem,
- BadPathName,
-};
-
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-pub fn linkZ(oldpath: [*:0]const u8, newpath: [*:0]const u8) LinkError!void {
- if (native_os == .wasi and !builtin.link_libc) {
- return link(mem.sliceTo(oldpath, 0), mem.sliceTo(newpath, 0));
- }
- switch (errno(system.link(oldpath, newpath))) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .DQUOT => return error.DiskQuota,
- .EXIST => return error.PathAlreadyExists,
- .FAULT => unreachable,
- .IO => return error.FileSystem,
- .LOOP => return error.SymLinkLoop,
- .MLINK => return error.LinkQuotaExceeded,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- .NOSPC => return error.NoSpaceLeft,
- .PERM => return error.PermissionDenied,
- .ROFS => return error.ReadOnlyFileSystem,
- .XDEV => return error.NotSameFileSystem,
- .INVAL => unreachable,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-pub fn link(oldpath: []const u8, newpath: []const u8) LinkError!void {
- if (native_os == .wasi and !builtin.link_libc) {
- return linkat(AT.FDCWD, oldpath, AT.FDCWD, newpath, 0) catch |err| switch (err) {
- error.NotDir => unreachable, // link() does not support directories
- else => |e| return e,
- };
- }
- const old = try toPosixPath(oldpath);
- const new = try toPosixPath(newpath);
- return try linkZ(&old, &new);
-}
-
-pub const LinkatError = LinkError || error{NotDir};
-
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-pub fn linkatZ(
- olddir: fd_t,
- oldpath: [*:0]const u8,
- newdir: fd_t,
- newpath: [*:0]const u8,
- flags: i32,
-) LinkatError!void {
- if (native_os == .wasi and !builtin.link_libc) {
- return linkat(olddir, mem.sliceTo(oldpath, 0), newdir, mem.sliceTo(newpath, 0), flags);
- }
- switch (errno(system.linkat(olddir, oldpath, newdir, newpath, flags))) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .DQUOT => return error.DiskQuota,
- .EXIST => return error.PathAlreadyExists,
- .FAULT => unreachable,
- .IO => return error.FileSystem,
- .LOOP => return error.SymLinkLoop,
- .MLINK => return error.LinkQuotaExceeded,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- .NOSPC => return error.NoSpaceLeft,
- .NOTDIR => return error.NotDir,
- .PERM => return error.PermissionDenied,
- .ROFS => return error.ReadOnlyFileSystem,
- .XDEV => return error.NotSameFileSystem,
- .INVAL => unreachable,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-pub fn linkat(
- olddir: fd_t,
- oldpath: []const u8,
- newdir: fd_t,
- newpath: []const u8,
- flags: i32,
-) LinkatError!void {
- if (native_os == .wasi and !builtin.link_libc) {
- const old: RelativePathWasi = .{ .dir_fd = olddir, .relative_path = oldpath };
- const new: RelativePathWasi = .{ .dir_fd = newdir, .relative_path = newpath };
- const old_flags: wasi.lookupflags_t = .{
- .SYMLINK_FOLLOW = (flags & AT.SYMLINK_FOLLOW) != 0,
- };
- switch (wasi.path_link(
- old.dir_fd,
- old_flags,
- old.relative_path.ptr,
- old.relative_path.len,
- new.dir_fd,
- new.relative_path.ptr,
- new.relative_path.len,
- )) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .DQUOT => return error.DiskQuota,
- .EXIST => return error.PathAlreadyExists,
- .FAULT => unreachable,
- .IO => return error.FileSystem,
- .LOOP => return error.SymLinkLoop,
- .MLINK => return error.LinkQuotaExceeded,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- .NOSPC => return error.NoSpaceLeft,
- .NOTDIR => return error.NotDir,
- .PERM => return error.PermissionDenied,
- .ROFS => return error.ReadOnlyFileSystem,
- .XDEV => return error.NotSameFileSystem,
- .INVAL => unreachable,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
- }
- const old = try toPosixPath(oldpath);
- const new = try toPosixPath(newpath);
- return try linkatZ(olddir, &old, newdir, &new, flags);
-}
-
-pub const UnlinkError = error{
- FileNotFound,
-
- /// In WASI, this error may occur when the file descriptor does
- /// not hold the required rights to unlink a resource by path relative to it.
- AccessDenied,
- PermissionDenied,
- FileBusy,
- FileSystem,
- IsDir,
- SymLinkLoop,
- NameTooLong,
- NotDir,
- SystemResources,
- ReadOnlyFileSystem,
-
- /// WASI: file paths must be valid UTF-8.
- /// Windows: file paths provided by the user must be valid WTF-8.
- /// https://wtf-8.codeberg.page/
- /// Windows: file paths cannot contain these characters:
- /// '/', '*', '?', '"', '<', '>', '|'
- BadPathName,
-
- /// On Windows, `\\server` or `\\server\share` was not found.
- NetworkNotFound,
-} || UnexpectedError;
-
-/// Delete a name and possibly the file it refers to.
-/// On Windows, `file_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `file_path` should be encoded as valid UTF-8.
-/// On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
-/// See also `unlinkZ`.
-pub fn unlink(file_path: []const u8) UnlinkError!void {
- if (native_os == .wasi and !builtin.link_libc) {
- return unlinkat(AT.FDCWD, file_path, 0) catch |err| switch (err) {
- error.DirNotEmpty => unreachable, // only occurs when targeting directories
- else => |e| return e,
- };
- } else if (native_os == .windows) {
- const file_path_w = try windows.sliceToPrefixedFileW(null, file_path);
- return unlinkW(file_path_w.span());
- } else {
- const file_path_c = try toPosixPath(file_path);
- return unlinkZ(&file_path_c);
- }
-}
-
-/// Same as `unlink` except the parameter is null terminated.
-pub fn unlinkZ(file_path: [*:0]const u8) UnlinkError!void {
- if (native_os == .windows) {
- const file_path_w = try windows.cStrToPrefixedFileW(null, file_path);
- return unlinkW(file_path_w.span());
- } else if (native_os == .wasi and !builtin.link_libc) {
- return unlink(mem.sliceTo(file_path, 0));
- }
- switch (errno(system.unlink(file_path))) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .BUSY => return error.FileBusy,
- .FAULT => unreachable,
- .INVAL => unreachable,
- .IO => return error.FileSystem,
- .ISDIR => return error.IsDir,
- .LOOP => return error.SymLinkLoop,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOTDIR => return error.NotDir,
- .NOMEM => return error.SystemResources,
- .ROFS => return error.ReadOnlyFileSystem,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// Windows-only. Same as `unlink` except the parameter is null-terminated, WTF16 LE encoded.
-pub fn unlinkW(file_path_w: []const u16) UnlinkError!void {
- windows.DeleteFile(file_path_w, .{ .dir = fs.cwd().fd }) catch |err| switch (err) {
- error.DirNotEmpty => unreachable, // we're not passing .remove_dir = true
- else => |e| return e,
- };
-}
-
-pub const UnlinkatError = UnlinkError || error{
- /// When passing `AT.REMOVEDIR`, this error occurs when the named directory is not empty.
- DirNotEmpty,
-};
-
-/// Delete a file name and possibly the file it refers to, based on an open directory handle.
-/// On Windows, `file_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `file_path` should be encoded as valid UTF-8.
-/// On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
-/// Asserts that the path parameter has no null bytes.
-pub fn unlinkat(dirfd: fd_t, file_path: []const u8, flags: u32) UnlinkatError!void {
- if (native_os == .windows) {
- const file_path_w = try windows.sliceToPrefixedFileW(dirfd, file_path);
- return unlinkatW(dirfd, file_path_w.span(), flags);
- } else if (native_os == .wasi and !builtin.link_libc) {
- return unlinkatWasi(dirfd, file_path, flags);
- } else {
- const file_path_c = try toPosixPath(file_path);
- return unlinkatZ(dirfd, &file_path_c, flags);
- }
-}
-
-/// WASI-only. Same as `unlinkat` but targeting WASI.
-/// See also `unlinkat`.
-pub fn unlinkatWasi(dirfd: fd_t, file_path: []const u8, flags: u32) UnlinkatError!void {
- const remove_dir = (flags & AT.REMOVEDIR) != 0;
- const res = if (remove_dir)
- wasi.path_remove_directory(dirfd, file_path.ptr, file_path.len)
- else
- wasi.path_unlink_file(dirfd, file_path.ptr, file_path.len);
- switch (res) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .BUSY => return error.FileBusy,
- .FAULT => unreachable,
- .IO => return error.FileSystem,
- .ISDIR => return error.IsDir,
- .LOOP => return error.SymLinkLoop,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOTDIR => return error.NotDir,
- .NOMEM => return error.SystemResources,
- .ROFS => return error.ReadOnlyFileSystem,
- .NOTEMPTY => return error.DirNotEmpty,
- .NOTCAPABLE => return error.AccessDenied,
- .ILSEQ => return error.BadPathName,
-
- .INVAL => unreachable, // invalid flags, or pathname has . as last component
- .BADF => unreachable, // always a race condition
-
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// Same as `unlinkat` but `file_path` is a null-terminated string.
-pub fn unlinkatZ(dirfd: fd_t, file_path_c: [*:0]const u8, flags: u32) UnlinkatError!void {
- if (native_os == .windows) {
- const file_path_w = try windows.cStrToPrefixedFileW(dirfd, file_path_c);
- return unlinkatW(dirfd, file_path_w.span(), flags);
- } else if (native_os == .wasi and !builtin.link_libc) {
- return unlinkat(dirfd, mem.sliceTo(file_path_c, 0), flags);
- }
- switch (errno(system.unlinkat(dirfd, file_path_c, flags))) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .BUSY => return error.FileBusy,
- .FAULT => unreachable,
- .IO => return error.FileSystem,
- .ISDIR => return error.IsDir,
- .LOOP => return error.SymLinkLoop,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOTDIR => return error.NotDir,
- .NOMEM => return error.SystemResources,
- .ROFS => return error.ReadOnlyFileSystem,
- .EXIST => return error.DirNotEmpty,
- .NOTEMPTY => return error.DirNotEmpty,
- .ILSEQ => return error.BadPathName,
-
- .INVAL => unreachable, // invalid flags, or pathname has . as last component
- .BADF => unreachable, // always a race condition
-
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// Same as `unlinkat` but `sub_path_w` is WTF16LE, NT prefixed. Windows only.
-pub fn unlinkatW(dirfd: fd_t, sub_path_w: []const u16, flags: u32) UnlinkatError!void {
- const remove_dir = (flags & AT.REMOVEDIR) != 0;
- return windows.DeleteFile(sub_path_w, .{ .dir = dirfd, .remove_dir = remove_dir });
-}
-
-pub const RenameError = error{
- /// In WASI, this error may occur when the file descriptor does
- /// not hold the required rights to rename a resource by path relative to it.
- ///
- /// On Windows, this error may be returned instead of PathAlreadyExists when
- /// renaming a directory over an existing directory.
- AccessDenied,
- PermissionDenied,
- FileBusy,
- DiskQuota,
- IsDir,
- SymLinkLoop,
- LinkQuotaExceeded,
- NameTooLong,
- FileNotFound,
- NotDir,
- SystemResources,
- NoSpaceLeft,
- PathAlreadyExists,
- ReadOnlyFileSystem,
- RenameAcrossMountPoints,
- /// WASI: file paths must be valid UTF-8.
- /// Windows: file paths provided by the user must be valid WTF-8.
- /// https://wtf-8.codeberg.page/
- BadPathName,
- NoDevice,
- SharingViolation,
- PipeBusy,
- /// On Windows, `\\server` or `\\server\share` was not found.
- NetworkNotFound,
- /// On Windows, antivirus software is enabled by default. It can be
- /// disabled, but Windows Update sometimes ignores the user's preference
- /// and re-enables it. When enabled, antivirus software on Windows
- /// intercepts file system operations and makes them significantly slower
- /// in addition to possibly failing with this error code.
- AntivirusInterference,
-} || UnexpectedError;
-
-/// Change the name or location of a file.
-/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-pub fn rename(old_path: []const u8, new_path: []const u8) RenameError!void {
- if (native_os == .wasi and !builtin.link_libc) {
- return renameat(AT.FDCWD, old_path, AT.FDCWD, new_path);
- } else if (native_os == .windows) {
- const old_path_w = try windows.sliceToPrefixedFileW(null, old_path);
- const new_path_w = try windows.sliceToPrefixedFileW(null, new_path);
- return renameW(old_path_w.span().ptr, new_path_w.span().ptr);
- } else {
- const old_path_c = try toPosixPath(old_path);
- const new_path_c = try toPosixPath(new_path);
- return renameZ(&old_path_c, &new_path_c);
- }
-}
-
-/// Same as `rename` except the parameters are null-terminated.
-pub fn renameZ(old_path: [*:0]const u8, new_path: [*:0]const u8) RenameError!void {
- if (native_os == .windows) {
- const old_path_w = try windows.cStrToPrefixedFileW(null, old_path);
- const new_path_w = try windows.cStrToPrefixedFileW(null, new_path);
- return renameW(old_path_w.span().ptr, new_path_w.span().ptr);
- } else if (native_os == .wasi and !builtin.link_libc) {
- return rename(mem.sliceTo(old_path, 0), mem.sliceTo(new_path, 0));
- }
- switch (errno(system.rename(old_path, new_path))) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .BUSY => return error.FileBusy,
- .DQUOT => return error.DiskQuota,
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ISDIR => return error.IsDir,
- .LOOP => return error.SymLinkLoop,
- .MLINK => return error.LinkQuotaExceeded,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOTDIR => return error.NotDir,
- .NOMEM => return error.SystemResources,
- .NOSPC => return error.NoSpaceLeft,
- .EXIST => return error.PathAlreadyExists,
- .NOTEMPTY => return error.PathAlreadyExists,
- .ROFS => return error.ReadOnlyFileSystem,
- .XDEV => return error.RenameAcrossMountPoints,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// Same as `rename` except the parameters are null-terminated and WTF16LE encoded.
-/// Assumes target is Windows.
-pub fn renameW(old_path: [*:0]const u16, new_path: [*:0]const u16) RenameError!void {
- const cwd_handle = std.fs.cwd().fd;
- return windows.RenameFile(cwd_handle, mem.span(old_path), cwd_handle, mem.span(new_path), true);
-}
-
-/// Change the name or location of a file based on an open directory handle.
-/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, both paths should be encoded as valid UTF-8.
-/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
-pub fn renameat(
- old_dir_fd: fd_t,
- old_path: []const u8,
- new_dir_fd: fd_t,
- new_path: []const u8,
-) RenameError!void {
- if (native_os == .windows) {
- const old_path_w = try windows.sliceToPrefixedFileW(old_dir_fd, old_path);
- const new_path_w = try windows.sliceToPrefixedFileW(new_dir_fd, new_path);
- return renameatW(old_dir_fd, old_path_w.span(), new_dir_fd, new_path_w.span(), windows.TRUE);
- } else if (native_os == .wasi and !builtin.link_libc) {
- const old: RelativePathWasi = .{ .dir_fd = old_dir_fd, .relative_path = old_path };
- const new: RelativePathWasi = .{ .dir_fd = new_dir_fd, .relative_path = new_path };
- return renameatWasi(old, new);
- } else {
- const old_path_c = try toPosixPath(old_path);
- const new_path_c = try toPosixPath(new_path);
- return renameatZ(old_dir_fd, &old_path_c, new_dir_fd, &new_path_c);
- }
-}
-
-/// WASI-only. Same as `renameat` expect targeting WASI.
-/// See also `renameat`.
-fn renameatWasi(old: RelativePathWasi, new: RelativePathWasi) RenameError!void {
- switch (wasi.path_rename(old.dir_fd, old.relative_path.ptr, old.relative_path.len, new.dir_fd, new.relative_path.ptr, new.relative_path.len)) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .BUSY => return error.FileBusy,
- .DQUOT => return error.DiskQuota,
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ISDIR => return error.IsDir,
- .LOOP => return error.SymLinkLoop,
- .MLINK => return error.LinkQuotaExceeded,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOTDIR => return error.NotDir,
- .NOMEM => return error.SystemResources,
- .NOSPC => return error.NoSpaceLeft,
- .EXIST => return error.PathAlreadyExists,
- .NOTEMPTY => return error.PathAlreadyExists,
- .ROFS => return error.ReadOnlyFileSystem,
- .XDEV => return error.RenameAcrossMountPoints,
- .NOTCAPABLE => return error.AccessDenied,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// An fd-relative file path
-///
-/// This is currently only used for WASI-specific functionality, but the concept
-/// is the same as the dirfd/pathname pairs in the `*at(...)` POSIX functions.
-const RelativePathWasi = struct {
- /// Handle to directory
- dir_fd: fd_t,
- /// Path to resource within `dir_fd`.
- relative_path: []const u8,
-};
-
-/// Same as `renameat` except the parameters are null-terminated.
-pub fn renameatZ(
- old_dir_fd: fd_t,
- old_path: [*:0]const u8,
- new_dir_fd: fd_t,
- new_path: [*:0]const u8,
-) RenameError!void {
- if (native_os == .windows) {
- const old_path_w = try windows.cStrToPrefixedFileW(old_dir_fd, old_path);
- const new_path_w = try windows.cStrToPrefixedFileW(new_dir_fd, new_path);
- return renameatW(old_dir_fd, old_path_w.span(), new_dir_fd, new_path_w.span(), windows.TRUE);
- } else if (native_os == .wasi and !builtin.link_libc) {
- return renameat(old_dir_fd, mem.sliceTo(old_path, 0), new_dir_fd, mem.sliceTo(new_path, 0));
- }
-
- switch (errno(system.renameat(old_dir_fd, old_path, new_dir_fd, new_path))) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .BUSY => return error.FileBusy,
- .DQUOT => return error.DiskQuota,
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ISDIR => return error.IsDir,
- .LOOP => return error.SymLinkLoop,
- .MLINK => return error.LinkQuotaExceeded,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOTDIR => return error.NotDir,
- .NOMEM => return error.SystemResources,
- .NOSPC => return error.NoSpaceLeft,
- .EXIST => return error.PathAlreadyExists,
- .NOTEMPTY => return error.PathAlreadyExists,
- .ROFS => return error.ReadOnlyFileSystem,
- .XDEV => return error.RenameAcrossMountPoints,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// Same as `renameat` but Windows-only and the path parameters are
-/// [WTF-16](https://wtf-8.codeberg.page/#potentially-ill-formed-utf-16) encoded.
-pub fn renameatW(
- old_dir_fd: fd_t,
- old_path_w: []const u16,
- new_dir_fd: fd_t,
- new_path_w: []const u16,
- ReplaceIfExists: windows.BOOLEAN,
-) RenameError!void {
- return windows.RenameFile(old_dir_fd, old_path_w, new_dir_fd, new_path_w, ReplaceIfExists != 0);
-}
-
/// On Windows, `sub_dir_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, `sub_dir_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_dir_path` is an opaque sequence of bytes with no particular encoding.
@@ -2651,7 +1027,7 @@ pub fn mkdiratZ(dir_fd: fd_t, sub_dir_path: [*:0]const u8, mode: mode_t) MakeDir
}
}
-pub const MakeDirError = std.Io.Dir.MakeError;
+pub const MakeDirError = std.Io.Dir.CreateDirError;
/// Create a directory.
/// `mode` is ignored on Windows and WASI.
@@ -2705,7 +1081,7 @@ pub fn mkdirZ(dir_path: [*:0]const u8, mode: mode_t) MakeDirError!void {
pub fn mkdirW(dir_path_w: []const u16, mode: mode_t) MakeDirError!void {
_ = mode;
const sub_dir_handle = windows.OpenFile(dir_path_w, .{
- .dir = fs.cwd().fd,
+ .dir = Io.Dir.cwd().handle,
.access_mask = .{
.STANDARD = .{ .SYNCHRONIZE = true },
.GENERIC = .{ .READ = true },
@@ -2723,84 +1099,6 @@ pub fn mkdirW(dir_path_w: []const u16, mode: mode_t) MakeDirError!void {
windows.CloseHandle(sub_dir_handle);
}
-pub const DeleteDirError = error{
- AccessDenied,
- PermissionDenied,
- FileBusy,
- SymLinkLoop,
- NameTooLong,
- FileNotFound,
- SystemResources,
- NotDir,
- DirNotEmpty,
- ReadOnlyFileSystem,
- /// WASI: file paths must be valid UTF-8.
- /// Windows: file paths provided by the user must be valid WTF-8.
- /// https://wtf-8.codeberg.page/
- BadPathName,
- /// On Windows, `\\server` or `\\server\share` was not found.
- NetworkNotFound,
-} || UnexpectedError;
-
-/// Deletes an empty directory.
-/// On Windows, `dir_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `dir_path` should be encoded as valid UTF-8.
-/// On other platforms, `dir_path` is an opaque sequence of bytes with no particular encoding.
-pub fn rmdir(dir_path: []const u8) DeleteDirError!void {
- if (native_os == .wasi and !builtin.link_libc) {
- return unlinkat(AT.FDCWD, dir_path, AT.REMOVEDIR) catch |err| switch (err) {
- error.FileSystem => unreachable, // only occurs when targeting files
- error.IsDir => unreachable, // only occurs when targeting files
- else => |e| return e,
- };
- } else if (native_os == .windows) {
- const dir_path_w = try windows.sliceToPrefixedFileW(null, dir_path);
- return rmdirW(dir_path_w.span());
- } else {
- const dir_path_c = try toPosixPath(dir_path);
- return rmdirZ(&dir_path_c);
- }
-}
-
-/// Same as `rmdir` except the parameter is null-terminated.
-/// On Windows, `dir_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `dir_path` should be encoded as valid UTF-8.
-/// On other platforms, `dir_path` is an opaque sequence of bytes with no particular encoding.
-pub fn rmdirZ(dir_path: [*:0]const u8) DeleteDirError!void {
- if (native_os == .windows) {
- const dir_path_w = try windows.cStrToPrefixedFileW(null, dir_path);
- return rmdirW(dir_path_w.span());
- } else if (native_os == .wasi and !builtin.link_libc) {
- return rmdir(mem.sliceTo(dir_path, 0));
- }
- switch (errno(system.rmdir(dir_path))) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .BUSY => return error.FileBusy,
- .FAULT => unreachable,
- .INVAL => return error.BadPathName,
- .LOOP => return error.SymLinkLoop,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- .NOTDIR => return error.NotDir,
- .EXIST => return error.DirNotEmpty,
- .NOTEMPTY => return error.DirNotEmpty,
- .ROFS => return error.ReadOnlyFileSystem,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// Windows-only. Same as `rmdir` except the parameter is WTF-16 LE encoded.
-pub fn rmdirW(dir_path_w: []const u16) DeleteDirError!void {
- return windows.DeleteFile(dir_path_w, .{ .dir = fs.cwd().fd, .remove_dir = true }) catch |err| switch (err) {
- error.IsDir => unreachable,
- else => |e| return e,
- };
-}
-
pub const ChangeCurDirError = error{
AccessDenied,
FileSystem,
@@ -2821,11 +1119,9 @@ pub const ChangeCurDirError = error{
/// On other platforms, `dir_path` is an opaque sequence of bytes with no particular encoding.
pub fn chdir(dir_path: []const u8) ChangeCurDirError!void {
if (native_os == .wasi and !builtin.link_libc) {
- @compileError("WASI does not support os.chdir");
+ @compileError("unsupported OS");
} else if (native_os == .windows) {
- var wtf16_dir_path: [windows.PATH_MAX_WIDE]u16 = undefined;
- const len = try windows.wtf8ToWtf16Le(&wtf16_dir_path, dir_path);
- return chdirW(wtf16_dir_path[0..len]);
+ @compileError("unsupported OS");
} else {
const dir_path_c = try toPosixPath(dir_path);
return chdirZ(&dir_path_c);
@@ -2838,12 +1134,9 @@ pub fn chdir(dir_path: []const u8) ChangeCurDirError!void {
/// On other platforms, `dir_path` is an opaque sequence of bytes with no particular encoding.
pub fn chdirZ(dir_path: [*:0]const u8) ChangeCurDirError!void {
if (native_os == .windows) {
- const dir_path_span = mem.span(dir_path);
- var wtf16_dir_path: [windows.PATH_MAX_WIDE]u16 = undefined;
- const len = try windows.wtf8ToWtf16Le(&wtf16_dir_path, dir_path_span);
- return chdirW(wtf16_dir_path[0..len]);
+ @compileError("unsupported OS");
} else if (native_os == .wasi and !builtin.link_libc) {
- return chdir(mem.span(dir_path));
+ @compileError("unsupported OS");
}
switch (errno(system.chdir(dir_path))) {
.SUCCESS => return,
@@ -2860,14 +1153,6 @@ pub fn chdirZ(dir_path: [*:0]const u8) ChangeCurDirError!void {
}
}
-/// Windows-only. Same as `chdir` except the parameter is WTF16 LE encoded.
-pub fn chdirW(dir_path: []const u16) ChangeCurDirError!void {
- windows.SetCurrentDirectory(dir_path) catch |err| switch (err) {
- error.NoDevice => return error.FileSystem,
- else => |e| return e,
- };
-}
-
pub const FchdirError = error{
AccessDenied,
NotDir,
@@ -2889,194 +1174,6 @@ pub fn fchdir(dirfd: fd_t) FchdirError!void {
}
}
-pub const ReadLinkError = error{
- /// In WASI, this error may occur when the file descriptor does
- /// not hold the required rights to read value of a symbolic link relative to it.
- AccessDenied,
- PermissionDenied,
- FileSystem,
- SymLinkLoop,
- NameTooLong,
- FileNotFound,
- SystemResources,
- NotLink,
- NotDir,
- /// WASI: file paths must be valid UTF-8.
- /// Windows: file paths provided by the user must be valid WTF-8.
- /// https://wtf-8.codeberg.page/
- BadPathName,
- /// Windows-only. This error may occur if the opened reparse point is
- /// of unsupported type.
- UnsupportedReparsePointType,
- /// On Windows, `\\server` or `\\server\share` was not found.
- NetworkNotFound,
- /// On Windows, antivirus software is enabled by default. It can be
- /// disabled, but Windows Update sometimes ignores the user's preference
- /// and re-enables it. When enabled, antivirus software on Windows
- /// intercepts file system operations and makes them significantly slower
- /// in addition to possibly failing with this error code.
- AntivirusInterference,
-} || UnexpectedError;
-
-/// Read value of a symbolic link.
-/// On Windows, `file_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `file_path` should be encoded as valid UTF-8.
-/// On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
-/// The return value is a slice of `out_buffer` from index 0.
-/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, the result is encoded as UTF-8.
-/// On other platforms, the result is an opaque sequence of bytes with no particular encoding.
-pub fn readlink(file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 {
- if (native_os == .wasi and !builtin.link_libc) {
- return readlinkat(AT.FDCWD, file_path, out_buffer);
- } else if (native_os == .windows) {
- var file_path_w = try windows.sliceToPrefixedFileW(null, file_path);
- const result_w = try readlinkW(file_path_w.span(), &file_path_w.data);
-
- const len = std.unicode.calcWtf8Len(result_w);
- if (len > out_buffer.len) return error.NameTooLong;
-
- const end_index = std.unicode.wtf16LeToWtf8(out_buffer, result_w);
- return out_buffer[0..end_index];
- } else {
- const file_path_c = try toPosixPath(file_path);
- return readlinkZ(&file_path_c, out_buffer);
- }
-}
-
-/// Windows-only. Same as `readlink` except `file_path` is WTF-16 LE encoded, NT-prefixed.
-/// The result is encoded as WTF-16 LE.
-///
-/// `file_path` will never be accessed after `out_buffer` has been written to, so it
-/// is safe to reuse a single buffer for both.
-///
-/// See also `readlinkZ`.
-pub fn readlinkW(file_path: []const u16, out_buffer: []u16) ReadLinkError![]u16 {
- return windows.ReadLink(fs.cwd().fd, file_path, out_buffer);
-}
-
-/// Same as `readlink` except `file_path` is null-terminated.
-pub fn readlinkZ(file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8 {
- if (native_os == .windows) {
- var file_path_w = try windows.cStrToPrefixedFileW(null, file_path);
- const result_w = try readlinkW(file_path_w.span(), &file_path_w.data);
-
- const len = std.unicode.calcWtf8Len(result_w);
- if (len > out_buffer.len) return error.NameTooLong;
-
- const end_index = std.unicode.wtf16LeToWtf8(out_buffer, result_w);
- return out_buffer[0..end_index];
- } else if (native_os == .wasi and !builtin.link_libc) {
- return readlink(mem.sliceTo(file_path, 0), out_buffer);
- }
- const rc = system.readlink(file_path, out_buffer.ptr, out_buffer.len);
- switch (errno(rc)) {
- .SUCCESS => return out_buffer[0..@bitCast(rc)],
- .ACCES => return error.AccessDenied,
- .FAULT => unreachable,
- .INVAL => return error.NotLink,
- .IO => return error.FileSystem,
- .LOOP => return error.SymLinkLoop,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- .NOTDIR => return error.NotDir,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// Similar to `readlink` except reads value of a symbolink link **relative** to `dirfd` directory handle.
-/// On Windows, `file_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, `file_path` should be encoded as valid UTF-8.
-/// On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
-/// The return value is a slice of `out_buffer` from index 0.
-/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// On WASI, the result is encoded as UTF-8.
-/// On other platforms, the result is an opaque sequence of bytes with no particular encoding.
-/// See also `readlinkatWasi`, `realinkatZ` and `realinkatW`.
-pub fn readlinkat(dirfd: fd_t, file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 {
- if (native_os == .wasi and !builtin.link_libc) {
- return readlinkatWasi(dirfd, file_path, out_buffer);
- }
- if (native_os == .windows) {
- var file_path_w = try windows.sliceToPrefixedFileW(dirfd, file_path);
- const result_w = try readlinkatW(dirfd, file_path_w.span(), &file_path_w.data);
-
- const len = std.unicode.calcWtf8Len(result_w);
- if (len > out_buffer.len) return error.NameTooLong;
-
- const end_index = std.unicode.wtf16LeToWtf8(out_buffer, result_w);
- return out_buffer[0..end_index];
- }
- const file_path_c = try toPosixPath(file_path);
- return readlinkatZ(dirfd, &file_path_c, out_buffer);
-}
-
-/// WASI-only. Same as `readlinkat` but targets WASI.
-/// See also `readlinkat`.
-pub fn readlinkatWasi(dirfd: fd_t, file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 {
- var bufused: usize = undefined;
- switch (wasi.path_readlink(dirfd, file_path.ptr, file_path.len, out_buffer.ptr, out_buffer.len, &bufused)) {
- .SUCCESS => return out_buffer[0..bufused],
- .ACCES => return error.AccessDenied,
- .FAULT => unreachable,
- .INVAL => return error.NotLink,
- .IO => return error.FileSystem,
- .LOOP => return error.SymLinkLoop,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- .NOTDIR => return error.NotDir,
- .NOTCAPABLE => return error.AccessDenied,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// Windows-only. Same as `readlinkat` except `file_path` WTF16 LE encoded, NT-prefixed.
-/// The result is encoded as WTF-16 LE.
-///
-/// `file_path` will never be accessed after `out_buffer` has been written to, so it
-/// is safe to reuse a single buffer for both.
-///
-/// See also `readlinkat`.
-pub fn readlinkatW(dirfd: fd_t, file_path: []const u16, out_buffer: []u16) ReadLinkError![]u16 {
- return windows.ReadLink(dirfd, file_path, out_buffer);
-}
-
-/// Same as `readlinkat` except `file_path` is null-terminated.
-/// See also `readlinkat`.
-pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8 {
- if (native_os == .windows) {
- var file_path_w = try windows.cStrToPrefixedFileW(dirfd, file_path);
- const result_w = try readlinkatW(dirfd, file_path_w.span(), &file_path_w.data);
-
- const len = std.unicode.calcWtf8Len(result_w);
- if (len > out_buffer.len) return error.NameTooLong;
-
- const end_index = std.unicode.wtf16LeToWtf8(out_buffer, result_w);
- return out_buffer[0..end_index];
- } else if (native_os == .wasi and !builtin.link_libc) {
- return readlinkat(dirfd, mem.sliceTo(file_path, 0), out_buffer);
- }
- const rc = system.readlinkat(dirfd, file_path, out_buffer.ptr, out_buffer.len);
- switch (errno(rc)) {
- .SUCCESS => return out_buffer[0..@bitCast(rc)],
- .ACCES => return error.AccessDenied,
- .FAULT => unreachable,
- .INVAL => return error.NotLink,
- .IO => return error.FileSystem,
- .LOOP => return error.SymLinkLoop,
- .NAMETOOLONG => return error.NameTooLong,
- .NOENT => return error.FileNotFound,
- .NOMEM => return error.SystemResources,
- .NOTDIR => return error.NotDir,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
pub const SetEidError = error{
InvalidUserId,
PermissionDenied,
@@ -3176,47 +1273,6 @@ pub fn getegid() gid_t {
return system.getegid();
}
-/// Test whether a file descriptor refers to a terminal.
-pub fn isatty(handle: fd_t) bool {
- if (native_os == .windows) {
- if (fs.File.isCygwinPty(.{ .handle = handle }))
- return true;
-
- var out: windows.DWORD = undefined;
- return windows.kernel32.GetConsoleMode(handle, &out) != 0;
- }
- if (builtin.link_libc) {
- return system.isatty(handle) != 0;
- }
- if (native_os == .wasi) {
- var statbuf: wasi.fdstat_t = undefined;
- const err = wasi.fd_fdstat_get(handle, &statbuf);
- if (err != .SUCCESS)
- return false;
-
- // A tty is a character device that we can't seek or tell on.
- if (statbuf.fs_filetype != .CHARACTER_DEVICE)
- return false;
- if (statbuf.fs_rights_base.FD_SEEK or statbuf.fs_rights_base.FD_TELL)
- return false;
-
- return true;
- }
- if (native_os == .linux) {
- while (true) {
- var wsz: winsize = undefined;
- const fd: usize = @bitCast(@as(isize, handle));
- const rc = linux.syscall3(.ioctl, fd, linux.T.IOCGWINSZ, @intFromPtr(&wsz));
- switch (linux.errno(rc)) {
- .SUCCESS => return true,
- .INTR => continue,
- else => return false,
- }
- }
- }
- return system.isatty(handle) != 0;
-}
-
pub const SocketError = error{
/// Permission to create a socket of the specified type and/or
/// pro‐tocol is denied.
@@ -3406,7 +1462,7 @@ pub fn bind(sock: socket_t, addr: *const sockaddr, len: socklen_t) BindError!voi
pub const ListenError = error{
FileDescriptorNotASocket,
- OperationNotSupported,
+ OperationUnsupported,
} || std.Io.net.IpAddress.ListenError || std.Io.net.UnixAddress.ListenError;
pub fn listen(sock: socket_t, backlog: u31) ListenError!void {
@@ -3419,7 +1475,7 @@ pub fn listen(sock: socket_t, backlog: u31) ListenError!void {
.ADDRINUSE => return error.AddressInUse,
.BADF => unreachable,
.NOTSOCK => return error.FileDescriptorNotASocket,
- .OPNOTSUPP => return error.OperationNotSupported,
+ .OPNOTSUPP => return error.OperationUnsupported,
else => |err| return unexpectedErrno(err),
}
}
@@ -4081,7 +2137,7 @@ pub const FanotifyMarkError = error{
SystemResources,
UserMarkQuotaExceeded,
NotDir,
- OperationNotSupported,
+ OperationUnsupported,
PermissionDenied,
NotSameFileSystem,
NameTooLong,
@@ -4121,7 +2177,7 @@ pub fn fanotify_markZ(
.NOMEM => return error.SystemResources,
.NOSPC => return error.UserMarkQuotaExceeded,
.NOTDIR => return error.NotDir,
- .OPNOTSUPP => return error.OperationNotSupported,
+ .OPNOTSUPP => return error.OperationUnsupported,
.PERM => return error.PermissionDenied,
.XDEV => return error.NotSameFileSystem,
else => |err| return unexpectedErrno(err),
@@ -4381,67 +2437,6 @@ pub fn msync(memory: []align(page_size_min) u8, flags: i32) MSyncError!void {
}
}
-pub const AccessError = error{
- AccessDenied,
- PermissionDenied,
- FileNotFound,
- NameTooLong,
- InputOutput,
- SystemResources,
- FileBusy,
- SymLinkLoop,
- ReadOnlyFileSystem,
- /// WASI: file paths must be valid UTF-8.
- /// Windows: file paths provided by the user must be valid WTF-8.
- /// https://wtf-8.codeberg.page/
- BadPathName,
- Canceled,
-} || UnexpectedError;
-
-/// check user's permissions for a file
-///
-/// * On Windows, asserts `path` is valid [WTF-8](https://wtf-8.codeberg.page/).
-/// * On WASI, invalid UTF-8 passed to `path` causes `error.BadPathName`.
-/// * On other platforms, `path` is an opaque sequence of bytes with no particular encoding.
-///
-/// On Windows, `mode` is ignored. This is a POSIX API that is only partially supported by
-/// Windows. See `fs` for the cross-platform file system API.
-pub fn access(path: []const u8, mode: u32) AccessError!void {
- if (native_os == .windows) {
- @compileError("use std.Io instead");
- } else if (native_os == .wasi and !builtin.link_libc) {
- @compileError("wasi doesn't support absolute paths");
- }
- const path_c = try toPosixPath(path);
- return accessZ(&path_c, mode);
-}
-
-/// Same as `access` except `path` is null-terminated.
-pub fn accessZ(path: [*:0]const u8, mode: u32) AccessError!void {
- if (native_os == .windows) {
- @compileError("use std.Io instead");
- } else if (native_os == .wasi and !builtin.link_libc) {
- return access(mem.sliceTo(path, 0), mode);
- }
- switch (errno(system.access(path, mode))) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .ROFS => return error.ReadOnlyFileSystem,
- .LOOP => return error.SymLinkLoop,
- .TXTBSY => return error.FileBusy,
- .NOTDIR => return error.FileNotFound,
- .NOENT => return error.FileNotFound,
- .NAMETOOLONG => return error.NameTooLong,
- .INVAL => unreachable,
- .FAULT => unreachable,
- .IO => return error.InputOutput,
- .NOMEM => return error.SystemResources,
- .ILSEQ => return error.BadPathName,
- else => |err| return unexpectedErrno(err),
- }
-}
-
pub const PipeError = error{
SystemFdQuotaExceeded,
ProcessFdQuotaExceeded,
@@ -4586,177 +2581,6 @@ pub fn gettimeofday(tv: ?*timeval, tz: ?*timezone) void {
}
}
-pub const SeekError = std.Io.File.SeekError;
-
-pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void {
- if (native_os == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
- var result: u64 = undefined;
- switch (errno(system.llseek(fd, offset, &result, SEEK.SET))) {
- .SUCCESS => return,
- .BADF => unreachable, // always a race condition
- .INVAL => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .NXIO => return error.Unseekable,
- else => |err| return unexpectedErrno(err),
- }
- }
- if (native_os == .windows) {
- return windows.SetFilePointerEx_BEGIN(fd, offset);
- }
- if (native_os == .wasi and !builtin.link_libc) {
- var new_offset: wasi.filesize_t = undefined;
- switch (wasi.fd_seek(fd, @bitCast(offset), .SET, &new_offset)) {
- .SUCCESS => return,
- .BADF => unreachable, // always a race condition
- .INVAL => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .NXIO => return error.Unseekable,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek;
- switch (errno(lseek_sym(fd, @bitCast(offset), SEEK.SET))) {
- .SUCCESS => return,
- .BADF => unreachable, // always a race condition
- .INVAL => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .NXIO => return error.Unseekable,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// Repositions read/write file offset relative to the current offset.
-pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void {
- if (native_os == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
- var result: u64 = undefined;
- switch (errno(system.llseek(fd, @bitCast(offset), &result, SEEK.CUR))) {
- .SUCCESS => return,
- .BADF => unreachable, // always a race condition
- .INVAL => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .NXIO => return error.Unseekable,
- else => |err| return unexpectedErrno(err),
- }
- }
- if (native_os == .windows) {
- return windows.SetFilePointerEx_CURRENT(fd, offset);
- }
- if (native_os == .wasi and !builtin.link_libc) {
- var new_offset: wasi.filesize_t = undefined;
- switch (wasi.fd_seek(fd, offset, .CUR, &new_offset)) {
- .SUCCESS => return,
- .BADF => unreachable, // always a race condition
- .INVAL => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .NXIO => return error.Unseekable,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
- const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek;
- switch (errno(lseek_sym(fd, @bitCast(offset), SEEK.CUR))) {
- .SUCCESS => return,
- .BADF => unreachable, // always a race condition
- .INVAL => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .NXIO => return error.Unseekable,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// Repositions read/write file offset relative to the end.
-pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void {
- if (native_os == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
- var result: u64 = undefined;
- switch (errno(system.llseek(fd, @bitCast(offset), &result, SEEK.END))) {
- .SUCCESS => return,
- .BADF => unreachable, // always a race condition
- .INVAL => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .NXIO => return error.Unseekable,
- else => |err| return unexpectedErrno(err),
- }
- }
- if (native_os == .windows) {
- return windows.SetFilePointerEx_END(fd, offset);
- }
- if (native_os == .wasi and !builtin.link_libc) {
- var new_offset: wasi.filesize_t = undefined;
- switch (wasi.fd_seek(fd, offset, .END, &new_offset)) {
- .SUCCESS => return,
- .BADF => unreachable, // always a race condition
- .INVAL => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .NXIO => return error.Unseekable,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
- const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek;
- switch (errno(lseek_sym(fd, @bitCast(offset), SEEK.END))) {
- .SUCCESS => return,
- .BADF => unreachable, // always a race condition
- .INVAL => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .NXIO => return error.Unseekable,
- else => |err| return unexpectedErrno(err),
- }
-}
-
-/// Returns the read/write file offset relative to the beginning.
-pub fn lseek_CUR_get(fd: fd_t) SeekError!u64 {
- if (native_os == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
- var result: u64 = undefined;
- switch (errno(system.llseek(fd, 0, &result, SEEK.CUR))) {
- .SUCCESS => return result,
- .BADF => unreachable, // always a race condition
- .INVAL => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .NXIO => return error.Unseekable,
- else => |err| return unexpectedErrno(err),
- }
- }
- if (native_os == .windows) {
- return windows.SetFilePointerEx_CURRENT_get(fd);
- }
- if (native_os == .wasi and !builtin.link_libc) {
- var new_offset: wasi.filesize_t = undefined;
- switch (wasi.fd_seek(fd, 0, .CUR, &new_offset)) {
- .SUCCESS => return new_offset,
- .BADF => unreachable, // always a race condition
- .INVAL => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .NXIO => return error.Unseekable,
- .NOTCAPABLE => return error.AccessDenied,
- else => |err| return unexpectedErrno(err),
- }
- }
- const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek;
- const rc = lseek_sym(fd, 0, SEEK.CUR);
- switch (errno(rc)) {
- .SUCCESS => return @bitCast(rc),
- .BADF => unreachable, // always a race condition
- .INVAL => return error.Unseekable,
- .OVERFLOW => return error.Unseekable,
- .SPIPE => return error.Unseekable,
- .NXIO => return error.Unseekable,
- else => |err| return unexpectedErrno(err),
- }
-}
-
pub const FcntlError = error{
PermissionDenied,
FileBusy,
@@ -4786,185 +2610,6 @@ pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) FcntlError!usize {
}
}
-pub const FlockError = error{
- WouldBlock,
-
- /// The kernel ran out of memory for allocating file locks
- SystemResources,
-
- /// The underlying filesystem does not support file locks
- FileLocksNotSupported,
-} || UnexpectedError;
-
-/// Depending on the operating system `flock` may or may not interact with
-/// `fcntl` locks made by other processes.
-pub fn flock(fd: fd_t, operation: i32) FlockError!void {
- while (true) {
- const rc = system.flock(fd, operation);
- switch (errno(rc)) {
- .SUCCESS => return,
- .BADF => unreachable,
- .INTR => continue,
- .INVAL => unreachable, // invalid parameters
- .NOLCK => return error.SystemResources,
- .AGAIN => return error.WouldBlock, // TODO: integrate with async instead of just returning an error
- .OPNOTSUPP => return error.FileLocksNotSupported,
- else => |err| return unexpectedErrno(err),
- }
- }
-}
-
-pub const RealPathError = error{
- FileNotFound,
- AccessDenied,
- PermissionDenied,
- NameTooLong,
- NotSupported,
- NotDir,
- SymLinkLoop,
- InputOutput,
- FileTooBig,
- IsDir,
- ProcessFdQuotaExceeded,
- SystemFdQuotaExceeded,
- NoDevice,
- SystemResources,
- NoSpaceLeft,
- FileSystem,
- DeviceBusy,
- ProcessNotFound,
-
- SharingViolation,
- PipeBusy,
-
- /// Windows: file paths provided by the user must be valid WTF-8.
- /// https://wtf-8.codeberg.page/
- BadPathName,
-
- /// On Windows, `\\server` or `\\server\share` was not found.
- NetworkNotFound,
-
- PathAlreadyExists,
-
- /// On Windows, antivirus software is enabled by default. It can be
- /// disabled, but Windows Update sometimes ignores the user's preference
- /// and re-enables it. When enabled, antivirus software on Windows
- /// intercepts file system operations and makes them significantly slower
- /// in addition to possibly failing with this error code.
- AntivirusInterference,
-
- /// On Windows, the volume does not contain a recognized file system. File
- /// system drivers might not be loaded, or the volume may be corrupt.
- UnrecognizedVolume,
-
- Canceled,
-} || UnexpectedError;
-
-/// Return the canonicalized absolute pathname.
-///
-/// Expands all symbolic links and resolves references to `.`, `..`, and
-/// extra `/` characters in `pathname`.
-///
-/// On Windows, `pathname` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
-///
-/// On other platforms, `pathname` is an opaque sequence of bytes with no particular encoding.
-///
-/// The return value is a slice of `out_buffer`, but not necessarily from the beginning.
-///
-/// See also `realpathZ` and `realpathW`.
-///
-/// * On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
-/// * On other platforms, the result is an opaque sequence of bytes with no particular encoding.
-///
-/// Calling this function is usually a bug.
-pub fn realpath(pathname: []const u8, out_buffer: *[max_path_bytes]u8) RealPathError![]u8 {
- if (native_os == .windows) {
- var pathname_w = try windows.sliceToPrefixedFileW(null, pathname);
-
- const wide_slice = try realpathW2(pathname_w.span(), &pathname_w.data);
-
- const end_index = std.unicode.wtf16LeToWtf8(out_buffer, wide_slice);
- return out_buffer[0..end_index];
- } else if (native_os == .wasi and !builtin.link_libc) {
- @compileError("WASI does not support os.realpath");
- }
- const pathname_c = try toPosixPath(pathname);
- return realpathZ(&pathname_c, out_buffer);
-}
-
-/// Same as `realpath` except `pathname` is null-terminated.
-///
-/// Calling this function is usually a bug.
-pub fn realpathZ(pathname: [*:0]const u8, out_buffer: *[max_path_bytes]u8) RealPathError![]u8 {
- if (native_os == .windows) {
- var pathname_w = try windows.cStrToPrefixedFileW(null, pathname);
-
- const wide_slice = try realpathW2(pathname_w.span(), &pathname_w.data);
-
- const end_index = std.unicode.wtf16LeToWtf8(out_buffer, wide_slice);
- return out_buffer[0..end_index];
- } else if (native_os == .wasi and !builtin.link_libc) {
- return realpath(mem.sliceTo(pathname, 0), out_buffer);
- }
- if (!builtin.link_libc) {
- const flags: O = switch (native_os) {
- .linux => .{
- .NONBLOCK = true,
- .CLOEXEC = true,
- .PATH = true,
- },
- else => .{
- .NONBLOCK = true,
- .CLOEXEC = true,
- },
- };
- const fd = openZ(pathname, flags, 0) catch |err| switch (err) {
- error.FileLocksNotSupported => unreachable,
- error.WouldBlock => unreachable,
- error.FileBusy => unreachable, // not asking for write permissions
- else => |e| return e,
- };
- defer close(fd);
-
- return std.os.getFdPath(fd, out_buffer);
- }
- const result_path = std.c.realpath(pathname, out_buffer) orelse switch (@as(E, @enumFromInt(std.c._errno().*))) {
- .SUCCESS => unreachable,
- .INVAL => unreachable,
- .BADF => unreachable,
- .FAULT => unreachable,
- .ACCES => return error.AccessDenied,
- .NOENT => return error.FileNotFound,
- .OPNOTSUPP => return error.NotSupported,
- .NOTDIR => return error.NotDir,
- .NAMETOOLONG => return error.NameTooLong,
- .LOOP => return error.SymLinkLoop,
- .IO => return error.InputOutput,
- else => |err| return unexpectedErrno(err),
- };
- return mem.sliceTo(result_path, 0);
-}
-
-/// Deprecated: use `realpathW2`.
-///
-/// Same as `realpath` except `pathname` is WTF16LE-encoded.
-///
-/// The result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
-///
-/// Calling this function is usually a bug.
-pub fn realpathW(pathname: []const u16, out_buffer: *[max_path_bytes]u8) RealPathError![]u8 {
- return fs.cwd().realpathW(pathname, out_buffer);
-}
-
-/// Same as `realpath` except `pathname` is WTF16LE-encoded.
-///
-/// The result is encoded as WTF16LE.
-///
-/// Calling this function is usually a bug.
-pub fn realpathW2(pathname: []const u16, out_buffer: *[std.os.windows.PATH_MAX_WIDE]u16) RealPathError![]u16 {
- return fs.cwd().realpathW2(pathname, out_buffer);
-}
-
/// Spurious wakeups are possible and no precision of timing is guaranteed.
pub fn nanosleep(seconds: u64, nanoseconds: u64) void {
var req = timespec{
@@ -5049,16 +2694,16 @@ pub fn dl_iterate_phdr(
// Last return value from the callback function.
while (it.next()) |entry| {
- const phdrs: []elf.ElfN.Phdr = if (entry.l_addr != 0) phdrs: {
- const ehdr: *elf.ElfN.Ehdr = @ptrFromInt(entry.l_addr);
+ const phdrs: []elf.ElfN.Phdr = if (entry.addr != 0) phdrs: {
+ const ehdr: *elf.ElfN.Ehdr = @ptrFromInt(entry.addr);
assert(mem.eql(u8, ehdr.ident[0..4], elf.MAGIC));
- const phdrs: [*]elf.ElfN.Phdr = @ptrFromInt(entry.l_addr + ehdr.phoff);
+ const phdrs: [*]elf.ElfN.Phdr = @ptrFromInt(entry.addr + ehdr.phoff);
break :phdrs phdrs[0..ehdr.phnum];
} else getSelfPhdrs();
var info: dl_phdr_info = .{
- .addr = entry.l_addr,
- .name = entry.l_name,
+ .addr = entry.addr,
+ .name = entry.name,
.phdr = phdrs.ptr,
.phnum = @intCast(phdrs.len),
};
@@ -5229,74 +2874,6 @@ pub fn sigprocmask(flags: u32, noalias set: ?*const sigset_t, noalias oldset: ?*
}
}
-pub const FutimensError = error{
- /// times is NULL, or both nsec values are UTIME_NOW, and either:
- /// * the effective user ID of the caller does not match the owner
- /// of the file, the caller does not have write access to the
- /// file, and the caller is not privileged (Linux: does not have
- /// either the CAP_FOWNER or the CAP_DAC_OVERRIDE capability);
- /// or,
- /// * the file is marked immutable (see chattr(1)).
- AccessDenied,
-
- /// The caller attempted to change one or both timestamps to a value
- /// other than the current time, or to change one of the timestamps
- /// to the current time while leaving the other timestamp unchanged,
- /// (i.e., times is not NULL, neither nsec field is UTIME_NOW,
- /// and neither nsec field is UTIME_OMIT) and either:
- /// * the caller's effective user ID does not match the owner of
- /// file, and the caller is not privileged (Linux: does not have
- /// the CAP_FOWNER capability); or,
- /// * the file is marked append-only or immutable (see chattr(1)).
- PermissionDenied,
-
- ReadOnlyFileSystem,
-} || UnexpectedError;
-
-pub fn futimens(fd: fd_t, times: ?*const [2]timespec) FutimensError!void {
- if (native_os == .wasi and !builtin.link_libc) {
- // TODO WASI encodes `wasi.fstflags` to signify magic values
- // similar to UTIME_NOW and UTIME_OMIT. Currently, we ignore
- // this here, but we should really handle it somehow.
- const error_code = blk: {
- if (times) |times_arr| {
- const atim = times_arr[0].toTimestamp();
- const mtim = times_arr[1].toTimestamp();
- break :blk wasi.fd_filestat_set_times(fd, atim, mtim, .{
- .ATIM = true,
- .MTIM = true,
- });
- }
-
- break :blk wasi.fd_filestat_set_times(fd, 0, 0, .{
- .ATIM_NOW = true,
- .MTIM_NOW = true,
- });
- };
- switch (error_code) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .BADF => unreachable, // always a race condition
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ROFS => return error.ReadOnlyFileSystem,
- else => |err| return unexpectedErrno(err),
- }
- }
-
- switch (errno(system.futimens(fd, times))) {
- .SUCCESS => return,
- .ACCES => return error.AccessDenied,
- .PERM => return error.PermissionDenied,
- .BADF => unreachable, // always a race condition
- .FAULT => unreachable,
- .INVAL => unreachable,
- .ROFS => return error.ReadOnlyFileSystem,
- else => |err| return unexpectedErrno(err),
- }
-}
-
pub const GetHostNameError = error{PermissionDenied} || UnexpectedError;
pub fn gethostname(name_buffer: *[HOST_NAME_MAX]u8) GetHostNameError![]u8 {
@@ -5612,98 +3189,6 @@ pub fn send(
};
}
-pub const CopyFileRangeError = error{
- FileTooBig,
- InputOutput,
- /// `fd_in` is not open for reading; or `fd_out` is not open for writing;
- /// or the `APPEND` flag is set for `fd_out`.
- FilesOpenedWithWrongFlags,
- IsDir,
- OutOfMemory,
- NoSpaceLeft,
- Unseekable,
- PermissionDenied,
- SwapFile,
- CorruptedData,
-} || PReadError || PWriteError || UnexpectedError;
-
-/// Transfer data between file descriptors at specified offsets.
-///
-/// Returns the number of bytes written, which can less than requested.
-///
-/// The `copy_file_range` call copies `len` bytes from one file descriptor to another. When possible,
-/// this is done within the operating system kernel, which can provide better performance
-/// characteristics than transferring data from kernel to user space and back, such as with
-/// `pread` and `pwrite` calls.
-///
-/// `fd_in` must be a file descriptor opened for reading, and `fd_out` must be a file descriptor
-/// opened for writing. They may be any kind of file descriptor; however, if `fd_in` is not a regular
-/// file system file, it may cause this function to fall back to calling `pread` and `pwrite`, in which case
-/// atomicity guarantees no longer apply.
-///
-/// If `fd_in` and `fd_out` are the same, source and target ranges must not overlap.
-/// The file descriptor seek positions are ignored and not updated.
-/// When `off_in` is past the end of the input file, it successfully reads 0 bytes.
-///
-/// `flags` has different meanings per operating system; refer to the respective man pages.
-///
-/// These systems support in-kernel data copying:
-/// * Linux (cross-filesystem from version 5.3)
-/// * FreeBSD 13.0
-///
-/// Other systems fall back to calling `pread` / `pwrite`.
-///
-/// Maximum offsets on Linux and FreeBSD are `maxInt(i64)`.
-pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len: usize, flags: u32) CopyFileRangeError!usize {
- if (builtin.os.tag == .freebsd or builtin.os.tag == .linux) {
- const use_c = native_os != .linux or
- std.c.versionCheck(if (builtin.abi.isAndroid()) .{ .major = 34, .minor = 0, .patch = 0 } else .{ .major = 2, .minor = 27, .patch = 0 });
- const sys = if (use_c) std.c else linux;
-
- var off_in_copy: i64 = @bitCast(off_in);
- var off_out_copy: i64 = @bitCast(off_out);
-
- while (true) {
- const rc = sys.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags);
- if (native_os == .freebsd) {
- switch (sys.errno(rc)) {
- .SUCCESS => return @intCast(rc),
- .BADF => return error.FilesOpenedWithWrongFlags,
- .FBIG => return error.FileTooBig,
- .IO => return error.InputOutput,
- .ISDIR => return error.IsDir,
- .NOSPC => return error.NoSpaceLeft,
- .INVAL => break, // these may not be regular files, try fallback
- .INTEGRITY => return error.CorruptedData,
- .INTR => continue,
- else => |err| return unexpectedErrno(err),
- }
- } else { // assume linux
- switch (sys.errno(rc)) {
- .SUCCESS => return @intCast(rc),
- .BADF => return error.FilesOpenedWithWrongFlags,
- .FBIG => return error.FileTooBig,
- .IO => return error.InputOutput,
- .ISDIR => return error.IsDir,
- .NOSPC => return error.NoSpaceLeft,
- .INVAL => break, // these may not be regular files, try fallback
- .NOMEM => return error.OutOfMemory,
- .OVERFLOW => return error.Unseekable,
- .PERM => return error.PermissionDenied,
- .TXTBSY => return error.SwapFile,
- .XDEV => break, // support for cross-filesystem copy added in Linux 5.3, use fallback
- else => |err| return unexpectedErrno(err),
- }
- }
- }
- }
-
- var buf: [8 * 4096]u8 = undefined;
- const amt_read = try pread(fd_in, buf[0..@min(buf.len, len)], off_in);
- if (amt_read == 0) return 0;
- return pwrite(fd_out, buf[0..amt_read], off_out);
-}
-
pub const PollError = error{
/// The network subsystem has failed.
NetworkDown,
@@ -5916,7 +3401,7 @@ pub const SetSockOptError = error{
/// Setting the socket option requires more elevated permissions.
PermissionDenied,
- OperationNotSupported,
+ OperationUnsupported,
NetworkDown,
FileDescriptorNotASocket,
SocketNotBound,
@@ -5952,7 +3437,7 @@ pub fn setsockopt(fd: socket_t, level: i32, optname: u32, opt: []const u8) SetSo
.NOBUFS => return error.SystemResources,
.PERM => return error.PermissionDenied,
.NODEV => return error.NoDevice,
- .OPNOTSUPP => return error.OperationNotSupported,
+ .OPNOTSUPP => return error.OperationUnsupported,
else => |err| return unexpectedErrno(err),
}
}
@@ -6118,12 +3603,7 @@ pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t {
}
}
-pub const SyncError = error{
- InputOutput,
- NoSpaceLeft,
- DiskQuota,
- AccessDenied,
-} || UnexpectedError;
+pub const SyncError = std.Io.File.SyncError;
/// Write all pending file contents and metadata modifications to all filesystems.
pub fn sync() void {
@@ -6143,38 +3623,8 @@ pub fn syncfs(fd: fd_t) SyncError!void {
}
}
-/// Write all pending file contents and metadata modifications for the specified file descriptor to the underlying filesystem.
-pub fn fsync(fd: fd_t) SyncError!void {
- if (native_os == .windows) {
- if (windows.kernel32.FlushFileBuffers(fd) != 0)
- return;
- switch (windows.GetLastError()) {
- .SUCCESS => return,
- .INVALID_HANDLE => unreachable,
- .ACCESS_DENIED => return error.AccessDenied, // a sync was performed but the system couldn't update the access time
- .UNEXP_NET_ERR => return error.InputOutput,
- else => return error.InputOutput,
- }
- }
- const rc = system.fsync(fd);
- switch (errno(rc)) {
- .SUCCESS => return,
- .BADF, .INVAL, .ROFS => unreachable,
- .IO => return error.InputOutput,
- .NOSPC => return error.NoSpaceLeft,
- .DQUOT => return error.DiskQuota,
- else => |err| return unexpectedErrno(err),
- }
-}
-
/// Write all pending file contents for the specified file descriptor to the underlying filesystem, but not necessarily the metadata.
pub fn fdatasync(fd: fd_t) SyncError!void {
- if (native_os == .windows) {
- return fsync(fd) catch |err| switch (err) {
- SyncError.AccessDenied => return, // fdatasync doesn't promise that the access time was synced
- else => return err,
- };
- }
const rc = system.fdatasync(fd);
switch (errno(rc)) {
.SUCCESS => return,
@@ -6197,7 +3647,7 @@ pub const PrctlError = error{
/// or PR_MPX_DISABLE_MANAGEMENT
UnsupportedFeature,
/// Can only occur with PR_SET_FP_MODE
- OperationNotSupported,
+ OperationUnsupported,
PermissionDenied,
} || UnexpectedError;
@@ -6221,7 +3671,7 @@ pub fn prctl(option: PR, args: anytype) PrctlError!u31 {
.FAULT => return error.InvalidAddress,
.INVAL => unreachable,
.NODEV, .NXIO => return error.UnsupportedFeature,
- .OPNOTSUPP => return error.OperationNotSupported,
+ .OPNOTSUPP => return error.OperationUnsupported,
.PERM, .BUSY => return error.PermissionDenied,
.RANGE => unreachable,
else => |err| return unexpectedErrno(err),
@@ -6480,7 +3930,7 @@ pub const PtraceError = error{
DeviceBusy,
InputOutput,
NameTooLong,
- OperationNotSupported,
+ OperationUnsupported,
OutOfMemory,
ProcessNotFound,
PermissionDenied,
@@ -6582,7 +4032,7 @@ pub fn ptrace(request: u32, pid: pid_t, addr: usize, data: usize) PtraceError!vo
.INVAL => unreachable,
.PERM => error.PermissionDenied,
.BUSY => error.DeviceBusy,
- .NOTSUP => error.OperationNotSupported,
+ .NOTSUP => error.OperationUnsupported,
else => |err| return unexpectedErrno(err),
},
@@ -6593,7 +4043,7 @@ pub fn ptrace(request: u32, pid: pid_t, addr: usize, data: usize) PtraceError!vo
pub const NameToFileHandleAtError = error{
FileNotFound,
NotDir,
- OperationNotSupported,
+ OperationUnsupported,
NameTooLong,
Unexpected,
};
@@ -6622,7 +4072,7 @@ pub fn name_to_handle_atZ(
.INVAL => unreachable, // bad flags, or handle_bytes too big
.NOENT => return error.FileNotFound,
.NOTDIR => return error.NotDir,
- .OPNOTSUPP => return error.OperationNotSupported,
+ .OPNOTSUPP => return error.OperationUnsupported,
.OVERFLOW => return error.NameTooLong,
else => |err| return unexpectedErrno(err),
}
diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig
index 3bb5e64c73..64845d15ee 100644
--- a/lib/std/posix/test.zig
+++ b/lib/std/posix/test.zig
@@ -1,34 +1,24 @@
+const builtin = @import("builtin");
+const native_os = builtin.target.os.tag;
+const AtomicRmwOp = std.builtin.AtomicRmwOp;
+const AtomicOrder = std.builtin.AtomicOrder;
+
const std = @import("../std.zig");
+const Io = std.Io;
+const Dir = std.Io.Dir;
const posix = std.posix;
-const testing = std.testing;
-const expect = testing.expect;
-const expectEqual = testing.expectEqual;
-const expectError = testing.expectError;
-const fs = std.fs;
const mem = std.mem;
const elf = std.elf;
const linux = std.os.linux;
+const AT = std.posix.AT;
-const a = std.testing.allocator;
-
-const builtin = @import("builtin");
-const AtomicRmwOp = std.builtin.AtomicRmwOp;
-const AtomicOrder = std.builtin.AtomicOrder;
-const native_os = builtin.target.os.tag;
+const testing = std.testing;
+const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
+const expectEqualSlices = std.testing.expectEqualSlices;
+const expectEqualStrings = std.testing.expectEqualStrings;
+const expectError = std.testing.expectError;
const tmpDir = std.testing.tmpDir;
-const AT = posix.AT;
-
-// NOTE: several additional tests are in test/standalone/posix/. Any tests that mutate
-// process-wide POSIX state (cwd, signals, etc) cannot be Zig unit tests and should be over there.
-
-// https://github.com/ziglang/zig/issues/20288
-test "WTF-8 to WTF-16 conversion buffer overflows" {
- if (native_os != .windows) return error.SkipZigTest;
-
- const input_wtf8 = "\u{10FFFF}" ** 16385;
- try expectError(error.NameTooLong, posix.chdir(input_wtf8));
- try expectError(error.NameTooLong, posix.chdirZ(input_wtf8));
-}
test "check WASI CWD" {
if (native_os == .wasi) {
@@ -43,206 +33,6 @@ test "check WASI CWD" {
}
}
-test "open smoke test" {
- if (native_os == .wasi) return error.SkipZigTest;
- if (native_os == .windows) return error.SkipZigTest;
- if (native_os == .openbsd) return error.SkipZigTest;
-
- // TODO verify file attributes using `fstat`
-
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- const base_path = try tmp.dir.realpathAlloc(a, ".");
- defer a.free(base_path);
-
- const mode: posix.mode_t = if (native_os == .windows) 0 else 0o666;
-
- {
- // Create some file using `open`.
- const file_path = try fs.path.join(a, &.{ base_path, "some_file" });
- defer a.free(file_path);
- const fd = try posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true, .EXCL = true }, mode);
- posix.close(fd);
- }
-
- {
- // Try this again with the same flags. This op should fail with error.PathAlreadyExists.
- const file_path = try fs.path.join(a, &.{ base_path, "some_file" });
- defer a.free(file_path);
- try expectError(error.PathAlreadyExists, posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true, .EXCL = true }, mode));
- }
-
- {
- // Try opening without `EXCL` flag.
- const file_path = try fs.path.join(a, &.{ base_path, "some_file" });
- defer a.free(file_path);
- const fd = try posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true }, mode);
- posix.close(fd);
- }
-
- {
- // Try opening as a directory which should fail.
- const file_path = try fs.path.join(a, &.{ base_path, "some_file" });
- defer a.free(file_path);
- try expectError(error.NotDir, posix.open(file_path, .{ .ACCMODE = .RDWR, .DIRECTORY = true }, mode));
- }
-
- {
- // Create some directory
- const file_path = try fs.path.join(a, &.{ base_path, "some_dir" });
- defer a.free(file_path);
- try posix.mkdir(file_path, mode);
- }
-
- {
- // Open dir using `open`
- const file_path = try fs.path.join(a, &.{ base_path, "some_dir" });
- defer a.free(file_path);
- const fd = try posix.open(file_path, .{ .ACCMODE = .RDONLY, .DIRECTORY = true }, mode);
- posix.close(fd);
- }
-
- {
- // Try opening as file which should fail.
- const file_path = try fs.path.join(a, &.{ base_path, "some_dir" });
- defer a.free(file_path);
- try expectError(error.IsDir, posix.open(file_path, .{ .ACCMODE = .RDWR }, mode));
- }
-}
-
-test "readlink on Windows" {
- if (native_os != .windows) return error.SkipZigTest;
-
- try testReadlink("C:\\ProgramData", "C:\\Users\\All Users");
- try testReadlink("C:\\Users\\Default", "C:\\Users\\Default User");
- try testReadlink("C:\\Users", "C:\\Documents and Settings");
-}
-
-fn testReadlink(target_path: []const u8, symlink_path: []const u8) !void {
- var buffer: [fs.max_path_bytes]u8 = undefined;
- const given = try posix.readlink(symlink_path, buffer[0..]);
- try expect(mem.eql(u8, target_path, given));
-}
-
-fn getLinkInfo(fd: posix.fd_t) !struct { posix.ino_t, posix.nlink_t } {
- if (native_os == .linux) {
- const stx = try linux.wrapped.statx(
- fd,
- "",
- posix.AT.EMPTY_PATH,
- .{ .INO = true, .NLINK = true },
- );
- std.debug.assert(stx.mask.INO);
- std.debug.assert(stx.mask.NLINK);
- return .{ stx.ino, stx.nlink };
- }
-
- const st = try posix.fstat(fd);
- return .{ st.ino, st.nlink };
-}
-
-test "linkat with different directories" {
- switch (native_os) {
- .wasi, .linux, .illumos => {},
- else => return error.SkipZigTest,
- }
-
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- const target_name = "link-target";
- const link_name = "newlink";
-
- const subdir = try tmp.dir.makeOpenPath("subdir", .{});
-
- defer tmp.dir.deleteFile(target_name) catch {};
- try tmp.dir.writeFile(.{ .sub_path = target_name, .data = "example" });
-
- // Test 1: link from file in subdir back up to target in parent directory
- try posix.linkat(tmp.dir.fd, target_name, subdir.fd, link_name, 0);
-
- const efd = try tmp.dir.openFile(target_name, .{});
- defer efd.close();
-
- const nfd = try subdir.openFile(link_name, .{});
- defer nfd.close();
-
- {
- const eino, _ = try getLinkInfo(efd.handle);
- const nino, const nlink = try getLinkInfo(nfd.handle);
- try testing.expectEqual(eino, nino);
- try testing.expectEqual(@as(posix.nlink_t, 2), nlink);
- }
-
- // Test 2: remove link
- try posix.unlinkat(subdir.fd, link_name, 0);
- _, const elink = try getLinkInfo(efd.handle);
- try testing.expectEqual(@as(posix.nlink_t, 1), elink);
-}
-
-test "fstatat" {
- if (posix.Stat == void) return error.SkipZigTest;
- if (native_os == .wasi and !builtin.link_libc) return error.SkipZigTest;
-
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- // create dummy file
- const contents = "nonsense";
- try tmp.dir.writeFile(.{ .sub_path = "file.txt", .data = contents });
-
- // fetch file's info on the opened fd directly
- const file = try tmp.dir.openFile("file.txt", .{});
- const stat = try posix.fstat(file.handle);
- defer file.close();
-
- // now repeat but using `fstatat` instead
- const statat = try posix.fstatat(tmp.dir.fd, "file.txt", posix.AT.SYMLINK_NOFOLLOW);
-
- try expectEqual(stat.dev, statat.dev);
- try expectEqual(stat.ino, statat.ino);
- try expectEqual(stat.nlink, statat.nlink);
- try expectEqual(stat.mode, statat.mode);
- try expectEqual(stat.uid, statat.uid);
- try expectEqual(stat.gid, statat.gid);
- try expectEqual(stat.rdev, statat.rdev);
- try expectEqual(stat.size, statat.size);
- try expectEqual(stat.blksize, statat.blksize);
- // The stat.blocks/statat.blocks count is managed by the filesystem and may
- // change if the file is stored in a journal or "inline".
- // try expectEqual(stat.blocks, statat.blocks);
-}
-
-test "readlinkat" {
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- // create file
- try tmp.dir.writeFile(.{ .sub_path = "file.txt", .data = "nonsense" });
-
- // create a symbolic link
- if (native_os == .windows) {
- std.os.windows.CreateSymbolicLink(
- tmp.dir.fd,
- &[_]u16{ 'l', 'i', 'n', 'k' },
- &[_:0]u16{ 'f', 'i', 'l', 'e', '.', 't', 'x', 't' },
- false,
- ) catch |err| switch (err) {
- // Symlink requires admin privileges on windows, so this test can legitimately fail.
- error.AccessDenied => return error.SkipZigTest,
- else => return err,
- };
- } else {
- try posix.symlinkat("file.txt", tmp.dir.fd, "link");
- }
-
- // read the link
- var buffer: [fs.max_path_bytes]u8 = undefined;
- const read_link = try posix.readlinkat(tmp.dir.fd, "link", buffer[0..]);
- try expect(mem.eql(u8, "file.txt", read_link));
-}
-
test "getrandom" {
var buf_a: [50]u8 = undefined;
var buf_b: [50]u8 = undefined;
@@ -273,7 +63,7 @@ test "sigaltstack" {
// Setting a stack size less than MINSIGSTKSZ returns ENOMEM
st.flags = 0;
st.size = 1;
- try testing.expectError(error.SizeTooSmall, posix.sigaltstack(&st, null));
+ try expectError(error.SizeTooSmall, posix.sigaltstack(&st, null));
}
// If the type is not available use void to avoid erroring out when `iter_fn` is
@@ -345,7 +135,7 @@ test "pipe" {
try expect((try posix.write(fds[1], "hello")) == 5);
var buf: [16]u8 = undefined;
try expect((try posix.read(fds[0], buf[0..])) == 5);
- try testing.expectEqualSlices(u8, buf[0..5], "hello");
+ try expectEqualSlices(u8, buf[0..5], "hello");
posix.close(fds[1]);
posix.close(fds[0]);
}
@@ -356,6 +146,8 @@ test "argsAlloc" {
}
test "memfd_create" {
+ const io = testing.io;
+
// memfd_create is only supported by linux and freebsd.
switch (native_os) {
.linux => {},
@@ -366,21 +158,22 @@ test "memfd_create" {
else => return error.SkipZigTest,
}
- const fd = try posix.memfd_create("test", 0);
- defer posix.close(fd);
- try expect((try posix.write(fd, "test")) == 4);
- try posix.lseek_SET(fd, 0);
+ const file: Io.File = .{ .handle = try posix.memfd_create("test", 0) };
+ defer file.close(io);
+ try file.writePositionalAll(io, "test", 0);
var buf: [10]u8 = undefined;
- const bytes_read = try posix.read(fd, &buf);
+ const bytes_read = try file.readPositionalAll(io, &buf, 0);
try expect(bytes_read == 4);
- try expect(mem.eql(u8, buf[0..4], "test"));
+ try expectEqualStrings("test", buf[0..4]);
}
test "mmap" {
if (native_os == .windows or native_os == .wasi)
return error.SkipZigTest;
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -396,14 +189,14 @@ test "mmap" {
);
defer posix.munmap(data);
- try testing.expectEqual(@as(usize, 1234), data.len);
+ try expectEqual(@as(usize, 1234), data.len);
// By definition the data returned by mmap is zero-filled
- try testing.expect(mem.eql(u8, data, &[_]u8{0x00} ** 1234));
+ try expect(mem.eql(u8, data, &[_]u8{0x00} ** 1234));
// Make sure the memory is writeable as requested
@memset(data, 0x55);
- try testing.expect(mem.eql(u8, data, &[_]u8{0x55} ** 1234));
+ try expect(mem.eql(u8, data, &[_]u8{0x55} ** 1234));
}
const test_out_file = "os_tmp_test";
@@ -412,10 +205,10 @@ test "mmap" {
// Create a file used for testing mmap() calls with a file descriptor
{
- const file = try tmp.dir.createFile(test_out_file, .{});
- defer file.close();
+ const file = try tmp.dir.createFile(io, test_out_file, .{});
+ defer file.close(io);
- var stream = file.writer(&.{});
+ var stream = file.writer(io, &.{});
var i: usize = 0;
while (i < alloc_size / @sizeOf(u32)) : (i += 1) {
@@ -425,8 +218,8 @@ test "mmap" {
// Map the whole file
{
- const file = try tmp.dir.openFile(test_out_file, .{});
- defer file.close();
+ const file = try tmp.dir.openFile(io, test_out_file, .{});
+ defer file.close(io);
const data = try posix.mmap(
null,
@@ -442,7 +235,7 @@ test "mmap" {
var i: usize = 0;
while (i < alloc_size / @sizeOf(u32)) : (i += 1) {
- try testing.expectEqual(i, try stream.takeInt(u32, .little));
+ try expectEqual(i, try stream.takeInt(u32, .little));
}
}
@@ -450,8 +243,8 @@ test "mmap" {
// Map the upper half of the file
{
- const file = try tmp.dir.openFile(test_out_file, .{});
- defer file.close();
+ const file = try tmp.dir.openFile(io, test_out_file, .{});
+ defer file.close(io);
const data = try posix.mmap(
null,
@@ -467,7 +260,7 @@ test "mmap" {
var i: usize = alloc_size / 2 / @sizeOf(u32);
while (i < alloc_size / @sizeOf(u32)) : (i += 1) {
- try testing.expectEqual(i, try stream.takeInt(u32, .little));
+ try expectEqual(i, try stream.takeInt(u32, .little));
}
}
}
@@ -476,13 +269,15 @@ test "fcntl" {
if (native_os == .windows or native_os == .wasi)
return error.SkipZigTest;
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
const test_out_file = "os_tmp_test";
- const file = try tmp.dir.createFile(test_out_file, .{});
- defer file.close();
+ const file = try tmp.dir.createFile(io, test_out_file, .{});
+ defer file.close(io);
// Note: The test assumes createFile opens the file with CLOEXEC
{
@@ -522,18 +317,20 @@ test "sync" {
test "fsync" {
switch (native_os) {
- .linux, .windows, .illumos => {},
+ .linux, .illumos => {},
else => return error.SkipZigTest,
}
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
const test_out_file = "os_tmp_test";
- const file = try tmp.dir.createFile(test_out_file, .{});
- defer file.close();
+ const file = try tmp.dir.createFile(io, test_out_file, .{});
+ defer file.close(io);
- try posix.fsync(file.handle);
+ try file.sync(io);
try posix.fdatasync(file.handle);
}
@@ -571,9 +368,9 @@ test "sigrtmin/max" {
return error.SkipZigTest;
}
- try std.testing.expect(posix.sigrtmin() >= 32);
- try std.testing.expect(posix.sigrtmin() >= posix.system.sigrtmin());
- try std.testing.expect(posix.sigrtmin() < posix.system.sigrtmax());
+ try expect(posix.sigrtmin() >= 32);
+ try expect(posix.sigrtmin() >= posix.system.sigrtmin());
+ try expect(posix.sigrtmin() < posix.system.sigrtmax());
}
test "sigset empty/full" {
@@ -646,27 +443,29 @@ test "dup & dup2" {
else => return error.SkipZigTest,
}
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
{
- var file = try tmp.dir.createFile("os_dup_test", .{});
- defer file.close();
+ var file = try tmp.dir.createFile(io, "os_dup_test", .{});
+ defer file.close(io);
- var duped = std.fs.File{ .handle = try posix.dup(file.handle) };
- defer duped.close();
- try duped.writeAll("dup");
+ var duped = Io.File{ .handle = try posix.dup(file.handle) };
+ defer duped.close(io);
+ try duped.writeStreamingAll(io, "dup");
// Tests aren't run in parallel so using the next fd shouldn't be an issue.
const new_fd = duped.handle + 1;
try posix.dup2(file.handle, new_fd);
- var dup2ed = std.fs.File{ .handle = new_fd };
- defer dup2ed.close();
- try dup2ed.writeAll("dup2");
+ var dup2ed = Io.File{ .handle = new_fd };
+ defer dup2ed.close(io);
+ try dup2ed.writeStreamingAll(io, "dup2");
}
var buffer: [8]u8 = undefined;
- try testing.expectEqualStrings("dupdup2", try tmp.dir.readFile("os_dup_test", &buffer));
+ try expectEqualStrings("dupdup2", try tmp.dir.readFile(io, "os_dup_test", &buffer));
}
test "getpid" {
@@ -684,208 +483,78 @@ test "getppid" {
try expect(posix.getppid() >= 0);
}
-test "writev longer than IOV_MAX" {
- if (native_os == .windows or native_os == .wasi) return error.SkipZigTest;
-
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- var file = try tmp.dir.createFile("pwritev", .{});
- defer file.close();
-
- const iovecs = [_]posix.iovec_const{.{ .base = "a", .len = 1 }} ** (posix.IOV_MAX + 1);
- const amt = try file.writev(&iovecs);
- try testing.expectEqual(@as(usize, posix.IOV_MAX), amt);
-}
-
-test "POSIX file locking with fcntl" {
- if (native_os == .windows or native_os == .wasi) {
- // Not POSIX.
- return error.SkipZigTest;
- }
-
- if (true) {
- // https://github.com/ziglang/zig/issues/11074
- return error.SkipZigTest;
- }
-
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- // Create a temporary lock file
- var file = try tmp.dir.createFile("lock", .{ .read = true });
- defer file.close();
- try file.setEndPos(2);
- const fd = file.handle;
-
- // Place an exclusive lock on the first byte, and a shared lock on the second byte:
- var struct_flock = std.mem.zeroInit(posix.Flock, .{ .type = posix.F.WRLCK });
- _ = try posix.fcntl(fd, posix.F.SETLK, @intFromPtr(&struct_flock));
- struct_flock.start = 1;
- struct_flock.type = posix.F.RDLCK;
- _ = try posix.fcntl(fd, posix.F.SETLK, @intFromPtr(&struct_flock));
-
- // Check the locks in a child process:
- const pid = try posix.fork();
- if (pid == 0) {
- // child expects be denied the exclusive lock:
- struct_flock.start = 0;
- struct_flock.type = posix.F.WRLCK;
- try expectError(error.Locked, posix.fcntl(fd, posix.F.SETLK, @intFromPtr(&struct_flock)));
- // child expects to get the shared lock:
- struct_flock.start = 1;
- struct_flock.type = posix.F.RDLCK;
- _ = try posix.fcntl(fd, posix.F.SETLK, @intFromPtr(&struct_flock));
- // child waits for the exclusive lock in order to test deadlock:
- struct_flock.start = 0;
- struct_flock.type = posix.F.WRLCK;
- _ = try posix.fcntl(fd, posix.F.SETLKW, @intFromPtr(&struct_flock));
- // child exits without continuing:
- posix.exit(0);
- } else {
- // parent waits for child to get shared lock:
- std.Thread.sleep(1 * std.time.ns_per_ms);
- // parent expects deadlock when attempting to upgrade the shared lock to exclusive:
- struct_flock.start = 1;
- struct_flock.type = posix.F.WRLCK;
- try expectError(error.DeadLock, posix.fcntl(fd, posix.F.SETLKW, @intFromPtr(&struct_flock)));
- // parent releases exclusive lock:
- struct_flock.start = 0;
- struct_flock.type = posix.F.UNLCK;
- _ = try posix.fcntl(fd, posix.F.SETLK, @intFromPtr(&struct_flock));
- // parent releases shared lock:
- struct_flock.start = 1;
- struct_flock.type = posix.F.UNLCK;
- _ = try posix.fcntl(fd, posix.F.SETLK, @intFromPtr(&struct_flock));
- // parent waits for child:
- const result = posix.waitpid(pid, 0);
- try expect(result.status == 0 * 256);
- }
-}
-
test "rename smoke test" {
if (native_os == .wasi) return error.SkipZigTest;
if (native_os == .windows) return error.SkipZigTest;
if (native_os == .openbsd) return error.SkipZigTest;
+ const io = testing.io;
+ const gpa = testing.allocator;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
- const base_path = try tmp.dir.realpathAlloc(a, ".");
- defer a.free(base_path);
+ const base_path = try tmp.dir.realPathFileAlloc(io, ".", gpa);
+ defer gpa.free(base_path);
const mode: posix.mode_t = if (native_os == .windows) 0 else 0o666;
{
// Create some file using `open`.
- const file_path = try fs.path.join(a, &.{ base_path, "some_file" });
- defer a.free(file_path);
+ const file_path = try Dir.path.join(gpa, &.{ base_path, "some_file" });
+ defer gpa.free(file_path);
const fd = try posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true, .EXCL = true }, mode);
posix.close(fd);
// Rename the file
- const new_file_path = try fs.path.join(a, &.{ base_path, "some_other_file" });
- defer a.free(new_file_path);
- try posix.rename(file_path, new_file_path);
+ const new_file_path = try Dir.path.join(gpa, &.{ base_path, "some_other_file" });
+ defer gpa.free(new_file_path);
+ try Io.Dir.renameAbsolute(file_path, new_file_path, io);
}
{
// Try opening renamed file
- const file_path = try fs.path.join(a, &.{ base_path, "some_other_file" });
- defer a.free(file_path);
+ const file_path = try Dir.path.join(gpa, &.{ base_path, "some_other_file" });
+ defer gpa.free(file_path);
const fd = try posix.open(file_path, .{ .ACCMODE = .RDWR }, mode);
posix.close(fd);
}
{
// Try opening original file - should fail with error.FileNotFound
- const file_path = try fs.path.join(a, &.{ base_path, "some_file" });
- defer a.free(file_path);
+ const file_path = try Dir.path.join(gpa, &.{ base_path, "some_file" });
+ defer gpa.free(file_path);
try expectError(error.FileNotFound, posix.open(file_path, .{ .ACCMODE = .RDWR }, mode));
}
{
// Create some directory
- const file_path = try fs.path.join(a, &.{ base_path, "some_dir" });
- defer a.free(file_path);
+ const file_path = try Dir.path.join(gpa, &.{ base_path, "some_dir" });
+ defer gpa.free(file_path);
try posix.mkdir(file_path, mode);
// Rename the directory
- const new_file_path = try fs.path.join(a, &.{ base_path, "some_other_dir" });
- defer a.free(new_file_path);
- try posix.rename(file_path, new_file_path);
+ const new_file_path = try Dir.path.join(gpa, &.{ base_path, "some_other_dir" });
+ defer gpa.free(new_file_path);
+ try Io.Dir.renameAbsolute(file_path, new_file_path, io);
}
{
// Try opening renamed directory
- const file_path = try fs.path.join(a, &.{ base_path, "some_other_dir" });
- defer a.free(file_path);
+ const file_path = try Dir.path.join(gpa, &.{ base_path, "some_other_dir" });
+ defer gpa.free(file_path);
const fd = try posix.open(file_path, .{ .ACCMODE = .RDONLY, .DIRECTORY = true }, mode);
posix.close(fd);
}
{
// Try opening original directory - should fail with error.FileNotFound
- const file_path = try fs.path.join(a, &.{ base_path, "some_dir" });
- defer a.free(file_path);
+ const file_path = try Dir.path.join(gpa, &.{ base_path, "some_dir" });
+ defer gpa.free(file_path);
try expectError(error.FileNotFound, posix.open(file_path, .{ .ACCMODE = .RDONLY, .DIRECTORY = true }, mode));
}
}
-test "access smoke test" {
- if (native_os == .wasi) return error.SkipZigTest;
- if (native_os == .windows) return error.SkipZigTest;
- if (native_os == .openbsd) return error.SkipZigTest;
-
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- const base_path = try tmp.dir.realpathAlloc(a, ".");
- defer a.free(base_path);
-
- const mode: posix.mode_t = if (native_os == .windows) 0 else 0o666;
- {
- // Create some file using `open`.
- const file_path = try fs.path.join(a, &.{ base_path, "some_file" });
- defer a.free(file_path);
- const fd = try posix.open(file_path, .{ .ACCMODE = .RDWR, .CREAT = true, .EXCL = true }, mode);
- posix.close(fd);
- }
-
- {
- // Try to access() the file
- const file_path = try fs.path.join(a, &.{ base_path, "some_file" });
- defer a.free(file_path);
- if (native_os == .windows) {
- try posix.access(file_path, posix.F_OK);
- } else {
- try posix.access(file_path, posix.F_OK | posix.W_OK | posix.R_OK);
- }
- }
-
- {
- // Try to access() a non-existent file - should fail with error.FileNotFound
- const file_path = try fs.path.join(a, &.{ base_path, "some_other_file" });
- defer a.free(file_path);
- try expectError(error.FileNotFound, posix.access(file_path, posix.F_OK));
- }
-
- {
- // Create some directory
- const file_path = try fs.path.join(a, &.{ base_path, "some_dir" });
- defer a.free(file_path);
- try posix.mkdir(file_path, mode);
- }
-
- {
- // Try to access() the directory
- const file_path = try fs.path.join(a, &.{ base_path, "some_dir" });
- defer a.free(file_path);
-
- try posix.access(file_path, posix.F_OK);
- }
-}
-
test "timerfd" {
if (native_os != .linux) return error.SkipZigTest;
@@ -903,142 +572,3 @@ test "timerfd" {
const expect_disarmed_timer: linux.itimerspec = .{ .it_interval = .{ .sec = 0, .nsec = 0 }, .it_value = .{ .sec = 0, .nsec = 0 } };
try expectEqual(expect_disarmed_timer, git);
}
-
-test "isatty" {
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- var file = try tmp.dir.createFile("foo", .{});
- defer file.close();
-
- try expectEqual(posix.isatty(file.handle), false);
-}
-
-test "pread with empty buffer" {
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- var file = try tmp.dir.createFile("pread_empty", .{ .read = true });
- defer file.close();
-
- const bytes = try a.alloc(u8, 0);
- defer a.free(bytes);
-
- const rc = try posix.pread(file.handle, bytes, 0);
- try expectEqual(rc, 0);
-}
-
-test "write with empty buffer" {
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- var file = try tmp.dir.createFile("write_empty", .{});
- defer file.close();
-
- const bytes = try a.alloc(u8, 0);
- defer a.free(bytes);
-
- const rc = try posix.write(file.handle, bytes);
- try expectEqual(rc, 0);
-}
-
-test "pwrite with empty buffer" {
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- var file = try tmp.dir.createFile("pwrite_empty", .{});
- defer file.close();
-
- const bytes = try a.alloc(u8, 0);
- defer a.free(bytes);
-
- const rc = try posix.pwrite(file.handle, bytes, 0);
- try expectEqual(rc, 0);
-}
-
-fn getFileMode(dir: posix.fd_t, path: []const u8) !posix.mode_t {
- const path_z = try posix.toPosixPath(path);
- const mode: posix.mode_t = if (native_os == .linux) blk: {
- const stx = try linux.wrapped.statx(
- dir,
- &path_z,
- posix.AT.SYMLINK_NOFOLLOW,
- .{ .MODE = true },
- );
- std.debug.assert(stx.mask.MODE);
- break :blk stx.mode;
- } else blk: {
- const st = try posix.fstatatZ(dir, &path_z, posix.AT.SYMLINK_NOFOLLOW);
- break :blk st.mode;
- };
-
- return mode & 0b111_111_111;
-}
-
-fn expectMode(dir: posix.fd_t, file: []const u8, mode: posix.mode_t) !void {
- const actual = try getFileMode(dir, file);
- try expectEqual(mode, actual & 0b111_111_111);
-}
-
-test "fchmodat smoke test" {
- if (!std.fs.has_executable_bit) return error.SkipZigTest;
-
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
-
- try expectError(error.FileNotFound, posix.fchmodat(tmp.dir.fd, "regfile", 0o666, 0));
- const fd = try posix.openat(
- tmp.dir.fd,
- "regfile",
- .{ .ACCMODE = .WRONLY, .CREAT = true, .EXCL = true, .TRUNC = true },
- 0o644,
- );
- posix.close(fd);
-
- try posix.symlinkat("regfile", tmp.dir.fd, "symlink");
- const sym_mode = try getFileMode(tmp.dir.fd, "symlink");
-
- try posix.fchmodat(tmp.dir.fd, "regfile", 0o640, 0);
- try expectMode(tmp.dir.fd, "regfile", 0o640);
- try posix.fchmodat(tmp.dir.fd, "regfile", 0o600, posix.AT.SYMLINK_NOFOLLOW);
- try expectMode(tmp.dir.fd, "regfile", 0o600);
-
- try posix.fchmodat(tmp.dir.fd, "symlink", 0o640, 0);
- try expectMode(tmp.dir.fd, "regfile", 0o640);
- try expectMode(tmp.dir.fd, "symlink", sym_mode);
-
- var test_link = true;
- posix.fchmodat(tmp.dir.fd, "symlink", 0o600, posix.AT.SYMLINK_NOFOLLOW) catch |err| switch (err) {
- error.OperationNotSupported => test_link = false,
- else => |e| return e,
- };
- if (test_link)
- try expectMode(tmp.dir.fd, "symlink", 0o600);
- try expectMode(tmp.dir.fd, "regfile", 0o640);
-}
-
-const CommonOpenFlags = packed struct {
- ACCMODE: posix.ACCMODE = .RDONLY,
- CREAT: bool = false,
- EXCL: bool = false,
- LARGEFILE: bool = false,
- DIRECTORY: bool = false,
- CLOEXEC: bool = false,
- NONBLOCK: bool = false,
-
- pub fn lower(cof: CommonOpenFlags) posix.O {
- var result: posix.O = if (native_os == .wasi) .{
- .read = cof.ACCMODE != .WRONLY,
- .write = cof.ACCMODE != .RDONLY,
- } else .{
- .ACCMODE = cof.ACCMODE,
- };
- result.CREAT = cof.CREAT;
- result.EXCL = cof.EXCL;
- result.DIRECTORY = cof.DIRECTORY;
- result.NONBLOCK = cof.NONBLOCK;
- if (@hasField(posix.O, "CLOEXEC")) result.CLOEXEC = cof.CLOEXEC;
- if (@hasField(posix.O, "LARGEFILE")) result.LARGEFILE = cof.LARGEFILE;
- return result;
- }
-};
diff --git a/lib/std/process.zig b/lib/std/process.zig
index a0a26c766f..865376d907 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -1,24 +1,33 @@
-const std = @import("std.zig");
const builtin = @import("builtin");
+const native_os = builtin.os.tag;
+
+const std = @import("std.zig");
+const Io = std.Io;
+const File = std.Io.File;
const fs = std.fs;
const mem = std.mem;
const math = std.math;
-const Allocator = mem.Allocator;
+const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const testing = std.testing;
-const native_os = builtin.os.tag;
const posix = std.posix;
const windows = std.os.windows;
const unicode = std.unicode;
+const max_path_bytes = std.fs.max_path_bytes;
pub const Child = @import("process/Child.zig");
-pub const abort = posix.abort;
-pub const exit = posix.exit;
pub const changeCurDir = posix.chdir;
pub const changeCurDirZ = posix.chdirZ;
pub const GetCwdError = posix.GetCwdError;
+/// This is the global, process-wide protection to coordinate stderr writes.
+///
+/// The primary motivation for recursive mutex here is so that a panic while
+/// stderr mutex is held still dumps the stack trace and other debug
+/// information.
+pub var stderr_thread_mutex: std.Thread.Mutex.Recursive = .init;
+
/// The result is a slice of `out_buffer`, from index `0`.
/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On other platforms, the result is an opaque sequence of bytes with no particular encoding.
@@ -35,7 +44,7 @@ pub const GetCwdAllocError = Allocator.Error || error{CurrentWorkingDirectoryUnl
pub fn getCwdAlloc(allocator: Allocator) GetCwdAllocError![]u8 {
// The use of max_path_bytes here is just a heuristic: most paths will fit
// in stack_buf, avoiding an extra allocation in the common case.
- var stack_buf: [fs.max_path_bytes]u8 = undefined;
+ var stack_buf: [max_path_bytes]u8 = undefined;
var heap_buf: ?[]u8 = null;
defer if (heap_buf) |buf| allocator.free(buf);
@@ -437,25 +446,25 @@ pub fn getEnvVarOwned(allocator: Allocator, key: []const u8) GetEnvVarOwnedError
}
/// On Windows, `key` must be valid WTF-8.
-pub fn hasEnvVarConstant(comptime key: []const u8) bool {
+pub inline fn hasEnvVarConstant(comptime key: []const u8) bool {
if (native_os == .windows) {
const key_w = comptime unicode.wtf8ToWtf16LeStringLiteral(key);
return getenvW(key_w) != null;
} else if (native_os == .wasi and !builtin.link_libc) {
- @compileError("hasEnvVarConstant is not supported for WASI without libc");
+ return false;
} else {
return posix.getenv(key) != null;
}
}
/// On Windows, `key` must be valid WTF-8.
-pub fn hasNonEmptyEnvVarConstant(comptime key: []const u8) bool {
+pub inline fn hasNonEmptyEnvVarConstant(comptime key: []const u8) bool {
if (native_os == .windows) {
const key_w = comptime unicode.wtf8ToWtf16LeStringLiteral(key);
const value = getenvW(key_w) orelse return false;
return value.len != 0;
} else if (native_os == .wasi and !builtin.link_libc) {
- @compileError("hasNonEmptyEnvVarConstant is not supported for WASI without libc");
+ return false;
} else {
const value = posix.getenv(key) orelse return false;
return value.len != 0;
@@ -1571,9 +1580,9 @@ pub fn getUserInfo(name: []const u8) !UserInfo {
/// TODO this reads /etc/passwd. But sometimes the user/id mapping is in something else
/// like NIS, AD, etc. See `man nss` or look at an strace for `id myuser`.
-pub fn posixGetUserInfo(name: []const u8) !UserInfo {
- const file = try std.fs.openFileAbsolute("/etc/passwd", .{});
- defer file.close();
+pub fn posixGetUserInfo(io: Io, name: []const u8) !UserInfo {
+ const file = try Io.Dir.openFileAbsolute(io, "/etc/passwd", .{});
+ defer file.close(io);
var buffer: [4096]u8 = undefined;
var file_reader = file.reader(&buffer);
return posixGetUserInfoPasswdStream(name, &file_reader.interface) catch |err| switch (err) {
@@ -1839,21 +1848,19 @@ pub fn totalSystemMemory() TotalSystemMemoryError!u64 {
}
}
-/// Indicate that we are now terminating with a successful exit code.
-/// In debug builds, this is a no-op, so that the calling code's
-/// cleanup mechanisms are tested and so that external tools that
-/// check for resource leaks can be accurate. In release builds, this
-/// calls exit(0), and does not return.
-pub fn cleanExit() void {
- if (builtin.mode == .Debug) {
- return;
- } else {
- std.debug.lockStdErr();
- exit(0);
- }
+/// Indicate intent to terminate with a successful exit code.
+///
+/// In debug builds, this is a no-op, so that the calling code's cleanup
+/// mechanisms are tested and so that external tools checking for resource
+/// leaks can be accurate. In release builds, this calls `exit` with code zero,
+/// and does not return.
+pub fn cleanExit(io: Io) void {
+ if (builtin.mode == .Debug) return;
+ _ = io.lockStderr(&.{}, .no_color) catch {};
+ exit(0);
}
-/// Raise the open file descriptor limit.
+/// Request ability to have more open file descriptors simultaneously.
///
/// On some systems, this raises the limit before seeing ProcessFdQuotaExceeded
/// errors. On other systems, this does nothing.
@@ -2110,3 +2117,215 @@ pub fn fatal(comptime format: []const u8, format_arguments: anytype) noreturn {
std.log.err(format, format_arguments);
exit(1);
}
+
+pub const ExecutablePathBaseError = error{
+ FileNotFound,
+ AccessDenied,
+ /// The operating system does not support an executable learning its own
+ /// path.
+ OperationUnsupported,
+ NotDir,
+ SymLinkLoop,
+ InputOutput,
+ FileTooBig,
+ IsDir,
+ ProcessFdQuotaExceeded,
+ SystemFdQuotaExceeded,
+ NoDevice,
+ SystemResources,
+ NoSpaceLeft,
+ FileSystem,
+ BadPathName,
+ DeviceBusy,
+ SharingViolation,
+ PipeBusy,
+ NotLink,
+ PathAlreadyExists,
+ /// On Windows, `\\server` or `\\server\share` was not found.
+ NetworkNotFound,
+ ProcessNotFound,
+ /// On Windows, antivirus software is enabled by default. It can be
+ /// disabled, but Windows Update sometimes ignores the user's preference
+ /// and re-enables it. When enabled, antivirus software on Windows
+ /// intercepts file system operations and makes them significantly slower
+ /// in addition to possibly failing with this error code.
+ AntivirusInterference,
+ /// On Windows, the volume does not contain a recognized file system. File
+ /// system drivers might not be loaded, or the volume may be corrupt.
+ UnrecognizedVolume,
+ PermissionDenied,
+} || Io.Cancelable || Io.UnexpectedError;
+
+pub const ExecutablePathAllocError = ExecutablePathBaseError || Allocator.Error;
+
+pub fn executablePathAlloc(io: Io, allocator: Allocator) ExecutablePathAllocError![:0]u8 {
+ var buffer: [max_path_bytes]u8 = undefined;
+ const n = executablePath(io, &buffer) catch |err| switch (err) {
+ error.NameTooLong => unreachable,
+ else => |e| return e,
+ };
+ return allocator.dupeZ(u8, buffer[0..n]);
+}
+
+pub const ExecutablePathError = ExecutablePathBaseError || error{NameTooLong};
+
+/// Get the path to the current executable, following symlinks.
+///
+/// This function may return an error if the current executable
+/// was deleted after spawning.
+///
+/// Returned value is a slice of out_buffer.
+///
+/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On other platforms, the result is an opaque sequence of bytes with no particular encoding.
+///
+/// On Linux, depends on procfs being mounted. If the currently executing binary has
+/// been deleted, the file path looks something like "/a/b/c/exe (deleted)".
+///
+/// See also:
+/// * `executableDirPath` - to obtain only the directory
+/// * `openExecutable` - to obtain only an open file handle
+pub fn executablePath(io: Io, out_buffer: []u8) ExecutablePathError!usize {
+ return io.vtable.processExecutablePath(io.userdata, out_buffer);
+}
+
+/// Get the directory path that contains the current executable.
+///
+/// Returns index into `out_buffer`.
+///
+/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
+/// On other platforms, the result is an opaque sequence of bytes with no particular encoding.
+pub fn executableDirPath(io: Io, out_buffer: []u8) ExecutablePathError!usize {
+ const n = try executablePath(io, out_buffer);
+ // Assert that the OS APIs return absolute paths, and therefore dirname
+ // will not return null.
+ return std.fs.path.dirname(out_buffer[0..n]).?.len;
+}
+
+/// Same as `executableDirPath` except allocates the result.
+pub fn executableDirPathAlloc(io: Io, allocator: Allocator) ExecutablePathAllocError![]u8 {
+ var buffer: [max_path_bytes]u8 = undefined;
+ const dir_path_len = executableDirPath(io, &buffer) catch |err| switch (err) {
+ error.NameTooLong => unreachable,
+ else => |e| return e,
+ };
+ return allocator.dupe(u8, buffer[0..dir_path_len]);
+}
+
+pub const OpenExecutableError = File.OpenError || ExecutablePathError || File.LockError;
+
+pub fn openExecutable(io: Io, flags: File.OpenFlags) OpenExecutableError!File {
+ return io.vtable.processExecutableOpen(io.userdata, flags);
+}
+
+/// Causes abnormal process termination.
+///
+/// If linking against libc, this calls `std.c.abort`. Otherwise it raises
+/// SIGABRT followed by SIGKILL.
+///
+/// Invokes the current signal handler for SIGABRT, if any.
+pub fn abort() noreturn {
+ @branchHint(.cold);
+ // MSVCRT abort() sometimes opens a popup window which is undesirable, so
+ // even when linking libc on Windows we use our own abort implementation.
+ // See https://github.com/ziglang/zig/issues/2071 for more details.
+ if (native_os == .windows) {
+ if (builtin.mode == .Debug and windows.peb().BeingDebugged != 0) {
+ @breakpoint();
+ }
+ windows.ntdll.RtlExitUserProcess(3);
+ }
+ if (!builtin.link_libc and native_os == .linux) {
+ // The Linux man page says that the libc abort() function
+ // "first unblocks the SIGABRT signal", but this is a footgun
+ // for user-defined signal handlers that want to restore some state in
+ // some program sections and crash in others.
+ // So, the user-installed SIGABRT handler is run, if present.
+ posix.raise(.ABRT) catch {};
+
+ // Disable all signal handlers.
+ const filledset = std.os.linux.sigfillset();
+ posix.sigprocmask(posix.SIG.BLOCK, &filledset, null);
+
+ // Only one thread may proceed to the rest of abort().
+ if (!builtin.single_threaded) {
+ const global = struct {
+ var abort_entered: bool = false;
+ };
+ while (@cmpxchgWeak(bool, &global.abort_entered, false, true, .seq_cst, .seq_cst)) |_| {}
+ }
+
+ // Install default handler so that the tkill below will terminate.
+ const sigact: posix.Sigaction = .{
+ .handler = .{ .handler = posix.SIG.DFL },
+ .mask = posix.sigemptyset(),
+ .flags = 0,
+ };
+ posix.sigaction(.ABRT, &sigact, null);
+
+ _ = std.os.linux.tkill(std.os.linux.gettid(), .ABRT);
+
+ var sigabrtmask = posix.sigemptyset();
+ posix.sigaddset(&sigabrtmask, .ABRT);
+ posix.sigprocmask(posix.SIG.UNBLOCK, &sigabrtmask, null);
+
+ // Beyond this point should be unreachable.
+ @as(*allowzero volatile u8, @ptrFromInt(0)).* = 0;
+ posix.raise(.KILL) catch {};
+ exit(127); // Pid 1 might not be signalled in some containers.
+ }
+ switch (native_os) {
+ .uefi, .wasi, .emscripten, .cuda, .amdhsa => @trap(),
+ else => posix.system.abort(),
+ }
+}
+
+/// Exits all threads of the program with the specified status code.
+pub fn exit(status: u8) noreturn {
+ if (builtin.link_libc) {
+ std.c.exit(status);
+ } else switch (native_os) {
+ .windows => windows.ntdll.RtlExitUserProcess(status),
+ .wasi => std.os.wasi.proc_exit(status),
+ .linux => {
+ if (!builtin.single_threaded) std.os.linux.exit_group(status);
+ posix.system.exit(status);
+ },
+ .uefi => {
+ const uefi = std.os.uefi;
+ // exit() is only available if exitBootServices() has not been called yet.
+ // This call to exit should not fail, so we catch-ignore errors.
+ if (uefi.system_table.boot_services) |bs| {
+ bs.exit(uefi.handle, @enumFromInt(status), null) catch {};
+ }
+ // If we can't exit, reboot the system instead.
+ uefi.system_table.runtime_services.resetSystem(.cold, @enumFromInt(status), null);
+ },
+ else => posix.system.exit(status),
+ }
+}
+
+pub const SetCurrentDirError = error{
+ AccessDenied,
+ BadPathName,
+ FileNotFound,
+ FileSystem,
+ NameTooLong,
+ NoDevice,
+ NotDir,
+ OperationUnsupported,
+ UnrecognizedVolume,
+} || Io.Cancelable || Io.UnexpectedError;
+
+/// Changes the current working directory to the open directory handle.
+/// Corresponds to "fchdir" in libc.
+///
+/// This modifies global process state and can have surprising effects in
+/// multithreaded applications. Most applications and especially libraries
+/// should not call this function as a general rule, however it can have use
+/// cases in, for example, implementing a shell, or child process execution.
+///
+/// Calling this function makes code less portable and less reusable.
+pub fn setCurrentDir(io: Io, dir: Io.Dir) !void {
+ return io.vtable.processSetCurrentDir(io.userdata, dir);
+}
diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig
index 5d4d65ec01..0db02aa824 100644
--- a/lib/std/process/Child.zig
+++ b/lib/std/process/Child.zig
@@ -1,13 +1,14 @@
-const ChildProcess = @This();
+const Child = @This();
const builtin = @import("builtin");
const native_os = builtin.os.tag;
const std = @import("../std.zig");
+const Io = std.Io;
const unicode = std.unicode;
const fs = std.fs;
const process = std.process;
-const File = std.fs.File;
+const File = std.Io.File;
const windows = std.os.windows;
const linux = std.os.linux;
const posix = std.posix;
@@ -30,7 +31,7 @@ pub const Id = switch (native_os) {
id: Id,
thread_handle: if (native_os == .windows) windows.HANDLE else void,
-allocator: mem.Allocator,
+allocator: Allocator,
/// The writing end of the child process's standard input pipe.
/// Usage requires `stdin_behavior == StdIo.Pipe`.
@@ -76,7 +77,7 @@ cwd: ?[]const u8,
/// Set to change the current working directory when spawning the child process.
/// This is not yet implemented for Windows. See https://github.com/ziglang/zig/issues/5190
/// Once that is done, `cwd` will be deprecated in favor of this field.
-cwd_dir: ?fs.Dir = null,
+cwd_dir: ?Io.Dir = null,
err_pipe: if (native_os == .windows) void else ?posix.fd_t,
@@ -228,7 +229,7 @@ pub const StdIo = enum {
};
/// First argument in argv is the executable.
-pub fn init(argv: []const []const u8, allocator: mem.Allocator) ChildProcess {
+pub fn init(argv: []const []const u8, allocator: Allocator) Child {
return .{
.allocator = allocator,
.argv = argv,
@@ -251,7 +252,7 @@ pub fn init(argv: []const []const u8, allocator: mem.Allocator) ChildProcess {
};
}
-pub fn setUserName(self: *ChildProcess, name: []const u8) !void {
+pub fn setUserName(self: *Child, name: []const u8) !void {
const user_info = try process.getUserInfo(name);
self.uid = user_info.uid;
self.gid = user_info.gid;
@@ -259,35 +260,35 @@ pub fn setUserName(self: *ChildProcess, name: []const u8) !void {
/// On success must call `kill` or `wait`.
/// After spawning the `id` is available.
-pub fn spawn(self: *ChildProcess) SpawnError!void {
+pub fn spawn(self: *Child, io: Io) SpawnError!void {
if (!process.can_spawn) {
@compileError("the target operating system cannot spawn processes");
}
if (native_os == .windows) {
- return self.spawnWindows();
+ return self.spawnWindows(io);
} else {
- return self.spawnPosix();
+ return self.spawnPosix(io);
}
}
-pub fn spawnAndWait(self: *ChildProcess) SpawnError!Term {
- try self.spawn();
- return self.wait();
+pub fn spawnAndWait(child: *Child, io: Io) SpawnError!Term {
+ try child.spawn(io);
+ return child.wait(io);
}
/// Forcibly terminates child process and then cleans up all resources.
-pub fn kill(self: *ChildProcess) !Term {
+pub fn kill(self: *Child, io: Io) !Term {
if (native_os == .windows) {
- return self.killWindows(1);
+ return self.killWindows(io, 1);
} else {
- return self.killPosix();
+ return self.killPosix(io);
}
}
-pub fn killWindows(self: *ChildProcess, exit_code: windows.UINT) !Term {
+pub fn killWindows(self: *Child, io: Io, exit_code: windows.UINT) !Term {
if (self.term) |term| {
- self.cleanupStreams();
+ self.cleanupStreams(io);
return term;
}
@@ -303,20 +304,20 @@ pub fn killWindows(self: *ChildProcess, exit_code: windows.UINT) !Term {
},
else => return err,
};
- try self.waitUnwrappedWindows();
+ try self.waitUnwrappedWindows(io);
return self.term.?;
}
-pub fn killPosix(self: *ChildProcess) !Term {
+pub fn killPosix(self: *Child, io: Io) !Term {
if (self.term) |term| {
- self.cleanupStreams();
+ self.cleanupStreams(io);
return term;
}
posix.kill(self.id, posix.SIG.TERM) catch |err| switch (err) {
error.ProcessNotFound => return error.AlreadyTerminated,
else => return err,
};
- self.waitUnwrappedPosix();
+ self.waitUnwrappedPosix(io);
return self.term.?;
}
@@ -324,7 +325,7 @@ pub const WaitError = SpawnError || std.os.windows.GetProcessMemoryInfoError;
/// On some targets, `spawn` may not report all spawn errors, such as `error.InvalidExe`.
/// This function will block until any spawn errors can be reported, and return them.
-pub fn waitForSpawn(self: *ChildProcess) SpawnError!void {
+pub fn waitForSpawn(self: *Child) SpawnError!void {
if (native_os == .windows) return; // `spawn` reports everything
if (self.term) |term| {
_ = term catch |spawn_err| return spawn_err;
@@ -354,15 +355,15 @@ pub fn waitForSpawn(self: *ChildProcess) SpawnError!void {
}
/// Blocks until child process terminates and then cleans up all resources.
-pub fn wait(self: *ChildProcess) WaitError!Term {
+pub fn wait(self: *Child, io: Io) WaitError!Term {
try self.waitForSpawn(); // report spawn errors
if (self.term) |term| {
- self.cleanupStreams();
+ self.cleanupStreams(io);
return term;
}
switch (native_os) {
- .windows => try self.waitUnwrappedWindows(),
- else => self.waitUnwrappedPosix(),
+ .windows => try self.waitUnwrappedWindows(io),
+ else => self.waitUnwrappedPosix(io),
}
self.id = undefined;
return self.term.?;
@@ -380,7 +381,7 @@ pub const RunResult = struct {
///
/// The process must be started with stdout_behavior and stderr_behavior == .Pipe
pub fn collectOutput(
- child: ChildProcess,
+ child: Child,
/// Used for `stdout` and `stderr`.
allocator: Allocator,
stdout: *ArrayList(u8),
@@ -434,11 +435,10 @@ pub const RunError = posix.GetCwdError || posix.ReadError || SpawnError || posix
/// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
/// If it succeeds, the caller owns result.stdout and result.stderr memory.
-pub fn run(args: struct {
- allocator: mem.Allocator,
+pub fn run(allocator: Allocator, io: Io, args: struct {
argv: []const []const u8,
cwd: ?[]const u8 = null,
- cwd_dir: ?fs.Dir = null,
+ cwd_dir: ?Io.Dir = null,
/// Required if unable to access the current env map (e.g. building a
/// library on some platforms).
env_map: ?*const EnvMap = null,
@@ -446,7 +446,7 @@ pub fn run(args: struct {
expand_arg0: Arg0Expand = .no_expand,
progress_node: std.Progress.Node = std.Progress.Node.none,
}) RunError!RunResult {
- var child = ChildProcess.init(args.argv, args.allocator);
+ var child = Child.init(args.argv, allocator);
child.stdin_behavior = .Ignore;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Pipe;
@@ -457,24 +457,24 @@ pub fn run(args: struct {
child.progress_node = args.progress_node;
var stdout: ArrayList(u8) = .empty;
- defer stdout.deinit(args.allocator);
+ defer stdout.deinit(allocator);
var stderr: ArrayList(u8) = .empty;
- defer stderr.deinit(args.allocator);
+ defer stderr.deinit(allocator);
- try child.spawn();
+ try child.spawn(io);
errdefer {
- _ = child.kill() catch {};
+ _ = child.kill(io) catch {};
}
- try child.collectOutput(args.allocator, &stdout, &stderr, args.max_output_bytes);
+ try child.collectOutput(allocator, &stdout, &stderr, args.max_output_bytes);
return .{
- .stdout = try stdout.toOwnedSlice(args.allocator),
- .stderr = try stderr.toOwnedSlice(args.allocator),
- .term = try child.wait(),
+ .stdout = try stdout.toOwnedSlice(allocator),
+ .stderr = try stderr.toOwnedSlice(allocator),
+ .term = try child.wait(io),
};
}
-fn waitUnwrappedWindows(self: *ChildProcess) WaitError!void {
+fn waitUnwrappedWindows(self: *Child, io: Io) WaitError!void {
const result = windows.WaitForSingleObjectEx(self.id, windows.INFINITE, false);
self.term = @as(SpawnError!Term, x: {
@@ -492,11 +492,11 @@ fn waitUnwrappedWindows(self: *ChildProcess) WaitError!void {
posix.close(self.id);
posix.close(self.thread_handle);
- self.cleanupStreams();
+ self.cleanupStreams(io);
return result;
}
-fn waitUnwrappedPosix(self: *ChildProcess) void {
+fn waitUnwrappedPosix(self: *Child, io: Io) void {
const res: posix.WaitPidResult = res: {
if (self.request_resource_usage_statistics) {
switch (native_os) {
@@ -527,25 +527,25 @@ fn waitUnwrappedPosix(self: *ChildProcess) void {
break :res posix.waitpid(self.id, 0);
};
const status = res.status;
- self.cleanupStreams();
+ self.cleanupStreams(io);
self.handleWaitResult(status);
}
-fn handleWaitResult(self: *ChildProcess, status: u32) void {
+fn handleWaitResult(self: *Child, status: u32) void {
self.term = statusToTerm(status);
}
-fn cleanupStreams(self: *ChildProcess) void {
+fn cleanupStreams(self: *Child, io: Io) void {
if (self.stdin) |*stdin| {
- stdin.close();
+ stdin.close(io);
self.stdin = null;
}
if (self.stdout) |*stdout| {
- stdout.close();
+ stdout.close(io);
self.stdout = null;
}
if (self.stderr) |*stderr| {
- stderr.close();
+ stderr.close(io);
self.stderr = null;
}
}
@@ -561,7 +561,7 @@ fn statusToTerm(status: u32) Term {
Term{ .Unknown = status };
}
-fn spawnPosix(self: *ChildProcess) SpawnError!void {
+fn spawnPosix(self: *Child, io: Io) SpawnError!void {
// The child process does need to access (one end of) these pipes. However,
// we must initially set CLOEXEC to avoid a race condition. If another thread
// is racing to spawn a different child process, we don't want it to inherit
@@ -596,7 +596,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void {
error.NoSpaceLeft => unreachable,
error.FileTooBig => unreachable,
error.DeviceBusy => unreachable,
- error.FileLocksNotSupported => unreachable,
+ error.FileLocksUnsupported => unreachable,
error.BadPathName => unreachable, // Windows-only
error.WouldBlock => unreachable,
error.NetworkNotFound => unreachable, // Windows-only
@@ -659,7 +659,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void {
})).ptr;
} else {
// TODO come up with a solution for this.
- @panic("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process");
+ @panic("missing std lib enhancement: std.process.Child implementation has no way to collect the environment variables to forward to the child process");
}
};
@@ -671,41 +671,41 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void {
const pid_result = try posix.fork();
if (pid_result == 0) {
// we are the child
- setUpChildIo(self.stdin_behavior, stdin_pipe[0], posix.STDIN_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err);
- setUpChildIo(self.stdout_behavior, stdout_pipe[1], posix.STDOUT_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err);
- setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err);
+ setUpChildIo(self.stdin_behavior, stdin_pipe[0], posix.STDIN_FILENO, dev_null_fd) catch |err| forkChildErrReport(io, err_pipe[1], err);
+ setUpChildIo(self.stdout_behavior, stdout_pipe[1], posix.STDOUT_FILENO, dev_null_fd) catch |err| forkChildErrReport(io, err_pipe[1], err);
+ setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(io, err_pipe[1], err);
if (self.cwd_dir) |cwd| {
- posix.fchdir(cwd.fd) catch |err| forkChildErrReport(err_pipe[1], err);
+ posix.fchdir(cwd.handle) catch |err| forkChildErrReport(io, err_pipe[1], err);
} else if (self.cwd) |cwd| {
- posix.chdir(cwd) catch |err| forkChildErrReport(err_pipe[1], err);
+ posix.chdir(cwd) catch |err| forkChildErrReport(io, err_pipe[1], err);
}
// Must happen after fchdir above, the cwd file descriptor might be
// equal to prog_fileno and be clobbered by this dup2 call.
- if (prog_pipe[1] != -1) posix.dup2(prog_pipe[1], prog_fileno) catch |err| forkChildErrReport(err_pipe[1], err);
+ if (prog_pipe[1] != -1) posix.dup2(prog_pipe[1], prog_fileno) catch |err| forkChildErrReport(io, err_pipe[1], err);
if (self.gid) |gid| {
- posix.setregid(gid, gid) catch |err| forkChildErrReport(err_pipe[1], err);
+ posix.setregid(gid, gid) catch |err| forkChildErrReport(io, err_pipe[1], err);
}
if (self.uid) |uid| {
- posix.setreuid(uid, uid) catch |err| forkChildErrReport(err_pipe[1], err);
+ posix.setreuid(uid, uid) catch |err| forkChildErrReport(io, err_pipe[1], err);
}
if (self.pgid) |pid| {
- posix.setpgid(0, pid) catch |err| forkChildErrReport(err_pipe[1], err);
+ posix.setpgid(0, pid) catch |err| forkChildErrReport(io, err_pipe[1], err);
}
if (self.start_suspended) {
- posix.kill(posix.getpid(), .STOP) catch |err| forkChildErrReport(err_pipe[1], err);
+ posix.kill(posix.getpid(), .STOP) catch |err| forkChildErrReport(io, err_pipe[1], err);
}
const err = switch (self.expand_arg0) {
.expand => posix.execvpeZ_expandArg0(.expand, argv_buf.ptr[0].?, argv_buf.ptr, envp),
.no_expand => posix.execvpeZ_expandArg0(.no_expand, argv_buf.ptr[0].?, argv_buf.ptr, envp),
};
- forkChildErrReport(err_pipe[1], err);
+ forkChildErrReport(io, err_pipe[1], err);
}
// we are the parent
@@ -750,7 +750,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void {
self.progress_node.setIpcFd(prog_pipe[0]);
}
-fn spawnWindows(self: *ChildProcess) SpawnError!void {
+fn spawnWindows(self: *Child, io: Io) SpawnError!void {
var saAttr = windows.SECURITY_ATTRIBUTES{
.nLength = @sizeOf(windows.SECURITY_ATTRIBUTES),
.bInheritHandle = windows.TRUE,
@@ -880,7 +880,7 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void {
const app_name_wtf8 = self.argv[0];
const app_name_is_absolute = fs.path.isAbsolute(app_name_wtf8);
- // the cwd set in ChildProcess is in effect when choosing the executable path
+ // the cwd set in Child is in effect when choosing the executable path
// to match posix semantics
var cwd_path_w_needs_free = false;
const cwd_path_w = x: {
@@ -953,7 +953,7 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void {
try dir_buf.appendSlice(self.allocator, app_dir);
}
- windowsCreateProcessPathExt(self.allocator, &dir_buf, &app_buf, PATHEXT, &cmd_line_cache, envp_ptr, cwd_w_ptr, flags, &siStartInfo, &piProcInfo) catch |no_path_err| {
+ windowsCreateProcessPathExt(self.allocator, io, &dir_buf, &app_buf, PATHEXT, &cmd_line_cache, envp_ptr, cwd_w_ptr, flags, &siStartInfo, &piProcInfo) catch |no_path_err| {
const original_err = switch (no_path_err) {
// argv[0] contains unsupported characters that will never resolve to a valid exe.
error.InvalidArg0 => return error.FileNotFound,
@@ -965,7 +965,7 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void {
// If the app name had path separators, that disallows PATH searching,
// and there's no need to search the PATH if the app name is absolute.
// We still search the path if the cwd is absolute because of the
- // "cwd set in ChildProcess is in effect when choosing the executable path
+ // "cwd set in Child is in effect when choosing the executable path
// to match posix semantics" behavior--we don't want to skip searching
// the PATH just because we were trying to set the cwd of the child process.
if (app_dirname_w != null or app_name_is_absolute) {
@@ -977,7 +977,7 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void {
dir_buf.clearRetainingCapacity();
try dir_buf.appendSlice(self.allocator, search_path);
- if (windowsCreateProcessPathExt(self.allocator, &dir_buf, &app_buf, PATHEXT, &cmd_line_cache, envp_ptr, cwd_w_ptr, flags, &siStartInfo, &piProcInfo)) {
+ if (windowsCreateProcessPathExt(self.allocator, io, &dir_buf, &app_buf, PATHEXT, &cmd_line_cache, envp_ptr, cwd_w_ptr, flags, &siStartInfo, &piProcInfo)) {
break :run;
} else |err| switch (err) {
// argv[0] contains unsupported characters that will never resolve to a valid exe.
@@ -1039,8 +1039,8 @@ fn destroyPipe(pipe: [2]posix.fd_t) void {
// Child of fork calls this to report an error to the fork parent.
// Then the child exits.
-fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
- writeIntFd(fd, @as(ErrInt, @intFromError(err))) catch {};
+fn forkChildErrReport(io: Io, fd: i32, err: Child.SpawnError) noreturn {
+ writeIntFd(io, fd, @as(ErrInt, @intFromError(err))) catch {};
// If we're linking libc, some naughty applications may have registered atexit handlers
// which we really do not want to run in the fork child. I caught LLVM doing this and
// it caused a deadlock instead of doing an exit syscall. In the words of Avril Lavigne,
@@ -1049,12 +1049,12 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
// The _exit(2) function does nothing but make the exit syscall, unlike exit(3)
std.c._exit(1);
}
- posix.exit(1);
+ posix.system.exit(1);
}
-fn writeIntFd(fd: i32, value: ErrInt) !void {
+fn writeIntFd(io: Io, fd: i32, value: ErrInt) !void {
var buffer: [8]u8 = undefined;
- var fw: std.fs.File.Writer = .initStreaming(.{ .handle = fd }, &buffer);
+ var fw: File.Writer = .initStreaming(.{ .handle = fd }, io, &buffer);
fw.interface.writeInt(u64, value, .little) catch unreachable;
fw.interface.flush() catch return error.SystemResources;
}
@@ -1078,7 +1078,8 @@ const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8);
/// Note: `app_buf` should not contain any leading path separators.
/// Note: If the dir is the cwd, dir_buf should be empty (len = 0).
fn windowsCreateProcessPathExt(
- allocator: mem.Allocator,
+ allocator: Allocator,
+ io: Io,
dir_buf: *ArrayList(u16),
app_buf: *ArrayList(u16),
pathext: [:0]const u16,
@@ -1122,16 +1123,14 @@ fn windowsCreateProcessPathExt(
// Under those conditions, here we will have access to lower level directory
// opening function knowing which implementation we are in. Here, we imitate
// that scenario.
- var threaded: std.Io.Threaded = .init_single_threaded;
- const io = threaded.ioBasic();
-
var dir = dir: {
// needs to be null-terminated
try dir_buf.append(allocator, 0);
defer dir_buf.shrinkRetainingCapacity(dir_path_len);
const dir_path_z = dir_buf.items[0 .. dir_buf.items.len - 1 :0];
const prefixed_path = try windows.wToPrefixedFileW(null, dir_path_z);
- break :dir threaded.dirOpenDirWindows(.cwd(), prefixed_path.span(), .{
+ // TODO eliminate this reference
+ break :dir Io.Threaded.global_single_threaded.dirOpenDirWindows(.cwd(), prefixed_path.span(), .{
.iterate = true,
}) catch return error.FileNotFound;
};
@@ -1525,9 +1524,9 @@ const WindowsCommandLineCache = struct {
script_cmd_line: ?[:0]u16 = null,
cmd_exe_path: ?[:0]u16 = null,
argv: []const []const u8,
- allocator: mem.Allocator,
+ allocator: Allocator,
- fn init(allocator: mem.Allocator, argv: []const []const u8) WindowsCommandLineCache {
+ fn init(allocator: Allocator, argv: []const []const u8) WindowsCommandLineCache {
return .{
.allocator = allocator,
.argv = argv,
@@ -1571,7 +1570,7 @@ const WindowsCommandLineCache = struct {
/// Returns the absolute path of `cmd.exe` within the Windows system directory.
/// The caller owns the returned slice.
-fn windowsCmdExePath(allocator: mem.Allocator) error{ OutOfMemory, Unexpected }![:0]u16 {
+fn windowsCmdExePath(allocator: Allocator) error{ OutOfMemory, Unexpected }![:0]u16 {
var buf = try ArrayList(u16).initCapacity(allocator, 128);
errdefer buf.deinit(allocator);
while (true) {
@@ -1608,7 +1607,7 @@ const ArgvToCommandLineError = error{ OutOfMemory, InvalidWtf8, InvalidArg0 };
///
/// When executing `.bat`/`.cmd` scripts, use `argvToScriptCommandLineWindows` instead.
fn argvToCommandLineWindows(
- allocator: mem.Allocator,
+ allocator: Allocator,
argv: []const []const u8,
) ArgvToCommandLineError![:0]u16 {
var buf = std.array_list.Managed(u8).init(allocator);
@@ -1784,7 +1783,7 @@ const ArgvToScriptCommandLineError = error{
/// Should only be used when spawning `.bat`/`.cmd` scripts, see `argvToCommandLineWindows` otherwise.
/// The `.bat`/`.cmd` file must be known to both have the `.bat`/`.cmd` extension and exist on the filesystem.
fn argvToScriptCommandLineWindows(
- allocator: mem.Allocator,
+ allocator: Allocator,
/// Path to the `.bat`/`.cmd` script. If this path is relative, it is assumed to be relative to the CWD.
/// The script must have been verified to exist at this path before calling this function.
script_path: []const u16,
diff --git a/lib/std/start.zig b/lib/std/start.zig
index a5bec41231..c6a9f06724 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -110,7 +110,7 @@ fn main2() callconv(.c) c_int {
}
fn _start2() callconv(.withStackAlign(.c, 1)) noreturn {
- std.posix.exit(callMain());
+ std.process.exit(callMain());
}
fn spirvMain2() callconv(.kernel) void {
@@ -118,7 +118,7 @@ fn spirvMain2() callconv(.kernel) void {
}
fn wWinMainCRTStartup2() callconv(.c) noreturn {
- std.posix.exit(callMain());
+ std.process.exit(callMain());
}
////////////////////////////////////////////////////////////////////////////////
@@ -627,7 +627,7 @@ fn posixCallMainAndExit(argc_argv_ptr: [*]usize) callconv(.c) noreturn {
for (slice) |func| func();
}
- std.posix.exit(callMainWithArgs(argc, argv, envp));
+ std.process.exit(callMainWithArgs(argc, argv, envp));
}
fn expandStackSize(phdrs: []elf.Phdr) void {
@@ -669,6 +669,11 @@ inline fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 {
std.os.argv = argv[0..argc];
std.os.environ = envp;
+ if (std.Options.debug_threaded_io) |t| {
+ if (@sizeOf(std.Io.Threaded.Argv0) != 0) t.argv0.value = argv[0];
+ t.environ = .{ .block = envp };
+ }
+
std.debug.maybeEnableSegfaultHandler();
return callMain();
@@ -691,6 +696,11 @@ fn main(c_argc: c_int, c_argv: [*][*:0]c_char, c_envp: [*:null]?[*:0]c_char) cal
fn mainWithoutEnv(c_argc: c_int, c_argv: [*][*:0]c_char) callconv(.c) c_int {
std.os.argv = @as([*][*:0]u8, @ptrCast(c_argv))[0..@intCast(c_argc)];
+
+ if (@sizeOf(std.Io.Threaded.Argv0) != 0) {
+ if (std.Options.debug_threaded_io) |t| t.argv0.value = std.os.argv[0];
+ }
+
return callMain();
}
diff --git a/lib/std/std.zig b/lib/std/std.zig
index 5c500d3f55..6ec39306ea 100644
--- a/lib/std/std.zig
+++ b/lib/std/std.zig
@@ -108,14 +108,14 @@ pub const start = @import("start.zig");
const root = @import("root");
-/// Stdlib-wide options that can be overridden by the root file.
+/// Compile-time known settings overridable by the root source file.
pub const options: Options = if (@hasDecl(root, "std_options")) root.std_options else .{};
pub const Options = struct {
enable_segfault_handler: bool = debug.default_enable_segfault_handler,
- /// Function used to implement `std.fs.cwd` for WASI.
- wasiCwd: fn () os.wasi.fd_t = fs.defaultWasiCwd,
+ /// Function used to implement `std.Io.Dir.cwd` for WASI.
+ wasiCwd: fn () os.wasi.fd_t = os.defaultWasiCwd,
/// The current log level.
log_level: log.Level = log.default_level,
@@ -129,6 +129,8 @@ pub const Options = struct {
args: anytype,
) void = log.defaultLog,
+ logTerminalMode: fn () Io.Terminal.Mode = log.defaultTerminalMode,
+
/// Overrides `std.heap.page_size_min`.
page_size_min: ?usize = null,
/// Overrides `std.heap.page_size_max`.
@@ -173,6 +175,24 @@ pub const Options = struct {
/// If this is `false`, then captured stack traces will always be empty, and attempts to write
/// stack traces will just print an error to the relevant `Io.Writer` and return.
allow_stack_tracing: bool = !@import("builtin").strip_debug_info,
+
+ pub const debug_threaded_io: ?*Io.Threaded = if (@hasDecl(root, "std_options_debug_threaded_io"))
+ root.std_options_debug_threaded_io
+ else
+ Io.Threaded.global_single_threaded;
+ /// The `Io` instance that `std.debug` uses for `std.debug.print`,
+ /// capturing stack traces, loading debug info, finding the executable's
+ /// own path, and environment variables that affect terminal mode
+ /// detection. The default is to use statically initialized singleton that
+ /// is independent from the application's `Io` instance in order to make
+ /// debugging more straightforward. For example, while debugging an `Io`
+ /// implementation based on coroutines, one likely wants `std.debug.print`
+ /// to directly write to stderr without trying to interact with the code
+ /// being debugged.
+ pub const debug_io: Io = if (@hasDecl(root, "std_options_debug_io")) root.std_options_debug_io else debug_threaded_io.?.ioBasic();
+
+ /// Overrides `std.Io.File.Permissions`.
+ pub const FilePermissions: ?type = if (@hasDecl(root, "std_options_FilePermissions")) root.std_options_FilePermissions else null;
};
// This forces the start.zig file to be imported, and the comptime logic inside that
diff --git a/lib/std/tar.zig b/lib/std/tar.zig
index bf96aed35c..024a425919 100644
--- a/lib/std/tar.zig
+++ b/lib/std/tar.zig
@@ -16,6 +16,7 @@
//! pax reference: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const testing = std.testing;
@@ -302,7 +303,7 @@ pub const FileKind = enum {
/// Iterator over entries in the tar file represented by reader.
pub const Iterator = struct {
- reader: *std.Io.Reader,
+ reader: *Io.Reader,
diagnostics: ?*Diagnostics = null,
// buffers for heeader and file attributes
@@ -328,7 +329,7 @@ pub const Iterator = struct {
/// Iterates over files in tar archive.
/// `next` returns each file in tar archive.
- pub fn init(reader: *std.Io.Reader, options: Options) Iterator {
+ pub fn init(reader: *Io.Reader, options: Options) Iterator {
return .{
.reader = reader,
.diagnostics = options.diagnostics,
@@ -473,7 +474,7 @@ pub const Iterator = struct {
return null;
}
- pub fn streamRemaining(it: *Iterator, file: File, w: *std.Io.Writer) std.Io.Reader.StreamError!void {
+ pub fn streamRemaining(it: *Iterator, file: File, w: *Io.Writer) Io.Reader.StreamError!void {
try it.reader.streamExact64(w, file.size);
it.unread_file_bytes = 0;
}
@@ -499,14 +500,14 @@ const pax_max_size_attr_len = 64;
pub const PaxIterator = struct {
size: usize, // cumulative size of all pax attributes
- reader: *std.Io.Reader,
+ reader: *Io.Reader,
const Self = @This();
const Attribute = struct {
kind: PaxAttributeKind,
len: usize, // length of the attribute value
- reader: *std.Io.Reader, // reader positioned at value start
+ reader: *Io.Reader, // reader positioned at value start
// Copies pax attribute value into destination buffer.
// Must be called with destination buffer of size at least Attribute.len.
@@ -573,13 +574,13 @@ pub const PaxIterator = struct {
}
// Checks that each record ends with new line.
- fn validateAttributeEnding(reader: *std.Io.Reader) !void {
+ fn validateAttributeEnding(reader: *Io.Reader) !void {
if (try reader.takeByte() != '\n') return error.PaxInvalidAttributeEnd;
}
};
/// Saves tar file content to the file systems.
-pub fn pipeToFileSystem(dir: std.fs.Dir, reader: *std.Io.Reader, options: PipeOptions) !void {
+pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOptions) !void {
var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var file_contents_buffer: [1024]u8 = undefined;
@@ -605,13 +606,13 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: *std.Io.Reader, options: PipeOp
switch (file.kind) {
.directory => {
if (file_name.len > 0 and !options.exclude_empty_directories) {
- try dir.makePath(file_name);
+ try dir.createDirPath(io, file_name);
}
},
.file => {
- if (createDirAndFile(dir, file_name, fileMode(file.mode, options))) |fs_file| {
- defer fs_file.close();
- var file_writer = fs_file.writer(&file_contents_buffer);
+ if (createDirAndFile(io, dir, file_name, filePermissions(file.mode, options))) |fs_file| {
+ defer fs_file.close(io);
+ var file_writer = fs_file.writer(io, &file_contents_buffer);
try it.streamRemaining(file, &file_writer.interface);
try file_writer.interface.flush();
} else |err| {
@@ -624,7 +625,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: *std.Io.Reader, options: PipeOp
},
.sym_link => {
const link_name = file.link_name;
- createDirAndSymlink(dir, link_name, file_name) catch |err| {
+ createDirAndSymlink(io, dir, link_name, file_name) catch |err| {
const d = options.diagnostics orelse return error.UnableToCreateSymLink;
try d.errors.append(d.allocator, .{ .unable_to_create_sym_link = .{
.code = err,
@@ -637,12 +638,12 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: *std.Io.Reader, options: PipeOp
}
}
-fn createDirAndFile(dir: std.fs.Dir, file_name: []const u8, mode: std.fs.File.Mode) !std.fs.File {
- const fs_file = dir.createFile(file_name, .{ .exclusive = true, .mode = mode }) catch |err| {
+fn createDirAndFile(io: Io, dir: Io.Dir, file_name: []const u8, permissions: Io.File.Permissions) !Io.File {
+ const fs_file = dir.createFile(io, file_name, .{ .exclusive = true, .permissions = permissions }) catch |err| {
if (err == error.FileNotFound) {
if (std.fs.path.dirname(file_name)) |dir_name| {
- try dir.makePath(dir_name);
- return try dir.createFile(file_name, .{ .exclusive = true, .mode = mode });
+ try dir.createDirPath(io, dir_name);
+ return try dir.createFile(io, file_name, .{ .exclusive = true, .permissions = permissions });
}
}
return err;
@@ -651,12 +652,12 @@ fn createDirAndFile(dir: std.fs.Dir, file_name: []const u8, mode: std.fs.File.Mo
}
// Creates a symbolic link at path `file_name` which points to `link_name`.
-fn createDirAndSymlink(dir: std.fs.Dir, link_name: []const u8, file_name: []const u8) !void {
- dir.symLink(link_name, file_name, .{}) catch |err| {
+fn createDirAndSymlink(io: Io, dir: Io.Dir, link_name: []const u8, file_name: []const u8) !void {
+ dir.symLink(io, link_name, file_name, .{}) catch |err| {
if (err == error.FileNotFound) {
if (std.fs.path.dirname(file_name)) |dir_name| {
- try dir.makePath(dir_name);
- return try dir.symLink(link_name, file_name, .{});
+ try dir.createDirPath(io, dir_name);
+ return try dir.symLink(io, link_name, file_name, .{});
}
}
return err;
@@ -783,7 +784,7 @@ test PaxIterator {
var buffer: [1024]u8 = undefined;
outer: for (cases) |case| {
- var reader: std.Io.Reader = .fixed(case.data);
+ var reader: Io.Reader = .fixed(case.data);
var it: PaxIterator = .{
.size = case.data.len,
.reader = &reader,
@@ -874,25 +875,27 @@ test "header parse mode" {
}
test "create file and symlink" {
+ const io = testing.io;
+
var root = testing.tmpDir(.{});
defer root.cleanup();
- var file = try createDirAndFile(root.dir, "file1", default_mode);
- file.close();
- file = try createDirAndFile(root.dir, "a/b/c/file2", default_mode);
- file.close();
+ var file = try createDirAndFile(io, root.dir, "file1", .default_file);
+ file.close(io);
+ file = try createDirAndFile(io, root.dir, "a/b/c/file2", .default_file);
+ file.close(io);
- createDirAndSymlink(root.dir, "a/b/c/file2", "symlink1") catch |err| {
+ createDirAndSymlink(io, root.dir, "a/b/c/file2", "symlink1") catch |err| {
// On Windows when developer mode is not enabled
if (err == error.AccessDenied) return error.SkipZigTest;
return err;
};
- try createDirAndSymlink(root.dir, "../../../file1", "d/e/f/symlink2");
+ try createDirAndSymlink(io, root.dir, "../../../file1", "d/e/f/symlink2");
// Danglink symlnik, file created later
- try createDirAndSymlink(root.dir, "../../../g/h/i/file4", "j/k/l/symlink3");
- file = try createDirAndFile(root.dir, "g/h/i/file4", default_mode);
- file.close();
+ try createDirAndSymlink(io, root.dir, "../../../g/h/i/file4", "j/k/l/symlink3");
+ file = try createDirAndFile(io, root.dir, "g/h/i/file4", .default_file);
+ file.close(io);
}
test Iterator {
@@ -916,7 +919,7 @@ test Iterator {
// example/empty/
const data = @embedFile("tar/testdata/example.tar");
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
// User provided buffers to the iterator
var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
@@ -942,7 +945,7 @@ test Iterator {
.file => {
try testing.expectEqualStrings("example/a/file", file.name);
var buf: [16]u8 = undefined;
- var w: std.Io.Writer = .fixed(&buf);
+ var w: Io.Writer = .fixed(&buf);
try it.streamRemaining(file, &w);
try testing.expectEqualStrings("content\n", w.buffered());
},
@@ -955,6 +958,7 @@ test Iterator {
}
test pipeToFileSystem {
+ const io = testing.io;
// Example tar file is created from this tree structure:
// $ tree example
// example
@@ -975,14 +979,14 @@ test pipeToFileSystem {
// example/empty/
const data = @embedFile("tar/testdata/example.tar");
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{ .follow_symlinks = false });
defer tmp.cleanup();
const dir = tmp.dir;
// Save tar from reader to the file system `dir`
- pipeToFileSystem(dir, &reader, .{
+ pipeToFileSystem(io, dir, &reader, .{
.mode_mode = .ignore,
.strip_components = 1,
.exclude_empty_directories = true,
@@ -992,21 +996,22 @@ test pipeToFileSystem {
return err;
};
- try testing.expectError(error.FileNotFound, dir.statFile("empty"));
- try testing.expect((try dir.statFile("a/file")).kind == .file);
- try testing.expect((try dir.statFile("b/symlink")).kind == .file); // statFile follows symlink
+ try testing.expectError(error.FileNotFound, dir.statFile(io, "empty", .{}));
+ try testing.expect((try dir.statFile(io, "a/file", .{})).kind == .file);
+ try testing.expect((try dir.statFile(io, "b/symlink", .{})).kind == .file); // statFile follows symlink
var buf: [32]u8 = undefined;
try testing.expectEqualSlices(
u8,
"../a/file",
- normalizePath(try dir.readLink("b/symlink", &buf)),
+ normalizePath(buf[0..try dir.readLink(io, "b/symlink", &buf)]),
);
}
test "pipeToFileSystem root_dir" {
+ const io = testing.io;
const data = @embedFile("tar/testdata/example.tar");
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
// with strip_components = 1
{
@@ -1015,7 +1020,7 @@ test "pipeToFileSystem root_dir" {
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- pipeToFileSystem(tmp.dir, &reader, .{
+ pipeToFileSystem(io, tmp.dir, &reader, .{
.strip_components = 1,
.diagnostics = &diagnostics,
}) catch |err| {
@@ -1037,7 +1042,7 @@ test "pipeToFileSystem root_dir" {
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- pipeToFileSystem(tmp.dir, &reader, .{
+ pipeToFileSystem(io, tmp.dir, &reader, .{
.strip_components = 0,
.diagnostics = &diagnostics,
}) catch |err| {
@@ -1053,43 +1058,46 @@ test "pipeToFileSystem root_dir" {
}
test "findRoot with single file archive" {
+ const io = testing.io;
const data = @embedFile("tar/testdata/22752.tar");
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- try pipeToFileSystem(tmp.dir, &reader, .{ .diagnostics = &diagnostics });
+ try pipeToFileSystem(io, tmp.dir, &reader, .{ .diagnostics = &diagnostics });
try testing.expectEqualStrings("", diagnostics.root_dir);
}
test "findRoot without explicit root dir" {
+ const io = testing.io;
const data = @embedFile("tar/testdata/19820.tar");
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- try pipeToFileSystem(tmp.dir, &reader, .{ .diagnostics = &diagnostics });
+ try pipeToFileSystem(io, tmp.dir, &reader, .{ .diagnostics = &diagnostics });
try testing.expectEqualStrings("root", diagnostics.root_dir);
}
test "pipeToFileSystem strip_components" {
+ const io = testing.io;
const data = @embedFile("tar/testdata/example.tar");
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{ .follow_symlinks = false });
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- pipeToFileSystem(tmp.dir, &reader, .{
+ pipeToFileSystem(io, tmp.dir, &reader, .{
.strip_components = 3,
.diagnostics = &diagnostics,
}) catch |err| {
@@ -1110,45 +1118,36 @@ fn normalizePath(bytes: []u8) []u8 {
return bytes;
}
-const default_mode = std.fs.File.default_mode;
-
// File system mode based on tar header mode and mode_mode options.
-fn fileMode(mode: u32, options: PipeOptions) std.fs.File.Mode {
- if (!std.fs.has_executable_bit or options.mode_mode == .ignore)
- return default_mode;
-
- const S = std.posix.S;
-
- // The mode from the tar file is inspected for the owner executable bit.
- if (mode & S.IXUSR == 0)
- return default_mode;
-
- // This bit is copied to the group and other executable bits.
- // Other bits of the mode are left as the default when creating files.
- return default_mode | S.IXUSR | S.IXGRP | S.IXOTH;
+fn filePermissions(mode: u32, options: PipeOptions) Io.File.Permissions {
+ return if (!Io.File.Permissions.has_executable_bit or options.mode_mode == .ignore or (mode & 0o100) == 0)
+ .default_file
+ else
+ .executable_file;
}
-test fileMode {
- if (!std.fs.has_executable_bit) return error.SkipZigTest;
- try testing.expectEqual(default_mode, fileMode(0o744, PipeOptions{ .mode_mode = .ignore }));
- try testing.expectEqual(0o777, fileMode(0o744, PipeOptions{}));
- try testing.expectEqual(0o666, fileMode(0o644, PipeOptions{}));
- try testing.expectEqual(0o666, fileMode(0o655, PipeOptions{}));
+test filePermissions {
+ if (!Io.File.Permissions.has_executable_bit) return error.SkipZigTest;
+ try testing.expectEqual(.default_file, filePermissions(0o744, .{ .mode_mode = .ignore }));
+ try testing.expectEqual(.executable_file, filePermissions(0o744, .{}));
+ try testing.expectEqual(.default_file, filePermissions(0o644, .{}));
+ try testing.expectEqual(.default_file, filePermissions(0o655, .{}));
}
test "executable bit" {
- if (!std.fs.has_executable_bit) return error.SkipZigTest;
+ if (!Io.File.Permissions.has_executable_bit) return error.SkipZigTest;
+ const io = testing.io;
const S = std.posix.S;
const data = @embedFile("tar/testdata/example.tar");
for ([_]PipeOptions.ModeMode{ .ignore, .executable_bit_only }) |opt| {
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{ .follow_symlinks = false });
//defer tmp.cleanup();
- pipeToFileSystem(tmp.dir, &reader, .{
+ pipeToFileSystem(io, tmp.dir, &reader, .{
.strip_components = 1,
.exclude_empty_directories = true,
.mode_mode = opt,
@@ -1158,19 +1157,21 @@ test "executable bit" {
return err;
};
- const fs = try tmp.dir.statFile("a/file");
+ const fs = try tmp.dir.statFile(io, "a/file", .{});
try testing.expect(fs.kind == .file);
+ const mode = fs.permissions.toMode();
+
if (opt == .executable_bit_only) {
// Executable bit is set for user, group and others
- try testing.expect(fs.mode & S.IXUSR > 0);
- try testing.expect(fs.mode & S.IXGRP > 0);
- try testing.expect(fs.mode & S.IXOTH > 0);
+ try testing.expect(mode & S.IXUSR > 0);
+ try testing.expect(mode & S.IXGRP > 0);
+ try testing.expect(mode & S.IXOTH > 0);
}
if (opt == .ignore) {
- try testing.expect(fs.mode & S.IXUSR == 0);
- try testing.expect(fs.mode & S.IXGRP == 0);
- try testing.expect(fs.mode & S.IXOTH == 0);
+ try testing.expect(mode & S.IXUSR == 0);
+ try testing.expect(mode & S.IXGRP == 0);
+ try testing.expect(mode & S.IXOTH == 0);
}
}
}
diff --git a/lib/std/tar/test.zig b/lib/std/tar/test.zig
index 780e2b844c..61aeb03519 100644
--- a/lib/std/tar/test.zig
+++ b/lib/std/tar/test.zig
@@ -424,6 +424,7 @@ test "insufficient buffer in Header name filed" {
}
test "should not overwrite existing file" {
+ const io = testing.io;
// Starting from this folder structure:
// $ tree root
// root
@@ -469,17 +470,18 @@ test "should not overwrite existing file" {
defer root.cleanup();
try testing.expectError(
error.PathAlreadyExists,
- tar.pipeToFileSystem(root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }),
+ tar.pipeToFileSystem(io, root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }),
);
// Unpack with strip_components = 0 should pass
r = .fixed(data);
var root2 = std.testing.tmpDir(.{});
defer root2.cleanup();
- try tar.pipeToFileSystem(root2.dir, &r, .{ .mode_mode = .ignore, .strip_components = 0 });
+ try tar.pipeToFileSystem(io, root2.dir, &r, .{ .mode_mode = .ignore, .strip_components = 0 });
}
test "case sensitivity" {
+ const io = testing.io;
// Mimicking issue #18089, this tar contains, same file name in two case
// sensitive name version. Should fail on case insensitive file systems.
//
@@ -495,13 +497,13 @@ test "case sensitivity" {
var root = std.testing.tmpDir(.{});
defer root.cleanup();
- tar.pipeToFileSystem(root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }) catch |err| {
+ tar.pipeToFileSystem(io, root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }) catch |err| {
// on case insensitive fs we fail on overwrite existing file
try testing.expectEqual(error.PathAlreadyExists, err);
return;
};
// on case sensitive os both files are created
- try testing.expect((try root.dir.statFile("alacritty/darkermatrix.yml")).kind == .file);
- try testing.expect((try root.dir.statFile("alacritty/Darkermatrix.yml")).kind == .file);
+ try testing.expect((try root.dir.statFile(io, "alacritty/darkermatrix.yml", .{})).kind == .file);
+ try testing.expect((try root.dir.statFile(io, "alacritty/Darkermatrix.yml", .{})).kind == .file);
}
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 186cafad59..9bb2622c3d 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -1,5 +1,7 @@
-const std = @import("std.zig");
const builtin = @import("builtin");
+
+const std = @import("std.zig");
+const Io = std.Io;
const assert = std.debug.assert;
const math = std.math;
@@ -28,8 +30,8 @@ pub var allocator_instance: std.heap.GeneralPurposeAllocator(.{
break :b .init;
};
-pub var io_instance: std.Io.Threaded = undefined;
-pub const io = io_instance.io();
+pub var io_instance: Io.Threaded = undefined;
+pub const io = if (builtin.is_test) io_instance.io() else @compileError("not testing");
/// TODO https://github.com/ziglang/zig/issues/5738
pub var log_level = std.log.Level.warn;
@@ -352,11 +354,10 @@ test expectApproxEqRel {
}
}
-/// This function is intended to be used only in tests. When the two slices are not
-/// equal, prints diagnostics to stderr to show exactly how they are not equal (with
-/// the differences highlighted in red), then returns a test failure error.
-/// The colorized output is optional and controlled by the return of `std.Io.tty.Config.detect`.
-/// If your inputs are UTF-8 encoded strings, consider calling `expectEqualStrings` instead.
+/// This function is intended to be used only in tests. When the two slices are
+/// not equal, prints diagnostics to stderr to show exactly how they are not
+/// equal (with the differences highlighted in red), then returns a test
+/// failure error.
pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const T) !void {
const diff_index: usize = diff_index: {
const shortest = @min(expected.len, actual.len);
@@ -367,9 +368,11 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const
break :diff_index if (expected.len == actual.len) return else shortest;
};
if (!backend_can_print) return error.TestExpectedEqual;
- const stderr_w, const ttyconf = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- failEqualSlices(T, expected, actual, diff_index, stderr_w, ttyconf) catch {};
+ // Intentionally using the debug Io instance rather than the testing Io instance.
+ const stderr = std.debug.lockStderr(&.{});
+ defer std.debug.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ failEqualSlices(T, expected, actual, diff_index, w, stderr.terminal_mode) catch {};
return error.TestExpectedEqual;
}
@@ -378,8 +381,8 @@ fn failEqualSlices(
expected: []const T,
actual: []const T,
diff_index: usize,
- w: *std.Io.Writer,
- ttyconf: std.Io.tty.Config,
+ w: *Io.Writer,
+ terminal_mode: Io.Terminal.Mode,
) !void {
try w.print("slices differ. first difference occurs at index {d} (0x{X})\n", .{ diff_index, diff_index });
@@ -402,12 +405,12 @@ fn failEqualSlices(
var differ = if (T == u8) BytesDiffer{
.expected = expected_window,
.actual = actual_window,
- .ttyconf = ttyconf,
+ .terminal_mode = terminal_mode,
} else SliceDiffer(T){
.start_index = window_start,
.expected = expected_window,
.actual = actual_window,
- .ttyconf = ttyconf,
+ .terminal_mode = terminal_mode,
};
// Print indexes as hex for slices of u8 since it's more likely to be binary data where
@@ -464,21 +467,22 @@ fn SliceDiffer(comptime T: type) type {
start_index: usize,
expected: []const T,
actual: []const T,
- ttyconf: std.Io.tty.Config,
+ terminal_mode: Io.Terminal.Mode,
const Self = @This();
- pub fn write(self: Self, writer: *std.Io.Writer) !void {
+ pub fn write(self: Self, writer: *Io.Writer) !void {
+ const t: Io.Terminal = .{ .writer = writer, .mode = self.terminal_mode };
for (self.expected, 0..) |value, i| {
const full_index = self.start_index + i;
const diff = if (i < self.actual.len) !std.meta.eql(self.actual[i], value) else true;
- if (diff) try self.ttyconf.setColor(writer, .red);
+ if (diff) try t.setColor(.red);
if (@typeInfo(T) == .pointer) {
try writer.print("[{}]{*}: {any}\n", .{ full_index, value, value });
} else {
try writer.print("[{}]: {any}\n", .{ full_index, value });
}
- if (diff) try self.ttyconf.setColor(writer, .reset);
+ if (diff) try t.setColor(.reset);
}
}
};
@@ -487,9 +491,9 @@ fn SliceDiffer(comptime T: type) type {
const BytesDiffer = struct {
expected: []const u8,
actual: []const u8,
- ttyconf: std.Io.tty.Config,
+ terminal_mode: Io.Terminal.Mode,
- pub fn write(self: BytesDiffer, writer: *std.Io.Writer) !void {
+ pub fn write(self: BytesDiffer, writer: *Io.Writer) !void {
var expected_iterator = std.mem.window(u8, self.expected, 16, 16);
var row: usize = 0;
while (expected_iterator.next()) |chunk| {
@@ -514,7 +518,7 @@ const BytesDiffer = struct {
try self.writeDiff(writer, "{c}", .{byte}, diff);
} else {
// TODO: remove this `if` when https://github.com/ziglang/zig/issues/7600 is fixed
- if (self.ttyconf == .windows_api) {
+ if (self.terminal_mode == .windows_api) {
try self.writeDiff(writer, ".", .{}, diff);
continue;
}
@@ -535,10 +539,14 @@ const BytesDiffer = struct {
}
}
- fn writeDiff(self: BytesDiffer, writer: *std.Io.Writer, comptime fmt: []const u8, args: anytype, diff: bool) !void {
- if (diff) try self.ttyconf.setColor(writer, .red);
+ fn terminal(self: *const BytesDiffer, writer: *Io.Writer) Io.Terminal {
+ return .{ .writer = writer, .mode = self.terminal_mode };
+ }
+
+ fn writeDiff(self: BytesDiffer, writer: *Io.Writer, comptime fmt: []const u8, args: anytype, diff: bool) !void {
+ if (diff) try self.terminal(writer).setColor(.red);
try writer.print(fmt, args);
- if (diff) try self.ttyconf.setColor(writer, .reset);
+ if (diff) try self.terminal(writer).setColor(.reset);
}
};
@@ -605,34 +613,35 @@ pub fn expect(ok: bool) !void {
}
pub const TmpDir = struct {
- dir: std.fs.Dir,
- parent_dir: std.fs.Dir,
+ dir: Io.Dir,
+ parent_dir: Io.Dir,
sub_path: [sub_path_len]u8,
const random_bytes_count = 12;
const sub_path_len = std.fs.base64_encoder.calcSize(random_bytes_count);
pub fn cleanup(self: *TmpDir) void {
- self.dir.close();
- self.parent_dir.deleteTree(&self.sub_path) catch {};
- self.parent_dir.close();
+ self.dir.close(io);
+ self.parent_dir.deleteTree(io, &self.sub_path) catch {};
+ self.parent_dir.close(io);
self.* = undefined;
}
};
-pub fn tmpDir(opts: std.fs.Dir.OpenOptions) TmpDir {
+pub fn tmpDir(opts: Io.Dir.OpenOptions) TmpDir {
+ comptime assert(builtin.is_test);
var random_bytes: [TmpDir.random_bytes_count]u8 = undefined;
std.crypto.random.bytes(&random_bytes);
var sub_path: [TmpDir.sub_path_len]u8 = undefined;
_ = std.fs.base64_encoder.encode(&sub_path, &random_bytes);
- const cwd = std.fs.cwd();
- var cache_dir = cwd.makeOpenPath(".zig-cache", .{}) catch
+ const cwd = Io.Dir.cwd();
+ var cache_dir = cwd.createDirPathOpen(io, ".zig-cache", .{}) catch
@panic("unable to make tmp dir for testing: unable to make and open .zig-cache dir");
- defer cache_dir.close();
- const parent_dir = cache_dir.makeOpenPath("tmp", .{}) catch
+ defer cache_dir.close(io);
+ const parent_dir = cache_dir.createDirPathOpen(io, "tmp", .{}) catch
@panic("unable to make tmp dir for testing: unable to make and open .zig-cache/tmp dir");
- const dir = parent_dir.makeOpenPath(&sub_path, opts) catch
+ const dir = parent_dir.createDirPathOpen(io, &sub_path, .{ .open_options = opts }) catch
@panic("unable to make tmp dir for testing: unable to make and open the tmp dir");
return .{
@@ -929,7 +938,7 @@ test "expectEqualDeep primitive type" {
a,
b,
- pub fn format(self: @This(), writer: *std.Io.Writer) !void {
+ pub fn format(self: @This(), writer: *Io.Writer) !void {
try writer.writeAll(@tagName(self));
}
};
@@ -1146,9 +1155,10 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
break :x failing_allocator_inst.alloc_index;
};
- var fail_index: usize = 0;
- while (fail_index < needed_alloc_count) : (fail_index += 1) {
- var failing_allocator_inst = std.testing.FailingAllocator.init(backing_allocator, .{ .fail_index = fail_index });
+ for (0..needed_alloc_count) |fail_index| {
+ var failing_allocator_inst = std.testing.FailingAllocator.init(backing_allocator, .{
+ .fail_index = fail_index,
+ });
args.@"0" = failing_allocator_inst.allocator();
if (@call(.auto, test_fn, args)) |_| {
@@ -1160,7 +1170,6 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
} else |err| switch (err) {
error.OutOfMemory => {
if (failing_allocator_inst.allocated_bytes != failing_allocator_inst.freed_bytes) {
- const tty_config: std.Io.tty.Config = .detect(.stderr());
print(
"\nfail_index: {d}/{d}\nallocated bytes: {d}\nfreed bytes: {d}\nallocations: {d}\ndeallocations: {d}\nallocation that was made to fail: {f}",
.{
@@ -1172,7 +1181,6 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
failing_allocator_inst.deallocations,
std.debug.FormatStackTrace{
.stack_trace = failing_allocator_inst.getStackTrace(),
- .tty_config = tty_config,
},
},
);
@@ -1220,14 +1228,14 @@ pub inline fn fuzz(
return @import("root").fuzz(context, testOne, options);
}
-/// A `std.Io.Reader` that writes a predetermined list of buffers during `stream`.
+/// A `Io.Reader` that writes a predetermined list of buffers during `stream`.
pub const Reader = struct {
calls: []const Call,
- interface: std.Io.Reader,
+ interface: Io.Reader,
next_call_index: usize,
next_offset: usize,
/// Further reduces how many bytes are written in each `stream` call.
- artificial_limit: std.Io.Limit = .unlimited,
+ artificial_limit: Io.Limit = .unlimited,
pub const Call = struct {
buffer: []const u8,
@@ -1247,7 +1255,7 @@ pub const Reader = struct {
};
}
- fn stream(io_r: *std.Io.Reader, w: *std.Io.Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
+ fn stream(io_r: *Io.Reader, w: *Io.Writer, limit: Io.Limit) Io.Reader.StreamError!usize {
const r: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
if (r.calls.len - r.next_call_index == 0) return error.EndOfStream;
const call = r.calls[r.next_call_index];
@@ -1262,13 +1270,13 @@ pub const Reader = struct {
}
};
-/// A `std.Io.Reader` that gets its data from another `std.Io.Reader`, and always
+/// A `Io.Reader` that gets its data from another `Io.Reader`, and always
/// writes to its own buffer (and returns 0) during `stream` and `readVec`.
pub const ReaderIndirect = struct {
- in: *std.Io.Reader,
- interface: std.Io.Reader,
+ in: *Io.Reader,
+ interface: Io.Reader,
- pub fn init(in: *std.Io.Reader, buffer: []u8) ReaderIndirect {
+ pub fn init(in: *Io.Reader, buffer: []u8) ReaderIndirect {
return .{
.in = in,
.interface = .{
@@ -1283,17 +1291,17 @@ pub const ReaderIndirect = struct {
};
}
- fn readVec(r: *std.Io.Reader, _: [][]u8) std.Io.Reader.Error!usize {
+ fn readVec(r: *Io.Reader, _: [][]u8) Io.Reader.Error!usize {
try streamInner(r);
return 0;
}
- fn stream(r: *std.Io.Reader, _: *std.Io.Writer, _: std.Io.Limit) std.Io.Reader.StreamError!usize {
+ fn stream(r: *Io.Reader, _: *Io.Writer, _: Io.Limit) Io.Reader.StreamError!usize {
try streamInner(r);
return 0;
}
- fn streamInner(r: *std.Io.Reader) std.Io.Reader.Error!void {
+ fn streamInner(r: *Io.Reader) Io.Reader.Error!void {
const r_indirect: *ReaderIndirect = @alignCast(@fieldParentPtr("interface", r));
// If there's no room remaining in the buffer at all, make room.
@@ -1301,12 +1309,12 @@ pub const ReaderIndirect = struct {
try r.rebase(r.buffer.len);
}
- var writer: std.Io.Writer = .{
+ var writer: Io.Writer = .{
.buffer = r.buffer,
.end = r.end,
.vtable = &.{
- .drain = std.Io.Writer.unreachableDrain,
- .rebase = std.Io.Writer.unreachableRebase,
+ .drain = Io.Writer.unreachableDrain,
+ .rebase = Io.Writer.unreachableRebase,
},
};
defer r.end = writer.end;
diff --git a/lib/std/unicode/throughput_test.zig b/lib/std/unicode/throughput_test.zig
index fd3f46ec58..c02f550a4a 100644
--- a/lib/std/unicode/throughput_test.zig
+++ b/lib/std/unicode/throughput_test.zig
@@ -1,8 +1,8 @@
const std = @import("std");
+const Io = std.Io;
const time = std.time;
const unicode = std.unicode;
-
-const Timer = time.Timer;
+const Timer = std.time.Timer;
const N = 1_000_000;
@@ -41,7 +41,7 @@ fn benchmarkCodepointCount(buf: []const u8) !ResultCount {
pub fn main() !void {
// Size of buffer is about size of printed message.
var stdout_buffer: [0x100]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(&stdout_buffer);
const stdout = &stdout_writer.interface;
try stdout.print("short ASCII strings\n", .{});
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index c8a0dcde3b..6212264005 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -46,23 +46,16 @@ pub const SrcHasher = std.crypto.hash.Blake3;
pub const SrcHash = [16]u8;
pub const Color = enum {
- /// Determine whether stderr is a terminal or not automatically.
+ /// Auto-detect whether stream supports terminal colors.
auto,
- /// Assume stderr is not a terminal.
+ /// Force-enable colors.
off,
- /// Assume stderr is a terminal.
+ /// Suppress colors.
on,
- pub fn getTtyConf(color: Color, detected: Io.tty.Config) Io.tty.Config {
+ pub fn terminalMode(color: Color) ?Io.Terminal.Mode {
return switch (color) {
- .auto => detected,
- .on => .escape_codes,
- .off => .no_color,
- };
- }
- pub fn detectTtyConf(color: Color) Io.tty.Config {
- return switch (color) {
- .auto => .detect(.stderr()),
+ .auto => null,
.on => .escape_codes,
.off => .no_color,
};
@@ -639,7 +632,7 @@ pub fn readSourceFileToEndAlloc(gpa: Allocator, file_reader: *Io.File.Reader) ![
return buffer.toOwnedSliceSentinel(gpa, 0);
}
-pub fn printAstErrorsToStderr(gpa: Allocator, tree: Ast, path: []const u8, color: Color) !void {
+pub fn printAstErrorsToStderr(gpa: Allocator, io: Io, tree: Ast, path: []const u8, color: Color) !void {
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
try wip_errors.init(gpa);
defer wip_errors.deinit();
@@ -648,7 +641,7 @@ pub fn printAstErrorsToStderr(gpa: Allocator, tree: Ast, path: []const u8, color
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, color);
+ return error_bundle.renderToStderr(io, .{}, color);
}
pub fn putAstErrorsIntoBundle(
diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig
index ef6203cbe8..c5275729ae 100644
--- a/lib/std/zig/ErrorBundle.zig
+++ b/lib/std/zig/ErrorBundle.zig
@@ -162,45 +162,57 @@ pub const RenderOptions = struct {
include_log_text: bool = true,
};
-pub fn renderToStdErr(eb: ErrorBundle, options: RenderOptions, color: std.zig.Color) void {
+pub const RenderToStderrError = Io.Cancelable || Io.File.Writer.Error;
+
+pub fn renderToStderr(eb: ErrorBundle, io: Io, options: RenderOptions, color: std.zig.Color) RenderToStderrError!void {
var buffer: [256]u8 = undefined;
- const w, const ttyconf = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
- renderToWriter(eb, options, w, color.getTtyConf(ttyconf)) catch return;
+ const stderr = try io.lockStderr(&buffer, color.terminalMode());
+ defer io.unlockStderr();
+ renderToTerminal(eb, options, stderr.terminal()) catch |err| switch (err) {
+ error.WriteFailed => return stderr.file_writer.err.?,
+ else => |e| return e,
+ };
+}
+
+pub fn renderToWriter(eb: ErrorBundle, options: RenderOptions, w: *Writer) Writer.Error!void {
+ return renderToTerminal(eb, options, .{ .writer = w, .mode = .no_color }) catch |err| switch (err) {
+ error.WriteFailed => |e| return e,
+ else => unreachable,
+ };
}
-pub fn renderToWriter(eb: ErrorBundle, options: RenderOptions, w: *Writer, ttyconf: Io.tty.Config) (Writer.Error || std.posix.UnexpectedError)!void {
+pub fn renderToTerminal(eb: ErrorBundle, options: RenderOptions, t: Io.Terminal) Io.Terminal.SetColorError!void {
if (eb.extra.len == 0) return;
for (eb.getMessages()) |err_msg| {
- try renderErrorMessageToWriter(eb, options, err_msg, w, ttyconf, "error", .red, 0);
+ try renderErrorMessage(eb, options, err_msg, t, "error", .red, 0);
}
if (options.include_log_text) {
const log_text = eb.getCompileLogOutput();
if (log_text.len != 0) {
- try w.writeAll("\nCompile Log Output:\n");
- try w.writeAll(log_text);
+ try t.writer.writeAll("\nCompile Log Output:\n");
+ try t.writer.writeAll(log_text);
}
}
}
-fn renderErrorMessageToWriter(
+fn renderErrorMessage(
eb: ErrorBundle,
options: RenderOptions,
err_msg_index: MessageIndex,
- w: *Writer,
- ttyconf: Io.tty.Config,
+ t: Io.Terminal,
kind: []const u8,
- color: Io.tty.Color,
+ color: Io.Terminal.Color,
indent: usize,
-) (Writer.Error || std.posix.UnexpectedError)!void {
+) Io.Terminal.SetColorError!void {
+ const w = t.writer;
const err_msg = eb.getErrorMessage(err_msg_index);
if (err_msg.src_loc != .none) {
const src = eb.extraData(SourceLocation, @intFromEnum(err_msg.src_loc));
var prefix: Writer.Discarding = .init(&.{});
try w.splatByteAll(' ', indent);
prefix.count += indent;
- try ttyconf.setColor(w, .bold);
+ try t.setColor(.bold);
try w.print("{s}:{d}:{d}: ", .{
eb.nullTerminatedString(src.data.src_path),
src.data.line + 1,
@@ -211,7 +223,7 @@ fn renderErrorMessageToWriter(
src.data.line + 1,
src.data.column + 1,
});
- try ttyconf.setColor(w, color);
+ try t.setColor(color);
try w.writeAll(kind);
prefix.count += kind.len;
try w.writeAll(": ");
@@ -219,17 +231,17 @@ fn renderErrorMessageToWriter(
// This is the length of the part before the error message:
// e.g. "file.zig:4:5: error: "
const prefix_len: usize = @intCast(prefix.count);
- try ttyconf.setColor(w, .reset);
- try ttyconf.setColor(w, .bold);
+ try t.setColor(.reset);
+ try t.setColor(.bold);
if (err_msg.count == 1) {
try writeMsg(eb, err_msg, w, prefix_len);
try w.writeByte('\n');
} else {
try writeMsg(eb, err_msg, w, prefix_len);
- try ttyconf.setColor(w, .dim);
+ try t.setColor(.dim);
try w.print(" ({d} times)\n", .{err_msg.count});
}
- try ttyconf.setColor(w, .reset);
+ try t.setColor(.reset);
if (src.data.source_line != 0 and options.include_source_line) {
const line = eb.nullTerminatedString(src.data.source_line);
for (line) |b| switch (b) {
@@ -242,19 +254,19 @@ fn renderErrorMessageToWriter(
// -1 since span.main includes the caret
const after_caret = src.data.span_end -| src.data.span_main -| 1;
try w.splatByteAll(' ', src.data.column - before_caret);
- try ttyconf.setColor(w, .green);
+ try t.setColor(.green);
try w.splatByteAll('~', before_caret);
try w.writeByte('^');
try w.splatByteAll('~', after_caret);
try w.writeByte('\n');
- try ttyconf.setColor(w, .reset);
+ try t.setColor(.reset);
}
for (eb.getNotes(err_msg_index)) |note| {
- try renderErrorMessageToWriter(eb, options, note, w, ttyconf, "note", .cyan, indent);
+ try renderErrorMessage(eb, options, note, t, "note", .cyan, indent);
}
if (src.data.reference_trace_len > 0 and options.include_reference_trace) {
- try ttyconf.setColor(w, .reset);
- try ttyconf.setColor(w, .dim);
+ try t.setColor(.reset);
+ try t.setColor(.dim);
try w.print("referenced by:\n", .{});
var ref_index = src.end;
for (0..src.data.reference_trace_len) |_| {
@@ -281,25 +293,25 @@ fn renderErrorMessageToWriter(
);
}
}
- try ttyconf.setColor(w, .reset);
+ try t.setColor(.reset);
}
} else {
- try ttyconf.setColor(w, color);
+ try t.setColor(color);
try w.splatByteAll(' ', indent);
try w.writeAll(kind);
try w.writeAll(": ");
- try ttyconf.setColor(w, .reset);
+ try t.setColor(.reset);
const msg = eb.nullTerminatedString(err_msg.msg);
if (err_msg.count == 1) {
try w.print("{s}\n", .{msg});
} else {
try w.print("{s}", .{msg});
- try ttyconf.setColor(w, .dim);
+ try t.setColor(.dim);
try w.print(" ({d} times)\n", .{err_msg.count});
}
- try ttyconf.setColor(w, .reset);
+ try t.setColor(.reset);
for (eb.getNotes(err_msg_index)) |note| {
- try renderErrorMessageToWriter(eb, options, note, w, ttyconf, "note", .cyan, indent + 4);
+ try renderErrorMessage(eb, options, note, t, "note", .cyan, indent + 4);
}
}
}
@@ -806,12 +818,10 @@ pub const Wip = struct {
};
defer bundle.deinit(std.testing.allocator);
- const ttyconf: Io.tty.Config = .no_color;
-
var bundle_buf: Writer.Allocating = .init(std.testing.allocator);
const bundle_bw = &bundle_buf.interface;
defer bundle_buf.deinit();
- try bundle.renderToWriter(.{ .ttyconf = ttyconf }, bundle_bw);
+ try bundle.renderToWriter(bundle_bw);
var copy = copy: {
var wip: ErrorBundle.Wip = undefined;
@@ -827,7 +837,7 @@ pub const Wip = struct {
var copy_buf: Writer.Allocating = .init(std.testing.allocator);
const copy_bw = &copy_buf.interface;
defer copy_buf.deinit();
- try copy.renderToWriter(.{ .ttyconf = ttyconf }, copy_bw);
+ try copy.renderToWriter(copy_bw);
try std.testing.expectEqualStrings(bundle_bw.written(), copy_bw.written());
}
diff --git a/lib/std/zig/LibCDirs.zig b/lib/std/zig/LibCDirs.zig
index fa297cd53a..e05ccde589 100644
--- a/lib/std/zig/LibCDirs.zig
+++ b/lib/std/zig/LibCDirs.zig
@@ -1,3 +1,11 @@
+const LibCDirs = @This();
+const builtin = @import("builtin");
+
+const std = @import("../std.zig");
+const Io = std.Io;
+const LibCInstallation = std.zig.LibCInstallation;
+const Allocator = std.mem.Allocator;
+
libc_include_dir_list: []const []const u8,
libc_installation: ?*const LibCInstallation,
libc_framework_dir_list: []const []const u8,
@@ -14,6 +22,7 @@ pub const DarwinSdkLayout = enum {
pub fn detect(
arena: Allocator,
+ io: Io,
zig_lib_dir: []const u8,
target: *const std.Target,
is_native_abi: bool,
@@ -38,7 +47,7 @@ pub fn detect(
// using the system libc installation.
if (is_native_abi and !target.isMinGW()) {
const libc = try arena.create(LibCInstallation);
- libc.* = LibCInstallation.findNative(.{ .allocator = arena, .target = target }) catch |err| switch (err) {
+ libc.* = LibCInstallation.findNative(arena, io, .{ .target = target }) catch |err| switch (err) {
error.CCompilerExitCode,
error.CCompilerCrashed,
error.CCompilerCannotFindHeaders,
@@ -75,7 +84,7 @@ pub fn detect(
if (use_system_abi) {
const libc = try arena.create(LibCInstallation);
- libc.* = try LibCInstallation.findNative(.{ .allocator = arena, .verbose = true, .target = target });
+ libc.* = try LibCInstallation.findNative(arena, io, .{ .verbose = true, .target = target });
return detectFromInstallation(arena, target, libc);
}
@@ -265,9 +274,3 @@ fn libCGenericName(target: *const std.Target) [:0]const u8 {
=> unreachable,
}
}
-
-const LibCDirs = @This();
-const builtin = @import("builtin");
-const std = @import("../std.zig");
-const LibCInstallation = std.zig.LibCInstallation;
-const Allocator = std.mem.Allocator;
diff --git a/lib/std/zig/LibCInstallation.zig b/lib/std/zig/LibCInstallation.zig
index 2ab4e48570..f2489f9ee7 100644
--- a/lib/std/zig/LibCInstallation.zig
+++ b/lib/std/zig/LibCInstallation.zig
@@ -1,4 +1,18 @@
//! See the render function implementation for documentation of the fields.
+const LibCInstallation = @This();
+
+const builtin = @import("builtin");
+const is_darwin = builtin.target.os.tag.isDarwin();
+const is_windows = builtin.target.os.tag == .windows;
+const is_haiku = builtin.target.os.tag == .haiku;
+
+const std = @import("std");
+const Io = std.Io;
+const Target = std.Target;
+const fs = std.fs;
+const Allocator = std.mem.Allocator;
+const Path = std.Build.Cache.Path;
+const log = std.log.scoped(.libc_installation);
include_dir: ?[]const u8 = null,
sys_include_dir: ?[]const u8 = null,
@@ -23,11 +37,7 @@ pub const FindError = error{
ZigIsTheCCompiler,
};
-pub fn parse(
- allocator: Allocator,
- libc_file: []const u8,
- target: *const std.Target,
-) !LibCInstallation {
+pub fn parse(allocator: Allocator, io: Io, libc_file: []const u8, target: *const std.Target) !LibCInstallation {
var self: LibCInstallation = .{};
const fields = std.meta.fields(LibCInstallation);
@@ -43,7 +53,7 @@ pub fn parse(
}
}
- const contents = try std.fs.cwd().readFileAlloc(libc_file, allocator, .limited(std.math.maxInt(usize)));
+ const contents = try Io.Dir.cwd().readFileAlloc(io, libc_file, allocator, .limited(std.math.maxInt(usize)));
defer allocator.free(contents);
var it = std.mem.tokenizeScalar(u8, contents, '\n');
@@ -156,7 +166,6 @@ pub fn render(self: LibCInstallation, out: *std.Io.Writer) !void {
}
pub const FindNativeOptions = struct {
- allocator: Allocator,
target: *const std.Target,
/// If enabled, will print human-friendly errors to stderr.
@@ -164,50 +173,50 @@ pub const FindNativeOptions = struct {
};
/// Finds the default, native libc.
-pub fn findNative(args: FindNativeOptions) FindError!LibCInstallation {
+pub fn findNative(gpa: Allocator, io: Io, args: FindNativeOptions) FindError!LibCInstallation {
var self: LibCInstallation = .{};
if (is_darwin and args.target.os.tag.isDarwin()) {
- if (!std.zig.system.darwin.isSdkInstalled(args.allocator))
+ if (!std.zig.system.darwin.isSdkInstalled(gpa, io))
return error.DarwinSdkNotFound;
- const sdk = std.zig.system.darwin.getSdk(args.allocator, args.target) orelse
+ const sdk = std.zig.system.darwin.getSdk(gpa, io, args.target) orelse
return error.DarwinSdkNotFound;
- defer args.allocator.free(sdk);
+ defer gpa.free(sdk);
- self.include_dir = try fs.path.join(args.allocator, &.{
+ self.include_dir = try fs.path.join(gpa, &.{
sdk, "usr/include",
});
- self.sys_include_dir = try fs.path.join(args.allocator, &.{
+ self.sys_include_dir = try fs.path.join(gpa, &.{
sdk, "usr/include",
});
return self;
} else if (is_windows) {
- const sdk = std.zig.WindowsSdk.find(args.allocator, args.target.cpu.arch) catch |err| switch (err) {
+ const sdk = std.zig.WindowsSdk.find(gpa, io, args.target.cpu.arch) catch |err| switch (err) {
error.NotFound => return error.WindowsSdkNotFound,
error.PathTooLong => return error.WindowsSdkNotFound,
error.OutOfMemory => return error.OutOfMemory,
};
- defer sdk.free(args.allocator);
+ defer sdk.free(gpa);
- try self.findNativeMsvcIncludeDir(args, sdk);
- try self.findNativeMsvcLibDir(args, sdk);
- try self.findNativeKernel32LibDir(args, sdk);
- try self.findNativeIncludeDirWindows(args, sdk);
- try self.findNativeCrtDirWindows(args, sdk);
+ try self.findNativeMsvcIncludeDir(gpa, io, sdk);
+ try self.findNativeMsvcLibDir(gpa, sdk);
+ try self.findNativeKernel32LibDir(gpa, io, args, sdk);
+ try self.findNativeIncludeDirWindows(gpa, io, sdk);
+ try self.findNativeCrtDirWindows(gpa, io, args.target, sdk);
} else if (is_haiku) {
- try self.findNativeIncludeDirPosix(args);
- try self.findNativeGccDirHaiku(args);
- self.crt_dir = try args.allocator.dupeZ(u8, "/system/develop/lib");
+ try self.findNativeIncludeDirPosix(gpa, io, args);
+ try self.findNativeGccDirHaiku(gpa, io, args);
+ self.crt_dir = try gpa.dupeZ(u8, "/system/develop/lib");
} else if (builtin.target.os.tag == .illumos) {
// There is only one libc, and its headers/libraries are always in the same spot.
- self.include_dir = try args.allocator.dupeZ(u8, "/usr/include");
- self.sys_include_dir = try args.allocator.dupeZ(u8, "/usr/include");
- self.crt_dir = try args.allocator.dupeZ(u8, "/usr/lib/64");
+ self.include_dir = try gpa.dupeZ(u8, "/usr/include");
+ self.sys_include_dir = try gpa.dupeZ(u8, "/usr/include");
+ self.crt_dir = try gpa.dupeZ(u8, "/usr/lib/64");
} else if (std.process.can_spawn) {
- try self.findNativeIncludeDirPosix(args);
+ try self.findNativeIncludeDirPosix(gpa, io, args);
switch (builtin.target.os.tag) {
- .freebsd, .netbsd, .openbsd, .dragonfly => self.crt_dir = try args.allocator.dupeZ(u8, "/usr/lib"),
- .linux => try self.findNativeCrtDirPosix(args),
+ .freebsd, .netbsd, .openbsd, .dragonfly => self.crt_dir = try gpa.dupeZ(u8, "/usr/lib"),
+ .linux => try self.findNativeCrtDirPosix(gpa, io, args),
else => {},
}
} else {
@@ -227,11 +236,9 @@ pub fn deinit(self: *LibCInstallation, allocator: Allocator) void {
self.* = undefined;
}
-fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) FindError!void {
- const allocator = args.allocator;
-
+fn findNativeIncludeDirPosix(self: *LibCInstallation, gpa: Allocator, io: Io, args: FindNativeOptions) FindError!void {
// Detect infinite loops.
- var env_map = std.process.getEnvMap(allocator) catch |err| switch (err) {
+ var env_map = std.process.getEnvMap(gpa) catch |err| switch (err) {
error.Unexpected => unreachable, // WASI-only
else => |e| return e,
};
@@ -250,7 +257,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
const dev_null = if (is_windows) "nul" else "/dev/null";
- var argv = std.array_list.Managed([]const u8).init(allocator);
+ var argv = std.array_list.Managed([]const u8).init(gpa);
defer argv.deinit();
try appendCcExe(&argv, skip_cc_env_var);
@@ -261,8 +268,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
dev_null,
});
- const run_res = std.process.Child.run(.{
- .allocator = allocator,
+ const run_res = std.process.Child.run(gpa, io, .{
.argv = argv.items,
.max_output_bytes = 1024 * 1024,
.env_map = &env_map,
@@ -279,8 +285,8 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
},
};
defer {
- allocator.free(run_res.stdout);
- allocator.free(run_res.stderr);
+ gpa.free(run_res.stdout);
+ gpa.free(run_res.stderr);
}
switch (run_res.term) {
.Exited => |code| if (code != 0) {
@@ -294,7 +300,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
}
var it = std.mem.tokenizeAny(u8, run_res.stderr, "\n\r");
- var search_paths = std.array_list.Managed([]const u8).init(allocator);
+ var search_paths = std.array_list.Managed([]const u8).init(gpa);
defer search_paths.deinit();
while (it.next()) |line| {
if (line.len != 0 and line[0] == ' ') {
@@ -318,7 +324,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
// search in reverse order
const search_path_untrimmed = search_paths.items[search_paths.items.len - path_i - 1];
const search_path = std.mem.trimStart(u8, search_path_untrimmed, " ");
- var search_dir = fs.cwd().openDir(search_path, .{}) catch |err| switch (err) {
+ var search_dir = Io.Dir.cwd().openDir(io, search_path, .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
error.NoDevice,
@@ -326,11 +332,11 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
else => return error.FileSystem,
};
- defer search_dir.close();
+ defer search_dir.close(io);
if (self.include_dir == null) {
- if (search_dir.access(include_dir_example_file, .{})) |_| {
- self.include_dir = try allocator.dupeZ(u8, search_path);
+ if (search_dir.access(io, include_dir_example_file, .{})) |_| {
+ self.include_dir = try gpa.dupeZ(u8, search_path);
} else |err| switch (err) {
error.FileNotFound => {},
else => return error.FileSystem,
@@ -338,8 +344,8 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
}
if (self.sys_include_dir == null) {
- if (search_dir.access(sys_include_dir_example_file, .{})) |_| {
- self.sys_include_dir = try allocator.dupeZ(u8, search_path);
+ if (search_dir.access(io, sys_include_dir_example_file, .{})) |_| {
+ self.sys_include_dir = try gpa.dupeZ(u8, search_path);
} else |err| switch (err) {
error.FileNotFound => {},
else => return error.FileSystem,
@@ -357,22 +363,21 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
fn findNativeIncludeDirWindows(
self: *LibCInstallation,
- args: FindNativeOptions,
+ gpa: Allocator,
+ io: Io,
sdk: std.zig.WindowsSdk,
) FindError!void {
- const allocator = args.allocator;
-
var install_buf: [2]std.zig.WindowsSdk.Installation = undefined;
const installs = fillInstallations(&install_buf, sdk);
- var result_buf = std.array_list.Managed(u8).init(allocator);
+ var result_buf = std.array_list.Managed(u8).init(gpa);
defer result_buf.deinit();
for (installs) |install| {
result_buf.shrinkAndFree(0);
try result_buf.print("{s}\\Include\\{s}\\ucrt", .{ install.path, install.version });
- var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) {
+ var dir = Io.Dir.cwd().openDir(io, result_buf.items, .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
error.NoDevice,
@@ -380,9 +385,9 @@ fn findNativeIncludeDirWindows(
else => return error.FileSystem,
};
- defer dir.close();
+ defer dir.close(io);
- dir.access("stdlib.h", .{}) catch |err| switch (err) {
+ dir.access(io, "stdlib.h", .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => return error.FileSystem,
};
@@ -396,18 +401,18 @@ fn findNativeIncludeDirWindows(
fn findNativeCrtDirWindows(
self: *LibCInstallation,
- args: FindNativeOptions,
+ gpa: Allocator,
+ io: Io,
+ target: *const std.Target,
sdk: std.zig.WindowsSdk,
) FindError!void {
- const allocator = args.allocator;
-
var install_buf: [2]std.zig.WindowsSdk.Installation = undefined;
const installs = fillInstallations(&install_buf, sdk);
- var result_buf = std.array_list.Managed(u8).init(allocator);
+ var result_buf = std.array_list.Managed(u8).init(gpa);
defer result_buf.deinit();
- const arch_sub_dir = switch (args.target.cpu.arch) {
+ const arch_sub_dir = switch (target.cpu.arch) {
.x86 => "x86",
.x86_64 => "x64",
.arm, .armeb => "arm",
@@ -419,7 +424,7 @@ fn findNativeCrtDirWindows(
result_buf.shrinkAndFree(0);
try result_buf.print("{s}\\Lib\\{s}\\ucrt\\{s}", .{ install.path, install.version, arch_sub_dir });
- var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) {
+ var dir = Io.Dir.cwd().openDir(io, result_buf.items, .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
error.NoDevice,
@@ -427,9 +432,9 @@ fn findNativeCrtDirWindows(
else => return error.FileSystem,
};
- defer dir.close();
+ defer dir.close(io);
- dir.access("ucrt.lib", .{}) catch |err| switch (err) {
+ dir.access(io, "ucrt.lib", .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => return error.FileSystem,
};
@@ -440,9 +445,8 @@ fn findNativeCrtDirWindows(
return error.LibCRuntimeNotFound;
}
-fn findNativeCrtDirPosix(self: *LibCInstallation, args: FindNativeOptions) FindError!void {
- self.crt_dir = try ccPrintFileName(.{
- .allocator = args.allocator,
+fn findNativeCrtDirPosix(self: *LibCInstallation, gpa: Allocator, io: Io, args: FindNativeOptions) FindError!void {
+ self.crt_dir = try ccPrintFileName(gpa, io, .{
.search_basename = switch (args.target.os.tag) {
.linux => if (args.target.abi.isAndroid()) "crtbegin_dynamic.o" else "crt1.o",
else => "crt1.o",
@@ -452,9 +456,8 @@ fn findNativeCrtDirPosix(self: *LibCInstallation, args: FindNativeOptions) FindE
});
}
-fn findNativeGccDirHaiku(self: *LibCInstallation, args: FindNativeOptions) FindError!void {
- self.gcc_dir = try ccPrintFileName(.{
- .allocator = args.allocator,
+fn findNativeGccDirHaiku(self: *LibCInstallation, gpa: Allocator, io: Io, args: FindNativeOptions) FindError!void {
+ self.gcc_dir = try ccPrintFileName(gpa, io, .{
.search_basename = "crtbeginS.o",
.want_dirname = .only_dir,
.verbose = args.verbose,
@@ -463,15 +466,15 @@ fn findNativeGccDirHaiku(self: *LibCInstallation, args: FindNativeOptions) FindE
fn findNativeKernel32LibDir(
self: *LibCInstallation,
+ gpa: Allocator,
+ io: Io,
args: FindNativeOptions,
sdk: std.zig.WindowsSdk,
) FindError!void {
- const allocator = args.allocator;
-
var install_buf: [2]std.zig.WindowsSdk.Installation = undefined;
const installs = fillInstallations(&install_buf, sdk);
- var result_buf = std.array_list.Managed(u8).init(allocator);
+ var result_buf = std.array_list.Managed(u8).init(gpa);
defer result_buf.deinit();
const arch_sub_dir = switch (args.target.cpu.arch) {
@@ -486,7 +489,7 @@ fn findNativeKernel32LibDir(
result_buf.shrinkAndFree(0);
try result_buf.print("{s}\\Lib\\{s}\\um\\{s}", .{ install.path, install.version, arch_sub_dir });
- var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) {
+ var dir = Io.Dir.cwd().openDir(io, result_buf.items, .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
error.NoDevice,
@@ -494,9 +497,9 @@ fn findNativeKernel32LibDir(
else => return error.FileSystem,
};
- defer dir.close();
+ defer dir.close(io);
- dir.access("kernel32.lib", .{}) catch |err| switch (err) {
+ dir.access(io, "kernel32.lib", .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => return error.FileSystem,
};
@@ -509,19 +512,18 @@ fn findNativeKernel32LibDir(
fn findNativeMsvcIncludeDir(
self: *LibCInstallation,
- args: FindNativeOptions,
+ gpa: Allocator,
+ io: Io,
sdk: std.zig.WindowsSdk,
) FindError!void {
- const allocator = args.allocator;
-
const msvc_lib_dir = sdk.msvc_lib_dir orelse return error.LibCStdLibHeaderNotFound;
const up1 = fs.path.dirname(msvc_lib_dir) orelse return error.LibCStdLibHeaderNotFound;
const up2 = fs.path.dirname(up1) orelse return error.LibCStdLibHeaderNotFound;
- const dir_path = try fs.path.join(allocator, &[_][]const u8{ up2, "include" });
- errdefer allocator.free(dir_path);
+ const dir_path = try fs.path.join(gpa, &[_][]const u8{ up2, "include" });
+ errdefer gpa.free(dir_path);
- var dir = fs.cwd().openDir(dir_path, .{}) catch |err| switch (err) {
+ var dir = Io.Dir.cwd().openDir(io, dir_path, .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
error.NoDevice,
@@ -529,9 +531,9 @@ fn findNativeMsvcIncludeDir(
else => return error.FileSystem,
};
- defer dir.close();
+ defer dir.close(io);
- dir.access("vcruntime.h", .{}) catch |err| switch (err) {
+ dir.access(io, "vcruntime.h", .{}) catch |err| switch (err) {
error.FileNotFound => return error.LibCStdLibHeaderNotFound,
else => return error.FileSystem,
};
@@ -541,27 +543,23 @@ fn findNativeMsvcIncludeDir(
fn findNativeMsvcLibDir(
self: *LibCInstallation,
- args: FindNativeOptions,
+ gpa: Allocator,
sdk: std.zig.WindowsSdk,
) FindError!void {
- const allocator = args.allocator;
const msvc_lib_dir = sdk.msvc_lib_dir orelse return error.LibCRuntimeNotFound;
- self.msvc_lib_dir = try allocator.dupe(u8, msvc_lib_dir);
+ self.msvc_lib_dir = try gpa.dupe(u8, msvc_lib_dir);
}
pub const CCPrintFileNameOptions = struct {
- allocator: Allocator,
search_basename: []const u8,
want_dirname: enum { full_path, only_dir },
verbose: bool = false,
};
/// caller owns returned memory
-fn ccPrintFileName(args: CCPrintFileNameOptions) ![:0]u8 {
- const allocator = args.allocator;
-
+fn ccPrintFileName(gpa: Allocator, io: Io, args: CCPrintFileNameOptions) ![:0]u8 {
// Detect infinite loops.
- var env_map = std.process.getEnvMap(allocator) catch |err| switch (err) {
+ var env_map = std.process.getEnvMap(gpa) catch |err| switch (err) {
error.Unexpected => unreachable, // WASI-only
else => |e| return e,
};
@@ -578,17 +576,16 @@ fn ccPrintFileName(args: CCPrintFileNameOptions) ![:0]u8 {
break :blk false;
};
- var argv = std.array_list.Managed([]const u8).init(allocator);
+ var argv = std.array_list.Managed([]const u8).init(gpa);
defer argv.deinit();
- const arg1 = try std.fmt.allocPrint(allocator, "-print-file-name={s}", .{args.search_basename});
- defer allocator.free(arg1);
+ const arg1 = try std.fmt.allocPrint(gpa, "-print-file-name={s}", .{args.search_basename});
+ defer gpa.free(arg1);
try appendCcExe(&argv, skip_cc_env_var);
try argv.append(arg1);
- const run_res = std.process.Child.run(.{
- .allocator = allocator,
+ const run_res = std.process.Child.run(gpa, io, .{
.argv = argv.items,
.max_output_bytes = 1024 * 1024,
.env_map = &env_map,
@@ -602,8 +599,8 @@ fn ccPrintFileName(args: CCPrintFileNameOptions) ![:0]u8 {
else => return error.UnableToSpawnCCompiler,
};
defer {
- allocator.free(run_res.stdout);
- allocator.free(run_res.stderr);
+ gpa.free(run_res.stdout);
+ gpa.free(run_res.stderr);
}
switch (run_res.term) {
.Exited => |code| if (code != 0) {
@@ -622,10 +619,10 @@ fn ccPrintFileName(args: CCPrintFileNameOptions) ![:0]u8 {
// So we detect failure by checking if the output matches exactly the input.
if (std.mem.eql(u8, line, args.search_basename)) return error.LibCRuntimeNotFound;
switch (args.want_dirname) {
- .full_path => return allocator.dupeZ(u8, line),
+ .full_path => return gpa.dupeZ(u8, line),
.only_dir => {
const dirname = fs.path.dirname(line) orelse return error.LibCRuntimeNotFound;
- return allocator.dupeZ(u8, dirname);
+ return gpa.dupeZ(u8, dirname);
},
}
}
@@ -1015,17 +1012,3 @@ pub fn resolveCrtPaths(
},
}
}
-
-const LibCInstallation = @This();
-const std = @import("std");
-const builtin = @import("builtin");
-const Target = std.Target;
-const fs = std.fs;
-const Allocator = std.mem.Allocator;
-const Path = std.Build.Cache.Path;
-
-const is_darwin = builtin.target.os.tag.isDarwin();
-const is_windows = builtin.target.os.tag == .windows;
-const is_haiku = builtin.target.os.tag == .haiku;
-
-const log = std.log.scoped(.libc_installation);
diff --git a/lib/std/zig/WindowsSdk.zig b/lib/std/zig/WindowsSdk.zig
index 89d608633c..b0f24c2aca 100644
--- a/lib/std/zig/WindowsSdk.zig
+++ b/lib/std/zig/WindowsSdk.zig
@@ -1,7 +1,11 @@
const WindowsSdk = @This();
const builtin = @import("builtin");
+
const std = @import("std");
+const Io = std.Io;
+const Dir = std.Io.Dir;
const Writer = std.Io.Writer;
+const Allocator = std.mem.Allocator;
windows10sdk: ?Installation,
windows81sdk: ?Installation,
@@ -19,8 +23,8 @@ const product_version_max_length = version_major_minor_max_length + ".65535".len
/// Find path and version of Windows 10 SDK and Windows 8.1 SDK, and find path to MSVC's `lib/` directory.
/// Caller owns the result's fields.
-/// After finishing work, call `free(allocator)`.
-pub fn find(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, NotFound, PathTooLong }!WindowsSdk {
+/// Returns memory allocated by `gpa`
+pub fn find(gpa: Allocator, io: Io, arch: std.Target.Cpu.Arch) error{ OutOfMemory, NotFound, PathTooLong }!WindowsSdk {
if (builtin.os.tag != .windows) return error.NotFound;
//note(dimenus): If this key doesn't exist, neither the Win 8 SDK nor the Win 10 SDK is installed
@@ -29,27 +33,27 @@ pub fn find(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutO
};
defer roots_key.closeKey();
- const windows10sdk = Installation.find(allocator, roots_key, "KitsRoot10", "", "v10.0") catch |err| switch (err) {
+ const windows10sdk = Installation.find(gpa, io, roots_key, "KitsRoot10", "", "v10.0") catch |err| switch (err) {
error.InstallationNotFound => null,
error.PathTooLong => null,
error.VersionTooLong => null,
error.OutOfMemory => return error.OutOfMemory,
};
- errdefer if (windows10sdk) |*w| w.free(allocator);
+ errdefer if (windows10sdk) |*w| w.free(gpa);
- const windows81sdk = Installation.find(allocator, roots_key, "KitsRoot81", "winver", "v8.1") catch |err| switch (err) {
+ const windows81sdk = Installation.find(gpa, io, roots_key, "KitsRoot81", "winver", "v8.1") catch |err| switch (err) {
error.InstallationNotFound => null,
error.PathTooLong => null,
error.VersionTooLong => null,
error.OutOfMemory => return error.OutOfMemory,
};
- errdefer if (windows81sdk) |*w| w.free(allocator);
+ errdefer if (windows81sdk) |*w| w.free(gpa);
- const msvc_lib_dir: ?[]const u8 = MsvcLibDir.find(allocator, arch) catch |err| switch (err) {
+ const msvc_lib_dir: ?[]const u8 = MsvcLibDir.find(gpa, io, arch) catch |err| switch (err) {
error.MsvcLibDirNotFound => null,
error.OutOfMemory => return error.OutOfMemory,
};
- errdefer allocator.free(msvc_lib_dir);
+ errdefer gpa.free(msvc_lib_dir);
return .{
.windows10sdk = windows10sdk,
@@ -58,15 +62,15 @@ pub fn find(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutO
};
}
-pub fn free(sdk: WindowsSdk, allocator: std.mem.Allocator) void {
+pub fn free(sdk: WindowsSdk, gpa: Allocator) void {
if (sdk.windows10sdk) |*w10sdk| {
- w10sdk.free(allocator);
+ w10sdk.free(gpa);
}
if (sdk.windows81sdk) |*w81sdk| {
- w81sdk.free(allocator);
+ w81sdk.free(gpa);
}
if (sdk.msvc_lib_dir) |msvc_lib_dir| {
- allocator.free(msvc_lib_dir);
+ gpa.free(msvc_lib_dir);
}
}
@@ -74,8 +78,9 @@ pub fn free(sdk: WindowsSdk, allocator: std.mem.Allocator) void {
/// and a version. Returns slice of version strings sorted in descending order.
/// Caller owns result.
fn iterateAndFilterByVersion(
- iterator: *std.fs.Dir.Iterator,
- allocator: std.mem.Allocator,
+ iterator: *Dir.Iterator,
+ gpa: Allocator,
+ io: Io,
prefix: []const u8,
) error{OutOfMemory}![][]const u8 {
const Version = struct {
@@ -92,15 +97,15 @@ fn iterateAndFilterByVersion(
std.mem.order(u8, lhs.build, rhs.build);
}
};
- var versions = std.array_list.Managed(Version).init(allocator);
- var dirs = std.array_list.Managed([]const u8).init(allocator);
+ var versions = std.array_list.Managed(Version).init(gpa);
+ var dirs = std.array_list.Managed([]const u8).init(gpa);
defer {
versions.deinit();
- for (dirs.items) |filtered_dir| allocator.free(filtered_dir);
+ for (dirs.items) |filtered_dir| gpa.free(filtered_dir);
dirs.deinit();
}
- iterate: while (iterator.next() catch null) |entry| {
+ iterate: while (iterator.next(io) catch null) |entry| {
if (entry.kind != .directory) continue;
if (!std.mem.startsWith(u8, entry.name, prefix)) continue;
@@ -116,8 +121,8 @@ fn iterateAndFilterByVersion(
num.* = Version.parseNum(num_it.next() orelse break) orelse continue :iterate
else if (num_it.next()) |_| continue;
- const name = try allocator.dupe(u8, suffix);
- errdefer allocator.free(name);
+ const name = try gpa.dupe(u8, suffix);
+ errdefer gpa.free(name);
if (underscore) |pos| version.build = name[pos + 1 ..];
try versions.append(version);
@@ -174,7 +179,7 @@ const RegistryWtf8 = struct {
/// Get string from registry.
/// Caller owns result.
- pub fn getString(reg: RegistryWtf8, allocator: std.mem.Allocator, subkey: []const u8, value_name: []const u8) error{ OutOfMemory, ValueNameNotFound, NotAString, StringNotFound }![]u8 {
+ pub fn getString(reg: RegistryWtf8, gpa: Allocator, subkey: []const u8, value_name: []const u8) error{ OutOfMemory, ValueNameNotFound, NotAString, StringNotFound }![]u8 {
const subkey_wtf16le: [:0]const u16 = subkey_wtf16le: {
var subkey_wtf16le_buf: [RegistryWtf16Le.key_name_max_len]u16 = undefined;
const subkey_wtf16le_len: usize = std.unicode.wtf8ToWtf16Le(subkey_wtf16le_buf[0..], subkey) catch unreachable;
@@ -190,11 +195,11 @@ const RegistryWtf8 = struct {
};
const registry_wtf16le: RegistryWtf16Le = .{ .key = reg.key };
- const value_wtf16le = try registry_wtf16le.getString(allocator, subkey_wtf16le, value_name_wtf16le);
- defer allocator.free(value_wtf16le);
+ const value_wtf16le = try registry_wtf16le.getString(gpa, subkey_wtf16le, value_name_wtf16le);
+ defer gpa.free(value_wtf16le);
- const value_wtf8: []u8 = try std.unicode.wtf16LeToWtf8Alloc(allocator, value_wtf16le);
- errdefer allocator.free(value_wtf8);
+ const value_wtf8: []u8 = try std.unicode.wtf16LeToWtf8Alloc(gpa, value_wtf16le);
+ errdefer gpa.free(value_wtf8);
return value_wtf8;
}
@@ -282,7 +287,7 @@ const RegistryWtf16Le = struct {
}
/// Get string ([:0]const u16) from registry.
- fn getString(reg: RegistryWtf16Le, allocator: std.mem.Allocator, subkey_wtf16le: [:0]const u16, value_name_wtf16le: [:0]const u16) error{ OutOfMemory, ValueNameNotFound, NotAString, StringNotFound }![]const u16 {
+ fn getString(reg: RegistryWtf16Le, gpa: Allocator, subkey_wtf16le: [:0]const u16, value_name_wtf16le: [:0]const u16) error{ OutOfMemory, ValueNameNotFound, NotAString, StringNotFound }![]const u16 {
var actual_type: windows.ULONG = undefined;
// Calculating length to allocate
@@ -311,8 +316,8 @@ const RegistryWtf16Le = struct {
else => return error.NotAString,
}
- const value_wtf16le_buf: []u16 = try allocator.alloc(u16, std.math.divCeil(u32, value_wtf16le_buf_size, 2) catch unreachable);
- errdefer allocator.free(value_wtf16le_buf);
+ const value_wtf16le_buf: []u16 = try gpa.alloc(u16, std.math.divCeil(u32, value_wtf16le_buf_size, 2) catch unreachable);
+ errdefer gpa.free(value_wtf16le_buf);
return_code_int = windows.advapi32.RegGetValueW(
reg.key,
@@ -346,7 +351,7 @@ const RegistryWtf16Le = struct {
break :value_wtf16le std.mem.span(value_wtf16le_overestimated);
};
- _ = allocator.resize(value_wtf16le_buf, value_wtf16le.len);
+ _ = gpa.resize(value_wtf16le_buf, value_wtf16le.len);
return value_wtf16le;
}
@@ -414,88 +419,89 @@ pub const Installation = struct {
/// Find path and version of Windows SDK.
/// Caller owns the result's fields.
- /// After finishing work, call `free(allocator)`.
fn find(
- allocator: std.mem.Allocator,
+ gpa: Allocator,
+ io: Io,
roots_key: RegistryWtf8,
roots_subkey: []const u8,
prefix: []const u8,
version_key_name: []const u8,
) error{ OutOfMemory, InstallationNotFound, PathTooLong, VersionTooLong }!Installation {
roots: {
- const installation = findFromRoot(allocator, roots_key, roots_subkey, prefix) catch
+ const installation = findFromRoot(gpa, io, roots_key, roots_subkey, prefix) catch
break :roots;
if (installation.isValidVersion()) return installation;
- installation.free(allocator);
+ installation.free(gpa);
}
{
- const installation = try findFromInstallationFolder(allocator, version_key_name);
+ const installation = try findFromInstallationFolder(gpa, version_key_name);
if (installation.isValidVersion()) return installation;
- installation.free(allocator);
+ installation.free(gpa);
}
return error.InstallationNotFound;
}
fn findFromRoot(
- allocator: std.mem.Allocator,
+ gpa: Allocator,
+ io: Io,
roots_key: RegistryWtf8,
roots_subkey: []const u8,
prefix: []const u8,
) error{ OutOfMemory, InstallationNotFound, PathTooLong, VersionTooLong }!Installation {
const path = path: {
- const path_maybe_with_trailing_slash = roots_key.getString(allocator, "", roots_subkey) catch |err| switch (err) {
+ const path_maybe_with_trailing_slash = roots_key.getString(gpa, "", roots_subkey) catch |err| switch (err) {
error.NotAString => return error.InstallationNotFound,
error.ValueNameNotFound => return error.InstallationNotFound,
error.StringNotFound => return error.InstallationNotFound,
error.OutOfMemory => return error.OutOfMemory,
};
- if (path_maybe_with_trailing_slash.len > std.fs.max_path_bytes or !std.fs.path.isAbsolute(path_maybe_with_trailing_slash)) {
- allocator.free(path_maybe_with_trailing_slash);
+ if (path_maybe_with_trailing_slash.len > Dir.max_path_bytes or !Dir.path.isAbsolute(path_maybe_with_trailing_slash)) {
+ gpa.free(path_maybe_with_trailing_slash);
return error.PathTooLong;
}
- var path = std.array_list.Managed(u8).fromOwnedSlice(allocator, path_maybe_with_trailing_slash);
+ var path = std.array_list.Managed(u8).fromOwnedSlice(gpa, path_maybe_with_trailing_slash);
errdefer path.deinit();
// String might contain trailing slash, so trim it here
if (path.items.len > "C:\\".len and path.getLast() == '\\') _ = path.pop();
break :path try path.toOwnedSlice();
};
- errdefer allocator.free(path);
+ errdefer gpa.free(path);
const version = version: {
- var buf: [std.fs.max_path_bytes]u8 = undefined;
+ var buf: [Dir.max_path_bytes]u8 = undefined;
const sdk_lib_dir_path = std.fmt.bufPrint(buf[0..], "{s}\\Lib\\", .{path}) catch |err| switch (err) {
error.NoSpaceLeft => return error.PathTooLong,
};
- if (!std.fs.path.isAbsolute(sdk_lib_dir_path)) return error.InstallationNotFound;
+ if (!Dir.path.isAbsolute(sdk_lib_dir_path)) return error.InstallationNotFound;
// enumerate files in sdk path looking for latest version
- var sdk_lib_dir = std.fs.openDirAbsolute(sdk_lib_dir_path, .{
+ var sdk_lib_dir = Dir.openDirAbsolute(io, sdk_lib_dir_path, .{
.iterate = true,
}) catch |err| switch (err) {
error.NameTooLong => return error.PathTooLong,
else => return error.InstallationNotFound,
};
- defer sdk_lib_dir.close();
+ defer sdk_lib_dir.close(io);
var iterator = sdk_lib_dir.iterate();
- const versions = try iterateAndFilterByVersion(&iterator, allocator, prefix);
+ const versions = try iterateAndFilterByVersion(&iterator, gpa, io, prefix);
if (versions.len == 0) return error.InstallationNotFound;
defer {
- for (versions[1..]) |version| allocator.free(version);
- allocator.free(versions);
+ for (versions[1..]) |version| gpa.free(version);
+ gpa.free(versions);
}
break :version versions[0];
};
- errdefer allocator.free(version);
+ errdefer gpa.free(version);
return .{ .path = path, .version = version };
}
fn findFromInstallationFolder(
- allocator: std.mem.Allocator,
+ gpa: Allocator,
version_key_name: []const u8,
) error{ OutOfMemory, InstallationNotFound, PathTooLong, VersionTooLong }!Installation {
var key_name_buf: [RegistryWtf16Le.key_name_max_len]u8 = undefined;
@@ -514,7 +520,7 @@ pub const Installation = struct {
defer key.closeKey();
const path: []const u8 = path: {
- const path_maybe_with_trailing_slash = key.getString(allocator, "", "InstallationFolder") catch |err| switch (err) {
+ const path_maybe_with_trailing_slash = key.getString(gpa, "", "InstallationFolder") catch |err| switch (err) {
error.NotAString => return error.InstallationNotFound,
error.ValueNameNotFound => return error.InstallationNotFound,
error.StringNotFound => return error.InstallationNotFound,
@@ -522,12 +528,12 @@ pub const Installation = struct {
error.OutOfMemory => return error.OutOfMemory,
};
- if (path_maybe_with_trailing_slash.len > std.fs.max_path_bytes or !std.fs.path.isAbsolute(path_maybe_with_trailing_slash)) {
- allocator.free(path_maybe_with_trailing_slash);
+ if (path_maybe_with_trailing_slash.len > Dir.max_path_bytes or !Dir.path.isAbsolute(path_maybe_with_trailing_slash)) {
+ gpa.free(path_maybe_with_trailing_slash);
return error.PathTooLong;
}
- var path = std.array_list.Managed(u8).fromOwnedSlice(allocator, path_maybe_with_trailing_slash);
+ var path = std.array_list.Managed(u8).fromOwnedSlice(gpa, path_maybe_with_trailing_slash);
errdefer path.deinit();
// String might contain trailing slash, so trim it here
@@ -536,12 +542,12 @@ pub const Installation = struct {
const path_without_trailing_slash = try path.toOwnedSlice();
break :path path_without_trailing_slash;
};
- errdefer allocator.free(path);
+ errdefer gpa.free(path);
const version: []const u8 = version: {
// note(dimenus): Microsoft doesn't include the .0 in the ProductVersion key....
- const version_without_0 = key.getString(allocator, "", "ProductVersion") catch |err| switch (err) {
+ const version_without_0 = key.getString(gpa, "", "ProductVersion") catch |err| switch (err) {
error.NotAString => return error.InstallationNotFound,
error.ValueNameNotFound => return error.InstallationNotFound,
error.StringNotFound => return error.InstallationNotFound,
@@ -549,11 +555,11 @@ pub const Installation = struct {
error.OutOfMemory => return error.OutOfMemory,
};
if (version_without_0.len + ".0".len > product_version_max_length) {
- allocator.free(version_without_0);
+ gpa.free(version_without_0);
return error.VersionTooLong;
}
- var version = std.array_list.Managed(u8).fromOwnedSlice(allocator, version_without_0);
+ var version = std.array_list.Managed(u8).fromOwnedSlice(gpa, version_without_0);
errdefer version.deinit();
try version.appendSlice(".0");
@@ -561,14 +567,14 @@ pub const Installation = struct {
const version_with_0 = try version.toOwnedSlice();
break :version version_with_0;
};
- errdefer allocator.free(version);
+ errdefer gpa.free(version);
return .{ .path = path, .version = version };
}
/// Check whether this version is enumerated in registry.
fn isValidVersion(installation: Installation) bool {
- var buf: [std.fs.max_path_bytes]u8 = undefined;
+ var buf: [Dir.max_path_bytes]u8 = undefined;
const reg_query_as_wtf8 = std.fmt.bufPrint(buf[0..], "{s}\\{s}\\Installed Options", .{
windows_kits_reg_key,
installation.version,
@@ -597,21 +603,21 @@ pub const Installation = struct {
return (reg_value == 1);
}
- fn free(install: Installation, allocator: std.mem.Allocator) void {
- allocator.free(install.path);
- allocator.free(install.version);
+ fn free(install: Installation, gpa: Allocator) void {
+ gpa.free(install.path);
+ gpa.free(install.version);
}
};
const MsvcLibDir = struct {
- fn findInstancesDirViaSetup(allocator: std.mem.Allocator) error{ OutOfMemory, PathNotFound }!std.fs.Dir {
+ fn findInstancesDirViaSetup(gpa: Allocator, io: Io) error{ OutOfMemory, PathNotFound }!Dir {
const vs_setup_key_path = "SOFTWARE\\Microsoft\\VisualStudio\\Setup";
const vs_setup_key = RegistryWtf8.openKey(windows.HKEY_LOCAL_MACHINE, vs_setup_key_path, .{}) catch |err| switch (err) {
error.KeyNotFound => return error.PathNotFound,
};
defer vs_setup_key.closeKey();
- const packages_path = vs_setup_key.getString(allocator, "", "CachePath") catch |err| switch (err) {
+ const packages_path = vs_setup_key.getString(gpa, "", "CachePath") catch |err| switch (err) {
error.NotAString,
error.ValueNameNotFound,
error.StringNotFound,
@@ -619,24 +625,24 @@ const MsvcLibDir = struct {
error.OutOfMemory => return error.OutOfMemory,
};
- defer allocator.free(packages_path);
+ defer gpa.free(packages_path);
- if (!std.fs.path.isAbsolute(packages_path)) return error.PathNotFound;
+ if (!Dir.path.isAbsolute(packages_path)) return error.PathNotFound;
- const instances_path = try std.fs.path.join(allocator, &.{ packages_path, "_Instances" });
- defer allocator.free(instances_path);
+ const instances_path = try Dir.path.join(gpa, &.{ packages_path, "_Instances" });
+ defer gpa.free(instances_path);
- return std.fs.openDirAbsolute(instances_path, .{ .iterate = true }) catch return error.PathNotFound;
+ return Dir.openDirAbsolute(io, instances_path, .{ .iterate = true }) catch return error.PathNotFound;
}
- fn findInstancesDirViaCLSID(allocator: std.mem.Allocator) error{ OutOfMemory, PathNotFound }!std.fs.Dir {
+ fn findInstancesDirViaCLSID(gpa: Allocator, io: Io) error{ OutOfMemory, PathNotFound }!Dir {
const setup_configuration_clsid = "{177f0c4a-1cd3-4de7-a32c-71dbbb9fa36d}";
const setup_config_key = RegistryWtf8.openKey(windows.HKEY_CLASSES_ROOT, "CLSID\\" ++ setup_configuration_clsid, .{}) catch |err| switch (err) {
error.KeyNotFound => return error.PathNotFound,
};
defer setup_config_key.closeKey();
- const dll_path = setup_config_key.getString(allocator, "InprocServer32", "") catch |err| switch (err) {
+ const dll_path = setup_config_key.getString(gpa, "InprocServer32", "") catch |err| switch (err) {
error.NotAString,
error.ValueNameNotFound,
error.StringNotFound,
@@ -644,11 +650,11 @@ const MsvcLibDir = struct {
error.OutOfMemory => return error.OutOfMemory,
};
- defer allocator.free(dll_path);
+ defer gpa.free(dll_path);
- if (!std.fs.path.isAbsolute(dll_path)) return error.PathNotFound;
+ if (!Dir.path.isAbsolute(dll_path)) return error.PathNotFound;
- var path_it = std.fs.path.componentIterator(dll_path);
+ var path_it = Dir.path.componentIterator(dll_path);
// the .dll filename
_ = path_it.last();
const root_path = while (path_it.previous()) |dir_component| {
@@ -659,17 +665,17 @@ const MsvcLibDir = struct {
return error.PathNotFound;
};
- const instances_path = try std.fs.path.join(allocator, &.{ root_path, "Packages", "_Instances" });
- defer allocator.free(instances_path);
+ const instances_path = try Dir.path.join(gpa, &.{ root_path, "Packages", "_Instances" });
+ defer gpa.free(instances_path);
- return std.fs.openDirAbsolute(instances_path, .{ .iterate = true }) catch return error.PathNotFound;
+ return Dir.openDirAbsolute(io, instances_path, .{ .iterate = true }) catch return error.PathNotFound;
}
- fn findInstancesDir(allocator: std.mem.Allocator) error{ OutOfMemory, PathNotFound }!std.fs.Dir {
+ fn findInstancesDir(gpa: Allocator, io: Io) error{ OutOfMemory, PathNotFound }!Dir {
// First, try getting the packages cache path from the registry.
// This only seems to exist when the path is different from the default.
method1: {
- return findInstancesDirViaSetup(allocator) catch |err| switch (err) {
+ return findInstancesDirViaSetup(gpa, io) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.PathNotFound => break :method1,
};
@@ -677,7 +683,7 @@ const MsvcLibDir = struct {
// Otherwise, try to get the path from the .dll that would have been
// loaded via COM for SetupConfiguration.
method2: {
- return findInstancesDirViaCLSID(allocator) catch |err| switch (err) {
+ return findInstancesDirViaCLSID(gpa, io) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.PathNotFound => break :method2,
};
@@ -685,19 +691,19 @@ const MsvcLibDir = struct {
// If that can't be found, fall back to manually appending
// `Microsoft\VisualStudio\Packages\_Instances` to %PROGRAMDATA%
method3: {
- const program_data = std.process.getEnvVarOwned(allocator, "PROGRAMDATA") catch |err| switch (err) {
+ const program_data = std.process.getEnvVarOwned(gpa, "PROGRAMDATA") catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.InvalidWtf8 => unreachable,
error.EnvironmentVariableNotFound => break :method3,
};
- defer allocator.free(program_data);
+ defer gpa.free(program_data);
- if (!std.fs.path.isAbsolute(program_data)) break :method3;
+ if (!Dir.path.isAbsolute(program_data)) break :method3;
- const instances_path = try std.fs.path.join(allocator, &.{ program_data, "Microsoft", "VisualStudio", "Packages", "_Instances" });
- defer allocator.free(instances_path);
+ const instances_path = try Dir.path.join(gpa, &.{ program_data, "Microsoft", "VisualStudio", "Packages", "_Instances" });
+ defer gpa.free(instances_path);
- return std.fs.openDirAbsolute(instances_path, .{ .iterate = true }) catch break :method3;
+ return Dir.openDirAbsolute(io, instances_path, .{ .iterate = true }) catch break :method3;
}
return error.PathNotFound;
}
@@ -748,33 +754,33 @@ const MsvcLibDir = struct {
///
/// The logic in this function is intended to match what ISetupConfiguration does
/// under-the-hood, as verified using Procmon.
- fn findViaCOM(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
+ fn findViaCOM(gpa: Allocator, io: Io, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
// Typically `%PROGRAMDATA%\Microsoft\VisualStudio\Packages\_Instances`
// This will contain directories with names of instance IDs like 80a758ca,
// which will contain `state.json` files that have the version and
// installation directory.
- var instances_dir = try findInstancesDir(allocator);
- defer instances_dir.close();
+ var instances_dir = try findInstancesDir(gpa, io);
+ defer instances_dir.close(io);
- var state_subpath_buf: [std.fs.max_name_bytes + 32]u8 = undefined;
+ var state_subpath_buf: [Dir.max_name_bytes + 32]u8 = undefined;
var latest_version_lib_dir: std.ArrayList(u8) = .empty;
- errdefer latest_version_lib_dir.deinit(allocator);
+ errdefer latest_version_lib_dir.deinit(gpa);
var latest_version: u64 = 0;
var instances_dir_it = instances_dir.iterateAssumeFirstIteration();
- while (instances_dir_it.next() catch return error.PathNotFound) |entry| {
+ while (instances_dir_it.next(io) catch return error.PathNotFound) |entry| {
if (entry.kind != .directory) continue;
var writer: Writer = .fixed(&state_subpath_buf);
writer.writeAll(entry.name) catch unreachable;
- writer.writeByte(std.fs.path.sep) catch unreachable;
+ writer.writeByte(Dir.path.sep) catch unreachable;
writer.writeAll("state.json") catch unreachable;
- const json_contents = instances_dir.readFileAlloc(writer.buffered(), allocator, .limited(std.math.maxInt(usize))) catch continue;
- defer allocator.free(json_contents);
+ const json_contents = instances_dir.readFileAlloc(io, writer.buffered(), gpa, .limited(std.math.maxInt(usize))) catch continue;
+ defer gpa.free(json_contents);
- var parsed = std.json.parseFromSlice(std.json.Value, allocator, json_contents, .{}) catch continue;
+ var parsed = std.json.parseFromSlice(std.json.Value, gpa, json_contents, .{}) catch continue;
defer parsed.deinit();
if (parsed.value != .object) continue;
@@ -791,35 +797,40 @@ const MsvcLibDir = struct {
const installation_path = parsed.value.object.get("installationPath") orelse continue;
if (installation_path != .string) continue;
- const lib_dir_path = libDirFromInstallationPath(allocator, installation_path.string, arch) catch |err| switch (err) {
+ const lib_dir_path = libDirFromInstallationPath(gpa, io, installation_path.string, arch) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.PathNotFound => continue,
};
- defer allocator.free(lib_dir_path);
+ defer gpa.free(lib_dir_path);
latest_version_lib_dir.clearRetainingCapacity();
- try latest_version_lib_dir.appendSlice(allocator, lib_dir_path);
+ try latest_version_lib_dir.appendSlice(gpa, lib_dir_path);
latest_version = parsed_version;
}
if (latest_version_lib_dir.items.len == 0) return error.PathNotFound;
- return latest_version_lib_dir.toOwnedSlice(allocator);
+ return latest_version_lib_dir.toOwnedSlice(gpa);
}
- fn libDirFromInstallationPath(allocator: std.mem.Allocator, installation_path: []const u8, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
- var lib_dir_buf = try std.array_list.Managed(u8).initCapacity(allocator, installation_path.len + 64);
+ fn libDirFromInstallationPath(
+ gpa: Allocator,
+ io: Io,
+ installation_path: []const u8,
+ arch: std.Target.Cpu.Arch,
+ ) error{ OutOfMemory, PathNotFound }![]const u8 {
+ var lib_dir_buf = try std.array_list.Managed(u8).initCapacity(gpa, installation_path.len + 64);
errdefer lib_dir_buf.deinit();
lib_dir_buf.appendSliceAssumeCapacity(installation_path);
- if (!std.fs.path.isSep(lib_dir_buf.getLast())) {
+ if (!Dir.path.isSep(lib_dir_buf.getLast())) {
try lib_dir_buf.append('\\');
}
const installation_path_with_trailing_sep_len = lib_dir_buf.items.len;
try lib_dir_buf.appendSlice("VC\\Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt");
var default_tools_version_buf: [512]u8 = undefined;
- const default_tools_version_contents = std.fs.cwd().readFile(lib_dir_buf.items, &default_tools_version_buf) catch {
+ const default_tools_version_contents = Dir.cwd().readFile(io, lib_dir_buf.items, &default_tools_version_buf) catch {
return error.PathNotFound;
};
var tokenizer = std.mem.tokenizeAny(u8, default_tools_version_contents, " \r\n");
@@ -837,7 +848,7 @@ const MsvcLibDir = struct {
else => unreachable,
});
- if (!verifyLibDir(lib_dir_buf.items)) {
+ if (!verifyLibDir(io, lib_dir_buf.items)) {
return error.PathNotFound;
}
@@ -845,64 +856,64 @@ const MsvcLibDir = struct {
}
// https://learn.microsoft.com/en-us/visualstudio/install/tools-for-managing-visual-studio-instances?view=vs-2022#editing-the-registry-for-a-visual-studio-instance
- fn findViaRegistry(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
+ fn findViaRegistry(gpa: Allocator, io: Io, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
// %localappdata%\Microsoft\VisualStudio\
// %appdata%\Local\Microsoft\VisualStudio\
- const visualstudio_folder_path = std.fs.getAppDataDir(allocator, "Microsoft\\VisualStudio\\") catch return error.PathNotFound;
- defer allocator.free(visualstudio_folder_path);
+ const visualstudio_folder_path = std.fs.getAppDataDir(gpa, "Microsoft\\VisualStudio\\") catch return error.PathNotFound;
+ defer gpa.free(visualstudio_folder_path);
const vs_versions: []const []const u8 = vs_versions: {
- if (!std.fs.path.isAbsolute(visualstudio_folder_path)) return error.PathNotFound;
+ if (!Dir.path.isAbsolute(visualstudio_folder_path)) return error.PathNotFound;
// enumerate folders that contain `privateregistry.bin`, looking for all versions
// f.i. %localappdata%\Microsoft\VisualStudio\17.0_9e9cbb98\
- var visualstudio_folder = std.fs.openDirAbsolute(visualstudio_folder_path, .{
+ var visualstudio_folder = Dir.openDirAbsolute(io, visualstudio_folder_path, .{
.iterate = true,
}) catch return error.PathNotFound;
- defer visualstudio_folder.close();
+ defer visualstudio_folder.close(io);
var iterator = visualstudio_folder.iterate();
- break :vs_versions try iterateAndFilterByVersion(&iterator, allocator, "");
+ break :vs_versions try iterateAndFilterByVersion(&iterator, gpa, io, "");
};
defer {
- for (vs_versions) |vs_version| allocator.free(vs_version);
- allocator.free(vs_versions);
+ for (vs_versions) |vs_version| gpa.free(vs_version);
+ gpa.free(vs_versions);
}
var config_subkey_buf: [RegistryWtf16Le.key_name_max_len * 2]u8 = undefined;
const source_directories: []const u8 = source_directories: for (vs_versions) |vs_version| {
- const privateregistry_absolute_path = std.fs.path.join(allocator, &.{ visualstudio_folder_path, vs_version, "privateregistry.bin" }) catch continue;
- defer allocator.free(privateregistry_absolute_path);
- if (!std.fs.path.isAbsolute(privateregistry_absolute_path)) continue;
+ const privateregistry_absolute_path = Dir.path.join(gpa, &.{ visualstudio_folder_path, vs_version, "privateregistry.bin" }) catch continue;
+ defer gpa.free(privateregistry_absolute_path);
+ if (!Dir.path.isAbsolute(privateregistry_absolute_path)) continue;
const visualstudio_registry = RegistryWtf8.loadFromPath(privateregistry_absolute_path) catch continue;
defer visualstudio_registry.closeKey();
const config_subkey = std.fmt.bufPrint(config_subkey_buf[0..], "Software\\Microsoft\\VisualStudio\\{s}_Config", .{vs_version}) catch unreachable;
- const source_directories_value = visualstudio_registry.getString(allocator, config_subkey, "Source Directories") catch |err| switch (err) {
+ const source_directories_value = visualstudio_registry.getString(gpa, config_subkey, "Source Directories") catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => continue,
};
- if (source_directories_value.len > (std.fs.max_path_bytes * 30)) { // note(bratishkaerik): guessing from the fact that on my computer it has 15 paths and at least some of them are not of max length
- allocator.free(source_directories_value);
+ if (source_directories_value.len > (Dir.max_path_bytes * 30)) { // note(bratishkaerik): guessing from the fact that on my computer it has 15 paths and at least some of them are not of max length
+ gpa.free(source_directories_value);
continue;
}
break :source_directories source_directories_value;
} else return error.PathNotFound;
- defer allocator.free(source_directories);
+ defer gpa.free(source_directories);
var source_directories_split = std.mem.splitScalar(u8, source_directories, ';');
const msvc_dir: []const u8 = msvc_dir: {
- const msvc_include_dir_maybe_with_trailing_slash = try allocator.dupe(u8, source_directories_split.first());
+ const msvc_include_dir_maybe_with_trailing_slash = try gpa.dupe(u8, source_directories_split.first());
- if (msvc_include_dir_maybe_with_trailing_slash.len > std.fs.max_path_bytes or !std.fs.path.isAbsolute(msvc_include_dir_maybe_with_trailing_slash)) {
- allocator.free(msvc_include_dir_maybe_with_trailing_slash);
+ if (msvc_include_dir_maybe_with_trailing_slash.len > Dir.max_path_bytes or !Dir.path.isAbsolute(msvc_include_dir_maybe_with_trailing_slash)) {
+ gpa.free(msvc_include_dir_maybe_with_trailing_slash);
return error.PathNotFound;
}
- var msvc_dir = std.array_list.Managed(u8).fromOwnedSlice(allocator, msvc_include_dir_maybe_with_trailing_slash);
+ var msvc_dir = std.array_list.Managed(u8).fromOwnedSlice(gpa, msvc_include_dir_maybe_with_trailing_slash);
errdefer msvc_dir.deinit();
// String might contain trailing slash, so trim it here
@@ -924,19 +935,19 @@ const MsvcLibDir = struct {
const msvc_dir_with_arch = try msvc_dir.toOwnedSlice();
break :msvc_dir msvc_dir_with_arch;
};
- errdefer allocator.free(msvc_dir);
+ errdefer gpa.free(msvc_dir);
- if (!verifyLibDir(msvc_dir)) {
+ if (!verifyLibDir(io, msvc_dir)) {
return error.PathNotFound;
}
return msvc_dir;
}
- fn findViaVs7Key(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
+ fn findViaVs7Key(gpa: Allocator, io: Io, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
var base_path: std.array_list.Managed(u8) = base_path: {
try_env: {
- var env_map = std.process.getEnvMap(allocator) catch |err| switch (err) {
+ var env_map = std.process.getEnvMap(gpa) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => break :try_env,
};
@@ -944,8 +955,8 @@ const MsvcLibDir = struct {
if (env_map.get("VS140COMNTOOLS")) |VS140COMNTOOLS| {
if (VS140COMNTOOLS.len < "C:\\Common7\\Tools".len) break :try_env;
- if (!std.fs.path.isAbsolute(VS140COMNTOOLS)) break :try_env;
- var list = std.array_list.Managed(u8).init(allocator);
+ if (!Dir.path.isAbsolute(VS140COMNTOOLS)) break :try_env;
+ var list = std.array_list.Managed(u8).init(gpa);
errdefer list.deinit();
try list.appendSlice(VS140COMNTOOLS); // C:\Program Files (x86)\Microsoft Visual Studio 14.0\Common7\Tools
@@ -959,17 +970,17 @@ const MsvcLibDir = struct {
const vs7_key = RegistryWtf8.openKey(windows.HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VS7", .{ .wow64_32 = true }) catch return error.PathNotFound;
defer vs7_key.closeKey();
try_vs7_key: {
- const path_maybe_with_trailing_slash = vs7_key.getString(allocator, "", "14.0") catch |err| switch (err) {
+ const path_maybe_with_trailing_slash = vs7_key.getString(gpa, "", "14.0") catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => break :try_vs7_key,
};
- if (path_maybe_with_trailing_slash.len > std.fs.max_path_bytes or !std.fs.path.isAbsolute(path_maybe_with_trailing_slash)) {
- allocator.free(path_maybe_with_trailing_slash);
+ if (path_maybe_with_trailing_slash.len > Dir.max_path_bytes or !Dir.path.isAbsolute(path_maybe_with_trailing_slash)) {
+ gpa.free(path_maybe_with_trailing_slash);
break :try_vs7_key;
}
- var path = std.array_list.Managed(u8).fromOwnedSlice(allocator, path_maybe_with_trailing_slash);
+ var path = std.array_list.Managed(u8).fromOwnedSlice(gpa, path_maybe_with_trailing_slash);
errdefer path.deinit();
// String might contain trailing slash, so trim it here
@@ -989,7 +1000,7 @@ const MsvcLibDir = struct {
else => unreachable,
});
- if (!verifyLibDir(base_path.items)) {
+ if (!verifyLibDir(io, base_path.items)) {
return error.PathNotFound;
}
@@ -997,13 +1008,13 @@ const MsvcLibDir = struct {
return full_path;
}
- fn verifyLibDir(lib_dir_path: []const u8) bool {
- std.debug.assert(std.fs.path.isAbsolute(lib_dir_path)); // should be already handled in `findVia*`
+ fn verifyLibDir(io: Io, lib_dir_path: []const u8) bool {
+ std.debug.assert(Dir.path.isAbsolute(lib_dir_path)); // should be already handled in `findVia*`
- var dir = std.fs.openDirAbsolute(lib_dir_path, .{}) catch return false;
- defer dir.close();
+ var dir = Dir.openDirAbsolute(io, lib_dir_path, .{}) catch return false;
+ defer dir.close(io);
- const stat = dir.statFile("vcruntime.lib") catch return false;
+ const stat = dir.statFile(io, "vcruntime.lib", .{}) catch return false;
if (stat.kind != .file)
return false;
@@ -1012,18 +1023,18 @@ const MsvcLibDir = struct {
/// Find path to MSVC's `lib/` directory.
/// Caller owns the result.
- pub fn find(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, MsvcLibDirNotFound }![]const u8 {
- const full_path = MsvcLibDir.findViaCOM(allocator, arch) catch |err1| switch (err1) {
+ pub fn find(gpa: Allocator, io: Io, arch: std.Target.Cpu.Arch) error{ OutOfMemory, MsvcLibDirNotFound }![]const u8 {
+ const full_path = MsvcLibDir.findViaCOM(gpa, io, arch) catch |err1| switch (err1) {
error.OutOfMemory => return error.OutOfMemory,
- error.PathNotFound => MsvcLibDir.findViaRegistry(allocator, arch) catch |err2| switch (err2) {
+ error.PathNotFound => MsvcLibDir.findViaRegistry(gpa, io, arch) catch |err2| switch (err2) {
error.OutOfMemory => return error.OutOfMemory,
- error.PathNotFound => MsvcLibDir.findViaVs7Key(allocator, arch) catch |err3| switch (err3) {
+ error.PathNotFound => MsvcLibDir.findViaVs7Key(gpa, io, arch) catch |err3| switch (err3) {
error.OutOfMemory => return error.OutOfMemory,
error.PathNotFound => return error.MsvcLibDirNotFound,
},
},
};
- errdefer allocator.free(full_path);
+ errdefer gpa.free(full_path);
return full_path;
}
diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig
index 09c785613f..37ce7b4cfa 100644
--- a/lib/std/zig/Zir.zig
+++ b/lib/std/zig/Zir.zig
@@ -11,9 +11,11 @@
//! * In the future, possibly inline assembly, which needs to get parsed and
//! handled by the codegen backend, and errors reported there. However for now,
//! inline assembly is not an exception.
+const Zir = @This();
+const builtin = @import("builtin");
const std = @import("std");
-const builtin = @import("builtin");
+const Io = std.Io;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
@@ -21,8 +23,6 @@ const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Ast = std.zig.Ast;
-const Zir = @This();
-
instructions: std.MultiArrayList(Inst).Slice,
/// In order to store references to strings in fewer bytes, we copy all
/// string bytes into here. String bytes can be null. It is up to whomever
@@ -45,7 +45,7 @@ pub const Header = extern struct {
/// it's essentially free to have a zero field here and makes the warning go away,
/// making it more likely that following Valgrind warnings will be taken seriously.
unused: u32 = 0,
- stat_inode: std.fs.File.INode,
+ stat_inode: Io.File.INode,
stat_size: u64,
stat_mtime: i128,
};
diff --git a/lib/std/zig/Zoir.zig b/lib/std/zig/Zoir.zig
index 08a7fc9639..d82b8f1861 100644
--- a/lib/std/zig/Zoir.zig
+++ b/lib/std/zig/Zoir.zig
@@ -1,6 +1,13 @@
//! Zig Object Intermediate Representation.
//! Simplified AST for the ZON (Zig Object Notation) format.
//! `ZonGen` converts `Ast` to `Zoir`.
+const Zoir = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+const Ast = std.zig.Ast;
nodes: std.MultiArrayList(Node.Repr).Slice,
extra: []u32,
@@ -25,7 +32,7 @@ pub const Header = extern struct {
/// making it more likely that following Valgrind warnings will be taken seriously.
unused: u64 = 0,
- stat_inode: std.fs.File.INode,
+ stat_inode: Io.File.INode,
stat_size: u64,
stat_mtime: i128,
@@ -254,9 +261,3 @@ pub const CompileError = extern struct {
assert(std.meta.hasUniqueRepresentation(Note));
}
};
-
-const std = @import("std");
-const assert = std.debug.assert;
-const Allocator = std.mem.Allocator;
-const Ast = std.zig.Ast;
-const Zoir = @This();
diff --git a/lib/std/zig/llvm/Builder.zig b/lib/std/zig/llvm/Builder.zig
index 587ac82c70..66d20df348 100644
--- a/lib/std/zig/llvm/Builder.zig
+++ b/lib/std/zig/llvm/Builder.zig
@@ -1,14 +1,17 @@
+const builtin = @import("builtin");
+const Builder = @This();
+
const std = @import("../../std.zig");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
-const bitcode_writer = @import("bitcode_writer.zig");
-const Builder = @This();
-const builtin = @import("builtin");
const DW = std.dwarf;
-const ir = @import("ir.zig");
const log = std.log.scoped(.llvm);
const Writer = std.Io.Writer;
+const bitcode_writer = @import("bitcode_writer.zig");
+const ir = @import("ir.zig");
+
gpa: Allocator,
strip: bool,
@@ -9573,21 +9576,21 @@ pub fn asmValue(
return (try self.asmConst(ty, info, assembly, constraints)).toValue();
}
-pub fn dump(b: *Builder) void {
+pub fn dump(b: *Builder, io: Io) void {
var buffer: [4000]u8 = undefined;
- const stderr: std.fs.File = .stderr();
- b.printToFile(stderr, &buffer) catch {};
+ const stderr: Io.File = .stderr();
+ b.printToFile(io, stderr, &buffer) catch {};
}
-pub fn printToFilePath(b: *Builder, dir: std.fs.Dir, path: []const u8) !void {
+pub fn printToFilePath(b: *Builder, io: Io, dir: Io.Dir, path: []const u8) !void {
var buffer: [4000]u8 = undefined;
- const file = try dir.createFile(path, .{});
- defer file.close();
- try b.printToFile(file, &buffer);
+ const file = try dir.createFile(io, path, .{});
+ defer file.close(io);
+ try b.printToFile(io, file, &buffer);
}
-pub fn printToFile(b: *Builder, file: std.fs.File, buffer: []u8) !void {
- var fw = file.writer(buffer);
+pub fn printToFile(b: *Builder, io: Io, file: Io.File, buffer: []u8) !void {
+ var fw = file.writer(io, buffer);
try print(b, &fw.interface);
try fw.interface.flush();
}
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index 3ce2603947..7feca08e87 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -1,7 +1,6 @@
const std = @import("std");
-const mem = std.mem;
-const print = std.debug.print;
-const maxInt = std.math.maxInt;
+const Io = std.Io;
+const Allocator = std.mem.Allocator;
test "zig fmt: remove extra whitespace at start and end of file with comment between" {
try testTransform(
@@ -4539,7 +4538,7 @@ test "zig fmt: Only indent multiline string literals in function calls" {
test "zig fmt: Don't add extra newline after if" {
try testCanonical(
\\pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void {
- \\ if (cwd().symLink(existing_path, new_path, .{})) {
+ \\ if (foo().bar(existing_path, new_path, .{})) {
\\ return;
\\ }
\\}
@@ -6332,54 +6331,64 @@ test "ampersand" {
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
-fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 {
+fn testParse(io: Io, source: [:0]const u8, allocator: Allocator, anything_changed: *bool) ![]u8 {
var buffer: [64]u8 = undefined;
- const stderr, _ = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
+ const writer = &stderr.file_writer.interface;
var tree = try std.zig.Ast.parse(allocator, source, .zig);
defer tree.deinit(allocator);
for (tree.errors) |parse_error| {
const loc = tree.tokenLocation(0, parse_error.token);
- try stderr.print("(memory buffer):{d}:{d}: error: ", .{ loc.line + 1, loc.column + 1 });
- try tree.renderError(parse_error, stderr);
- try stderr.print("\n{s}\n", .{source[loc.line_start..loc.line_end]});
+ try writer.print("(memory buffer):{d}:{d}: error: ", .{ loc.line + 1, loc.column + 1 });
+ try tree.renderError(parse_error, writer);
+ try writer.print("\n{s}\n", .{source[loc.line_start..loc.line_end]});
{
var i: usize = 0;
while (i < loc.column) : (i += 1) {
- try stderr.writeAll(" ");
+ try writer.writeAll(" ");
}
- try stderr.writeAll("^");
+ try writer.writeAll("^");
}
- try stderr.writeAll("\n");
+ try writer.writeAll("\n");
}
if (tree.errors.len != 0) {
return error.ParseError;
}
const formatted = try tree.renderAlloc(allocator);
- anything_changed.* = !mem.eql(u8, formatted, source);
+ anything_changed.* = !std.mem.eql(u8, formatted, source);
return formatted;
}
-fn testTransformImpl(allocator: mem.Allocator, fba: *std.heap.FixedBufferAllocator, source: [:0]const u8, expected_source: []const u8) !void {
+fn testTransformImpl(
+ allocator: Allocator,
+ fba: *std.heap.FixedBufferAllocator,
+ io: Io,
+ source: [:0]const u8,
+ expected_source: []const u8,
+) !void {
// reset the fixed buffer allocator each run so that it can be re-used for each
// iteration of the failing index
fba.reset();
var anything_changed: bool = undefined;
- const result_source = try testParse(source, allocator, &anything_changed);
+ const result_source = try testParse(io, source, allocator, &anything_changed);
try std.testing.expectEqualStrings(expected_source, result_source);
const changes_expected = source.ptr != expected_source.ptr;
if (anything_changed != changes_expected) {
- print("std.zig.render returned {} instead of {}\n", .{ anything_changed, changes_expected });
+ std.debug.print("std.zig.render returned {} instead of {}\n", .{ anything_changed, changes_expected });
return error.TestFailed;
}
try std.testing.expect(anything_changed == changes_expected);
allocator.free(result_source);
}
fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
+ const io = std.testing.io;
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- return std.testing.checkAllAllocationFailures(fixed_allocator.allocator(), testTransformImpl, .{ &fixed_allocator, source, expected_source });
+ return std.testing.checkAllAllocationFailures(fixed_allocator.allocator(), testTransformImpl, .{
+ &fixed_allocator, io, source, expected_source,
+ });
}
fn testCanonical(source: [:0]const u8) !void {
return testTransform(source, source);
diff --git a/lib/std/zig/perf_test.zig b/lib/std/zig/perf_test.zig
index 1566a15d2d..da3dd42f15 100644
--- a/lib/std/zig/perf_test.zig
+++ b/lib/std/zig/perf_test.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
const Tokenizer = std.zig.Tokenizer;
const fmtIntSizeBin = std.fmt.fmtIntSizeBin;
@@ -22,7 +23,7 @@ pub fn main() !void {
const bytes_per_sec = @as(u64, @intFromFloat(@floor(bytes_per_sec_float)));
var stdout_buffer: [1024]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(&stdout_buffer);
const stdout = &stdout_writer.interface;
try stdout.print("parsing speed: {Bi:.2}/s, {Bi:.2} used \n", .{ bytes_per_sec, memory_used });
try stdout.flush();
diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig
index a3ce039ee8..dfa10b84b6 100644
--- a/lib/std/zig/system.zig
+++ b/lib/std/zig/system.zig
@@ -1,11 +1,12 @@
const builtin = @import("builtin");
+const native_endian = builtin.cpu.arch.endian();
+
const std = @import("../std.zig");
const mem = std.mem;
const elf = std.elf;
const fs = std.fs;
const assert = std.debug.assert;
const Target = std.Target;
-const native_endian = builtin.cpu.arch.endian();
const posix = std.posix;
const Io = std.Io;
@@ -39,6 +40,7 @@ pub const GetExternalExecutorOptions = struct {
/// Return whether or not the given host is capable of running executables of
/// the other target.
pub fn getExternalExecutor(
+ io: Io,
host: *const std.Target,
candidate: *const std.Target,
options: GetExternalExecutorOptions,
@@ -69,7 +71,7 @@ pub fn getExternalExecutor(
if (os_match and cpu_ok) native: {
if (options.link_libc) {
if (candidate.dynamic_linker.get()) |candidate_dl| {
- fs.cwd().access(candidate_dl, .{}) catch {
+ Io.Dir.cwd().access(io, candidate_dl, .{}) catch {
bad_result = .{ .bad_dl = candidate_dl };
break :native;
};
@@ -209,7 +211,6 @@ pub const DetectError = error{
DeviceBusy,
OSVersionDetectionFail,
Unexpected,
- ProcessNotFound,
} || Io.Cancelable;
/// Given a `Target.Query`, which specifies in detail which parts of the
@@ -247,7 +248,7 @@ pub fn resolveTargetQuery(io: Io, query: Target.Query) DetectError!Target {
os.version_range.windows.min = detected_version;
os.version_range.windows.max = detected_version;
},
- .macos => try darwin.macos.detect(&os),
+ .macos => try darwin.macos.detect(io, &os),
.freebsd, .netbsd, .dragonfly => {
const key = switch (builtin.target.os.tag) {
.freebsd => "kern.osreldate",
@@ -322,7 +323,7 @@ pub fn resolveTargetQuery(io: Io, query: Target.Query) DetectError!Target {
error.Unexpected => return error.OSVersionDetectionFail,
};
- if (Target.Query.parseVersion(buf[0..len :0])) |ver| {
+ if (Target.Query.parseVersion(buf[0 .. len - 1 :0])) |ver| {
assert(ver.build == null);
assert(ver.pre == null);
os.version_range.semver.min = ver;
@@ -422,7 +423,6 @@ pub fn resolveTargetQuery(io: Io, query: Target.Query) DetectError!Target {
error.SocketUnconnected => return error.Unexpected,
error.AccessDenied,
- error.ProcessNotFound,
error.SymLinkLoop,
error.ProcessFdQuotaExceeded,
error.SystemFdQuotaExceeded,
@@ -553,7 +553,6 @@ pub const AbiAndDynamicLinkerFromFileError = error{
SystemResources,
ProcessFdQuotaExceeded,
SystemFdQuotaExceeded,
- ProcessNotFound,
IsDir,
WouldBlock,
InputOutput,
@@ -693,8 +692,10 @@ fn abiAndDynamicLinkerFromFile(
// So far, no luck. Next we try to see if the information is
// present in the symlink data for the dynamic linker path.
- var link_buf: [posix.PATH_MAX]u8 = undefined;
- const link_name = posix.readlink(dl_path, &link_buf) catch |err| switch (err) {
+ var link_buffer: [posix.PATH_MAX]u8 = undefined;
+ const link_name = if (Io.Dir.readLinkAbsolute(io, dl_path, &link_buffer)) |n|
+ link_buffer[0..n]
+ else |err| switch (err) {
error.NameTooLong => unreachable,
error.BadPathName => unreachable, // Windows only
error.UnsupportedReparsePointType => unreachable, // Windows only
@@ -711,6 +712,7 @@ fn abiAndDynamicLinkerFromFile(
error.SystemResources,
error.FileSystem,
error.SymLinkLoop,
+ error.Canceled,
error.Unexpected,
=> |e| return e,
};
@@ -786,7 +788,9 @@ test glibcVerFromLinkName {
}
fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion {
- var dir = fs.cwd().openDir(rpath, .{}) catch |err| switch (err) {
+ const cwd: Io.Dir = .cwd();
+
+ var dir = cwd.openDir(io, rpath, .{}) catch |err| switch (err) {
error.NameTooLong => return error.Unexpected,
error.BadPathName => return error.Unexpected,
error.DeviceBusy => return error.Unexpected,
@@ -805,7 +809,7 @@ fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion {
error.Unexpected => |e| return e,
error.Canceled => |e| return e,
};
- defer dir.close();
+ defer dir.close(io);
// Now we have a candidate for the path to libc shared object. In
// the past, we used readlink() here because the link name would
@@ -815,14 +819,14 @@ fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion {
// .dynstr section, and finding the max version number of symbols
// that start with "GLIBC_2.".
const glibc_so_basename = "libc.so.6";
- var file = dir.openFile(glibc_so_basename, .{}) catch |err| switch (err) {
+ var file = dir.openFile(io, glibc_so_basename, .{}) catch |err| switch (err) {
error.NameTooLong => return error.Unexpected,
error.BadPathName => return error.Unexpected,
error.PipeBusy => return error.Unexpected, // Windows-only
error.SharingViolation => return error.Unexpected, // Windows-only
error.NetworkNotFound => return error.Unexpected, // Windows-only
error.AntivirusInterference => return error.Unexpected, // Windows-only
- error.FileLocksNotSupported => return error.Unexpected, // No lock requested.
+ error.FileLocksUnsupported => return error.Unexpected, // No lock requested.
error.NoSpaceLeft => return error.Unexpected, // read-only
error.PathAlreadyExists => return error.Unexpected, // read-only
error.DeviceBusy => return error.Unexpected, // read-only
@@ -837,7 +841,6 @@ fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion {
error.NotDir => return error.GLibCNotFound,
error.IsDir => return error.GLibCNotFound,
- error.ProcessNotFound => |e| return e,
error.ProcessFdQuotaExceeded => |e| return e,
error.SystemFdQuotaExceeded => |e| return e,
error.SystemResources => |e| return e,
@@ -845,11 +848,11 @@ fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion {
error.Unexpected => |e| return e,
error.Canceled => |e| return e,
};
- defer file.close();
+ defer file.close(io);
// Empirically, glibc 2.34 libc.so .dynstr section is 32441 bytes on my system.
var buffer: [8000]u8 = undefined;
- var file_reader: Io.File.Reader = .initAdapted(file, io, &buffer);
+ var file_reader: Io.File.Reader = .init(file, io, &buffer);
return glibcVerFromSoFile(&file_reader) catch |err| switch (err) {
error.InvalidElfMagic,
@@ -1024,14 +1027,14 @@ fn detectAbiAndDynamicLinker(io: Io, cpu: Target.Cpu, os: Target.Os, query: Targ
};
while (true) {
- const file = fs.openFileAbsolute(file_name, .{}) catch |err| switch (err) {
+ const file = Io.Dir.openFileAbsolute(io, file_name, .{}) catch |err| switch (err) {
error.NoSpaceLeft => return error.Unexpected,
error.NameTooLong => return error.Unexpected,
error.PathAlreadyExists => return error.Unexpected,
error.SharingViolation => return error.Unexpected,
error.BadPathName => return error.Unexpected,
error.PipeBusy => return error.Unexpected,
- error.FileLocksNotSupported => return error.Unexpected,
+ error.FileLocksUnsupported => return error.Unexpected,
error.FileBusy => return error.Unexpected, // opened without write permissions
error.AntivirusInterference => return error.Unexpected, // Windows-only error
@@ -1049,9 +1052,9 @@ fn detectAbiAndDynamicLinker(io: Io, cpu: Target.Cpu, os: Target.Os, query: Targ
else => |e| return e,
};
var is_elf_file = false;
- defer if (!is_elf_file) file.close();
+ defer if (!is_elf_file) file.close(io);
- file_reader = .initAdapted(file, io, &file_reader_buffer);
+ file_reader = .init(file, io, &file_reader_buffer);
file_name = undefined; // it aliases file_reader_buffer
const header = elf.Header.read(&file_reader.interface) catch |hdr_err| switch (hdr_err) {
@@ -1101,7 +1104,6 @@ fn detectAbiAndDynamicLinker(io: Io, cpu: Target.Cpu, os: Target.Os, query: Targ
error.SymLinkLoop,
error.ProcessFdQuotaExceeded,
error.SystemFdQuotaExceeded,
- error.ProcessNotFound,
error.Canceled,
=> |e| return e,
diff --git a/lib/std/zig/system/NativePaths.zig b/lib/std/zig/system/NativePaths.zig
index eea12ee272..bf66f912ae 100644
--- a/lib/std/zig/system/NativePaths.zig
+++ b/lib/std/zig/system/NativePaths.zig
@@ -1,11 +1,12 @@
-const std = @import("../../std.zig");
+const NativePaths = @This();
const builtin = @import("builtin");
+
+const std = @import("../../std.zig");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const process = std.process;
const mem = std.mem;
-const NativePaths = @This();
-
arena: Allocator,
include_dirs: std.ArrayList([]const u8) = .empty,
lib_dirs: std.ArrayList([]const u8) = .empty,
@@ -13,7 +14,7 @@ framework_dirs: std.ArrayList([]const u8) = .empty,
rpaths: std.ArrayList([]const u8) = .empty,
warnings: std.ArrayList([]const u8) = .empty,
-pub fn detect(arena: Allocator, native_target: *const std.Target) !NativePaths {
+pub fn detect(arena: Allocator, io: Io, native_target: *const std.Target) !NativePaths {
var self: NativePaths = .{ .arena = arena };
var is_nix = false;
if (process.getEnvVarOwned(arena, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| {
@@ -115,8 +116,8 @@ pub fn detect(arena: Allocator, native_target: *const std.Target) !NativePaths {
// TODO: consider also adding macports paths
if (builtin.target.os.tag.isDarwin()) {
- if (std.zig.system.darwin.isSdkInstalled(arena)) sdk: {
- const sdk = std.zig.system.darwin.getSdk(arena, native_target) orelse break :sdk;
+ if (std.zig.system.darwin.isSdkInstalled(arena, io)) sdk: {
+ const sdk = std.zig.system.darwin.getSdk(arena, io, native_target) orelse break :sdk;
try self.addLibDir(try std.fs.path.join(arena, &.{ sdk, "usr/lib" }));
try self.addFrameworkDir(try std.fs.path.join(arena, &.{ sdk, "System/Library/Frameworks" }));
try self.addIncludeDir(try std.fs.path.join(arena, &.{ sdk, "usr/include" }));
diff --git a/lib/std/zig/system/darwin.zig b/lib/std/zig/system/darwin.zig
index fbd6da2a9e..b493ccf0ec 100644
--- a/lib/std/zig/system/darwin.zig
+++ b/lib/std/zig/system/darwin.zig
@@ -1,28 +1,29 @@
const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
-const Allocator = mem.Allocator;
+const Allocator = std.mem.Allocator;
const Target = std.Target;
const Version = std.SemanticVersion;
pub const macos = @import("darwin/macos.zig");
/// Check if SDK is installed on Darwin without triggering CLT installation popup window.
-/// Note: simply invoking `xcrun` will inevitably trigger the CLT installation popup.
+///
+/// Simply invoking `xcrun` will inevitably trigger the CLT installation popup.
/// Therefore, we resort to invoking `xcode-select --print-path` and checking
/// if the status is nonzero.
+///
/// stderr from xcode-select is ignored.
+///
/// If error.OutOfMemory occurs in Allocator, this function returns null.
-pub fn isSdkInstalled(allocator: Allocator) bool {
- const result = std.process.Child.run(.{
- .allocator = allocator,
+pub fn isSdkInstalled(gpa: Allocator, io: Io) bool {
+ const result = std.process.Child.run(gpa, io, .{
.argv = &.{ "xcode-select", "--print-path" },
}) catch return false;
-
defer {
- allocator.free(result.stderr);
- allocator.free(result.stdout);
+ gpa.free(result.stderr);
+ gpa.free(result.stdout);
}
-
return switch (result.term) {
.Exited => |code| if (code == 0) result.stdout.len > 0 else false,
else => false,
@@ -34,7 +35,7 @@ pub fn isSdkInstalled(allocator: Allocator) bool {
/// Caller owns the memory.
/// stderr from xcrun is ignored.
/// If error.OutOfMemory occurs in Allocator, this function returns null.
-pub fn getSdk(allocator: Allocator, target: *const Target) ?[]const u8 {
+pub fn getSdk(gpa: Allocator, io: Io, target: *const Target) ?[]const u8 {
const is_simulator_abi = target.abi == .simulator;
const sdk = switch (target.os.tag) {
.driverkit => "driverkit",
@@ -46,16 +47,16 @@ pub fn getSdk(allocator: Allocator, target: *const Target) ?[]const u8 {
else => return null,
};
const argv = &[_][]const u8{ "xcrun", "--sdk", sdk, "--show-sdk-path" };
- const result = std.process.Child.run(.{ .allocator = allocator, .argv = argv }) catch return null;
+ const result = std.process.Child.run(gpa, io, .{ .argv = argv }) catch return null;
defer {
- allocator.free(result.stderr);
- allocator.free(result.stdout);
+ gpa.free(result.stderr);
+ gpa.free(result.stdout);
}
switch (result.term) {
.Exited => |code| if (code != 0) return null,
else => return null,
}
- return allocator.dupe(u8, mem.trimEnd(u8, result.stdout, "\r\n")) catch null;
+ return gpa.dupe(u8, mem.trimEnd(u8, result.stdout, "\r\n")) catch null;
}
test {
diff --git a/lib/std/zig/system/darwin/macos.zig b/lib/std/zig/system/darwin/macos.zig
index 9bb4e34e3b..7d80c2b588 100644
--- a/lib/std/zig/system/darwin/macos.zig
+++ b/lib/std/zig/system/darwin/macos.zig
@@ -1,14 +1,15 @@
-const std = @import("std");
const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const mem = std.mem;
const testing = std.testing;
-
const Target = std.Target;
/// Detect macOS version.
/// `target_os` is not modified in case of error.
-pub fn detect(target_os: *Target.Os) !void {
+pub fn detect(io: Io, target_os: *Target.Os) !void {
// Drop use of osproductversion sysctl because:
// 1. only available 10.13.4 High Sierra and later
// 2. when used from a binary built against < SDK 11.0 it returns 10.16 and masks Big Sur 11.x version
@@ -54,7 +55,7 @@ pub fn detect(target_os: *Target.Os) !void {
// approx. 4 times historical file size
var buf: [2048]u8 = undefined;
- if (std.fs.cwd().readFile(path, &buf)) |bytes| {
+ if (Io.Dir.cwd().readFile(io, path, &buf)) |bytes| {
if (parseSystemVersion(bytes)) |ver| {
// never return non-canonical `10.(16+)`
if (!(ver.major == 10 and ver.minor >= 16)) {
diff --git a/lib/std/zig/system/linux.zig b/lib/std/zig/system/linux.zig
index 6b4f0cf6f9..60f4d7bfec 100644
--- a/lib/std/zig/system/linux.zig
+++ b/lib/std/zig/system/linux.zig
@@ -444,10 +444,10 @@ inline fn getAArch64CpuFeature(comptime feat_reg: []const u8) u64 {
}
pub fn detectNativeCpuAndFeatures(io: Io) ?Target.Cpu {
- var file = fs.openFileAbsolute("/proc/cpuinfo", .{}) catch |err| switch (err) {
+ var file = Io.Dir.openFileAbsolute(io, "/proc/cpuinfo", .{}) catch |err| switch (err) {
else => return null,
};
- defer file.close();
+ defer file.close(io);
var buffer: [4096]u8 = undefined; // "flags" lines can get pretty long.
var file_reader = file.reader(io, &buffer);
diff --git a/lib/std/zip.zig b/lib/std/zip.zig
index 583377e00a..ff95587e7a 100644
--- a/lib/std/zip.zig
+++ b/lib/std/zip.zig
@@ -4,9 +4,11 @@
//! Note that this file uses the abbreviation "cd" for "central directory"
const builtin = @import("builtin");
-const std = @import("std");
-const File = std.fs.File;
const is_le = builtin.target.cpu.arch.endian() == .little;
+
+const std = @import("std");
+const Io = std.Io;
+const File = std.Io.File;
const Writer = std.Io.Writer;
const Reader = std.Io.Reader;
const flate = std.compress.flate;
@@ -115,7 +117,7 @@ pub const EndRecord = extern struct {
return record;
}
- pub const FindFileError = File.Reader.SizeError || File.SeekError || File.ReadError || error{
+ pub const FindFileError = File.Reader.SizeError || File.SeekError || File.Reader.Error || error{
ZipNoEndRecord,
EndOfStream,
ReadFailed,
@@ -460,8 +462,10 @@ pub const Iterator = struct {
stream: *File.Reader,
options: ExtractOptions,
filename_buf: []u8,
- dest: std.fs.Dir,
+ dest: Io.Dir,
) !void {
+ const io = stream.io;
+
if (filename_buf.len < self.filename_len)
return error.ZipInsufficientBuffer;
switch (self.compression_method) {
@@ -550,23 +554,23 @@ pub const Iterator = struct {
if (filename[filename.len - 1] == '/') {
if (self.uncompressed_size != 0)
return error.ZipBadDirectorySize;
- try dest.makePath(filename[0 .. filename.len - 1]);
+ try dest.createDirPath(io, filename[0 .. filename.len - 1]);
return;
}
const out_file = blk: {
if (std.fs.path.dirname(filename)) |dirname| {
- var parent_dir = try dest.makeOpenPath(dirname, .{});
- defer parent_dir.close();
+ var parent_dir = try dest.createDirPathOpen(io, dirname, .{});
+ defer parent_dir.close(io);
const basename = std.fs.path.basename(filename);
- break :blk try parent_dir.createFile(basename, .{ .exclusive = true });
+ break :blk try parent_dir.createFile(io, basename, .{ .exclusive = true });
}
- break :blk try dest.createFile(filename, .{ .exclusive = true });
+ break :blk try dest.createFile(io, filename, .{ .exclusive = true });
};
- defer out_file.close();
+ defer out_file.close(io);
var out_file_buffer: [1024]u8 = undefined;
- var file_writer = out_file.writer(&out_file_buffer);
+ var file_writer = out_file.writer(io, &out_file_buffer);
const local_data_file_offset: u64 =
@as(u64, self.file_offset) +
@as(u64, @sizeOf(LocalFileHeader)) +
@@ -647,7 +651,7 @@ pub const ExtractOptions = struct {
};
/// Extract the zipped files to the given `dest` directory.
-pub fn extract(dest: std.fs.Dir, fr: *File.Reader, options: ExtractOptions) !void {
+pub fn extract(dest: Io.Dir, fr: *File.Reader, options: ExtractOptions) !void {
if (options.verify_checksums) @panic("TODO unimplemented");
var iter = try Iterator.init(fr);
diff --git a/src/Air/print.zig b/src/Air/print.zig
index 95c8a1fcda..98b0a0b242 100644
--- a/src/Air/print.zig
+++ b/src/Air/print.zig
@@ -9,7 +9,7 @@ const Type = @import("../Type.zig");
const Air = @import("../Air.zig");
const InternPool = @import("../InternPool.zig");
-pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air.Liveness) void {
+pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air.Liveness) !void {
comptime assert(build_options.enable_debug_extensions);
const instruction_bytes = air.instructions.len *
// Here we don't use @sizeOf(Air.Inst.Data) because it would include
@@ -24,7 +24,7 @@ pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air
liveness_special_bytes + tomb_bytes;
// zig fmt: off
- stream.print(
+ try stream.print(
\\# Total AIR+Liveness bytes: {Bi}
\\# AIR Instructions: {d} ({Bi})
\\# AIR Extra Data: {d} ({Bi})
@@ -39,7 +39,7 @@ pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air
tomb_bytes,
if (liveness) |l| l.extra.len else 0, liveness_extra_bytes,
if (liveness) |l| l.special.count() else 0, liveness_special_bytes,
- }) catch return;
+ });
// zig fmt: on
var writer: Writer = .{
@@ -50,7 +50,7 @@ pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air
.indent = 2,
.skip_body = false,
};
- writer.writeBody(stream, air.getMainBody()) catch return;
+ try writer.writeBody(stream, air.getMainBody());
}
pub fn writeInst(
@@ -73,15 +73,23 @@ pub fn writeInst(
}
pub fn dump(air: Air, pt: Zcu.PerThread, liveness: ?Air.Liveness) void {
- const stderr_bw, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- air.write(stderr_bw, pt, liveness);
+ const comp = pt.zcu.comp;
+ const io = comp.io;
+ var buffer: [512]u8 = undefined;
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ air.write(w, pt, liveness);
}
pub fn dumpInst(air: Air, inst: Air.Inst.Index, pt: Zcu.PerThread, liveness: ?Air.Liveness) void {
- const stderr_bw, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- air.writeInst(stderr_bw, inst, pt, liveness);
+ const comp = pt.zcu.comp;
+ const io = comp.io;
+ var buffer: [512]u8 = undefined;
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ air.writeInst(w, inst, pt, liveness);
}
const Writer = struct {
diff --git a/src/Builtin.zig b/src/Builtin.zig
index b0077f2276..a097e88734 100644
--- a/src/Builtin.zig
+++ b/src/Builtin.zig
@@ -313,8 +313,9 @@ pub fn updateFileOnDisk(file: *File, comp: *Compilation) !void {
assert(file.source != null);
const root_dir, const sub_path = file.path.openInfo(comp.dirs);
+ const io = comp.io;
- if (root_dir.statFile(sub_path)) |stat| {
+ if (root_dir.statFile(io, sub_path, .{})) |stat| {
if (stat.size != file.source.?.len) {
std.log.warn(
"the cached file '{f}' had the wrong size. Expected {d}, found {d}. " ++
@@ -342,7 +343,7 @@ pub fn updateFileOnDisk(file: *File, comp: *Compilation) !void {
}
// `make_path` matters because the dir hasn't actually been created yet.
- var af = try root_dir.atomicFile(sub_path, .{ .make_path = true, .write_buffer = &.{} });
+ var af = try root_dir.atomicFile(io, sub_path, .{ .make_path = true, .write_buffer = &.{} });
defer af.deinit();
try af.file_writer.interface.writeAll(file.source.?);
af.finish() catch |err| switch (err) {
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 931a0b2d14..8e005992ec 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -446,11 +446,11 @@ pub const Path = struct {
}
/// Given a `Path`, returns the directory handle and sub path to be used to open the path.
- pub fn openInfo(p: Path, dirs: Directories) struct { fs.Dir, []const u8 } {
+ pub fn openInfo(p: Path, dirs: Directories) struct { Io.Dir, []const u8 } {
const dir = switch (p.root) {
.none => {
const cwd_sub_path = absToCwdRelative(p.sub_path, dirs.cwd);
- return .{ fs.cwd(), cwd_sub_path };
+ return .{ Io.Dir.cwd(), cwd_sub_path };
},
.zig_lib => dirs.zig_lib.handle,
.global_cache => dirs.global_cache.handle,
@@ -721,13 +721,13 @@ pub const Directories = struct {
/// This may be the same as `global_cache`.
local_cache: Cache.Directory,
- pub fn deinit(dirs: *Directories) void {
+ pub fn deinit(dirs: *Directories, io: Io) void {
// The local and global caches could be the same.
- const close_local = dirs.local_cache.handle.fd != dirs.global_cache.handle.fd;
+ const close_local = dirs.local_cache.handle.handle != dirs.global_cache.handle.handle;
- dirs.global_cache.handle.close();
- if (close_local) dirs.local_cache.handle.close();
- dirs.zig_lib.handle.close();
+ dirs.global_cache.handle.close(io);
+ if (close_local) dirs.local_cache.handle.close(io);
+ dirs.zig_lib.handle.close(io);
}
/// Returns a `Directories` where `local_cache` is replaced with `global_cache`, intended for
@@ -745,6 +745,7 @@ pub const Directories = struct {
/// Uses `std.process.fatal` on error conditions.
pub fn init(
arena: Allocator,
+ io: Io,
override_zig_lib: ?[]const u8,
override_global_cache: ?[]const u8,
local_cache_strat: union(enum) {
@@ -768,30 +769,30 @@ pub const Directories = struct {
};
const zig_lib: Cache.Directory = d: {
- if (override_zig_lib) |path| break :d openUnresolved(arena, cwd, path, .@"zig lib");
+ if (override_zig_lib) |path| break :d openUnresolved(arena, io, cwd, path, .@"zig lib");
if (wasi) break :d openWasiPreopen(wasi_preopens, "/lib");
- break :d introspect.findZigLibDirFromSelfExe(arena, cwd, self_exe_path) catch |err| {
- fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) });
+ break :d introspect.findZigLibDirFromSelfExe(arena, io, cwd, self_exe_path) catch |err| {
+ fatal("unable to find zig installation directory '{s}': {t}", .{ self_exe_path, err });
};
};
const global_cache: Cache.Directory = d: {
- if (override_global_cache) |path| break :d openUnresolved(arena, cwd, path, .@"global cache");
+ if (override_global_cache) |path| break :d openUnresolved(arena, io, cwd, path, .@"global cache");
if (wasi) break :d openWasiPreopen(wasi_preopens, "/cache");
const path = introspect.resolveGlobalCacheDir(arena) catch |err| {
- fatal("unable to resolve zig cache directory: {s}", .{@errorName(err)});
+ fatal("unable to resolve zig cache directory: {t}", .{err});
};
- break :d openUnresolved(arena, cwd, path, .@"global cache");
+ break :d openUnresolved(arena, io, cwd, path, .@"global cache");
};
const local_cache: Cache.Directory = switch (local_cache_strat) {
- .override => |path| openUnresolved(arena, cwd, path, .@"local cache"),
+ .override => |path| openUnresolved(arena, io, cwd, path, .@"local cache"),
.search => d: {
- const maybe_path = introspect.resolveSuitableLocalCacheDir(arena, cwd) catch |err| {
- fatal("unable to resolve zig cache directory: {s}", .{@errorName(err)});
+ const maybe_path = introspect.resolveSuitableLocalCacheDir(arena, io, cwd) catch |err| {
+ fatal("unable to resolve zig cache directory: {t}", .{err});
};
const path = maybe_path orelse break :d global_cache;
- break :d openUnresolved(arena, cwd, path, .@"local cache");
+ break :d openUnresolved(arena, io, cwd, path, .@"local cache");
},
.global => global_cache,
};
@@ -814,18 +815,24 @@ pub const Directories = struct {
return .{
.path = if (std.mem.eql(u8, name, ".")) null else name,
.handle = .{
- .fd = preopens.find(name) orelse fatal("WASI preopen not found: '{s}'", .{name}),
+ .handle = preopens.find(name) orelse fatal("WASI preopen not found: '{s}'", .{name}),
},
};
}
- fn openUnresolved(arena: Allocator, cwd: []const u8, unresolved_path: []const u8, thing: enum { @"zig lib", @"global cache", @"local cache" }) Cache.Directory {
+ fn openUnresolved(
+ arena: Allocator,
+ io: Io,
+ cwd: []const u8,
+ unresolved_path: []const u8,
+ thing: enum { @"zig lib", @"global cache", @"local cache" },
+ ) Cache.Directory {
const path = introspect.resolvePath(arena, cwd, &.{unresolved_path}) catch |err| {
fatal("unable to resolve {s} directory: {s}", .{ @tagName(thing), @errorName(err) });
};
const nonempty_path = if (path.len == 0) "." else path;
const handle_or_err = switch (thing) {
- .@"zig lib" => fs.cwd().openDir(nonempty_path, .{}),
- .@"global cache", .@"local cache" => fs.cwd().makeOpenPath(nonempty_path, .{}),
+ .@"zig lib" => Io.Dir.cwd().openDir(io, nonempty_path, .{}),
+ .@"global cache", .@"local cache" => Io.Dir.cwd().createDirPathOpen(io, nonempty_path, .{}),
};
return .{
.path = if (path.len == 0) null else path,
@@ -912,8 +919,8 @@ pub const CrtFile = struct {
lock: Cache.Lock,
full_object_path: Cache.Path,
- pub fn deinit(self: *CrtFile, gpa: Allocator) void {
- self.lock.release();
+ pub fn deinit(self: *CrtFile, gpa: Allocator, io: Io) void {
+ self.lock.release(io);
gpa.free(self.full_object_path.sub_path);
self.* = undefined;
}
@@ -1104,8 +1111,8 @@ pub const CObject = struct {
const source_line = source_line: {
if (diag.src_loc.offset == 0 or diag.src_loc.column == 0) break :source_line 0;
- const file = fs.cwd().openFile(file_name, .{}) catch break :source_line 0;
- defer file.close();
+ const file = Io.Dir.cwd().openFile(io, file_name, .{}) catch break :source_line 0;
+ defer file.close(io);
var buffer: [1024]u8 = undefined;
var file_reader = file.reader(io, &buffer);
file_reader.seekTo(diag.src_loc.offset + 1 - diag.src_loc.column) catch break :source_line 0;
@@ -1179,8 +1186,8 @@ pub const CObject = struct {
};
var buffer: [1024]u8 = undefined;
- const file = try fs.cwd().openFile(path, .{});
- defer file.close();
+ const file = try Io.Dir.cwd().openFile(io, path, .{});
+ defer file.close(io);
var file_reader = file.reader(io, &buffer);
var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = &file_reader.interface });
defer bc.deinit();
@@ -1310,7 +1317,7 @@ pub const CObject = struct {
};
/// Returns if there was failure.
- pub fn clearStatus(self: *CObject, gpa: Allocator) bool {
+ pub fn clearStatus(self: *CObject, gpa: Allocator, io: Io) bool {
switch (self.status) {
.new => return false,
.failure, .failure_retryable => {
@@ -1319,15 +1326,15 @@ pub const CObject = struct {
},
.success => |*success| {
gpa.free(success.object_path.sub_path);
- success.lock.release();
+ success.lock.release(io);
self.status = .new;
return false;
},
}
}
- pub fn destroy(self: *CObject, gpa: Allocator) void {
- _ = self.clearStatus(gpa);
+ pub fn destroy(self: *CObject, gpa: Allocator, io: Io) void {
+ _ = self.clearStatus(gpa, io);
gpa.destroy(self);
}
};
@@ -1357,7 +1364,7 @@ pub const Win32Resource = struct {
},
/// Returns true if there was failure.
- pub fn clearStatus(self: *Win32Resource, gpa: Allocator) bool {
+ pub fn clearStatus(self: *Win32Resource, gpa: Allocator, io: Io) bool {
switch (self.status) {
.new => return false,
.failure, .failure_retryable => {
@@ -1366,15 +1373,15 @@ pub const Win32Resource = struct {
},
.success => |*success| {
gpa.free(success.res_path);
- success.lock.release();
+ success.lock.release(io);
self.status = .new;
return false;
},
}
}
- pub fn destroy(self: *Win32Resource, gpa: Allocator) void {
- _ = self.clearStatus(gpa);
+ pub fn destroy(self: *Win32Resource, gpa: Allocator, io: Io) void {
+ _ = self.clearStatus(gpa, io);
gpa.destroy(self);
}
};
@@ -1603,9 +1610,9 @@ const CacheUse = union(CacheMode) {
/// Prevents other processes from clobbering files in the output directory.
lock: ?Cache.Lock,
- fn releaseLock(whole: *Whole) void {
+ fn releaseLock(whole: *Whole, io: Io) void {
if (whole.lock) |*lock| {
- lock.release();
+ lock.release(io);
whole.lock = null;
}
}
@@ -1617,17 +1624,17 @@ const CacheUse = union(CacheMode) {
}
};
- fn deinit(cu: CacheUse) void {
+ fn deinit(cu: CacheUse, io: Io) void {
switch (cu) {
.none => |none| {
assert(none.tmp_artifact_directory == null);
},
.incremental => |incremental| {
- incremental.artifact_directory.handle.close();
+ incremental.artifact_directory.handle.close(io);
},
.whole => |whole| {
assert(whole.tmp_artifact_directory == null);
- whole.releaseLock();
+ whole.releaseLock(io);
},
}
}
@@ -1872,7 +1879,7 @@ pub const CreateDiagnostic = union(enum) {
pub const CreateCachePath = struct {
which: enum { local, global },
sub: []const u8,
- err: (fs.Dir.MakeError || fs.Dir.OpenError || fs.Dir.StatFileError),
+ err: (Io.Dir.CreateDirError || Io.Dir.OpenError || Io.Dir.StatFileError),
};
pub fn format(diag: CreateDiagnostic, w: *Writer) Writer.Error!void {
switch (diag) {
@@ -1896,13 +1903,17 @@ pub const CreateDiagnostic = union(enum) {
return error.CreateFail;
}
};
-pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, options: CreateOptions) error{
+
+pub const CreateError = error{
OutOfMemory,
+ Canceled,
Unexpected,
CurrentWorkingDirectoryUnlinked,
/// An error has been stored to `diag`.
CreateFail,
-}!*Compilation {
+};
+
+pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, options: CreateOptions) CreateError!*Compilation {
const output_mode = options.config.output_mode;
const is_dyn_lib = switch (output_mode) {
.Obj, .Exe => false,
@@ -1950,6 +1961,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
const libc_dirs = std.zig.LibCDirs.detect(
arena,
+ io,
options.dirs.zig_lib.path.?,
target,
options.root_mod.resolved_target.is_native_abi,
@@ -2080,13 +2092,17 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
}
if (options.verbose_llvm_cpu_features) {
- if (options.root_mod.resolved_target.llvm_cpu_features) |cf| print: {
- const stderr_w, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- stderr_w.print("compilation: {s}\n", .{options.root_name}) catch break :print;
- stderr_w.print(" target: {s}\n", .{try target.zigTriple(arena)}) catch break :print;
- stderr_w.print(" cpu: {s}\n", .{target.cpu.model.name}) catch break :print;
- stderr_w.print(" features: {s}\n", .{cf}) catch {};
+ if (options.root_mod.resolved_target.llvm_cpu_features) |cf| {
+ const stderr = try io.lockStderr(&.{}, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ printVerboseLlvmCpuFeatures(w, arena, options.root_name, target, cf) catch |err| switch (err) {
+ error.WriteFailed => switch (stderr.file_writer.err.?) {
+ error.Canceled => |e| return e,
+ else => {},
+ },
+ error.OutOfMemory => |e| return e,
+ };
}
}
@@ -2104,16 +2120,16 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
cache.* = .{
.gpa = gpa,
.io = io,
- .manifest_dir = options.dirs.local_cache.handle.makeOpenPath("h", .{}) catch |err| {
+ .manifest_dir = options.dirs.local_cache.handle.createDirPathOpen(io, "h", .{}) catch |err| {
return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = "h", .err = err } });
},
};
// These correspond to std.zig.Server.Message.PathPrefix.
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() });
cache.addPrefix(options.dirs.zig_lib);
cache.addPrefix(options.dirs.local_cache);
cache.addPrefix(options.dirs.global_cache);
- errdefer cache.manifest_dir.close();
+ errdefer cache.manifest_dir.close(io);
// This is shared hasher state common to zig source and all C source files.
cache.hash.addBytes(build_options.version);
@@ -2154,18 +2170,18 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
// to redundantly happen for each AstGen operation.
const zir_sub_dir = "z";
- var local_zir_dir = options.dirs.local_cache.handle.makeOpenPath(zir_sub_dir, .{}) catch |err| {
+ var local_zir_dir = options.dirs.local_cache.handle.createDirPathOpen(io, zir_sub_dir, .{}) catch |err| {
return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = zir_sub_dir, .err = err } });
};
- errdefer local_zir_dir.close();
+ errdefer local_zir_dir.close(io);
const local_zir_cache: Cache.Directory = .{
.handle = local_zir_dir,
.path = try options.dirs.local_cache.join(arena, &.{zir_sub_dir}),
};
- var global_zir_dir = options.dirs.global_cache.handle.makeOpenPath(zir_sub_dir, .{}) catch |err| {
+ var global_zir_dir = options.dirs.global_cache.handle.createDirPathOpen(io, zir_sub_dir, .{}) catch |err| {
return diag.fail(.{ .create_cache_path = .{ .which = .global, .sub = zir_sub_dir, .err = err } });
};
- errdefer global_zir_dir.close();
+ errdefer global_zir_dir.close(io);
const global_zir_cache: Cache.Directory = .{
.handle = global_zir_dir,
.path = try options.dirs.global_cache.join(arena, &.{zir_sub_dir}),
@@ -2433,10 +2449,10 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
const digest = hash.final();
const artifact_sub_dir = "o" ++ fs.path.sep_str ++ digest;
- var artifact_dir = options.dirs.local_cache.handle.makeOpenPath(artifact_sub_dir, .{}) catch |err| {
+ var artifact_dir = options.dirs.local_cache.handle.createDirPathOpen(io, artifact_sub_dir, .{}) catch |err| {
return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = artifact_sub_dir, .err = err } });
};
- errdefer artifact_dir.close();
+ errdefer artifact_dir.close(io);
const artifact_directory: Cache.Directory = .{
.handle = artifact_dir,
.path = try options.dirs.local_cache.join(arena, &.{artifact_sub_dir}),
@@ -2687,12 +2703,26 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
return comp;
}
+fn printVerboseLlvmCpuFeatures(
+ w: *Writer,
+ arena: Allocator,
+ root_name: []const u8,
+ target: *const std.Target,
+ cf: [*:0]const u8,
+) (Writer.Error || Allocator.Error)!void {
+ try w.print("compilation: {s}\n", .{root_name});
+ try w.print(" target: {s}\n", .{try target.zigTriple(arena)});
+ try w.print(" cpu: {s}\n", .{target.cpu.model.name});
+ try w.print(" features: {s}\n", .{cf});
+}
+
pub fn destroy(comp: *Compilation) void {
const gpa = comp.gpa;
+ const io = comp.io;
if (comp.bin_file) |lf| lf.destroy();
if (comp.zcu) |zcu| zcu.deinit();
- comp.cache_use.deinit();
+ comp.cache_use.deinit(io);
for (&comp.work_queues) |*work_queue| work_queue.deinit(gpa);
comp.c_object_work_queue.deinit(gpa);
@@ -2705,36 +2735,36 @@ pub fn destroy(comp: *Compilation) void {
var it = comp.crt_files.iterator();
while (it.next()) |entry| {
gpa.free(entry.key_ptr.*);
- entry.value_ptr.deinit(gpa);
+ entry.value_ptr.deinit(gpa, io);
}
comp.crt_files.deinit(gpa);
}
- if (comp.libcxx_static_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.libcxxabi_static_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.libunwind_static_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.tsan_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.ubsan_rt_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.ubsan_rt_obj) |*crt_file| crt_file.deinit(gpa);
- if (comp.zigc_static_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.compiler_rt_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.compiler_rt_obj) |*crt_file| crt_file.deinit(gpa);
- if (comp.compiler_rt_dyn_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.fuzzer_lib) |*crt_file| crt_file.deinit(gpa);
+ if (comp.libcxx_static_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.libcxxabi_static_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.libunwind_static_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.tsan_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.ubsan_rt_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.ubsan_rt_obj) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.zigc_static_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.compiler_rt_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.compiler_rt_obj) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.compiler_rt_dyn_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.fuzzer_lib) |*crt_file| crt_file.deinit(gpa, io);
if (comp.glibc_so_files) |*glibc_file| {
- glibc_file.deinit(gpa);
+ glibc_file.deinit(gpa, io);
}
if (comp.freebsd_so_files) |*freebsd_file| {
- freebsd_file.deinit(gpa);
+ freebsd_file.deinit(gpa, io);
}
if (comp.netbsd_so_files) |*netbsd_file| {
- netbsd_file.deinit(gpa);
+ netbsd_file.deinit(gpa, io);
}
for (comp.c_object_table.keys()) |key| {
- key.destroy(gpa);
+ key.destroy(gpa, io);
}
comp.c_object_table.deinit(gpa);
@@ -2744,7 +2774,7 @@ pub fn destroy(comp: *Compilation) void {
comp.failed_c_objects.deinit(gpa);
for (comp.win32_resource_table.keys()) |key| {
- key.destroy(gpa);
+ key.destroy(gpa, io);
}
comp.win32_resource_table.deinit(gpa);
@@ -2760,7 +2790,7 @@ pub fn destroy(comp: *Compilation) void {
comp.clearMiscFailures();
- comp.cache_parent.manifest_dir.close();
+ comp.cache_parent.manifest_dir.close(io);
}
pub fn clearMiscFailures(comp: *Compilation) void {
@@ -2791,10 +2821,12 @@ pub fn hotCodeSwap(
}
fn cleanupAfterUpdate(comp: *Compilation, tmp_dir_rand_int: u64) void {
+ const io = comp.io;
+
switch (comp.cache_use) {
.none => |none| {
if (none.tmp_artifact_directory) |*tmp_dir| {
- tmp_dir.handle.close();
+ tmp_dir.handle.close(io);
none.tmp_artifact_directory = null;
if (dev.env == .bootstrap) {
// zig1 uses `CacheMode.none`, but it doesn't need to know how to delete
@@ -2813,12 +2845,9 @@ fn cleanupAfterUpdate(comp: *Compilation, tmp_dir_rand_int: u64) void {
return;
}
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int);
- comp.dirs.local_cache.handle.deleteTree(tmp_dir_sub_path) catch |err| {
- log.warn("failed to delete temporary directory '{s}{c}{s}': {s}", .{
- comp.dirs.local_cache.path orelse ".",
- fs.path.sep,
- tmp_dir_sub_path,
- @errorName(err),
+ comp.dirs.local_cache.handle.deleteTree(io, tmp_dir_sub_path) catch |err| {
+ log.warn("failed to delete temporary directory '{s}{c}{s}': {t}", .{
+ comp.dirs.local_cache.path orelse ".", fs.path.sep, tmp_dir_sub_path, err,
});
};
}
@@ -2834,15 +2863,12 @@ fn cleanupAfterUpdate(comp: *Compilation, tmp_dir_rand_int: u64) void {
comp.bin_file = null;
}
if (whole.tmp_artifact_directory) |*tmp_dir| {
- tmp_dir.handle.close();
+ tmp_dir.handle.close(io);
whole.tmp_artifact_directory = null;
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int);
- comp.dirs.local_cache.handle.deleteTree(tmp_dir_sub_path) catch |err| {
- log.warn("failed to delete temporary directory '{s}{c}{s}': {s}", .{
- comp.dirs.local_cache.path orelse ".",
- fs.path.sep,
- tmp_dir_sub_path,
- @errorName(err),
+ comp.dirs.local_cache.handle.deleteTree(io, tmp_dir_sub_path) catch |err| {
+ log.warn("failed to delete temporary directory '{s}{c}{s}': {t}", .{
+ comp.dirs.local_cache.path orelse ".", fs.path.sep, tmp_dir_sub_path, err,
});
};
}
@@ -2891,7 +2917,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
tmp_dir_rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int);
const path = try comp.dirs.local_cache.join(arena, &.{tmp_dir_sub_path});
- const handle = comp.dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{}) catch |err| {
+ const handle = comp.dirs.local_cache.handle.createDirPathOpen(io, tmp_dir_sub_path, .{}) catch |err| {
return comp.setMiscFailure(.open_output, "failed to create output directory '{s}': {t}", .{ path, err });
};
break :d .{ .path = path, .handle = handle };
@@ -2901,7 +2927,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
.whole => |whole| {
assert(comp.bin_file == null);
// We are about to obtain this lock, so here we give other processes a chance first.
- whole.releaseLock();
+ whole.releaseLock(io);
man = comp.cache_parent.obtain();
whole.cache_manifest = &man;
@@ -2972,7 +2998,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
tmp_dir_rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int);
const path = try comp.dirs.local_cache.join(arena, &.{tmp_dir_sub_path});
- const handle = comp.dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{}) catch |err| {
+ const handle = comp.dirs.local_cache.handle.createDirPathOpen(io, tmp_dir_sub_path, .{}) catch |err| {
return comp.setMiscFailure(.open_output, "failed to create output directory '{s}': {t}", .{ path, err });
};
break :d .{ .path = path, .handle = handle };
@@ -3087,17 +3113,12 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
}
if (build_options.enable_debug_extensions and comp.verbose_intern_pool) {
- std.debug.print("intern pool stats for '{s}':\n", .{
- comp.root_name,
- });
+ std.debug.print("intern pool stats for '{s}':\n", .{comp.root_name});
zcu.intern_pool.dump();
}
if (build_options.enable_debug_extensions and comp.verbose_generic_instances) {
- std.debug.print("generic instances for '{s}:0x{x}':\n", .{
- comp.root_name,
- @intFromPtr(zcu),
- });
+ std.debug.print("generic instances for '{s}:0x{x}':\n", .{ comp.root_name, @intFromPtr(zcu) });
zcu.intern_pool.dumpGenericInstances(gpa);
}
}
@@ -3152,7 +3173,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
// the file handle and re-open it in the follow up call to
// `makeWritable`.
if (lf.file) |f| {
- f.close();
+ f.close(io);
lf.file = null;
if (lf.closeDebugInfo()) break :w .lf_and_debug;
@@ -3165,12 +3186,12 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
// Rename the temporary directory into place.
// Close tmp dir and link.File to avoid open handle during rename.
- whole.tmp_artifact_directory.?.handle.close();
+ whole.tmp_artifact_directory.?.handle.close(io);
whole.tmp_artifact_directory = null;
const s = fs.path.sep_str;
const tmp_dir_sub_path = "tmp" ++ s ++ std.fmt.hex(tmp_dir_rand_int);
const o_sub_path = "o" ++ s ++ hex_digest;
- renameTmpIntoCache(comp.dirs.local_cache, tmp_dir_sub_path, o_sub_path) catch |err| {
+ renameTmpIntoCache(io, comp.dirs.local_cache, tmp_dir_sub_path, o_sub_path) catch |err| {
return comp.setMiscFailure(
.rename_results,
"failed to rename compilation results ('{f}{s}') into local cache ('{f}{s}'): {t}",
@@ -3300,11 +3321,8 @@ pub fn resolveEmitPathFlush(
},
}
}
-fn flush(
- comp: *Compilation,
- arena: Allocator,
- tid: Zcu.PerThread.Id,
-) Allocator.Error!void {
+
+fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id) (Io.Cancelable || Allocator.Error)!void {
const io = comp.io;
if (comp.zcu) |zcu| {
if (zcu.llvm_object) |llvm_object| {
@@ -3370,7 +3388,7 @@ fn flush(
// This is needed before reading the error flags.
lf.flush(arena, tid, comp.link_prog_node) catch |err| switch (err) {
error.LinkFailure => {}, // Already reported.
- error.OutOfMemory => return error.OutOfMemory,
+ error.OutOfMemory, error.Canceled => |e| return e,
};
}
if (comp.zcu) |zcu| {
@@ -3389,17 +3407,19 @@ fn flush(
/// implementation at the bottom of this function.
/// This function is only called when CacheMode is `whole`.
fn renameTmpIntoCache(
+ io: Io,
cache_directory: Cache.Directory,
tmp_dir_sub_path: []const u8,
o_sub_path: []const u8,
) !void {
var seen_eaccess = false;
while (true) {
- fs.rename(
+ Io.Dir.rename(
cache_directory.handle,
tmp_dir_sub_path,
cache_directory.handle,
o_sub_path,
+ io,
) catch |err| switch (err) {
// On Windows, rename fails with `AccessDenied` rather than `PathAlreadyExists`.
// See https://github.com/ziglang/zig/issues/8362
@@ -3407,17 +3427,17 @@ fn renameTmpIntoCache(
.windows => {
if (seen_eaccess) return error.AccessDenied;
seen_eaccess = true;
- try cache_directory.handle.deleteTree(o_sub_path);
+ try cache_directory.handle.deleteTree(io, o_sub_path);
continue;
},
else => return error.AccessDenied,
},
error.PathAlreadyExists => {
- try cache_directory.handle.deleteTree(o_sub_path);
+ try cache_directory.handle.deleteTree(io, o_sub_path);
continue;
},
error.FileNotFound => {
- try cache_directory.handle.makePath("o");
+ try cache_directory.handle.createDirPath(io, "o");
continue;
},
else => |e| return e,
@@ -3592,6 +3612,7 @@ fn emitFromCObject(
new_ext: []const u8,
unresolved_emit_path: []const u8,
) Allocator.Error!void {
+ const io = comp.io;
// The dirname and stem (i.e. everything but the extension), of the sub path of the C object.
// We'll append `new_ext` to it to get the path to the right thing (asm, LLVM IR, etc).
const c_obj_dir_and_stem: []const u8 = p: {
@@ -3601,23 +3622,18 @@ fn emitFromCObject(
};
const src_path: Cache.Path = .{
.root_dir = c_obj_path.root_dir,
- .sub_path = try std.fmt.allocPrint(arena, "{s}{s}", .{
- c_obj_dir_and_stem,
- new_ext,
- }),
+ .sub_path = try std.fmt.allocPrint(arena, "{s}{s}", .{ c_obj_dir_and_stem, new_ext }),
};
const emit_path = comp.resolveEmitPath(unresolved_emit_path);
- src_path.root_dir.handle.copyFile(
+ Io.Dir.copyFile(
+ src_path.root_dir.handle,
src_path.sub_path,
emit_path.root_dir.handle,
emit_path.sub_path,
+ io,
.{},
- ) catch |err| log.err("unable to copy '{f}' to '{f}': {s}", .{
- src_path,
- emit_path,
- @errorName(err),
- });
+ ) catch |err| log.err("unable to copy '{f}' to '{f}': {t}", .{ src_path, emit_path, err });
}
/// Having the file open for writing is problematic as far as executing the
@@ -3673,6 +3689,7 @@ pub fn saveState(comp: *Compilation) !void {
const lf = comp.bin_file orelse return;
const gpa = comp.gpa;
+ const io = comp.io;
var bufs = std.array_list.Managed([]const u8).init(gpa);
defer bufs.deinit();
@@ -3893,7 +3910,7 @@ pub fn saveState(comp: *Compilation) !void {
// Using an atomic file prevents a crash or power failure from corrupting
// the previous incremental compilation state.
var write_buffer: [1024]u8 = undefined;
- var af = try lf.emit.root_dir.handle.atomicFile(basename, .{ .write_buffer = &write_buffer });
+ var af = try lf.emit.root_dir.handle.atomicFile(io, basename, .{ .write_buffer = &write_buffer });
defer af.deinit();
try af.file_writer.interface.writeVecAll(bufs.items);
try af.finish();
@@ -4251,12 +4268,13 @@ pub fn getAllErrorsAlloc(comp: *Compilation) error{OutOfMemory}!ErrorBundle {
// However, we haven't reported any such error.
// This is a compiler bug.
print_ctx: {
- var stderr_w, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- stderr_w.writeAll("referenced transitive analysis errors, but none actually emitted\n") catch break :print_ctx;
- stderr_w.print("{f} [transitive failure]\n", .{zcu.fmtAnalUnit(failed_unit)}) catch break :print_ctx;
+ const stderr = std.debug.lockStderr(&.{}).terminal();
+ defer std.debug.unlockStderr();
+ const w = stderr.writer;
+ w.writeAll("referenced transitive analysis errors, but none actually emitted\n") catch break :print_ctx;
+ w.print("{f} [transitive failure]\n", .{zcu.fmtAnalUnit(failed_unit)}) catch break :print_ctx;
while (ref) |r| {
- stderr_w.print("referenced by: {f}{s}\n", .{
+ w.print("referenced by: {f}{s}\n", .{
zcu.fmtAnalUnit(r.referencer),
if (zcu.transitive_failed_analysis.contains(r.referencer)) " [transitive failure]" else "",
}) catch break :print_ctx;
@@ -5038,7 +5056,9 @@ fn dispatchPrelinkWork(comp: *Compilation, main_progress_node: std.Progress.Node
}
prelink_group.wait(io);
- comp.link_queue.finishPrelinkQueue(comp);
+ comp.link_queue.finishPrelinkQueue(comp) catch |err| switch (err) {
+ error.Canceled => return,
+ };
}
const JobError = Allocator.Error || Io.Cancelable;
@@ -5211,13 +5231,10 @@ fn processOneJob(
}
}
-fn createDepFile(
- comp: *Compilation,
- depfile: []const u8,
- binfile: Cache.Path,
-) anyerror!void {
+fn createDepFile(comp: *Compilation, depfile: []const u8, binfile: Cache.Path) anyerror!void {
+ const io = comp.io;
var buf: [4096]u8 = undefined;
- var af = try std.fs.cwd().atomicFile(depfile, .{ .write_buffer = &buf });
+ var af = try Io.Dir.cwd().atomicFile(io, depfile, .{ .write_buffer = &buf });
defer af.deinit();
comp.writeDepFile(binfile, &af.file_writer.interface) catch return af.file_writer.err.?;
@@ -5258,39 +5275,35 @@ fn workerDocsCopy(comp: *Compilation) void {
fn docsCopyFallible(comp: *Compilation) anyerror!void {
const zcu = comp.zcu orelse return comp.lockAndSetMiscFailure(.docs_copy, "no Zig code to document", .{});
+ const io = comp.io;
const docs_path = comp.resolveEmitPath(comp.emit_docs.?);
- var out_dir = docs_path.root_dir.handle.makeOpenPath(docs_path.sub_path, .{}) catch |err| {
+ var out_dir = docs_path.root_dir.handle.createDirPathOpen(io, docs_path.sub_path, .{}) catch |err| {
return comp.lockAndSetMiscFailure(
.docs_copy,
"unable to create output directory '{f}': {s}",
.{ docs_path, @errorName(err) },
);
};
- defer out_dir.close();
+ defer out_dir.close(io);
for (&[_][]const u8{ "docs/main.js", "docs/index.html" }) |sub_path| {
const basename = fs.path.basename(sub_path);
- comp.dirs.zig_lib.handle.copyFile(sub_path, out_dir, basename, .{}) catch |err| {
- comp.lockAndSetMiscFailure(.docs_copy, "unable to copy {s}: {s}", .{
- sub_path,
- @errorName(err),
- });
- return;
- };
+ comp.dirs.zig_lib.handle.copyFile(sub_path, out_dir, basename, io, .{}) catch |err|
+ return comp.lockAndSetMiscFailure(.docs_copy, "unable to copy {s}: {t}", .{ sub_path, err });
}
- var tar_file = out_dir.createFile("sources.tar", .{}) catch |err| {
+ var tar_file = out_dir.createFile(io, "sources.tar", .{}) catch |err| {
return comp.lockAndSetMiscFailure(
.docs_copy,
"unable to create '{f}/sources.tar': {s}",
.{ docs_path, @errorName(err) },
);
};
- defer tar_file.close();
+ defer tar_file.close(io);
var buffer: [1024]u8 = undefined;
- var tar_file_writer = tar_file.writer(&buffer);
+ var tar_file_writer = tar_file.writer(io, &buffer);
var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, []const u8) = .empty;
defer seen_table.deinit(comp.gpa);
@@ -5321,17 +5334,17 @@ fn docsCopyModule(
comp: *Compilation,
module: *Package.Module,
name: []const u8,
- tar_file_writer: *fs.File.Writer,
+ tar_file_writer: *Io.File.Writer,
) !void {
const io = comp.io;
const root = module.root;
var mod_dir = d: {
const root_dir, const sub_path = root.openInfo(comp.dirs);
- break :d root_dir.openDir(sub_path, .{ .iterate = true });
+ break :d root_dir.openDir(io, sub_path, .{ .iterate = true });
} catch |err| {
return comp.lockAndSetMiscFailure(.docs_copy, "unable to open directory '{f}': {t}", .{ root.fmt(comp), err });
};
- defer mod_dir.close();
+ defer mod_dir.close(io);
var walker = try mod_dir.walk(comp.gpa);
defer walker.deinit();
@@ -5341,7 +5354,7 @@ fn docsCopyModule(
var buffer: [1024]u8 = undefined;
- while (try walker.next()) |entry| {
+ while (try walker.next(io)) |entry| {
switch (entry.kind) {
.file => {
if (!std.mem.endsWith(u8, entry.basename, ".zig")) continue;
@@ -5350,14 +5363,14 @@ fn docsCopyModule(
},
else => continue,
}
- var file = mod_dir.openFile(entry.path, .{}) catch |err| {
+ var file = mod_dir.openFile(io, entry.path, .{}) catch |err| {
return comp.lockAndSetMiscFailure(.docs_copy, "unable to open {f}{s}: {t}", .{
root.fmt(comp), entry.path, err,
});
};
- defer file.close();
- const stat = try file.stat();
- var file_reader: fs.File.Reader = .initSize(file.adaptToNewApi(), io, &buffer, stat.size);
+ defer file.close(io);
+ const stat = try file.stat(io);
+ var file_reader: Io.File.Reader = .initSize(file, io, &buffer, stat.size);
archiver.writeFileTimestamp(entry.path, &file_reader, stat.mtime) catch |err| {
return comp.lockAndSetMiscFailure(.docs_copy, "unable to archive {f}{s}: {t}", .{
@@ -5496,13 +5509,13 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
try comp.updateSubCompilation(sub_compilation, .docs_wasm, prog_node);
var crt_file = try sub_compilation.toCrtFile();
- defer crt_file.deinit(gpa);
+ defer crt_file.deinit(gpa, io);
const docs_bin_file = crt_file.full_object_path;
assert(docs_bin_file.sub_path.len > 0); // emitted binary is not a directory
const docs_path = comp.resolveEmitPath(comp.emit_docs.?);
- var out_dir = docs_path.root_dir.handle.makeOpenPath(docs_path.sub_path, .{}) catch |err| {
+ var out_dir = docs_path.root_dir.handle.createDirPathOpen(io, docs_path.sub_path, .{}) catch |err| {
comp.lockAndSetMiscFailure(
.docs_copy,
"unable to create output directory '{f}': {t}",
@@ -5510,12 +5523,14 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
);
return error.AlreadyReported;
};
- defer out_dir.close();
+ defer out_dir.close(io);
- crt_file.full_object_path.root_dir.handle.copyFile(
+ Io.Dir.copyFile(
+ crt_file.full_object_path.root_dir.handle,
crt_file.full_object_path.sub_path,
out_dir,
"main.wasm",
+ io,
.{},
) catch |err| {
comp.lockAndSetMiscFailure(.docs_copy, "unable to copy '{f}' to '{f}': {t}", .{
@@ -5692,8 +5707,8 @@ pub fn translateC(
const tmp_basename = std.fmt.hex(std.crypto.random.int(u64));
const tmp_sub_path = "tmp" ++ fs.path.sep_str ++ tmp_basename;
const cache_dir = comp.dirs.local_cache.handle;
- var cache_tmp_dir = try cache_dir.makeOpenPath(tmp_sub_path, .{});
- defer cache_tmp_dir.close();
+ var cache_tmp_dir = try cache_dir.createDirPathOpen(io, tmp_sub_path, .{});
+ defer cache_tmp_dir.close(io);
const translated_path = try comp.dirs.local_cache.join(arena, &.{ tmp_sub_path, translated_basename });
const source_path = switch (source) {
@@ -5702,7 +5717,7 @@ pub fn translateC(
const out_h_sub_path = tmp_sub_path ++ fs.path.sep_str ++ cimport_basename;
const out_h_path = try comp.dirs.local_cache.join(arena, &.{out_h_sub_path});
if (comp.verbose_cimport) log.info("writing C import source to {s}", .{out_h_path});
- try cache_dir.writeFile(.{ .sub_path = out_h_sub_path, .data = c_src });
+ try cache_dir.writeFile(io, .{ .sub_path = out_h_sub_path, .data = c_src });
break :path out_h_path;
},
.path => |p| p,
@@ -5749,7 +5764,7 @@ pub fn translateC(
try argv.appendSlice(comp.global_cc_argv);
try argv.appendSlice(owner_mod.cc_argv);
try argv.appendSlice(&.{ source_path, "-o", translated_path });
- if (comp.verbose_cimport) dump_argv(argv.items);
+ if (comp.verbose_cimport) try dumpArgv(io, argv.items);
}
var stdout: []u8 = undefined;
@@ -5775,7 +5790,7 @@ pub fn translateC(
}
// Just to save disk space, we delete the file because it is never needed again.
- cache_tmp_dir.deleteFile(dep_basename) catch |err| {
+ cache_tmp_dir.deleteFile(io, dep_basename) catch |err| {
log.warn("failed to delete '{s}': {t}", .{ dep_file_path, err });
};
}
@@ -5805,7 +5820,7 @@ pub fn translateC(
const o_sub_path = "o" ++ fs.path.sep_str ++ hex_digest;
if (comp.verbose_cimport) log.info("renaming {s} to {s}", .{ tmp_sub_path, o_sub_path });
- try renameTmpIntoCache(comp.dirs.local_cache, tmp_sub_path, o_sub_path);
+ try renameTmpIntoCache(io, comp.dirs.local_cache, tmp_sub_path, o_sub_path);
return .{
.digest = bin_digest,
@@ -6144,7 +6159,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
const gpa = comp.gpa;
const io = comp.io;
- if (c_object.clearStatus(gpa)) {
+ if (c_object.clearStatus(gpa, io)) {
// There was previous failure.
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
@@ -6257,7 +6272,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
}
if (comp.verbose_cc) {
- dump_argv(argv.items);
+ try dumpArgv(io, argv.items);
}
const err = std.process.execv(arena, argv.items);
@@ -6267,8 +6282,8 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
// We can't know the digest until we do the C compiler invocation,
// so we need a temporary filename.
const out_obj_path = try comp.tmpFilePath(arena, o_basename);
- var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{});
- defer zig_cache_tmp_dir.close();
+ var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, "tmp", .{});
+ defer zig_cache_tmp_dir.close(io);
const out_diag_path = if (comp.clang_passthrough_mode or !ext.clangSupportsDiagnostics())
null
@@ -6303,15 +6318,15 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
}
if (comp.verbose_cc) {
- dump_argv(argv.items);
+ try dumpArgv(io, argv.items);
}
// Just to save disk space, we delete the files that are never needed again.
- defer if (out_diag_path) |diag_file_path| zig_cache_tmp_dir.deleteFile(fs.path.basename(diag_file_path)) catch |err| switch (err) {
+ defer if (out_diag_path) |diag_file_path| zig_cache_tmp_dir.deleteFile(io, fs.path.basename(diag_file_path)) catch |err| switch (err) {
error.FileNotFound => {}, // the file wasn't created due to an error we reported
else => log.warn("failed to delete '{s}': {s}", .{ diag_file_path, @errorName(err) }),
};
- defer if (out_dep_path) |dep_file_path| zig_cache_tmp_dir.deleteFile(fs.path.basename(dep_file_path)) catch |err| switch (err) {
+ defer if (out_dep_path) |dep_file_path| zig_cache_tmp_dir.deleteFile(io, fs.path.basename(dep_file_path)) catch |err| switch (err) {
error.FileNotFound => {}, // the file wasn't created due to an error we reported
else => log.warn("failed to delete '{s}': {s}", .{ dep_file_path, @errorName(err) }),
};
@@ -6322,7 +6337,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
- const term = child.spawnAndWait() catch |err| {
+ const term = child.spawnAndWait(io) catch |err| {
return comp.failCObj(c_object, "failed to spawn zig clang (passthrough mode) {s}: {s}", .{ argv.items[0], @errorName(err) });
};
switch (term) {
@@ -6340,12 +6355,12 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
child.stdout_behavior = .Ignore;
child.stderr_behavior = .Pipe;
- try child.spawn();
+ try child.spawn(io);
var stderr_reader = child.stderr.?.readerStreaming(io, &.{});
const stderr = try stderr_reader.interface.allocRemaining(arena, .limited(std.math.maxInt(u32)));
- const term = child.wait() catch |err| {
+ const term = child.wait(io) catch |err| {
return comp.failCObj(c_object, "failed to spawn zig clang {s}: {s}", .{ argv.items[0], @errorName(err) });
};
@@ -6387,7 +6402,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
if (comp.file_system_inputs != null) {
// Use the same file size limit as the cache code does for dependency files.
- const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(dep_basename, gpa, .limited(Cache.manifest_file_size_max));
+ const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(io, dep_basename, gpa, .limited(Cache.manifest_file_size_max));
defer gpa.free(dep_file_contents);
var str_buf: std.ArrayList(u8) = .empty;
@@ -6432,10 +6447,10 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
// Rename into place.
const digest = man.final();
const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest });
- var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
- defer o_dir.close();
+ var o_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, o_sub_path, .{});
+ defer o_dir.close(io);
const tmp_basename = fs.path.basename(out_obj_path);
- try fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename);
+ try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename, io);
break :blk digest;
};
@@ -6477,8 +6492,6 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const tracy_trace = trace(@src());
defer tracy_trace.end();
- const io = comp.io;
-
const src_path = switch (win32_resource.src) {
.rc => |rc_src| rc_src.src_path,
.manifest => |src_path| src_path,
@@ -6487,11 +6500,13 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
log.debug("updating win32 resource: {s}", .{src_path});
+ const io = comp.io;
+
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- if (win32_resource.clearStatus(comp.gpa)) {
+ if (win32_resource.clearStatus(comp.gpa, io)) {
// There was previous failure.
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
@@ -6521,8 +6536,8 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const digest = man.final();
const o_sub_path = try fs.path.join(arena, &.{ "o", &digest });
- var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
- defer o_dir.close();
+ var o_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, o_sub_path, .{});
+ defer o_dir.close(io);
const in_rc_path = try comp.dirs.local_cache.join(comp.gpa, &.{
o_sub_path, rc_basename,
@@ -6559,7 +6574,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
resource_id, resource_type, fmtRcEscape(src_path),
});
- try o_dir.writeFile(.{ .sub_path = rc_basename, .data = input });
+ try o_dir.writeFile(io, .{ .sub_path = rc_basename, .data = input });
var argv = std.array_list.Managed([]const u8).init(comp.gpa);
defer argv.deinit();
@@ -6609,8 +6624,8 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const rc_basename_noext = src_basename[0 .. src_basename.len - fs.path.extension(src_basename).len];
const digest = if (try man.hit()) man.final() else blk: {
- var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{});
- defer zig_cache_tmp_dir.close();
+ var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, "tmp", .{});
+ defer zig_cache_tmp_dir.close(io);
const res_filename = try std.fmt.allocPrint(arena, "{s}.res", .{rc_basename_noext});
@@ -6652,7 +6667,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
// Read depfile and update cache manifest
{
const dep_basename = fs.path.basename(out_dep_path);
- const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(dep_basename, arena, .limited(50 * 1024 * 1024));
+ const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(io, dep_basename, arena, .limited(50 * 1024 * 1024));
defer arena.free(dep_file_contents);
const value = try std.json.parseFromSliceLeaky(std.json.Value, arena, dep_file_contents, .{});
@@ -6680,10 +6695,10 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
// Rename into place.
const digest = man.final();
const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest });
- var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
- defer o_dir.close();
+ var o_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, o_sub_path, .{});
+ defer o_dir.close(io);
const tmp_basename = fs.path.basename(out_res_path);
- try fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename);
+ try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename, io);
break :blk digest;
};
@@ -6716,6 +6731,7 @@ fn spawnZigRc(
argv: []const []const u8,
child_progress_node: std.Progress.Node,
) !void {
+ const io = comp.io;
var node_name: std.ArrayList(u8) = .empty;
defer node_name.deinit(arena);
@@ -6725,8 +6741,8 @@ fn spawnZigRc(
child.stderr_behavior = .Pipe;
child.progress_node = child_progress_node;
- child.spawn() catch |err| {
- return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {s}", .{ argv[0], @errorName(err) });
+ child.spawn(io) catch |err| {
+ return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {t}", .{ argv[0], err });
};
var poller = std.Io.poll(comp.gpa, enum { stdout, stderr }, .{
@@ -6758,7 +6774,7 @@ fn spawnZigRc(
// Just in case there's a failure that didn't send an ErrorBundle (e.g. an error return trace)
const stderr = poller.reader(.stderr);
- const term = child.wait() catch |err| {
+ const term = child.wait(io) catch |err| {
return comp.failWin32Resource(win32_resource, "unable to wait for {s} rc: {s}", .{ argv[0], @errorName(err) });
};
@@ -7765,17 +7781,25 @@ pub fn lockAndSetMiscFailure(
return setMiscFailure(comp, tag, format, args);
}
-pub fn dump_argv(argv: []const []const u8) void {
+pub fn dumpArgv(io: Io, argv: []const []const u8) Io.Cancelable!void {
var buffer: [64]u8 = undefined;
- const stderr, _ = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
- nosuspend {
- for (argv, 0..) |arg, i| {
- if (i != 0) stderr.writeByte(' ') catch return;
- stderr.writeAll(arg) catch return;
- }
- stderr.writeByte('\n') catch return;
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ return dumpArgvWriter(w, argv) catch |err| switch (err) {
+ error.WriteFailed => switch (stderr.file_writer.err.?) {
+ error.Canceled => return error.Canceled,
+ else => return,
+ },
+ };
+}
+
+fn dumpArgvWriter(w: *Io.Writer, argv: []const []const u8) Io.Writer.Error!void {
+ for (argv, 0..) |arg, i| {
+ if (i != 0) try w.writeByte(' ');
+ try w.writeAll(arg);
}
+ try w.writeByte('\n');
}
pub fn getZigBackend(comp: Compilation) std.builtin.CompilerBackend {
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 5568c493d9..69a64cbc45 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -1,20 +1,21 @@
//! All interned objects have both a value and a type.
//! This data structure is self-contained.
+const InternPool = @This();
const builtin = @import("builtin");
+
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Cache = std.Build.Cache;
-const Io = std.Io;
const Limb = std.math.big.Limb;
const Hash = std.hash.Wyhash;
+const Zir = std.zig.Zir;
-const InternPool = @This();
const Zcu = @import("Zcu.zig");
-const Zir = std.zig.Zir;
/// One item per thread, indexed by `tid`, which is dense and unique per thread.
locals: []Local,
@@ -11166,11 +11167,15 @@ pub fn mutateVarInit(ip: *InternPool, io: Io, index: Index, init_index: Index) v
}
pub fn dump(ip: *const InternPool) void {
- dumpStatsFallible(ip, std.heap.page_allocator) catch return;
- dumpAllFallible(ip) catch return;
+ var buffer: [4096]u8 = undefined;
+ const stderr = std.debug.lockStderr(&buffer);
+ defer std.debug.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ dumpStatsFallible(ip, w, std.heap.page_allocator) catch return;
+ dumpAllFallible(ip, w) catch return;
}
-fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
+fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !void {
var items_len: usize = 0;
var extra_len: usize = 0;
var limbs_len: usize = 0;
@@ -11423,18 +11428,13 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
};
counts.sort(SortContext{ .map = &counts });
const len = @min(50, counts.count());
- std.debug.print(" top 50 tags:\n", .{});
+ try w.print(" top 50 tags:\n", .{});
for (counts.keys()[0..len], counts.values()[0..len]) |tag, stats| {
- std.debug.print(" {s}: {d} occurrences, {d} total bytes\n", .{
- @tagName(tag), stats.count, stats.bytes,
- });
+ try w.print(" {t}: {d} occurrences, {d} total bytes\n", .{ tag, stats.count, stats.bytes });
}
}
-fn dumpAllFallible(ip: *const InternPool) anyerror!void {
- var buffer: [4096]u8 = undefined;
- const stderr_bw, _ = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
+fn dumpAllFallible(ip: *const InternPool, w: *Io.Writer) anyerror!void {
for (ip.locals, 0..) |*local, tid| {
const items = local.shared.items.view();
for (
@@ -11443,12 +11443,12 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
0..,
) |tag, data, index| {
const i = Index.Unwrapped.wrap(.{ .tid = @enumFromInt(tid), .index = @intCast(index) }, ip);
- try stderr_bw.print("${d} = {s}(", .{ i, @tagName(tag) });
+ try w.print("${d} = {s}(", .{ i, @tagName(tag) });
switch (tag) {
.removed => {},
- .simple_type => try stderr_bw.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(@intFromEnum(i))))}),
- .simple_value => try stderr_bw.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(@intFromEnum(i))))}),
+ .simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(@intFromEnum(i))))}),
+ .simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(@intFromEnum(i))))}),
.type_int_signed,
.type_int_unsigned,
@@ -11521,23 +11521,27 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
.func_coerced,
.union_value,
.memoized_call,
- => try stderr_bw.print("{d}", .{data}),
+ => try w.print("{d}", .{data}),
.opt_null,
.type_slice,
.only_possible_value,
- => try stderr_bw.print("${d}", .{data}),
+ => try w.print("${d}", .{data}),
}
- try stderr_bw.writeAll(")\n");
+ try w.writeAll(")\n");
}
}
}
pub fn dumpGenericInstances(ip: *const InternPool, allocator: Allocator) void {
- ip.dumpGenericInstancesFallible(allocator) catch return;
+ var buffer: [4096]u8 = undefined;
+ const stderr = std.debug.lockStderr(&buffer);
+ defer std.debug.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ ip.dumpGenericInstancesFallible(allocator, w) catch return;
}
-pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) anyerror!void {
+pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator, w: *Io.Writer) !void {
var arena_allocator = std.heap.ArenaAllocator.init(allocator);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
@@ -11564,10 +11568,6 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
}
}
- var buffer: [4096]u8 = undefined;
- const stderr_bw, _ = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
-
const SortContext = struct {
values: []std.ArrayList(Index),
pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
@@ -11579,19 +11579,19 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
var it = instances.iterator();
while (it.next()) |entry| {
const generic_fn_owner_nav = ip.getNav(ip.funcDeclInfo(entry.key_ptr.*).owner_nav);
- try stderr_bw.print("{f} ({d}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
+ try w.print("{f} ({d}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
for (entry.value_ptr.items) |index| {
const unwrapped_index = index.unwrap(ip);
const func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), unwrapped_index.getData(ip));
const owner_nav = ip.getNav(func.owner_nav);
- try stderr_bw.print(" {f}: (", .{owner_nav.name.fmt(ip)});
+ try w.print(" {f}: (", .{owner_nav.name.fmt(ip)});
for (func.comptime_args.get(ip)) |arg| {
if (arg != .none) {
const key = ip.indexToKey(arg);
- try stderr_bw.print(" {} ", .{key});
+ try w.print(" {} ", .{key});
}
}
- try stderr_bw.writeAll(")\n");
+ try w.writeAll(")\n");
}
}
}
diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig
index 9b7c83cc39..f8e4b83293 100644
--- a/src/Package/Fetch.zig
+++ b/src/Package/Fetch.zig
@@ -383,14 +383,14 @@ pub fn run(f: *Fetch) RunError!void {
},
.remote => |remote| remote,
.path_or_url => |path_or_url| {
- if (fs.cwd().openDir(path_or_url, .{ .iterate = true })) |dir| {
+ if (Io.Dir.cwd().openDir(io, path_or_url, .{ .iterate = true })) |dir| {
var resource: Resource = .{ .dir = dir };
return f.runResource(path_or_url, &resource, null);
} else |dir_err| {
var server_header_buffer: [init_resource_buffer_size]u8 = undefined;
const file_err = if (dir_err == error.NotDir) e: {
- if (fs.cwd().openFile(path_or_url, .{})) |file| {
+ if (Io.Dir.cwd().openFile(io, path_or_url, .{})) |file| {
var resource: Resource = .{ .file = file.reader(io, &server_header_buffer) };
return f.runResource(path_or_url, &resource, null);
} else |err| break :e err;
@@ -418,7 +418,7 @@ pub fn run(f: *Fetch) RunError!void {
const prefixed_pkg_sub_path = prefixed_pkg_sub_path_buffer[0 .. 2 + hash_slice.len];
const prefix_len: usize = if (f.job_queue.read_only) "p/".len else 0;
const pkg_sub_path = prefixed_pkg_sub_path[prefix_len..];
- if (cache_root.handle.access(pkg_sub_path, .{})) |_| {
+ if (cache_root.handle.access(io, pkg_sub_path, .{})) |_| {
assert(f.lazy_status != .unavailable);
f.package_root = .{
.root_dir = cache_root,
@@ -500,12 +500,12 @@ fn runResource(
var tmp_directory: Cache.Directory = .{
.path = tmp_directory_path,
.handle = handle: {
- const dir = cache_root.handle.makeOpenPath(tmp_dir_sub_path, .{
- .iterate = true,
+ const dir = cache_root.handle.createDirPathOpen(io, tmp_dir_sub_path, .{
+ .open_options = .{ .iterate = true },
}) catch |err| {
try eb.addRootErrorMessage(.{
- .msg = try eb.printString("unable to create temporary directory '{s}': {s}", .{
- tmp_directory_path, @errorName(err),
+ .msg = try eb.printString("unable to create temporary directory '{s}': {t}", .{
+ tmp_directory_path, err,
}),
});
return error.FetchFailed;
@@ -513,7 +513,7 @@ fn runResource(
break :handle dir;
},
};
- defer tmp_directory.handle.close();
+ defer tmp_directory.handle.close(io);
// Fetch and unpack a resource into a temporary directory.
var unpack_result = try unpackResource(f, resource, uri_path, tmp_directory);
@@ -523,9 +523,9 @@ fn runResource(
// Apply btrfs workaround if needed. Reopen tmp_directory.
if (native_os == .linux and f.job_queue.work_around_btrfs_bug) {
// https://github.com/ziglang/zig/issues/17095
- pkg_path.root_dir.handle.close();
- pkg_path.root_dir.handle = cache_root.handle.makeOpenPath(tmp_dir_sub_path, .{
- .iterate = true,
+ pkg_path.root_dir.handle.close(io);
+ pkg_path.root_dir.handle = cache_root.handle.createDirPathOpen(io, tmp_dir_sub_path, .{
+ .open_options = .{ .iterate = true },
}) catch @panic("btrfs workaround failed");
}
@@ -567,7 +567,7 @@ fn runResource(
.root_dir = cache_root,
.sub_path = try std.fmt.allocPrint(arena, "p" ++ s ++ "{s}", .{computed_package_hash.toSlice()}),
};
- renameTmpIntoCache(cache_root.handle, package_sub_path, f.package_root.sub_path) catch |err| {
+ renameTmpIntoCache(io, cache_root.handle, package_sub_path, f.package_root.sub_path) catch |err| {
const src = try cache_root.join(arena, &.{tmp_dir_sub_path});
const dest = try cache_root.join(arena, &.{f.package_root.sub_path});
try eb.addRootErrorMessage(.{ .msg = try eb.printString(
@@ -578,7 +578,7 @@ fn runResource(
};
// Remove temporary directory root if not already renamed to global cache.
if (!std.mem.eql(u8, package_sub_path, tmp_dir_sub_path)) {
- cache_root.handle.deleteDir(tmp_dir_sub_path) catch {};
+ cache_root.handle.deleteDir(io, tmp_dir_sub_path) catch {};
}
// Validate the computed hash against the expected hash. If invalid, this
@@ -637,8 +637,9 @@ pub fn computedPackageHash(f: *const Fetch) Package.Hash {
/// `computeHash` gets a free check for the existence of `build.zig`, but when
/// not computing a hash, we need to do a syscall to check for it.
fn checkBuildFileExistence(f: *Fetch) RunError!void {
+ const io = f.job_queue.io;
const eb = &f.error_bundle;
- if (f.package_root.access(Package.build_zig_basename, .{})) |_| {
+ if (f.package_root.access(io, Package.build_zig_basename, .{})) |_| {
f.has_build_zig = true;
} else |err| switch (err) {
error.FileNotFound => {},
@@ -655,9 +656,11 @@ fn checkBuildFileExistence(f: *Fetch) RunError!void {
/// This function populates `f.manifest` or leaves it `null`.
fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
+ const io = f.job_queue.io;
const eb = &f.error_bundle;
const arena = f.arena.allocator();
const manifest_bytes = pkg_root.root_dir.handle.readFileAllocOptions(
+ io,
try fs.path.join(arena, &.{ pkg_root.sub_path, Manifest.basename }),
arena,
.limited(Manifest.max_bytes),
@@ -882,10 +885,10 @@ fn fail(f: *Fetch, msg_tok: std.zig.Ast.TokenIndex, msg_str: u32) RunError {
}
const Resource = union(enum) {
- file: fs.File.Reader,
+ file: Io.File.Reader,
http_request: HttpRequest,
git: Git,
- dir: fs.Dir,
+ dir: Io.Dir,
const Git = struct {
session: git.Session,
@@ -908,7 +911,7 @@ const Resource = union(enum) {
.git => |*git_resource| {
git_resource.fetch_stream.deinit();
},
- .dir => |*dir| dir.close(),
+ .dir => |*dir| dir.close(io),
}
resource.* = undefined;
}
@@ -995,7 +998,7 @@ fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u
if (ascii.eqlIgnoreCase(uri.scheme, "file")) {
const path = try uri.path.toRawMaybeAlloc(arena);
- const file = f.parent_package_root.openFile(path, .{}) catch |err| {
+ const file = f.parent_package_root.openFile(io, path, .{}) catch |err| {
return f.fail(f.location_tok, try eb.printString("unable to open '{f}{s}': {t}", .{
f.parent_package_root, path, err,
}));
@@ -1247,13 +1250,14 @@ fn unpackResource(
}
}
-fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *Io.Reader) RunError!UnpackResult {
+fn unpackTarball(f: *Fetch, out_dir: Io.Dir, reader: *Io.Reader) RunError!UnpackResult {
const eb = &f.error_bundle;
const arena = f.arena.allocator();
+ const io = f.job_queue.io;
var diagnostics: std.tar.Diagnostics = .{ .allocator = arena };
- std.tar.pipeToFileSystem(out_dir, reader, .{
+ std.tar.pipeToFileSystem(io, out_dir, reader, .{
.diagnostics = &diagnostics,
.strip_components = 0,
.mode_mode = .ignore,
@@ -1280,7 +1284,7 @@ fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *Io.Reader) RunError!Unpack
fn unzip(
f: *Fetch,
- out_dir: fs.Dir,
+ out_dir: Io.Dir,
reader: *Io.Reader,
) error{ ReadFailed, OutOfMemory, Canceled, FetchFailed }!UnpackResult {
// We write the entire contents to a file first because zip files
@@ -1302,7 +1306,7 @@ fn unzip(
const random_integer = std.crypto.random.int(u64);
zip_path[prefix.len..][0..random_len].* = std.fmt.hex(random_integer);
- break cache_root.handle.createFile(&zip_path, .{
+ break cache_root.handle.createFile(io, &zip_path, .{
.exclusive = true,
.read = true,
}) catch |err| switch (err) {
@@ -1314,10 +1318,10 @@ fn unzip(
),
};
};
- defer zip_file.close();
+ defer zip_file.close(io);
var zip_file_buffer: [4096]u8 = undefined;
var zip_file_reader = b: {
- var zip_file_writer = zip_file.writer(&zip_file_buffer);
+ var zip_file_writer = zip_file.writer(io, &zip_file_buffer);
_ = reader.streamRemaining(&zip_file_writer.interface) catch |err| switch (err) {
error.ReadFailed => return error.ReadFailed,
@@ -1330,7 +1334,7 @@ fn unzip(
f.location_tok,
try eb.printString("failed writing temporary zip file: {t}", .{err}),
);
- break :b zip_file_writer.moveToReader(io);
+ break :b zip_file_writer.moveToReader();
};
var diagnostics: std.zip.Diagnostics = .{ .allocator = f.arena.allocator() };
@@ -1343,13 +1347,13 @@ fn unzip(
.diagnostics = &diagnostics,
}) catch |err| return f.fail(f.location_tok, try eb.printString("zip extract failed: {t}", .{err}));
- cache_root.handle.deleteFile(&zip_path) catch |err|
+ cache_root.handle.deleteFile(io, &zip_path) catch |err|
return f.fail(f.location_tok, try eb.printString("delete temporary zip failed: {t}", .{err}));
return .{ .root_dir = diagnostics.root_dir };
}
-fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!UnpackResult {
+fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!UnpackResult {
const io = f.job_queue.io;
const arena = f.arena.allocator();
// TODO don't try to get a gpa from an arena. expose this dependency higher up
@@ -1362,23 +1366,23 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
// we do not attempt to replicate the exact structure of a real .git
// directory, since that isn't relevant for fetching a package.
{
- var pack_dir = try out_dir.makeOpenPath(".git", .{});
- defer pack_dir.close();
- var pack_file = try pack_dir.createFile("pkg.pack", .{ .read = true });
- defer pack_file.close();
+ var pack_dir = try out_dir.createDirPathOpen(io, ".git", .{});
+ defer pack_dir.close(io);
+ var pack_file = try pack_dir.createFile(io, "pkg.pack", .{ .read = true });
+ defer pack_file.close(io);
var pack_file_buffer: [4096]u8 = undefined;
var pack_file_reader = b: {
- var pack_file_writer = pack_file.writer(&pack_file_buffer);
+ var pack_file_writer = pack_file.writer(io, &pack_file_buffer);
const fetch_reader = &resource.fetch_stream.reader;
_ = try fetch_reader.streamRemaining(&pack_file_writer.interface);
try pack_file_writer.interface.flush();
- break :b pack_file_writer.moveToReader(io);
+ break :b pack_file_writer.moveToReader();
};
- var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true });
- defer index_file.close();
+ var index_file = try pack_dir.createFile(io, "pkg.idx", .{ .read = true });
+ defer index_file.close(io);
var index_file_buffer: [2000]u8 = undefined;
- var index_file_writer = index_file.writer(&index_file_buffer);
+ var index_file_writer = index_file.writer(io, &index_file_buffer);
{
const index_prog_node = f.prog_node.start("Index pack", 0);
defer index_prog_node.end();
@@ -1393,7 +1397,7 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
try repository.init(gpa, object_format, &pack_file_reader, &index_file_reader);
defer repository.deinit();
var diagnostics: git.Diagnostics = .{ .allocator = arena };
- try repository.checkout(out_dir, resource.want_oid, &diagnostics);
+ try repository.checkout(io, out_dir, resource.want_oid, &diagnostics);
if (diagnostics.errors.items.len > 0) {
try res.allocErrors(arena, diagnostics.errors.items.len, "unable to unpack packfile");
@@ -1407,41 +1411,37 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
}
}
- try out_dir.deleteTree(".git");
+ try out_dir.deleteTree(io, ".git");
return res;
}
-fn recursiveDirectoryCopy(f: *Fetch, dir: fs.Dir, tmp_dir: fs.Dir) anyerror!void {
+fn recursiveDirectoryCopy(f: *Fetch, dir: Io.Dir, tmp_dir: Io.Dir) anyerror!void {
const gpa = f.arena.child_allocator;
+ const io = f.job_queue.io;
// Recursive directory copy.
var it = try dir.walk(gpa);
defer it.deinit();
- while (try it.next()) |entry| {
+ while (try it.next(io)) |entry| {
switch (entry.kind) {
.directory => {}, // omit empty directories
.file => {
- dir.copyFile(
- entry.path,
- tmp_dir,
- entry.path,
- .{},
- ) catch |err| switch (err) {
+ dir.copyFile(entry.path, tmp_dir, entry.path, io, .{}) catch |err| switch (err) {
error.FileNotFound => {
- if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(dirname);
- try dir.copyFile(entry.path, tmp_dir, entry.path, .{});
+ if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.createDirPath(io, dirname);
+ try dir.copyFile(entry.path, tmp_dir, entry.path, io, .{});
},
else => |e| return e,
};
},
.sym_link => {
var buf: [fs.max_path_bytes]u8 = undefined;
- const link_name = try dir.readLink(entry.path, &buf);
+ const link_name = buf[0..try dir.readLink(io, entry.path, &buf)];
// TODO: if this would create a symlink to outside
// the destination directory, fail with an error instead.
- tmp_dir.symLink(link_name, entry.path, .{}) catch |err| switch (err) {
+ tmp_dir.symLink(io, link_name, entry.path, .{}) catch |err| switch (err) {
error.FileNotFound => {
- if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(dirname);
- try tmp_dir.symLink(link_name, entry.path, .{});
+ if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.createDirPath(io, dirname);
+ try tmp_dir.symLink(io, link_name, entry.path, .{});
},
else => |e| return e,
};
@@ -1451,14 +1451,14 @@ fn recursiveDirectoryCopy(f: *Fetch, dir: fs.Dir, tmp_dir: fs.Dir) anyerror!void
}
}
-pub fn renameTmpIntoCache(cache_dir: fs.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void {
+pub fn renameTmpIntoCache(io: Io, cache_dir: Io.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void {
assert(dest_dir_sub_path[1] == fs.path.sep);
var handled_missing_dir = false;
while (true) {
- cache_dir.rename(tmp_dir_sub_path, dest_dir_sub_path) catch |err| switch (err) {
+ cache_dir.rename(tmp_dir_sub_path, cache_dir, dest_dir_sub_path, io) catch |err| switch (err) {
error.FileNotFound => {
if (handled_missing_dir) return err;
- cache_dir.makeDir(dest_dir_sub_path[0..1]) catch |mkd_err| switch (mkd_err) {
+ cache_dir.createDir(io, dest_dir_sub_path[0..1], .default_dir) catch |mkd_err| switch (mkd_err) {
error.PathAlreadyExists => handled_missing_dir = true,
else => |e| return e,
};
@@ -1466,7 +1466,7 @@ pub fn renameTmpIntoCache(cache_dir: fs.Dir, tmp_dir_sub_path: []const u8, dest_
},
error.PathAlreadyExists, error.AccessDenied => {
// Package has been already downloaded and may already be in use on the system.
- cache_dir.deleteTree(tmp_dir_sub_path) catch {
+ cache_dir.deleteTree(io, tmp_dir_sub_path) catch {
// Garbage files leftover in zig-cache/tmp/ is, as they say
// on Star Trek, "operating within normal parameters".
};
@@ -1519,7 +1519,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
var group: Io.Group = .init;
defer group.wait(io);
- while (walker.next() catch |err| {
+ while (walker.next(io) catch |err| {
try eb.addRootErrorMessage(.{ .msg = try eb.printString(
"unable to walk temporary directory '{f}': {s}",
.{ pkg_path, @errorName(err) },
@@ -1542,7 +1542,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
.fs_path = fs_path,
.failure = undefined, // to be populated by the worker
};
- group.async(io, workerDeleteFile, .{ root_dir, deleted_file });
+ group.async(io, workerDeleteFile, .{ io, root_dir, deleted_file });
try deleted_files.append(deleted_file);
continue;
}
@@ -1570,7 +1570,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
.failure = undefined, // to be populated by the worker
.size = undefined, // to be populated by the worker
};
- group.async(io, workerHashFile, .{ root_dir, hashed_file });
+ group.async(io, workerHashFile, .{ io, root_dir, hashed_file });
try all_files.append(hashed_file);
}
}
@@ -1588,7 +1588,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
var i: usize = 0;
while (i < sus_dirs.count()) : (i += 1) {
const sus_dir = sus_dirs.keys()[i];
- root_dir.deleteDir(sus_dir) catch |err| switch (err) {
+ root_dir.deleteDir(io, sus_dir) catch |err| switch (err) {
error.DirNotEmpty => continue,
error.FileNotFound => continue,
else => |e| {
@@ -1638,7 +1638,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
assert(!f.job_queue.recursive);
// Print something to stdout that can be text diffed to figure out why
// the package hash is different.
- dumpHashInfo(all_files.items) catch |err| {
+ dumpHashInfo(io, all_files.items) catch |err| {
std.debug.print("unable to write to stdout: {s}\n", .{@errorName(err)});
std.process.exit(1);
};
@@ -1650,9 +1650,9 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
};
}
-fn dumpHashInfo(all_files: []const *const HashedFile) !void {
+fn dumpHashInfo(io: Io, all_files: []const *const HashedFile) !void {
var stdout_buffer: [1024]u8 = undefined;
- var stdout_writer: fs.File.Writer = .initStreaming(.stdout(), &stdout_buffer);
+ var stdout_writer: Io.File.Writer = .initStreaming(.stdout(), io, &stdout_buffer);
const w = &stdout_writer.interface;
for (all_files) |hashed_file| {
try w.print("{t}: {x}: {s}\n", .{ hashed_file.kind, &hashed_file.hash, hashed_file.normalized_path });
@@ -1660,15 +1660,15 @@ fn dumpHashInfo(all_files: []const *const HashedFile) !void {
try w.flush();
}
-fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile) void {
- hashed_file.failure = hashFileFallible(dir, hashed_file);
+fn workerHashFile(io: Io, dir: Io.Dir, hashed_file: *HashedFile) void {
+ hashed_file.failure = hashFileFallible(io, dir, hashed_file);
}
-fn workerDeleteFile(dir: fs.Dir, deleted_file: *DeletedFile) void {
- deleted_file.failure = deleteFileFallible(dir, deleted_file);
+fn workerDeleteFile(io: Io, dir: Io.Dir, deleted_file: *DeletedFile) void {
+ deleted_file.failure = deleteFileFallible(io, dir, deleted_file);
}
-fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
+fn hashFileFallible(io: Io, dir: Io.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
var buf: [8000]u8 = undefined;
var hasher = Package.Hash.Algo.init(.{});
hasher.update(hashed_file.normalized_path);
@@ -1676,24 +1676,24 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
switch (hashed_file.kind) {
.file => {
- var file = try dir.openFile(hashed_file.fs_path, .{});
- defer file.close();
+ var file = try dir.openFile(io, hashed_file.fs_path, .{});
+ defer file.close(io);
// Hard-coded false executable bit: https://github.com/ziglang/zig/issues/17463
hasher.update(&.{ 0, 0 });
var file_header: FileHeader = .{};
while (true) {
- const bytes_read = try file.read(&buf);
+ const bytes_read = try file.readPositional(io, &.{&buf}, file_size);
if (bytes_read == 0) break;
file_size += bytes_read;
hasher.update(buf[0..bytes_read]);
file_header.update(buf[0..bytes_read]);
}
if (file_header.isExecutable()) {
- try setExecutable(file);
+ try setExecutable(io, file);
}
},
.link => {
- const link_name = try dir.readLink(hashed_file.fs_path, &buf);
+ const link_name = buf[0..try dir.readLink(io, hashed_file.fs_path, &buf)];
if (fs.path.sep != canonical_sep) {
// Package hashes are intended to be consistent across
// platforms which means we must normalize path separators
@@ -1707,16 +1707,13 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
hashed_file.size = file_size;
}
-fn deleteFileFallible(dir: fs.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void {
- try dir.deleteFile(deleted_file.fs_path);
+fn deleteFileFallible(io: Io, dir: Io.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void {
+ try dir.deleteFile(io, deleted_file.fs_path);
}
-fn setExecutable(file: fs.File) !void {
- if (!std.fs.has_executable_bit) return;
-
- const S = std.posix.S;
- const mode = fs.File.default_mode | S.IXUSR | S.IXGRP | S.IXOTH;
- try file.chmod(mode);
+fn setExecutable(io: Io, file: Io.File) !void {
+ if (!Io.File.Permissions.has_executable_bit) return;
+ try file.setPermissions(io, .executable_file);
}
const DeletedFile = struct {
@@ -1724,8 +1721,8 @@ const DeletedFile = struct {
failure: Error!void,
const Error =
- fs.Dir.DeleteFileError ||
- fs.Dir.DeleteDirError;
+ Io.Dir.DeleteFileError ||
+ Io.Dir.DeleteDirError;
};
const HashedFile = struct {
@@ -1737,11 +1734,11 @@ const HashedFile = struct {
size: u64,
const Error =
- fs.File.OpenError ||
- fs.File.ReadError ||
- fs.File.StatError ||
- fs.File.ChmodError ||
- fs.Dir.ReadLinkError;
+ Io.File.OpenError ||
+ Io.File.ReadPositionalError ||
+ Io.File.StatError ||
+ Io.File.SetPermissionsError ||
+ Io.Dir.ReadLinkError;
const Kind = enum { file, link };
@@ -2043,7 +2040,7 @@ const UnpackResult = struct {
defer errors.deinit(gpa);
var aw: Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
- try errors.renderToWriter(.{}, &aw.writer, .no_color);
+ try errors.renderToWriter(.{}, &aw.writer);
try std.testing.expectEqualStrings(
\\error: unable to unpack
\\ note: unable to create symlink from 'dir2/file2' to 'filename': SymlinkError
@@ -2074,7 +2071,7 @@ test "tarball with duplicate paths" {
defer tmp.cleanup();
const tarball_name = "duplicate_paths.tar.gz";
- try saveEmbedFile(tarball_name, tmp.dir);
+ try saveEmbedFile(io, tarball_name, tmp.dir);
const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
defer gpa.free(tarball_path);
@@ -2107,7 +2104,7 @@ test "tarball with excluded duplicate paths" {
defer tmp.cleanup();
const tarball_name = "duplicate_paths_excluded.tar.gz";
- try saveEmbedFile(tarball_name, tmp.dir);
+ try saveEmbedFile(io, tarball_name, tmp.dir);
const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
defer gpa.free(tarball_path);
@@ -2153,7 +2150,7 @@ test "tarball without root folder" {
defer tmp.cleanup();
const tarball_name = "no_root.tar.gz";
- try saveEmbedFile(tarball_name, tmp.dir);
+ try saveEmbedFile(io, tarball_name, tmp.dir);
const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
defer gpa.free(tarball_path);
@@ -2178,7 +2175,7 @@ test "tarball without root folder" {
}
test "set executable bit based on file content" {
- if (!std.fs.has_executable_bit) return error.SkipZigTest;
+ if (!Io.File.Permissions.has_executable_bit) return error.SkipZigTest;
const gpa = std.testing.allocator;
const io = std.testing.io;
@@ -2186,7 +2183,7 @@ test "set executable bit based on file content" {
defer tmp.cleanup();
const tarball_name = "executables.tar.gz";
- try saveEmbedFile(tarball_name, tmp.dir);
+ try saveEmbedFile(io, tarball_name, tmp.dir);
const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
defer gpa.free(tarball_path);
@@ -2210,16 +2207,16 @@ test "set executable bit based on file content" {
);
var out = try fb.packageDir();
- defer out.close();
+ defer out.close(io);
const S = std.posix.S;
// expect executable bit not set
- try std.testing.expect((try out.statFile("file1")).mode & S.IXUSR == 0);
- try std.testing.expect((try out.statFile("script_without_shebang")).mode & S.IXUSR == 0);
+ try std.testing.expect((try out.statFile(io, "file1", .{})).permissions.toMode() & S.IXUSR == 0);
+ try std.testing.expect((try out.statFile(io, "script_without_shebang", .{})).permissions.toMode() & S.IXUSR == 0);
// expect executable bit set
- try std.testing.expect((try out.statFile("hello")).mode & S.IXUSR != 0);
- try std.testing.expect((try out.statFile("script")).mode & S.IXUSR != 0);
- try std.testing.expect((try out.statFile("script_with_shebang_without_exec_bit")).mode & S.IXUSR != 0);
- try std.testing.expect((try out.statFile("hello_ln")).mode & S.IXUSR != 0);
+ try std.testing.expect((try out.statFile(io, "hello", .{})).permissions.toMode() & S.IXUSR != 0);
+ try std.testing.expect((try out.statFile(io, "script", .{})).permissions.toMode() & S.IXUSR != 0);
+ try std.testing.expect((try out.statFile(io, "script_with_shebang_without_exec_bit", .{})).permissions.toMode() & S.IXUSR != 0);
+ try std.testing.expect((try out.statFile(io, "hello_ln", .{})).permissions.toMode() & S.IXUSR != 0);
//
// $ ls -al zig-cache/tmp/OCz9ovUcstDjTC_U/zig-global-cache/p/1220fecb4c06a9da8673c87fe8810e15785f1699212f01728eadce094d21effeeef3
@@ -2231,12 +2228,12 @@ test "set executable bit based on file content" {
// -rwxrwxr-x 1 17 Apr script_with_shebang_without_exec_bit
}
-fn saveEmbedFile(comptime tarball_name: []const u8, dir: fs.Dir) !void {
+fn saveEmbedFile(io: Io, comptime tarball_name: []const u8, dir: Io.Dir) !void {
//const tarball_name = "duplicate_paths_excluded.tar.gz";
const tarball_content = @embedFile("Fetch/testdata/" ++ tarball_name);
- var tmp_file = try dir.createFile(tarball_name, .{});
- defer tmp_file.close();
- try tmp_file.writeAll(tarball_content);
+ var tmp_file = try dir.createFile(io, tarball_name, .{});
+ defer tmp_file.close(io);
+ try tmp_file.writeStreamingAll(io, tarball_content);
}
// Builds Fetch with required dependencies, clears dependencies on deinit().
@@ -2250,10 +2247,10 @@ const TestFetchBuilder = struct {
self: *TestFetchBuilder,
allocator: std.mem.Allocator,
io: Io,
- cache_parent_dir: std.fs.Dir,
+ cache_parent_dir: std.Io.Dir,
path_or_url: []const u8,
) !*Fetch {
- const cache_dir = try cache_parent_dir.makeOpenPath("zig-global-cache", .{});
+ const cache_dir = try cache_parent_dir.createDirPathOpen(io, "zig-global-cache", .{});
self.http_client = .{ .allocator = allocator, .io = io };
self.global_cache_directory = .{ .handle = cache_dir, .path = null };
@@ -2301,35 +2298,40 @@ const TestFetchBuilder = struct {
}
fn deinit(self: *TestFetchBuilder) void {
+ const io = self.job_queue.io;
self.fetch.deinit();
self.job_queue.deinit();
self.fetch.prog_node.end();
- self.global_cache_directory.handle.close();
+ self.global_cache_directory.handle.close(io);
self.http_client.deinit();
}
- fn packageDir(self: *TestFetchBuilder) !fs.Dir {
+ fn packageDir(self: *TestFetchBuilder) !Io.Dir {
+ const io = self.job_queue.io;
const root = self.fetch.package_root;
- return try root.root_dir.handle.openDir(root.sub_path, .{ .iterate = true });
+ return try root.root_dir.handle.openDir(io, root.sub_path, .{ .iterate = true });
}
// Test helper, asserts thet package dir constains expected_files.
// expected_files must be sorted.
fn expectPackageFiles(self: *TestFetchBuilder, expected_files: []const []const u8) !void {
+ const io = self.job_queue.io;
+ const gpa = std.testing.allocator;
+
var package_dir = try self.packageDir();
- defer package_dir.close();
+ defer package_dir.close(io);
var actual_files: std.ArrayList([]u8) = .empty;
- defer actual_files.deinit(std.testing.allocator);
- defer for (actual_files.items) |file| std.testing.allocator.free(file);
- var walker = try package_dir.walk(std.testing.allocator);
+ defer actual_files.deinit(gpa);
+ defer for (actual_files.items) |file| gpa.free(file);
+ var walker = try package_dir.walk(gpa);
defer walker.deinit();
- while (try walker.next()) |entry| {
+ while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
- const path = try std.testing.allocator.dupe(u8, entry.path);
- errdefer std.testing.allocator.free(path);
+ const path = try gpa.dupe(u8, entry.path);
+ errdefer gpa.free(path);
std.mem.replaceScalar(u8, path, std.fs.path.sep, '/');
- try actual_files.append(std.testing.allocator, path);
+ try actual_files.append(gpa, path);
}
std.mem.sortUnstable([]u8, actual_files.items, {}, struct {
fn lessThan(_: void, a: []u8, b: []u8) bool {
@@ -2346,17 +2348,19 @@ const TestFetchBuilder = struct {
// Test helper, asserts that fetch has failed with `msg` error message.
fn expectFetchErrors(self: *TestFetchBuilder, notes_len: usize, msg: []const u8) !void {
+ const gpa = std.testing.allocator;
+
var errors = try self.fetch.error_bundle.toOwnedBundle("");
- defer errors.deinit(std.testing.allocator);
+ defer errors.deinit(gpa);
const em = errors.getErrorMessage(errors.getMessages()[0]);
try std.testing.expectEqual(1, em.count);
if (notes_len > 0) {
try std.testing.expectEqual(notes_len, em.notes_len);
}
- var aw: Io.Writer.Allocating = .init(std.testing.allocator);
+ var aw: Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
- try errors.renderToWriter(.{}, &aw.writer, .no_color);
+ try errors.renderToWriter(.{}, &aw.writer);
try std.testing.expectEqualStrings(msg, aw.written());
}
};
diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig
index a2ea870c3f..18e15f457f 100644
--- a/src/Package/Fetch/git.zig
+++ b/src/Package/Fetch/git.zig
@@ -198,8 +198,8 @@ pub const Repository = struct {
repo: *Repository,
allocator: Allocator,
format: Oid.Format,
- pack_file: *std.fs.File.Reader,
- index_file: *std.fs.File.Reader,
+ pack_file: *Io.File.Reader,
+ index_file: *Io.File.Reader,
) !void {
repo.* = .{ .odb = undefined };
try repo.odb.init(allocator, format, pack_file, index_file);
@@ -213,7 +213,8 @@ pub const Repository = struct {
/// Checks out the repository at `commit_oid` to `worktree`.
pub fn checkout(
repository: *Repository,
- worktree: std.fs.Dir,
+ io: Io,
+ worktree: Io.Dir,
commit_oid: Oid,
diagnostics: *Diagnostics,
) !void {
@@ -223,13 +224,14 @@ pub const Repository = struct {
if (commit_object.type != .commit) return error.NotACommit;
break :tree_oid try getCommitTree(repository.odb.format, commit_object.data);
};
- try repository.checkoutTree(worktree, tree_oid, "", diagnostics);
+ try repository.checkoutTree(io, worktree, tree_oid, "", diagnostics);
}
/// Checks out the tree at `tree_oid` to `worktree`.
fn checkoutTree(
repository: *Repository,
- dir: std.fs.Dir,
+ io: Io,
+ dir: Io.Dir,
tree_oid: Oid,
current_path: []const u8,
diagnostics: *Diagnostics,
@@ -251,18 +253,18 @@ pub const Repository = struct {
while (try tree_iter.next()) |entry| {
switch (entry.type) {
.directory => {
- try dir.makeDir(entry.name);
- var subdir = try dir.openDir(entry.name, .{});
- defer subdir.close();
+ try dir.createDir(io, entry.name, .default_dir);
+ var subdir = try dir.openDir(io, entry.name, .{});
+ defer subdir.close(io);
const sub_path = try std.fs.path.join(repository.odb.allocator, &.{ current_path, entry.name });
defer repository.odb.allocator.free(sub_path);
- try repository.checkoutTree(subdir, entry.oid, sub_path, diagnostics);
+ try repository.checkoutTree(io, subdir, entry.oid, sub_path, diagnostics);
},
.file => {
try repository.odb.seekOid(entry.oid);
const file_object = try repository.odb.readObject();
if (file_object.type != .blob) return error.InvalidFile;
- var file = dir.createFile(entry.name, .{ .exclusive = true }) catch |e| {
+ var file = dir.createFile(io, entry.name, .{ .exclusive = true }) catch |e| {
const file_name = try std.fs.path.join(diagnostics.allocator, &.{ current_path, entry.name });
errdefer diagnostics.allocator.free(file_name);
try diagnostics.errors.append(diagnostics.allocator, .{ .unable_to_create_file = .{
@@ -271,15 +273,15 @@ pub const Repository = struct {
} });
continue;
};
- defer file.close();
- try file.writeAll(file_object.data);
+ defer file.close(io);
+ try file.writePositionalAll(io, file_object.data, 0);
},
.symlink => {
try repository.odb.seekOid(entry.oid);
const symlink_object = try repository.odb.readObject();
if (symlink_object.type != .blob) return error.InvalidFile;
const link_name = symlink_object.data;
- dir.symLink(link_name, entry.name, .{}) catch |e| {
+ dir.symLink(io, link_name, entry.name, .{}) catch |e| {
const file_name = try std.fs.path.join(diagnostics.allocator, &.{ current_path, entry.name });
errdefer diagnostics.allocator.free(file_name);
const link_name_dup = try diagnostics.allocator.dupe(u8, link_name);
@@ -294,7 +296,7 @@ pub const Repository = struct {
.gitlink => {
// Consistent with git archive behavior, create the directory but
// do nothing else
- try dir.makeDir(entry.name);
+ try dir.createDir(io, entry.name, .default_dir);
},
}
}
@@ -370,9 +372,9 @@ pub const Repository = struct {
/// [pack-format](https://git-scm.com/docs/pack-format).
const Odb = struct {
format: Oid.Format,
- pack_file: *std.fs.File.Reader,
+ pack_file: *Io.File.Reader,
index_header: IndexHeader,
- index_file: *std.fs.File.Reader,
+ index_file: *Io.File.Reader,
cache: ObjectCache = .{},
allocator: Allocator,
@@ -381,8 +383,8 @@ const Odb = struct {
odb: *Odb,
allocator: Allocator,
format: Oid.Format,
- pack_file: *std.fs.File.Reader,
- index_file: *std.fs.File.Reader,
+ pack_file: *Io.File.Reader,
+ index_file: *Io.File.Reader,
) !void {
try pack_file.seekTo(0);
try index_file.seekTo(0);
@@ -1270,8 +1272,8 @@ const IndexEntry = struct {
pub fn indexPack(
allocator: Allocator,
format: Oid.Format,
- pack: *std.fs.File.Reader,
- index_writer: *std.fs.File.Writer,
+ pack: *Io.File.Reader,
+ index_writer: *Io.File.Writer,
) !void {
try pack.seekTo(0);
@@ -1370,7 +1372,7 @@ pub fn indexPack(
fn indexPackFirstPass(
allocator: Allocator,
format: Oid.Format,
- pack: *std.fs.File.Reader,
+ pack: *Io.File.Reader,
index_entries: *std.AutoHashMapUnmanaged(Oid, IndexEntry),
pending_deltas: *std.ArrayList(IndexEntry),
) !Oid {
@@ -1423,7 +1425,7 @@ fn indexPackFirstPass(
fn indexPackHashDelta(
allocator: Allocator,
format: Oid.Format,
- pack: *std.fs.File.Reader,
+ pack: *Io.File.Reader,
delta: IndexEntry,
index_entries: std.AutoHashMapUnmanaged(Oid, IndexEntry),
cache: *ObjectCache,
@@ -1475,7 +1477,7 @@ fn indexPackHashDelta(
fn resolveDeltaChain(
allocator: Allocator,
format: Oid.Format,
- pack: *std.fs.File.Reader,
+ pack: *Io.File.Reader,
base_object: Object,
delta_offsets: []const u64,
cache: *ObjectCache,
@@ -1582,17 +1584,17 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
var git_dir = testing.tmpDir(.{});
defer git_dir.cleanup();
- var pack_file = try git_dir.dir.createFile("testrepo.pack", .{ .read = true });
- defer pack_file.close();
- try pack_file.writeAll(testrepo_pack);
+ var pack_file = try git_dir.dir.createFile(io, "testrepo.pack", .{ .read = true });
+ defer pack_file.close(io);
+ try pack_file.writeStreamingAll(io, testrepo_pack);
var pack_file_buffer: [2000]u8 = undefined;
var pack_file_reader = pack_file.reader(io, &pack_file_buffer);
- var index_file = try git_dir.dir.createFile("testrepo.idx", .{ .read = true });
- defer index_file.close();
+ var index_file = try git_dir.dir.createFile(io, "testrepo.idx", .{ .read = true });
+ defer index_file.close(io);
var index_file_buffer: [2000]u8 = undefined;
- var index_file_writer = index_file.writer(&index_file_buffer);
+ var index_file_writer = index_file.writer(io, &index_file_buffer);
try indexPack(testing.allocator, format, &pack_file_reader, &index_file_writer);
// Arbitrary size limit on files read while checking the repository contents
@@ -1600,7 +1602,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
const max_file_size = 8192;
if (!skip_checksums) {
- const index_file_data = try git_dir.dir.readFileAlloc("testrepo.idx", testing.allocator, .limited(max_file_size));
+ const index_file_data = try git_dir.dir.readFileAlloc(io, "testrepo.idx", testing.allocator, .limited(max_file_size));
defer testing.allocator.free(index_file_data);
// testrepo.idx is generated by Git. The index created by this file should
// match it exactly. Running `git verify-pack -v testrepo.pack` can verify
@@ -1621,7 +1623,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- try repository.checkout(worktree.dir, commit_id, &diagnostics);
+ try repository.checkout(io, worktree.dir, commit_id, &diagnostics);
try testing.expect(diagnostics.errors.items.len == 0);
const expected_files: []const []const u8 = &.{
@@ -1646,7 +1648,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
defer for (actual_files.items) |file| testing.allocator.free(file);
var walker = try worktree.dir.walk(testing.allocator);
defer walker.deinit();
- while (try walker.next()) |entry| {
+ while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
const path = try testing.allocator.dupe(u8, entry.path);
errdefer testing.allocator.free(path);
@@ -1676,7 +1678,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
\\revision 19
\\
;
- const actual_file_contents = try worktree.dir.readFileAlloc("file", testing.allocator, .limited(max_file_size));
+ const actual_file_contents = try worktree.dir.readFileAlloc(io, "file", testing.allocator, .limited(max_file_size));
defer testing.allocator.free(actual_file_contents);
try testing.expectEqualStrings(expected_file_contents, actual_file_contents);
}
@@ -1700,7 +1702,7 @@ test "SHA-256 packfile indexing and checkout" {
pub fn main() !void {
const allocator = std.heap.smp_allocator;
- var threaded: Io.Threaded = .init(allocator);
+ var threaded: Io.Threaded = .init(allocator, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -1712,23 +1714,23 @@ pub fn main() !void {
const format = std.meta.stringToEnum(Oid.Format, args[1]) orelse return error.InvalidFormat;
- var pack_file = try std.fs.cwd().openFile(args[2], .{});
- defer pack_file.close();
+ var pack_file = try Io.Dir.cwd().openFile(io, args[2], .{});
+ defer pack_file.close(io);
var pack_file_buffer: [4096]u8 = undefined;
var pack_file_reader = pack_file.reader(io, &pack_file_buffer);
const commit = try Oid.parse(format, args[3]);
- var worktree = try std.fs.cwd().makeOpenPath(args[4], .{});
- defer worktree.close();
+ var worktree = try Io.Dir.cwd().createDirPathOpen(io, args[4], .{});
+ defer worktree.close(io);
- var git_dir = try worktree.makeOpenPath(".git", .{});
- defer git_dir.close();
+ var git_dir = try worktree.createDirPathOpen(io, ".git", .{});
+ defer git_dir.close(io);
std.debug.print("Starting index...\n", .{});
- var index_file = try git_dir.createFile("idx", .{ .read = true });
- defer index_file.close();
+ var index_file = try git_dir.createFile(io, "idx", .{ .read = true });
+ defer index_file.close(io);
var index_file_buffer: [4096]u8 = undefined;
- var index_file_writer = index_file.writer(&index_file_buffer);
+ var index_file_writer = index_file.writer(io, &index_file_buffer);
try indexPack(allocator, format, &pack_file_reader, &index_file_writer);
std.debug.print("Starting checkout...\n", .{});
@@ -1738,7 +1740,7 @@ pub fn main() !void {
defer repository.deinit();
var diagnostics: Diagnostics = .{ .allocator = allocator };
defer diagnostics.deinit();
- try repository.checkout(worktree, commit, &diagnostics);
+ try repository.checkout(io, worktree, commit, &diagnostics);
for (diagnostics.errors.items) |err| {
std.debug.print("Diagnostic: {}\n", .{err});
diff --git a/src/Sema.zig b/src/Sema.zig
index fec6850c4c..298de783b8 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2668,16 +2668,18 @@ fn failWithTypeMismatch(sema: *Sema, block: *Block, src: LazySrcLoc, expected: T
pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Zcu.ErrorMsg) error{ AnalysisFail, OutOfMemory } {
@branchHint(.cold);
- const gpa = sema.gpa;
const zcu = sema.pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
- if (build_options.enable_debug_extensions and zcu.comp.debug_compile_errors) {
+ if (build_options.enable_debug_extensions and comp.debug_compile_errors) {
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
wip_errors.init(gpa) catch @panic("out of memory");
Compilation.addModuleErrorMsg(zcu, &wip_errors, err_msg.*, false) catch @panic("out of memory");
std.debug.print("compile error during Sema:\n", .{});
var error_bundle = wip_errors.toOwnedBundle("") catch @panic("out of memory");
- error_bundle.renderToStdErr(.{}, .auto);
+ error_bundle.renderToStderr(io, .{}, .auto) catch @panic("failed to print to stderr");
std.debug.panicExtra(@returnAddress(), "unexpected compile error occurred", .{});
}
diff --git a/src/Zcu.zig b/src/Zcu.zig
index 137b4d8b59..07fb1bdc94 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -1076,11 +1076,11 @@ pub const File = struct {
var f = f: {
const dir, const sub_path = file.path.openInfo(zcu.comp.dirs);
- break :f try dir.openFile(sub_path, .{});
+ break :f try dir.openFile(io, sub_path, .{});
};
- defer f.close();
+ defer f.close(io);
- const stat = f.stat() catch |err| switch (err) {
+ const stat = f.stat(io) catch |err| switch (err) {
error.Streaming => {
// Since `file.stat` is populated, this was previously a file stream; since it is
// now not a file stream, it must have changed.
@@ -1200,7 +1200,7 @@ pub const EmbedFile = struct {
/// `.none` means the file was not loaded, so `stat` is undefined.
val: InternPool.Index,
/// If this is `null` and `val` is `.none`, the file has never been loaded.
- err: ?(std.fs.File.OpenError || std.fs.File.StatError || std.fs.File.ReadError || error{UnexpectedEof}),
+ err: ?(Io.File.OpenError || Io.File.StatError || Io.File.Reader.Error || error{UnexpectedEof}),
stat: Cache.File.Stat,
pub const Index = enum(u32) {
@@ -2813,8 +2813,8 @@ pub fn init(zcu: *Zcu, gpa: Allocator, io: Io, thread_count: usize) !void {
pub fn deinit(zcu: *Zcu) void {
const comp = zcu.comp;
- const gpa = comp.gpa;
const io = comp.io;
+ const gpa = zcu.gpa;
{
const pt: Zcu.PerThread = .activate(zcu, .main);
defer pt.deactivate();
@@ -2835,8 +2835,8 @@ pub fn deinit(zcu: *Zcu) void {
}
zcu.embed_table.deinit(gpa);
- zcu.local_zir_cache.handle.close();
- zcu.global_zir_cache.handle.close();
+ zcu.local_zir_cache.handle.close(io);
+ zcu.global_zir_cache.handle.close(io);
for (zcu.failed_analysis.values()) |value| value.destroy(gpa);
for (zcu.failed_codegen.values()) |value| value.destroy(gpa);
@@ -2900,7 +2900,7 @@ pub fn deinit(zcu: *Zcu) void {
if (zcu.resolved_references) |*r| r.deinit(gpa);
- if (zcu.comp.debugIncremental()) {
+ if (comp.debugIncremental()) {
zcu.incremental_debug_state.deinit(gpa);
}
}
@@ -2927,7 +2927,7 @@ comptime {
}
}
-pub fn loadZirCache(gpa: Allocator, io: Io, cache_file: std.fs.File) !Zir {
+pub fn loadZirCache(gpa: Allocator, io: Io, cache_file: Io.File) !Zir {
var buffer: [2000]u8 = undefined;
var file_reader = cache_file.reader(io, &buffer);
return result: {
@@ -2986,7 +2986,12 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *Io.Reader
return zir;
}
-pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.Stat, zir: Zir) (std.fs.File.WriteError || Allocator.Error)!void {
+pub fn saveZirCache(
+ gpa: Allocator,
+ cache_file_writer: *Io.File.Writer,
+ stat: Io.File.Stat,
+ zir: Zir,
+) (Io.File.Writer.Error || Allocator.Error)!void {
const safety_buffer = if (data_has_safety_tag)
try gpa.alloc([8]u8, zir.instructions.len)
else
@@ -3020,13 +3025,12 @@ pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.S
zir.string_bytes,
@ptrCast(zir.extra),
};
- var cache_fw = cache_file.writer(&.{});
- cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
- error.WriteFailed => return cache_fw.err.?,
+ cache_file_writer.interface.writeVecAll(&vecs) catch |err| switch (err) {
+ error.WriteFailed => return cache_file_writer.err.?,
};
}
-pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir) std.fs.File.WriteError!void {
+pub fn saveZoirCache(cache_file_writer: *Io.File.Writer, stat: Io.File.Stat, zoir: Zoir) Io.File.Writer.Error!void {
const header: Zoir.Header = .{
.nodes_len = @intCast(zoir.nodes.len),
.extra_len = @intCast(zoir.extra.len),
@@ -3050,9 +3054,8 @@ pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir
@ptrCast(zoir.compile_errors),
@ptrCast(zoir.error_notes),
};
- var cache_fw = cache_file.writer(&.{});
- cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
- error.WriteFailed => return cache_fw.err.?,
+ cache_file_writer.interface.writeVecAll(&vecs) catch |err| switch (err) {
+ error.WriteFailed => return cache_file_writer.err.?,
};
}
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 2ad5bac01c..103cbaaaae 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -94,11 +94,11 @@ pub fn updateFile(
// In any case we need to examine the stat of the file to determine the course of action.
var source_file = f: {
const dir, const sub_path = file.path.openInfo(comp.dirs);
- break :f try dir.openFile(sub_path, .{});
+ break :f try dir.openFile(io, sub_path, .{});
};
- defer source_file.close();
+ defer source_file.close(io);
- const stat = try source_file.stat();
+ const stat = try source_file.stat(io);
const want_local_cache = switch (file.path.root) {
.none, .local_cache => true,
@@ -118,7 +118,7 @@ pub fn updateFile(
const zir_dir = cache_directory.handle;
// Determine whether we need to reload the file from disk and redo parsing and AstGen.
- var lock: std.fs.File.Lock = switch (file.status) {
+ var lock: Io.File.Lock = switch (file.status) {
.never_loaded, .retryable_failure => lock: {
// First, load the cached ZIR code, if any.
log.debug("AstGen checking cache: {f} (local={}, digest={s})", .{
@@ -170,7 +170,7 @@ pub fn updateFile(
// version. Likewise if we're working on AstGen and another process asks for
// the cached file, they'll get it.
const cache_file = while (true) {
- break zir_dir.createFile(&hex_digest, .{
+ break zir_dir.createFile(io, &hex_digest, .{
.read = true,
.truncate = false,
.lock = lock,
@@ -196,7 +196,7 @@ pub fn updateFile(
cache_directory,
});
}
- break zir_dir.createFile(&hex_digest, .{
+ break zir_dir.createFile(io, &hex_digest, .{
.read = true,
.truncate = false,
.lock = lock,
@@ -215,7 +215,7 @@ pub fn updateFile(
else => |e| return e, // Retryable errors are handled at callsite.
};
};
- defer cache_file.close();
+ defer cache_file.close(io);
// Under `--time-report`, ignore cache hits; do the work anyway for those juicy numbers.
const ignore_hit = comp.time_report != null;
@@ -238,18 +238,13 @@ pub fn updateFile(
if (builtin.os.tag == .wasi or lock == .exclusive) break true;
// Otherwise, unlock to give someone a chance to get the exclusive lock
// and then upgrade to an exclusive lock.
- cache_file.unlock();
+ cache_file.unlock(io);
lock = .exclusive;
- try cache_file.lock(lock);
+ try cache_file.lock(io, lock);
};
if (need_update) {
- // The cache is definitely stale so delete the contents to avoid an underwrite later.
- cache_file.setEndPos(0) catch |err| switch (err) {
- error.FileTooBig => unreachable, // 0 is not too big
- else => |e| return e,
- };
- try cache_file.seekTo(0);
+ var cache_file_writer: Io.File.Writer = .init(cache_file, io, &.{});
if (stat.size > std.math.maxInt(u32))
return error.FileTooBig;
@@ -278,22 +273,28 @@ pub fn updateFile(
switch (file.getMode()) {
.zig => {
file.zir = try AstGen.generate(gpa, file.tree.?);
- Zcu.saveZirCache(gpa, cache_file, stat, file.zir.?) catch |err| switch (err) {
+ Zcu.saveZirCache(gpa, &cache_file_writer, stat, file.zir.?) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
- else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {s}", .{
- file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err),
+ else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {t}", .{
+ file.path.fmt(comp), cache_directory, &hex_digest, err,
}),
};
},
.zon => {
file.zoir = try ZonGen.generate(gpa, file.tree.?, .{});
- Zcu.saveZoirCache(cache_file, stat, file.zoir.?) catch |err| {
- log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {s}", .{
- file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err),
+ Zcu.saveZoirCache(&cache_file_writer, stat, file.zoir.?) catch |err| {
+ log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {t}", .{
+ file.path.fmt(comp), cache_directory, &hex_digest, err,
});
};
},
}
+
+ cache_file_writer.end() catch |err| switch (err) {
+ error.WriteFailed => return cache_file_writer.err.?,
+ else => |e| return e,
+ };
+
if (timer.finish()) |ns_astgen| {
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
@@ -346,8 +347,8 @@ pub fn updateFile(
fn loadZirZoirCache(
zcu: *Zcu,
- cache_file: std.fs.File,
- stat: std.fs.File.Stat,
+ cache_file: Io.File,
+ stat: Io.File.Stat,
file: *Zcu.File,
comptime mode: Ast.Mode,
) !enum { success, invalid, truncated, stale } {
@@ -2466,11 +2467,11 @@ fn updateEmbedFileInner(
var file = f: {
const dir, const sub_path = ef.path.openInfo(zcu.comp.dirs);
- break :f try dir.openFile(sub_path, .{});
+ break :f try dir.openFile(io, sub_path, .{});
};
- defer file.close();
+ defer file.close(io);
- const stat: Cache.File.Stat = .fromFs(try file.stat());
+ const stat: Cache.File.Stat = .fromFs(try file.stat(io));
if (ef.val != .none) {
const old_stat = ef.stat;
@@ -4524,12 +4525,14 @@ pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) Ru
.stage2_llvm,
=> {},
},
+ error.Canceled => |e| return e,
}
return error.AlreadyReported;
};
}
fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) error{
OutOfMemory,
+ Canceled,
CodegenFail,
NoLinkFile,
BackendDoesNotProduceMir,
@@ -4555,12 +4558,16 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
null;
defer if (liveness) |*l| l.deinit(gpa);
- if (build_options.enable_debug_extensions and comp.verbose_air) {
- const stderr, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- stderr.print("# Begin Function AIR: {f}:\n", .{fqn.fmt(ip)}) catch {};
- air.write(stderr, pt, liveness);
- stderr.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)}) catch {};
+ if (build_options.enable_debug_extensions and comp.verbose_air) p: {
+ const io = comp.io;
+ const stderr = try io.lockStderr(&.{}, null);
+ defer io.unlockStderr();
+ printVerboseAir(pt, liveness, fqn, air, &stderr.file_writer.interface) catch |err| switch (err) {
+ error.WriteFailed => switch (stderr.file_writer.err.?) {
+ error.Canceled => |e| return e,
+ else => break :p,
+ },
+ };
}
if (std.debug.runtime_safety) verify_liveness: {
@@ -4575,7 +4582,7 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
verify.verify() catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
- else => return zcu.codegenFail(nav, "invalid liveness: {s}", .{@errorName(err)}),
+ else => return zcu.codegenFail(nav, "invalid liveness: {t}", .{err}),
};
}
@@ -4611,3 +4618,17 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
=> return zcu.codegenFail(nav, "unable to codegen: {s}", .{@errorName(err)}),
};
}
+
+fn printVerboseAir(
+ pt: Zcu.PerThread,
+ liveness: ?Air.Liveness,
+ fqn: InternPool.NullTerminatedString,
+ air: *const Air,
+ w: *Io.Writer,
+) Io.Writer.Error!void {
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ try w.print("# Begin Function AIR: {f}:\n", .{fqn.fmt(ip)});
+ try air.write(w, pt, liveness);
+ try w.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)});
+}
diff --git a/src/codegen/aarch64/Select.zig b/src/codegen/aarch64/Select.zig
index f390d83f03..49de055b47 100644
--- a/src/codegen/aarch64/Select.zig
+++ b/src/codegen/aarch64/Select.zig
@@ -11273,15 +11273,17 @@ fn initValueAdvanced(
return @enumFromInt(isel.values.items.len);
}
pub fn dumpValues(isel: *Select, which: enum { only_referenced, all }) void {
- errdefer |err| @panic(@errorName(err));
- const stderr, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
-
const zcu = isel.pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const nav = ip.getNav(isel.nav_index);
+ errdefer |err| @panic(@errorName(err));
+
+ const locked_stderr = std.debug.lockStderr(&.{});
+ defer std.debug.unlockStderr();
+ const stderr = &locked_stderr.file_writer.interface;
+
var reverse_live_values: std.AutoArrayHashMapUnmanaged(Value.Index, std.ArrayList(Air.Inst.Index)) = .empty;
defer {
for (reverse_live_values.values()) |*list| list.deinit(gpa);
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 5dc55b74f6..fca89ea4fc 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1,19 +1,22 @@
-const std = @import("std");
const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.codegen);
const math = std.math;
const DW = std.dwarf;
-
const Builder = std.zig.llvm.Builder;
+
+const build_options = @import("build_options");
const llvm = if (build_options.have_llvm)
@import("llvm/bindings.zig")
else
@compileError("LLVM unavailable");
+
const link = @import("../link.zig");
const Compilation = @import("../Compilation.zig");
-const build_options = @import("build_options");
const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");
const Package = @import("../Package.zig");
@@ -799,6 +802,7 @@ pub const Object = struct {
pub fn emit(o: *Object, pt: Zcu.PerThread, options: EmitOptions) error{ LinkFailure, OutOfMemory }!void {
const zcu = pt.zcu;
const comp = zcu.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
{
@@ -961,10 +965,10 @@ pub const Object = struct {
const context, const module = emit: {
if (options.pre_ir_path) |path| {
if (std.mem.eql(u8, path, "-")) {
- o.builder.dump();
+ o.builder.dump(io);
} else {
- o.builder.printToFilePath(std.fs.cwd(), path) catch |err| {
- log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) });
+ o.builder.printToFilePath(io, Io.Dir.cwd(), path) catch |err| {
+ log.err("failed printing LLVM module to \"{s}\": {t}", .{ path, err });
};
}
}
@@ -977,26 +981,26 @@ pub const Object = struct {
o.builder.clearAndFree();
if (options.pre_bc_path) |path| {
- var file = std.fs.cwd().createFile(path, .{}) catch |err|
- return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) });
- defer file.close();
+ var file = Io.Dir.cwd().createFile(io, path, .{}) catch |err|
+ return diags.fail("failed to create '{s}': {t}", .{ path, err });
+ defer file.close(io);
const ptr: [*]const u8 = @ptrCast(bitcode.ptr);
- file.writeAll(ptr[0..(bitcode.len * 4)]) catch |err|
- return diags.fail("failed to write to '{s}': {s}", .{ path, @errorName(err) });
+ file.writeStreamingAll(io, ptr[0..(bitcode.len * 4)]) catch |err|
+ return diags.fail("failed to write to '{s}': {t}", .{ path, err });
}
if (options.asm_path == null and options.bin_path == null and
options.post_ir_path == null and options.post_bc_path == null) return;
if (options.post_bc_path) |path| {
- var file = std.fs.cwd().createFile(path, .{}) catch |err|
- return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) });
- defer file.close();
+ var file = Io.Dir.cwd().createFile(io, path, .{}) catch |err|
+ return diags.fail("failed to create '{s}': {t}", .{ path, err });
+ defer file.close(io);
const ptr: [*]const u8 = @ptrCast(bitcode.ptr);
- file.writeAll(ptr[0..(bitcode.len * 4)]) catch |err|
- return diags.fail("failed to write to '{s}': {s}", .{ path, @errorName(err) });
+ file.writeStreamingAll(io, ptr[0..(bitcode.len * 4)]) catch |err|
+ return diags.fail("failed to write to '{s}': {t}", .{ path, err });
}
if (!build_options.have_llvm or !comp.config.use_lib_llvm) {
@@ -2710,7 +2714,7 @@ pub const Object = struct {
}
fn allocTypeName(o: *Object, pt: Zcu.PerThread, ty: Type) Allocator.Error![:0]const u8 {
- var aw: std.Io.Writer.Allocating = .init(o.gpa);
+ var aw: Io.Writer.Allocating = .init(o.gpa);
defer aw.deinit();
ty.print(&aw.writer, pt, null) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
diff --git a/src/crash_report.zig b/src/crash_report.zig
index d525d4b3b5..e56bc7cec5 100644
--- a/src/crash_report.zig
+++ b/src/crash_report.zig
@@ -95,19 +95,20 @@ fn dumpCrashContext() Io.Writer.Error!void {
// TODO: this does mean that a different thread could grab the stderr mutex between the context
// and the actual panic printing, which would be quite confusing.
- const stderr, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
+ const stderr = std.debug.lockStderr(&.{});
+ defer std.debug.unlockStderr();
+ const w = &stderr.file_writer.interface;
- try stderr.writeAll("Compiler crash context:\n");
+ try w.writeAll("Compiler crash context:\n");
if (CodegenFunc.current) |*cg| {
const func_nav = cg.zcu.funcInfo(cg.func_index).owner_nav;
const func_fqn = cg.zcu.intern_pool.getNav(func_nav).fqn;
- try stderr.print("Generating function '{f}'\n\n", .{func_fqn.fmt(&cg.zcu.intern_pool)});
+ try w.print("Generating function '{f}'\n\n", .{func_fqn.fmt(&cg.zcu.intern_pool)});
} else if (AnalyzeBody.current) |anal| {
- try dumpCrashContextSema(anal, stderr, &S.crash_heap);
+ try dumpCrashContextSema(anal, w, &S.crash_heap);
} else {
- try stderr.writeAll("(no context)\n\n");
+ try w.writeAll("(no context)\n\n");
}
}
fn dumpCrashContextSema(anal: *AnalyzeBody, stderr: *Io.Writer, crash_heap: []u8) Io.Writer.Error!void {
diff --git a/src/fmt.zig b/src/fmt.zig
index 80925200d6..b1903aad53 100644
--- a/src/fmt.zig
+++ b/src/fmt.zig
@@ -37,9 +37,9 @@ const Fmt = struct {
arena: Allocator,
io: Io,
out_buffer: std.Io.Writer.Allocating,
- stdout_writer: *fs.File.Writer,
+ stdout_writer: *Io.File.Writer,
- const SeenMap = std.AutoHashMap(fs.File.INode, void);
+ const SeenMap = std.AutoHashMap(Io.File.INode, void);
};
pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !void {
@@ -59,8 +59,8 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_fmt);
- return process.cleanExit();
+ try Io.File.stdout().writeStreamingAll(io, usage_fmt);
+ return process.cleanExit(io);
} else if (mem.eql(u8, arg, "--color")) {
if (i + 1 >= args.len) {
fatal("expected [auto|on|off] after --color", .{});
@@ -99,9 +99,9 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
fatal("cannot use --stdin with positional arguments", .{});
}
- const stdin: fs.File = .stdin();
+ const stdin: Io.File = .stdin();
var stdio_buffer: [1024]u8 = undefined;
- var file_reader: fs.File.Reader = stdin.reader(io, &stdio_buffer);
+ var file_reader: Io.File.Reader = stdin.reader(io, &stdio_buffer);
const source_code = std.zig.readSourceFileToEndAlloc(gpa, &file_reader) catch |err| {
fatal("unable to read stdin: {}", .{err});
};
@@ -124,7 +124,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
try wip_errors.addZirErrorMessages(zir, tree, source_code, "<stdin>");
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, color);
+ error_bundle.renderToStderr(io, .{}, color) catch {};
process.exit(2);
}
} else {
@@ -138,12 +138,12 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
try wip_errors.addZoirErrorMessages(zoir, tree, source_code, "<stdin>");
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, color);
+ error_bundle.renderToStderr(io, .{}, color) catch {};
process.exit(2);
}
}
} else if (tree.errors.len != 0) {
- try std.zig.printAstErrorsToStderr(gpa, tree, "<stdin>", color);
+ std.zig.printAstErrorsToStderr(gpa, io, tree, "<stdin>", color) catch {};
process.exit(2);
}
const formatted = try tree.renderAlloc(gpa);
@@ -154,7 +154,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
process.exit(code);
}
- return fs.File.stdout().writeAll(formatted);
+ return Io.File.stdout().writeStreamingAll(io, formatted);
}
if (input_files.items.len == 0) {
@@ -162,7 +162,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
}
var stdout_buffer: [4096]u8 = undefined;
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
var fmt: Fmt = .{
.gpa = gpa,
@@ -182,13 +182,13 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
// Mark any excluded files/directories as already seen,
// so that they are skipped later during actual processing
for (excluded_files.items) |file_path| {
- const stat = fs.cwd().statFile(file_path) catch |err| switch (err) {
+ const stat = Io.Dir.cwd().statFile(io, file_path, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
// On Windows, statFile does not work for directories
error.IsDir => dir: {
- var dir = try fs.cwd().openDir(file_path, .{});
- defer dir.close();
- break :dir try dir.stat();
+ var dir = try Io.Dir.cwd().openDir(io, file_path, .{});
+ defer dir.close(io);
+ break :dir try dir.stat(io);
},
else => |e| return e,
};
@@ -196,7 +196,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
}
for (input_files.items) |file_path| {
- try fmtPath(&fmt, file_path, check_flag, fs.cwd(), file_path);
+ try fmtPath(&fmt, file_path, check_flag, Io.Dir.cwd(), file_path);
}
try fmt.stdout_writer.interface.flush();
if (fmt.any_error) {
@@ -204,7 +204,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
}
}
-fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) !void {
+fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: Io.Dir, sub_path: []const u8) !void {
fmtPathFile(fmt, file_path, check_mode, dir, sub_path) catch |err| switch (err) {
error.IsDir, error.AccessDenied => return fmtPathDir(fmt, file_path, check_mode, dir, sub_path),
else => {
@@ -219,17 +219,19 @@ fn fmtPathDir(
fmt: *Fmt,
file_path: []const u8,
check_mode: bool,
- parent_dir: fs.Dir,
+ parent_dir: Io.Dir,
parent_sub_path: []const u8,
) !void {
- var dir = try parent_dir.openDir(parent_sub_path, .{ .iterate = true });
- defer dir.close();
+ const io = fmt.io;
+
+ var dir = try parent_dir.openDir(io, parent_sub_path, .{ .iterate = true });
+ defer dir.close(io);
- const stat = try dir.stat();
+ const stat = try dir.stat(io);
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
var dir_it = dir.iterate();
- while (try dir_it.next()) |entry| {
+ while (try dir_it.next(io)) |entry| {
const is_dir = entry.kind == .directory;
if (mem.startsWith(u8, entry.name, ".")) continue;
@@ -242,7 +244,7 @@ fn fmtPathDir(
try fmtPathDir(fmt, full_path, check_mode, dir, entry.name);
} else {
fmtPathFile(fmt, full_path, check_mode, dir, entry.name) catch |err| {
- std.log.err("unable to format '{s}': {s}", .{ full_path, @errorName(err) });
+ std.log.err("unable to format '{s}': {t}", .{ full_path, err });
fmt.any_error = true;
return;
};
@@ -255,22 +257,22 @@ fn fmtPathFile(
fmt: *Fmt,
file_path: []const u8,
check_mode: bool,
- dir: fs.Dir,
+ dir: Io.Dir,
sub_path: []const u8,
) !void {
const io = fmt.io;
- const source_file = try dir.openFile(sub_path, .{});
+ const source_file = try dir.openFile(io, sub_path, .{});
var file_closed = false;
- errdefer if (!file_closed) source_file.close();
+ errdefer if (!file_closed) source_file.close(io);
- const stat = try source_file.stat();
+ const stat = try source_file.stat(io);
if (stat.kind == .directory)
return error.IsDir;
var read_buffer: [1024]u8 = undefined;
- var file_reader: fs.File.Reader = source_file.reader(io, &read_buffer);
+ var file_reader: Io.File.Reader = source_file.reader(io, &read_buffer);
file_reader.size = stat.size;
const gpa = fmt.gpa;
@@ -280,7 +282,7 @@ fn fmtPathFile(
};
defer gpa.free(source_code);
- source_file.close();
+ source_file.close(io);
file_closed = true;
// Add to set after no longer possible to get error.IsDir.
@@ -296,7 +298,7 @@ fn fmtPathFile(
defer tree.deinit(gpa);
if (tree.errors.len != 0) {
- try std.zig.printAstErrorsToStderr(gpa, tree, file_path, fmt.color);
+ try std.zig.printAstErrorsToStderr(gpa, io, tree, file_path, fmt.color);
fmt.any_error = true;
return;
}
@@ -317,7 +319,7 @@ fn fmtPathFile(
try wip_errors.addZirErrorMessages(zir, tree, source_code, file_path);
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, fmt.color);
+ try error_bundle.renderToStderr(io, .{}, fmt.color);
fmt.any_error = true;
}
},
@@ -332,7 +334,7 @@ fn fmtPathFile(
try wip_errors.addZoirErrorMessages(zoir, tree, source_code, file_path);
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, fmt.color);
+ try error_bundle.renderToStderr(io, .{}, fmt.color);
fmt.any_error = true;
}
},
@@ -353,7 +355,7 @@ fn fmtPathFile(
try fmt.stdout_writer.interface.print("{s}\n", .{file_path});
fmt.any_error = true;
} else {
- var af = try dir.atomicFile(sub_path, .{ .mode = stat.mode, .write_buffer = &.{} });
+ var af = try dir.atomicFile(io, sub_path, .{ .permissions = stat.permissions, .write_buffer = &.{} });
defer af.deinit();
try af.file_writer.interface.writeAll(fmt.out_buffer.written());
@@ -368,7 +370,7 @@ pub fn main() !void {
var arena_instance = std.heap.ArenaAllocator.init(gpa);
const arena = arena_instance.allocator();
const args = try process.argsAlloc(arena);
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
return run(gpa, arena, io, args[1..]);
diff --git a/src/introspect.zig b/src/introspect.zig
index 8467b566c6..0a57505aeb 100644
--- a/src/introspect.zig
+++ b/src/introspect.zig
@@ -1,53 +1,55 @@
-const std = @import("std");
const builtin = @import("builtin");
+const build_options = @import("build_options");
+
+const std = @import("std");
+const Io = std.Io;
+const Dir = std.Io.Dir;
const mem = std.mem;
-const Allocator = mem.Allocator;
-const os = std.os;
-const fs = std.fs;
+const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
+
const Compilation = @import("Compilation.zig");
const Package = @import("Package.zig");
-const build_options = @import("build_options");
/// Returns the sub_path that worked, or `null` if none did.
/// The path of the returned Directory is relative to `base`.
/// The handle of the returned Directory is open.
-fn testZigInstallPrefix(base_dir: fs.Dir) ?Cache.Directory {
- const test_index_file = "std" ++ fs.path.sep_str ++ "std.zig";
+fn testZigInstallPrefix(io: Io, base_dir: Io.Dir) ?Cache.Directory {
+ const test_index_file = "std" ++ Dir.path.sep_str ++ "std.zig";
zig_dir: {
// Try lib/zig/std/std.zig
- const lib_zig = "lib" ++ fs.path.sep_str ++ "zig";
- var test_zig_dir = base_dir.openDir(lib_zig, .{}) catch break :zig_dir;
- const file = test_zig_dir.openFile(test_index_file, .{}) catch {
- test_zig_dir.close();
+ const lib_zig = "lib" ++ Dir.path.sep_str ++ "zig";
+ var test_zig_dir = base_dir.openDir(io, lib_zig, .{}) catch break :zig_dir;
+ const file = test_zig_dir.openFile(io, test_index_file, .{}) catch {
+ test_zig_dir.close(io);
break :zig_dir;
};
- file.close();
+ file.close(io);
return .{ .handle = test_zig_dir, .path = lib_zig };
}
// Try lib/std/std.zig
- var test_zig_dir = base_dir.openDir("lib", .{}) catch return null;
- const file = test_zig_dir.openFile(test_index_file, .{}) catch {
- test_zig_dir.close();
+ var test_zig_dir = base_dir.openDir(io, "lib", .{}) catch return null;
+ const file = test_zig_dir.openFile(io, test_index_file, .{}) catch {
+ test_zig_dir.close(io);
return null;
};
- file.close();
+ file.close(io);
return .{ .handle = test_zig_dir, .path = "lib" };
}
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
-pub fn findZigLibDir(gpa: Allocator) !Cache.Directory {
+pub fn findZigLibDir(gpa: Allocator, io: Io) !Cache.Directory {
const cwd_path = try getResolvedCwd(gpa);
defer gpa.free(cwd_path);
- const self_exe_path = try fs.selfExePathAlloc(gpa);
+ const self_exe_path = try std.process.executablePathAlloc(io, gpa);
defer gpa.free(self_exe_path);
- return findZigLibDirFromSelfExe(gpa, cwd_path, self_exe_path);
+ return findZigLibDirFromSelfExe(gpa, io, cwd_path, self_exe_path);
}
-/// Like `std.process.getCwdAlloc`, but also resolves the path with `std.fs.path.resolve`. This
+/// Like `std.process.getCwdAlloc`, but also resolves the path with `Dir.path.resolve`. This
/// means the path has no repeated separators, no "." or ".." components, and no trailing separator.
/// On WASI, "" is returned instead of ".".
pub fn getResolvedCwd(gpa: Allocator) error{
@@ -65,27 +67,28 @@ pub fn getResolvedCwd(gpa: Allocator) error{
}
const cwd = try std.process.getCwdAlloc(gpa);
defer gpa.free(cwd);
- const resolved = try fs.path.resolve(gpa, &.{cwd});
- std.debug.assert(fs.path.isAbsolute(resolved));
+ const resolved = try Dir.path.resolve(gpa, &.{cwd});
+ std.debug.assert(Dir.path.isAbsolute(resolved));
return resolved;
}
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
pub fn findZigLibDirFromSelfExe(
allocator: Allocator,
+ io: Io,
/// The return value of `getResolvedCwd`.
/// Passed as an argument to avoid pointlessly repeating the call.
cwd_path: []const u8,
self_exe_path: []const u8,
) error{ OutOfMemory, FileNotFound }!Cache.Directory {
- const cwd = fs.cwd();
+ const cwd = Io.Dir.cwd();
var cur_path: []const u8 = self_exe_path;
- while (fs.path.dirname(cur_path)) |dirname| : (cur_path = dirname) {
- var base_dir = cwd.openDir(dirname, .{}) catch continue;
- defer base_dir.close();
+ while (Dir.path.dirname(cur_path)) |dirname| : (cur_path = dirname) {
+ var base_dir = cwd.openDir(io, dirname, .{}) catch continue;
+ defer base_dir.close(io);
- const sub_directory = testZigInstallPrefix(base_dir) orelse continue;
- const p = try fs.path.join(allocator, &.{ dirname, sub_directory.path.? });
+ const sub_directory = testZigInstallPrefix(io, base_dir) orelse continue;
+ const p = try Dir.path.join(allocator, &.{ dirname, sub_directory.path.? });
defer allocator.free(p);
const resolved = try resolvePath(allocator, cwd_path, &.{p});
@@ -109,18 +112,18 @@ pub fn resolveGlobalCacheDir(allocator: Allocator) ![]u8 {
if (builtin.os.tag != .windows) {
if (std.zig.EnvVar.XDG_CACHE_HOME.getPosix()) |cache_root| {
if (cache_root.len > 0) {
- return fs.path.join(allocator, &.{ cache_root, appname });
+ return Dir.path.join(allocator, &.{ cache_root, appname });
}
}
if (std.zig.EnvVar.HOME.getPosix()) |home| {
- return fs.path.join(allocator, &.{ home, ".cache", appname });
+ return Dir.path.join(allocator, &.{ home, ".cache", appname });
}
}
- return fs.getAppDataDir(allocator, appname);
+ return std.fs.getAppDataDir(allocator, appname);
}
-/// Similar to `fs.path.resolve`, but converts to a cwd-relative path, or, if that would
+/// Similar to `Dir.path.resolve`, but converts to a cwd-relative path, or, if that would
/// start with a relative up-dir (".."), an absolute path based on the cwd. Also, the cwd
/// returns the empty string ("") instead of ".".
pub fn resolvePath(
@@ -132,7 +135,7 @@ pub fn resolvePath(
) Allocator.Error![]u8 {
if (builtin.target.os.tag == .wasi) {
std.debug.assert(mem.eql(u8, cwd_resolved, ""));
- const res = try fs.path.resolve(gpa, paths);
+ const res = try Dir.path.resolve(gpa, paths);
if (mem.eql(u8, res, ".")) {
gpa.free(res);
return "";
@@ -142,16 +145,16 @@ pub fn resolvePath(
// Heuristic for a fast path: if no component is absolute and ".." never appears, we just need to resolve `paths`.
for (paths) |p| {
- if (fs.path.isAbsolute(p)) break; // absolute path
+ if (Dir.path.isAbsolute(p)) break; // absolute path
if (mem.indexOf(u8, p, "..") != null) break; // may contain up-dir
} else {
// no absolute path, no "..".
- const res = try fs.path.resolve(gpa, paths);
+ const res = try Dir.path.resolve(gpa, paths);
if (mem.eql(u8, res, ".")) {
gpa.free(res);
return "";
}
- std.debug.assert(!fs.path.isAbsolute(res));
+ std.debug.assert(!Dir.path.isAbsolute(res));
std.debug.assert(!isUpDir(res));
return res;
}
@@ -160,19 +163,19 @@ pub fn resolvePath(
// Optimization: `paths` often has just one element.
const path_resolved = switch (paths.len) {
0 => unreachable,
- 1 => try fs.path.resolve(gpa, &.{ cwd_resolved, paths[0] }),
+ 1 => try Dir.path.resolve(gpa, &.{ cwd_resolved, paths[0] }),
else => r: {
const all_paths = try gpa.alloc([]const u8, paths.len + 1);
defer gpa.free(all_paths);
all_paths[0] = cwd_resolved;
@memcpy(all_paths[1..], paths);
- break :r try fs.path.resolve(gpa, all_paths);
+ break :r try Dir.path.resolve(gpa, all_paths);
},
};
errdefer gpa.free(path_resolved);
- std.debug.assert(fs.path.isAbsolute(path_resolved));
- std.debug.assert(fs.path.isAbsolute(cwd_resolved));
+ std.debug.assert(Dir.path.isAbsolute(path_resolved));
+ std.debug.assert(Dir.path.isAbsolute(cwd_resolved));
if (!std.mem.startsWith(u8, path_resolved, cwd_resolved)) return path_resolved; // not in cwd
if (path_resolved.len == cwd_resolved.len) {
@@ -180,7 +183,7 @@ pub fn resolvePath(
gpa.free(path_resolved);
return "";
}
- if (path_resolved[cwd_resolved.len] != std.fs.path.sep) return path_resolved; // not in cwd (last component differs)
+ if (path_resolved[cwd_resolved.len] != Dir.path.sep) return path_resolved; // not in cwd (last component differs)
// in cwd; extract sub path
const sub_path = try gpa.dupe(u8, path_resolved[cwd_resolved.len + 1 ..]);
@@ -188,9 +191,8 @@ pub fn resolvePath(
return sub_path;
}
-/// TODO move this to std.fs.path
pub fn isUpDir(p: []const u8) bool {
- return mem.startsWith(u8, p, "..") and (p.len == 2 or p[2] == fs.path.sep);
+ return mem.startsWith(u8, p, "..") and (p.len == 2 or p[2] == Dir.path.sep);
}
pub const default_local_zig_cache_basename = ".zig-cache";
@@ -198,15 +200,15 @@ pub const default_local_zig_cache_basename = ".zig-cache";
/// Searches upwards from `cwd` for a directory containing a `build.zig` file.
/// If such a directory is found, returns the path to it joined to the `.zig_cache` name.
/// Otherwise, returns `null`, indicating no suitable local cache location.
-pub fn resolveSuitableLocalCacheDir(arena: Allocator, cwd: []const u8) Allocator.Error!?[]u8 {
+pub fn resolveSuitableLocalCacheDir(arena: Allocator, io: Io, cwd: []const u8) Allocator.Error!?[]u8 {
var cur_dir = cwd;
while (true) {
- const joined = try fs.path.join(arena, &.{ cur_dir, Package.build_zig_basename });
- if (fs.cwd().access(joined, .{})) |_| {
- return try fs.path.join(arena, &.{ cur_dir, default_local_zig_cache_basename });
+ const joined = try Dir.path.join(arena, &.{ cur_dir, Package.build_zig_basename });
+ if (Io.Dir.cwd().access(io, joined, .{})) |_| {
+ return try Dir.path.join(arena, &.{ cur_dir, default_local_zig_cache_basename });
} else |err| switch (err) {
error.FileNotFound => {
- cur_dir = fs.path.dirname(cur_dir) orelse return null;
+ cur_dir = Dir.path.dirname(cur_dir) orelse return null;
continue;
},
else => return null,
diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig
index afeb5b3282..ba85f45830 100644
--- a/src/libs/freebsd.zig
+++ b/src/libs/freebsd.zig
@@ -1,9 +1,9 @@
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const mem = std.mem;
const log = std.log;
-const fs = std.fs;
-const path = fs.path;
+const path = std.Io.Dir.path;
const assert = std.debug.assert;
const Version = std.SemanticVersion;
const Path = std.Build.Cache.Path;
@@ -401,8 +401,8 @@ pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: Path,
- pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
- self.lock.release();
+ pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator, io: Io) void {
+ self.lock.release(io);
gpa.free(self.dir_path.sub_path);
self.* = undefined;
}
@@ -444,12 +444,12 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var cache: Cache = .{
.gpa = gpa,
.io = io,
- .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, "h", .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() });
cache.addPrefix(comp.dirs.zig_lib);
cache.addPrefix(comp.dirs.global_cache);
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
var man = cache.obtain();
defer man.deinit();
@@ -468,7 +468,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.lock = man.toOwnedLock(),
.dir_path = .{
.root_dir = comp.dirs.global_cache,
- .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
},
});
}
@@ -477,10 +477,10 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });
var o_directory: Cache.Directory = .{
- .handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}),
+ .handle = try comp.dirs.global_cache.handle.createDirPathOpen(io, o_sub_path, .{}),
.path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}),
};
- defer o_directory.handle.close();
+ defer o_directory.handle.close(io);
const abilists_contents = man.files.keys()[abilists_index].contents.?;
const metadata = try loadMetaData(gpa, abilists_contents);
@@ -520,7 +520,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
for (metadata.all_versions[0 .. target_ver_index + 1]) |ver| {
try map_contents.print("FBSD_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
}
- try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items });
+ try o_directory.handle.writeFile(io, .{ .sub_path = all_map_basename, .data = map_contents.items });
map_contents.deinit();
}
@@ -974,7 +974,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "stdthreads", etc.
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
- try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items });
+ try o_directory.handle.writeFile(io, .{ .sub_path = asm_file_basename, .data = stubs_asm.items });
try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node);
}
@@ -986,7 +986,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.lock = man.toOwnedLock(),
.dir_path = .{
.root_dir = comp.dirs.global_cache,
- .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
},
});
}
@@ -1014,7 +1014,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C
const so_path: Path = .{
.root_dir = so_files.dir_path.root_dir,
.sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{
- so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.getSoVersion(&target.os),
+ so_files.dir_path.sub_path, path.sep, lib.name, lib.getSoVersion(&target.os),
}) catch return comp.setAllocFailure(),
};
task_buffer[task_buffer_i] = .{ .load_dso = so_path };
diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig
index 64d0fdbeac..e9b6ce1882 100644
--- a/src/libs/glibc.zig
+++ b/src/libs/glibc.zig
@@ -1,9 +1,9 @@
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const mem = std.mem;
const log = std.log;
-const fs = std.fs;
-const path = fs.path;
+const path = std.Io.Dir.path;
const assert = std.debug.assert;
const Version = std.SemanticVersion;
const Path = std.Build.Cache.Path;
@@ -640,8 +640,8 @@ pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: Path,
- pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
- self.lock.release();
+ pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator, io: Io) void {
+ self.lock.release(io);
gpa.free(self.dir_path.sub_path);
self.* = undefined;
}
@@ -679,12 +679,12 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var cache: Cache = .{
.gpa = gpa,
.io = io,
- .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, "h", .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() });
cache.addPrefix(comp.dirs.zig_lib);
cache.addPrefix(comp.dirs.global_cache);
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
var man = cache.obtain();
defer man.deinit();
@@ -703,7 +703,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.lock = man.toOwnedLock(),
.dir_path = .{
.root_dir = comp.dirs.global_cache,
- .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
},
});
}
@@ -712,10 +712,10 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });
var o_directory: Cache.Directory = .{
- .handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}),
+ .handle = try comp.dirs.global_cache.handle.createDirPathOpen(io, o_sub_path, .{}),
.path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}),
};
- defer o_directory.handle.close();
+ defer o_directory.handle.close(io);
const abilists_contents = man.files.keys()[abilists_index].contents.?;
const metadata = try loadMetaData(gpa, abilists_contents);
@@ -759,7 +759,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
try map_contents.print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch });
}
}
- try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items });
+ try o_directory.handle.writeFile(io, .{ .sub_path = all_map_basename, .data = map_contents.items });
map_contents.deinit(); // The most recent allocation of an arena can be freed :)
}
@@ -775,7 +775,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
try stubs_asm.appendSlice(".text\n");
var sym_i: usize = 0;
- var sym_name_buf: std.Io.Writer.Allocating = .init(arena);
+ var sym_name_buf: Io.Writer.Allocating = .init(arena);
var opt_symbol_name: ?[]const u8 = null;
var versions_buffer: [32]u8 = undefined;
var versions_len: usize = undefined;
@@ -796,7 +796,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
// twice, which causes a "duplicate symbol" assembler error.
var versions_written = std.AutoArrayHashMap(Version, void).init(arena);
- var inc_reader: std.Io.Reader = .fixed(metadata.inclusions);
+ var inc_reader: Io.Reader = .fixed(metadata.inclusions);
const fn_inclusions_len = try inc_reader.takeInt(u16, .little);
@@ -1118,7 +1118,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc.
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
- try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items });
+ try o_directory.handle.writeFile(io, .{ .sub_path = asm_file_basename, .data = stubs_asm.items });
try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node);
}
@@ -1130,7 +1130,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.lock = man.toOwnedLock(),
.dir_path = .{
.root_dir = comp.dirs.global_cache,
- .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
},
});
}
@@ -1156,7 +1156,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C
const so_path: Path = .{
.root_dir = so_files.dir_path.root_dir,
.sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{
- so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover,
+ so_files.dir_path.sub_path, path.sep, lib.name, lib.sover,
}) catch return comp.setAllocFailure(),
};
task_buffer[task_buffer_i] = .{ .load_dso = so_path };
diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig
index b3c018996a..03ed917c4f 100644
--- a/src/libs/mingw.zig
+++ b/src/libs/mingw.zig
@@ -1,7 +1,8 @@
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const mem = std.mem;
-const path = std.fs.path;
+const path = std.Io.Dir.path;
const assert = std.debug.assert;
const log = std.log.scoped(.mingw);
@@ -241,7 +242,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const def_file_path = findDef(arena, comp.getTarget(), comp.dirs.zig_lib, lib_name) catch |err| switch (err) {
+ const def_file_path = findDef(arena, io, comp.getTarget(), comp.dirs.zig_lib, lib_name) catch |err| switch (err) {
error.FileNotFound => {
log.debug("no {s}.def file available to make a DLL import {s}.lib", .{ lib_name, lib_name });
// In this case we will end up putting foo.lib onto the linker line and letting the linker
@@ -257,12 +258,12 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
var cache: Cache = .{
.gpa = gpa,
.io = io,
- .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, "h", .{}),
};
- cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() });
cache.addPrefix(comp.dirs.zig_lib);
cache.addPrefix(comp.dirs.global_cache);
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
cache.hash.addBytes(build_options.version);
cache.hash.addOptionalBytes(comp.dirs.zig_lib.path);
@@ -296,26 +297,32 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
const digest = man.final();
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
- var o_dir = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{});
- defer o_dir.close();
+ var o_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, o_sub_path, .{});
+ defer o_dir.close(io);
const aro = @import("aro");
var diagnostics: aro.Diagnostics = .{
.output = .{ .to_list = .{ .arena = .init(gpa) } },
};
defer diagnostics.deinit();
- var aro_comp = aro.Compilation.init(gpa, arena, io, &diagnostics, std.fs.cwd());
+ var aro_comp = aro.Compilation.init(gpa, arena, io, &diagnostics, Io.Dir.cwd());
defer aro_comp.deinit();
aro_comp.target = .fromZigTarget(target.*);
const include_dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "mingw", "def-include" });
- if (comp.verbose_cc) print: {
- var stderr, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- nosuspend stderr.print("def file: {s}\n", .{def_file_path}) catch break :print;
- nosuspend stderr.print("include dir: {s}\n", .{include_dir}) catch break :print;
+ if (comp.verbose_cc) {
+ var buffer: [256]u8 = undefined;
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ w.print("def file: {s}\n", .{def_file_path}) catch |err| switch (err) {
+ error.WriteFailed => return stderr.file_writer.err.?,
+ };
+ w.print("include dir: {s}\n", .{include_dir}) catch |err| switch (err) {
+ error.WriteFailed => return stderr.file_writer.err.?,
+ };
}
try aro_comp.search_path.append(gpa, .{ .path = include_dir, .kind = .normal });
@@ -332,18 +339,21 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
if (aro_comp.diagnostics.output.to_list.messages.items.len != 0) {
var buffer: [64]u8 = undefined;
- const w, const ttyconf = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
for (aro_comp.diagnostics.output.to_list.messages.items) |msg| {
if (msg.kind == .@"fatal error" or msg.kind == .@"error") {
- msg.write(w, ttyconf, true) catch {};
+ msg.write(stderr.terminal(), true) catch |err| switch (err) {
+ error.WriteFailed => return stderr.file_writer.err.?,
+ error.Unexpected => |e| return e,
+ };
return error.AroPreprocessorFailed;
}
}
}
const members = members: {
- var aw: std.Io.Writer.Allocating = .init(gpa);
+ var aw: Io.Writer.Allocating = .init(gpa);
errdefer aw.deinit();
try pp.prettyPrintTokens(&aw.writer, .result_only);
@@ -356,8 +366,9 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
error.OutOfMemory => |e| return e,
error.ParseError => {
var buffer: [64]u8 = undefined;
- const w, _ = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
try w.writeAll("error: ");
try def_diagnostics.writeMsg(w, input);
try w.writeByte('\n');
@@ -376,10 +387,10 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
errdefer gpa.free(lib_final_path);
{
- const lib_final_file = try o_dir.createFile(final_lib_basename, .{ .truncate = true });
- defer lib_final_file.close();
+ const lib_final_file = try o_dir.createFile(io, final_lib_basename, .{ .truncate = true });
+ defer lib_final_file.close(io);
var buffer: [1024]u8 = undefined;
- var file_writer = lib_final_file.writer(&buffer);
+ var file_writer = lib_final_file.writer(io, &buffer);
try implib.writeCoffArchive(gpa, &file_writer.interface, members);
try file_writer.interface.flush();
}
@@ -401,11 +412,12 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
pub fn libExists(
allocator: Allocator,
+ io: Io,
target: *const std.Target,
zig_lib_directory: Cache.Directory,
lib_name: []const u8,
) !bool {
- const s = findDef(allocator, target, zig_lib_directory, lib_name) catch |err| switch (err) {
+ const s = findDef(allocator, io, target, zig_lib_directory, lib_name) catch |err| switch (err) {
error.FileNotFound => return false,
else => |e| return e,
};
@@ -417,6 +429,7 @@ pub fn libExists(
/// see if a .def file exists.
fn findDef(
allocator: Allocator,
+ io: Io,
target: *const std.Target,
zig_lib_directory: Cache.Directory,
lib_name: []const u8,
@@ -442,7 +455,7 @@ fn findDef(
} else {
try override_path.print(fmt_path, .{ lib_path, lib_name });
}
- if (std.fs.cwd().access(override_path.items, .{})) |_| {
+ if (Io.Dir.cwd().access(io, override_path.items, .{})) |_| {
return override_path.toOwnedSlice();
} else |err| switch (err) {
error.FileNotFound => {},
@@ -459,7 +472,7 @@ fn findDef(
} else {
try override_path.print(fmt_path, .{lib_name});
}
- if (std.fs.cwd().access(override_path.items, .{})) |_| {
+ if (Io.Dir.cwd().access(io, override_path.items, .{})) |_| {
return override_path.toOwnedSlice();
} else |err| switch (err) {
error.FileNotFound => {},
@@ -476,7 +489,7 @@ fn findDef(
} else {
try override_path.print(fmt_path, .{lib_name});
}
- if (std.fs.cwd().access(override_path.items, .{})) |_| {
+ if (Io.Dir.cwd().access(io, override_path.items, .{})) |_| {
return override_path.toOwnedSlice();
} else |err| switch (err) {
error.FileNotFound => {},
diff --git a/src/libs/mingw/def.zig b/src/libs/mingw/def.zig
index 24dc95c13c..f1c112d16e 100644
--- a/src/libs/mingw/def.zig
+++ b/src/libs/mingw/def.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
pub const ModuleDefinitionType = enum {
mingw,
@@ -663,7 +664,9 @@ test parse {
\\
;
- try testParse(.AMD64, source, "foo.dll", &[_]ModuleDefinition.Export{
+ const io = std.testing.io;
+
+ try testParse(io, .AMD64, source, "foo.dll", &[_]ModuleDefinition.Export{
.{
.name = "foo",
.mangled_symbol_name = null,
@@ -743,7 +746,7 @@ test parse {
},
});
- try testParse(.I386, source, "foo.dll", &[_]ModuleDefinition.Export{
+ try testParse(io, .I386, source, "foo.dll", &[_]ModuleDefinition.Export{
.{
.name = "_foo",
.mangled_symbol_name = null,
@@ -823,7 +826,7 @@ test parse {
},
});
- try testParse(.ARMNT, source, "foo.dll", &[_]ModuleDefinition.Export{
+ try testParse(io, .ARMNT, source, "foo.dll", &[_]ModuleDefinition.Export{
.{
.name = "foo",
.mangled_symbol_name = null,
@@ -903,7 +906,7 @@ test parse {
},
});
- try testParse(.ARM64, source, "foo.dll", &[_]ModuleDefinition.Export{
+ try testParse(io, .ARM64, source, "foo.dll", &[_]ModuleDefinition.Export{
.{
.name = "foo",
.mangled_symbol_name = null,
@@ -997,7 +1000,9 @@ test "ntdll" {
\\RtlActivateActivationContextUnsafeFast@0
;
- try testParse(.AMD64, source, "ntdll.dll", &[_]ModuleDefinition.Export{
+ const io = std.testing.io;
+
+ try testParse(io, .AMD64, source, "ntdll.dll", &[_]ModuleDefinition.Export{
.{
.name = "RtlDispatchAPC@12",
.mangled_symbol_name = null,
@@ -1023,15 +1028,22 @@ test "ntdll" {
});
}
-fn testParse(machine_type: std.coff.IMAGE.FILE.MACHINE, source: [:0]const u8, expected_module_name: []const u8, expected_exports: []const ModuleDefinition.Export) !void {
+fn testParse(
+ io: Io,
+ machine_type: std.coff.IMAGE.FILE.MACHINE,
+ source: [:0]const u8,
+ expected_module_name: []const u8,
+ expected_exports: []const ModuleDefinition.Export,
+) !void {
var diagnostics: Diagnostics = undefined;
const module = parse(std.testing.allocator, source, machine_type, .mingw, &diagnostics) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.ParseError => {
- const stderr, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- try diagnostics.writeMsg(stderr, source);
- try stderr.writeByte('\n');
+ const stderr = try io.lockStderr(&.{}, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ try diagnostics.writeMsg(w, source);
+ try w.writeByte('\n');
return err;
},
};
diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig
index 8d35e3bd71..9e4213d237 100644
--- a/src/libs/netbsd.zig
+++ b/src/libs/netbsd.zig
@@ -1,9 +1,9 @@
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const mem = std.mem;
const log = std.log;
-const fs = std.fs;
-const path = fs.path;
+const path = std.Io.Dir.path;
const assert = std.debug.assert;
const Version = std.SemanticVersion;
const Path = std.Build.Cache.Path;
@@ -346,8 +346,8 @@ pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: Path,
- pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
- self.lock.release();
+ pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator, io: Io) void {
+ self.lock.release(io);
gpa.free(self.dir_path.sub_path);
self.* = undefined;
}
@@ -385,12 +385,12 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var cache: Cache = .{
.gpa = gpa,
.io = io,
- .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, "h", .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() });
cache.addPrefix(comp.dirs.zig_lib);
cache.addPrefix(comp.dirs.global_cache);
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
var man = cache.obtain();
defer man.deinit();
@@ -409,7 +409,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.lock = man.toOwnedLock(),
.dir_path = .{
.root_dir = comp.dirs.global_cache,
- .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
},
});
}
@@ -418,10 +418,10 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });
var o_directory: Cache.Directory = .{
- .handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}),
+ .handle = try comp.dirs.global_cache.handle.createDirPathOpen(io, o_sub_path, .{}),
.path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}),
};
- defer o_directory.handle.close();
+ defer o_directory.handle.close(io);
const abilists_contents = man.files.keys()[abilists_index].contents.?;
const metadata = try loadMetaData(gpa, abilists_contents);
@@ -628,7 +628,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc.
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
- try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items });
+ try o_directory.handle.writeFile(io, .{ .sub_path = asm_file_basename, .data = stubs_asm.items });
try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node);
}
@@ -640,7 +640,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.lock = man.toOwnedLock(),
.dir_path = .{
.root_dir = comp.dirs.global_cache,
- .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
},
});
}
@@ -661,7 +661,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C
const so_path: Path = .{
.root_dir = so_files.dir_path.root_dir,
.sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{
- so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover,
+ so_files.dir_path.sub_path, path.sep, lib.name, lib.sover,
}) catch return comp.setAllocFailure(),
};
task_buffer[task_buffer_i] = .{ .load_dso = so_path };
diff --git a/src/link.zig b/src/link.zig
index 6ac96504c7..13306b90a4 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -393,7 +393,7 @@ pub const File = struct {
comp: *Compilation,
emit: Path,
- file: ?fs.File,
+ file: ?Io.File,
/// When using the LLVM backend, the emitted object is written to a file with this name. This
/// object file then becomes a normal link input to LLD or a self-hosted linker.
///
@@ -620,16 +620,16 @@ pub const File = struct {
emit.sub_path, std.crypto.random.int(u32),
});
defer gpa.free(tmp_sub_path);
- try emit.root_dir.handle.copyFile(emit.sub_path, emit.root_dir.handle, tmp_sub_path, .{});
- try emit.root_dir.handle.rename(tmp_sub_path, emit.sub_path);
+ try emit.root_dir.handle.copyFile(emit.sub_path, emit.root_dir.handle, tmp_sub_path, io, .{});
+ try emit.root_dir.handle.rename(tmp_sub_path, emit.root_dir.handle, emit.sub_path, io);
switch (builtin.os.tag) {
.linux => std.posix.ptrace(std.os.linux.PTRACE.ATTACH, pid, 0, 0) catch |err| {
- log.warn("ptrace failure: {s}", .{@errorName(err)});
+ log.warn("ptrace failure: {t}", .{err});
},
.maccatalyst, .macos => {
const macho_file = base.cast(.macho).?;
macho_file.ptraceAttach(pid) catch |err| {
- log.warn("attaching failed with error: {s}", .{@errorName(err)});
+ log.warn("attaching failed with error: {t}", .{err});
};
},
.windows => unreachable,
@@ -637,7 +637,7 @@ pub const File = struct {
}
}
}
- base.file = try emit.root_dir.handle.openFile(emit.sub_path, .{ .mode = .read_write });
+ base.file = try emit.root_dir.handle.openFile(io, emit.sub_path, .{ .mode = .read_write });
},
.elf2, .coff2 => if (base.file == null) {
const mf = if (base.cast(.elf2)) |elf|
@@ -646,10 +646,10 @@ pub const File = struct {
&coff.mf
else
unreachable;
- mf.file = try base.emit.root_dir.handle.adaptToNewApi().openFile(io, base.emit.sub_path, .{
+ mf.file = try base.emit.root_dir.handle.openFile(io, base.emit.sub_path, .{
.mode = .read_write,
});
- base.file = .adaptFromNewApi(mf.file);
+ base.file = mf.file;
try mf.ensureTotalCapacity(@intCast(mf.nodes.items[0].location().resolve(mf)[1]));
},
.c, .spirv => dev.checkAny(&.{ .c_linker, .spirv_linker }),
@@ -687,7 +687,7 @@ pub const File = struct {
.lld => assert(base.file == null),
.elf => if (base.file) |f| {
dev.check(.elf_linker);
- f.close();
+ f.close(io);
base.file = null;
if (base.child_pid) |pid| {
@@ -701,7 +701,7 @@ pub const File = struct {
},
.macho, .wasm => if (base.file) |f| {
dev.checkAny(&.{ .coff_linker, .macho_linker, .plan9_linker, .wasm_linker });
- f.close();
+ f.close(io);
base.file = null;
if (base.child_pid) |pid| {
@@ -852,10 +852,12 @@ pub const File = struct {
}
}
- pub fn releaseLock(self: *File) void {
- if (self.lock) |*lock| {
- lock.release();
- self.lock = null;
+ pub fn releaseLock(base: *File) void {
+ const comp = base.comp;
+ const io = comp.io;
+ if (base.lock) |*lock| {
+ lock.release(io);
+ base.lock = null;
}
}
@@ -866,8 +868,9 @@ pub const File = struct {
}
pub fn destroy(base: *File) void {
+ const io = base.comp.io;
base.releaseLock();
- if (base.file) |f| f.close();
+ if (base.file) |f| f.close(io);
switch (base.tag) {
.plan9 => unreachable,
inline else => |tag| {
@@ -897,16 +900,16 @@ pub const File = struct {
}
}
- pub const FlushError = error{
+ pub const FlushError = Io.Cancelable || Allocator.Error || error{
/// Indicates an error will be present in `Compilation.link_diags`.
LinkFailure,
- OutOfMemory,
};
/// Commit pending changes and write headers. Takes into account final output mode.
/// `arena` has the lifetime of the call to `Compilation.update`.
pub fn flush(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
const comp = base.comp;
+ const io = comp.io;
if (comp.clang_preprocessor_mode == .yes or comp.clang_preprocessor_mode == .pch) {
dev.check(.clang_command);
const emit = base.emit;
@@ -917,12 +920,19 @@ pub const File = struct {
assert(comp.c_object_table.count() == 1);
const the_key = comp.c_object_table.keys()[0];
const cached_pp_file_path = the_key.status.success.object_path;
- cached_pp_file_path.root_dir.handle.copyFile(cached_pp_file_path.sub_path, emit.root_dir.handle, emit.sub_path, .{}) catch |err| {
+ Io.Dir.copyFile(
+ cached_pp_file_path.root_dir.handle,
+ cached_pp_file_path.sub_path,
+ emit.root_dir.handle,
+ emit.sub_path,
+ io,
+ .{},
+ ) catch |err| {
const diags = &base.comp.link_diags;
- return diags.fail("failed to copy '{f}' to '{f}': {s}", .{
+ return diags.fail("failed to copy '{f}' to '{f}': {t}", .{
std.fmt.alt(@as(Path, cached_pp_file_path), .formatEscapeChar),
std.fmt.alt(@as(Path, emit), .formatEscapeChar),
- @errorName(err),
+ err,
});
};
return;
@@ -1060,9 +1070,10 @@ pub const File = struct {
/// Opens a path as an object file and parses it into the linker.
fn openLoadObject(base: *File, path: Path) anyerror!void {
if (base.tag == .lld) return;
+ const io = base.comp.io;
const diags = &base.comp.link_diags;
- const input = try openObjectInput(diags, path);
- errdefer input.object.file.close();
+ const input = try openObjectInput(io, diags, path);
+ errdefer input.object.file.close(io);
try loadInput(base, input);
}
@@ -1070,21 +1081,22 @@ pub const File = struct {
/// If `query` is non-null, allows GNU ld scripts.
fn openLoadArchive(base: *File, path: Path, opt_query: ?UnresolvedInput.Query) anyerror!void {
if (base.tag == .lld) return;
+ const io = base.comp.io;
if (opt_query) |query| {
- const archive = try openObject(path, query.must_link, query.hidden);
- errdefer archive.file.close();
+ const archive = try openObject(io, path, query.must_link, query.hidden);
+ errdefer archive.file.close(io);
loadInput(base, .{ .archive = archive }) catch |err| switch (err) {
error.BadMagic, error.UnexpectedEndOfFile => {
if (base.tag != .elf and base.tag != .elf2) return err;
try loadGnuLdScript(base, path, query, archive.file);
- archive.file.close();
+ archive.file.close(io);
return;
},
else => return err,
};
} else {
- const archive = try openObject(path, false, false);
- errdefer archive.file.close();
+ const archive = try openObject(io, path, false, false);
+ errdefer archive.file.close(io);
try loadInput(base, .{ .archive = archive });
}
}
@@ -1093,29 +1105,30 @@ pub const File = struct {
/// Handles GNU ld scripts.
fn openLoadDso(base: *File, path: Path, query: UnresolvedInput.Query) anyerror!void {
if (base.tag == .lld) return;
- const dso = try openDso(path, query.needed, query.weak, query.reexport);
- errdefer dso.file.close();
+ const io = base.comp.io;
+ const dso = try openDso(io, path, query.needed, query.weak, query.reexport);
+ errdefer dso.file.close(io);
loadInput(base, .{ .dso = dso }) catch |err| switch (err) {
error.BadMagic, error.UnexpectedEndOfFile => {
if (base.tag != .elf and base.tag != .elf2) return err;
try loadGnuLdScript(base, path, query, dso.file);
- dso.file.close();
+ dso.file.close(io);
return;
},
else => return err,
};
}
- fn loadGnuLdScript(base: *File, path: Path, parent_query: UnresolvedInput.Query, file: fs.File) anyerror!void {
+ fn loadGnuLdScript(base: *File, path: Path, parent_query: UnresolvedInput.Query, file: Io.File) anyerror!void {
const comp = base.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
const gpa = comp.gpa;
- const io = comp.io;
- const stat = try file.stat();
+ const stat = try file.stat(io);
const size = std.math.cast(u32, stat.size) orelse return error.FileTooBig;
const buf = try gpa.alloc(u8, size);
defer gpa.free(buf);
- const n = try file.preadAll(buf, 0);
+ const n = try file.readPositionalAll(io, buf, 0);
if (buf.len != n) return error.UnexpectedEndOfFile;
var ld_script = try LdScript.parse(gpa, diags, path, buf);
defer ld_script.deinit(gpa);
@@ -1180,6 +1193,32 @@ pub const File = struct {
}
}
+ /// Legacy function for old linker code
+ pub fn copyRangeAll(base: *File, old_offset: u64, new_offset: u64, size: u64) !void {
+ const comp = base.comp;
+ const io = comp.io;
+ const file = base.file.?;
+ return copyRangeAll2(io, file, file, old_offset, new_offset, size);
+ }
+
+ /// Legacy function for old linker code
+ pub fn copyRangeAll2(io: Io, src_file: Io.File, dst_file: Io.File, old_offset: u64, new_offset: u64, size: u64) !void {
+ var write_buffer: [2048]u8 = undefined;
+ var file_reader = src_file.reader(io, &.{});
+ file_reader.pos = old_offset;
+ var file_writer = dst_file.writer(io, &write_buffer);
+ file_writer.pos = new_offset;
+ const size_u = std.math.cast(usize, size) orelse return error.Overflow;
+ const n = file_writer.interface.sendFileAll(&file_reader, .limited(size_u)) catch |err| switch (err) {
+ error.ReadFailed => return file_reader.err.?,
+ error.WriteFailed => return file_writer.err.?,
+ };
+ assert(n == size_u);
+ file_writer.interface.flush() catch |err| switch (err) {
+ error.WriteFailed => return file_writer.err.?,
+ };
+ }
+
pub const Tag = enum {
coff2,
elf,
@@ -1231,22 +1270,26 @@ pub const File = struct {
ty: InternPool.Index,
};
- pub fn determineMode(
+ pub fn determinePermissions(
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
- ) fs.File.Mode {
+ ) Io.File.Permissions {
// On common systems with a 0o022 umask, 0o777 will still result in a file created
// with 0o755 permissions, but it works appropriately if the system is configured
// more leniently. As another data point, C's fopen seems to open files with the
// 666 mode.
- const executable_mode = if (builtin.target.os.tag == .windows) 0 else 0o777;
+ const executable_mode: Io.File.Permissions = if (builtin.target.os.tag == .windows)
+ .default_file
+ else
+ .fromMode(0o777);
+
switch (output_mode) {
.Lib => return switch (link_mode) {
.dynamic => executable_mode,
- .static => fs.File.default_mode,
+ .static => .default_file,
},
.Exe => return executable_mode,
- .Obj => return fs.File.default_mode,
+ .Obj => return .default_file,
}
}
@@ -1656,19 +1699,19 @@ pub const Input = union(enum) {
pub const Object = struct {
path: Path,
- file: fs.File,
+ file: Io.File,
must_link: bool,
hidden: bool,
};
pub const Res = struct {
path: Path,
- file: fs.File,
+ file: Io.File,
};
pub const Dso = struct {
path: Path,
- file: fs.File,
+ file: Io.File,
needed: bool,
weak: bool,
reexport: bool,
@@ -1690,7 +1733,7 @@ pub const Input = union(enum) {
}
/// Returns `null` in the case of `dso_exact`.
- pub fn pathAndFile(input: Input) ?struct { Path, fs.File } {
+ pub fn pathAndFile(input: Input) ?struct { Path, Io.File } {
return switch (input) {
.object, .archive => |obj| .{ obj.path, obj.file },
inline .res, .dso => |x| .{ x.path, x.file },
@@ -1735,6 +1778,7 @@ pub fn hashInputs(man: *Cache.Manifest, link_inputs: []const Input) !void {
pub fn resolveInputs(
gpa: Allocator,
arena: Allocator,
+ io: Io,
target: *const std.Target,
/// This function mutates this array but does not take ownership.
/// Allocated with `gpa`.
@@ -1784,6 +1828,7 @@ pub fn resolveInputs(
for (lib_directories) |lib_directory| switch (try resolveLibInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&checked_paths,
@@ -1810,6 +1855,7 @@ pub fn resolveInputs(
for (lib_directories) |lib_directory| switch (try resolveLibInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&checked_paths,
@@ -1837,6 +1883,7 @@ pub fn resolveInputs(
switch (try resolveLibInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&checked_paths,
@@ -1855,6 +1902,7 @@ pub fn resolveInputs(
switch (try resolveLibInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&checked_paths,
@@ -1886,6 +1934,7 @@ pub fn resolveInputs(
if (try resolvePathInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&ld_script_bytes,
@@ -1903,6 +1952,7 @@ pub fn resolveInputs(
switch ((try resolvePathInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&ld_script_bytes,
@@ -1930,6 +1980,7 @@ pub fn resolveInputs(
if (try resolvePathInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&ld_script_bytes,
@@ -1969,6 +2020,7 @@ const fatal = std.process.fatal;
fn resolveLibInput(
gpa: Allocator,
arena: Allocator,
+ io: Io,
/// Allocated via `gpa`.
unresolved_inputs: *std.ArrayList(UnresolvedInput),
/// Allocated via `gpa`.
@@ -1994,11 +2046,11 @@ fn resolveLibInput(
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.tbd", .{lib_name}),
};
try checked_paths.print(gpa, "\n {f}", .{test_path});
- var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :tbd,
else => |e| fatal("unable to search for tbd library '{f}': {s}", .{ test_path, @errorName(e) }),
};
- errdefer file.close();
+ errdefer file.close(io);
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
}
@@ -2013,7 +2065,7 @@ fn resolveLibInput(
}),
};
try checked_paths.print(gpa, "\n {f}", .{test_path});
- switch (try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, .{
+ switch (try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, .{
.path = test_path,
.query = name_query.query,
}, link_mode, color)) {
@@ -2030,13 +2082,13 @@ fn resolveLibInput(
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.so", .{lib_name}),
};
try checked_paths.print(gpa, "\n {f}", .{test_path});
- var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :so,
else => |e| fatal("unable to search for so library '{f}': {s}", .{
test_path, @errorName(e),
}),
};
- errdefer file.close();
+ errdefer file.close(io);
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
}
@@ -2048,11 +2100,11 @@ fn resolveLibInput(
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.a", .{lib_name}),
};
try checked_paths.print(gpa, "\n {f}", .{test_path});
- var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :mingw,
else => |e| fatal("unable to search for static library '{f}': {s}", .{ test_path, @errorName(e) }),
};
- errdefer file.close();
+ errdefer file.close(io);
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
}
@@ -2062,7 +2114,7 @@ fn resolveLibInput(
fn finishResolveLibInput(
resolved_inputs: *std.ArrayList(Input),
path: Path,
- file: std.fs.File,
+ file: Io.File,
link_mode: std.builtin.LinkMode,
query: UnresolvedInput.Query,
) ResolveLibInputResult {
@@ -2087,6 +2139,7 @@ fn finishResolveLibInput(
fn resolvePathInput(
gpa: Allocator,
arena: Allocator,
+ io: Io,
/// Allocated with `gpa`.
unresolved_inputs: *std.ArrayList(UnresolvedInput),
/// Allocated with `gpa`.
@@ -2098,12 +2151,12 @@ fn resolvePathInput(
color: std.zig.Color,
) Allocator.Error!?ResolveLibInputResult {
switch (Compilation.classifyFileExt(pq.path.sub_path)) {
- .static_library => return try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .static, color),
- .shared_library => return try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .dynamic, color),
+ .static_library => return try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .static, color),
+ .shared_library => return try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .dynamic, color),
.object => {
- var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err|
+ var file = pq.path.root_dir.handle.openFile(io, pq.path.sub_path, .{}) catch |err|
fatal("failed to open object {f}: {s}", .{ pq.path, @errorName(err) });
- errdefer file.close();
+ errdefer file.close(io);
try resolved_inputs.append(gpa, .{ .object = .{
.path = pq.path,
.file = file,
@@ -2113,9 +2166,9 @@ fn resolvePathInput(
return null;
},
.res => {
- var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err|
+ var file = pq.path.root_dir.handle.openFile(io, pq.path.sub_path, .{}) catch |err|
fatal("failed to open windows resource {f}: {s}", .{ pq.path, @errorName(err) });
- errdefer file.close();
+ errdefer file.close(io);
try resolved_inputs.append(gpa, .{ .res = .{
.path = pq.path,
.file = file,
@@ -2129,6 +2182,7 @@ fn resolvePathInput(
fn resolvePathInputLib(
gpa: Allocator,
arena: Allocator,
+ io: Io,
/// Allocated with `gpa`.
unresolved_inputs: *std.ArrayList(UnresolvedInput),
/// Allocated with `gpa`.
@@ -2149,30 +2203,29 @@ fn resolvePathInputLib(
.static_library, .shared_library => true,
else => false,
}) {
- var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => return .no_match,
- else => |e| fatal("unable to search for {s} library '{f}': {s}", .{
- @tagName(link_mode), std.fmt.alt(test_path, .formatEscapeChar), @errorName(e),
+ else => |e| fatal("unable to search for {t} library '{f}': {t}", .{
+ link_mode, std.fmt.alt(test_path, .formatEscapeChar), e,
}),
};
- errdefer file.close();
+ errdefer file.close(io);
try ld_script_bytes.resize(gpa, @max(std.elf.MAGIC.len, std.elf.ARMAG.len));
- const n = file.preadAll(ld_script_bytes.items, 0) catch |err| fatal("failed to read '{f}': {s}", .{
- std.fmt.alt(test_path, .formatEscapeChar), @errorName(err),
- });
+ const n = file.readPositionalAll(io, ld_script_bytes.items, 0) catch |err|
+ fatal("failed to read '{f}': {t}", .{ std.fmt.alt(test_path, .formatEscapeChar), err });
const buf = ld_script_bytes.items[0..n];
if (mem.startsWith(u8, buf, std.elf.MAGIC) or mem.startsWith(u8, buf, std.elf.ARMAG)) {
// Appears to be an ELF or archive file.
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, pq.query);
}
- const stat = file.stat() catch |err|
- fatal("failed to stat {f}: {s}", .{ test_path, @errorName(err) });
+ const stat = file.stat(io) catch |err|
+ fatal("failed to stat {f}: {t}", .{ test_path, err });
const size = std.math.cast(u32, stat.size) orelse
fatal("{f}: linker script too big", .{test_path});
try ld_script_bytes.resize(gpa, size);
const buf2 = ld_script_bytes.items[n..];
- const n2 = file.preadAll(buf2, n) catch |err|
- fatal("failed to read {f}: {s}", .{ test_path, @errorName(err) });
+ const n2 = file.readPositionalAll(io, buf2, n) catch |err|
+ fatal("failed to read {f}: {t}", .{ test_path, err });
if (n2 != buf2.len) fatal("failed to read {f}: unexpected end of file", .{test_path});
// This `Io` is only used for a mutex, and we know we aren't doing anything async/concurrent.
@@ -2192,13 +2245,12 @@ fn resolvePathInputLib(
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, color);
-
+ error_bundle.renderToStderr(io, .{}, color) catch {};
std.process.exit(1);
}
var ld_script = ld_script_result catch |err|
- fatal("{f}: failed to parse linker script: {s}", .{ test_path, @errorName(err) });
+ fatal("{f}: failed to parse linker script: {t}", .{ test_path, err });
defer ld_script.deinit(gpa);
try unresolved_inputs.ensureUnusedCapacity(gpa, ld_script.args.len);
@@ -2223,23 +2275,23 @@ fn resolvePathInputLib(
} });
}
}
- file.close();
+ file.close(io);
return .ok;
}
- var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => return .no_match,
else => |e| fatal("unable to search for {s} library {f}: {s}", .{
@tagName(link_mode), test_path, @errorName(e),
}),
};
- errdefer file.close();
+ errdefer file.close(io);
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, pq.query);
}
-pub fn openObject(path: Path, must_link: bool, hidden: bool) !Input.Object {
- var file = try path.root_dir.handle.openFile(path.sub_path, .{});
- errdefer file.close();
+pub fn openObject(io: Io, path: Path, must_link: bool, hidden: bool) !Input.Object {
+ var file = try path.root_dir.handle.openFile(io, path.sub_path, .{});
+ errdefer file.close(io);
return .{
.path = path,
.file = file,
@@ -2248,9 +2300,9 @@ pub fn openObject(path: Path, must_link: bool, hidden: bool) !Input.Object {
};
}
-pub fn openDso(path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso {
- var file = try path.root_dir.handle.openFile(path.sub_path, .{});
- errdefer file.close();
+pub fn openDso(io: Io, path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso {
+ var file = try path.root_dir.handle.openFile(io, path.sub_path, .{});
+ errdefer file.close(io);
return .{
.path = path,
.file = file,
@@ -2260,20 +2312,20 @@ pub fn openDso(path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso
};
}
-pub fn openObjectInput(diags: *Diags, path: Path) error{LinkFailure}!Input {
- return .{ .object = openObject(path, false, false) catch |err| {
+pub fn openObjectInput(io: Io, diags: *Diags, path: Path) error{LinkFailure}!Input {
+ return .{ .object = openObject(io, path, false, false) catch |err| {
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
} };
}
-pub fn openArchiveInput(diags: *Diags, path: Path, must_link: bool, hidden: bool) error{LinkFailure}!Input {
- return .{ .archive = openObject(path, must_link, hidden) catch |err| {
+pub fn openArchiveInput(io: Io, diags: *Diags, path: Path, must_link: bool, hidden: bool) error{LinkFailure}!Input {
+ return .{ .archive = openObject(io, path, must_link, hidden) catch |err| {
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
} };
}
-pub fn openDsoInput(diags: *Diags, path: Path, needed: bool, weak: bool, reexport: bool) error{LinkFailure}!Input {
- return .{ .dso = openDso(path, needed, weak, reexport) catch |err| {
+pub fn openDsoInput(io: Io, diags: *Diags, path: Path, needed: bool, weak: bool, reexport: bool) error{LinkFailure}!Input {
+ return .{ .dso = openDso(io, path, needed, weak, reexport) catch |err| {
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
} };
}
diff --git a/src/link/C.zig b/src/link/C.zig
index ce48e85851..93e771ebfc 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -124,6 +124,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*C {
+ const io = comp.io;
const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .c);
const optimize_mode = comp.root_mod.optimize_mode;
@@ -135,11 +136,11 @@ pub fn createEmpty(
assert(!use_lld);
assert(!use_llvm);
- const file = try emit.root_dir.handle.createFile(emit.sub_path, .{
+ const file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{
// Truncation is done on `flush`.
.truncate = false,
});
- errdefer file.close();
+ errdefer file.close(io);
const c_file = try arena.create(C);
@@ -370,6 +371,7 @@ pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.P
const comp = self.base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
+ const io = comp.io;
const zcu = self.base.comp.zcu.?;
const ip = &zcu.intern_pool;
const pt: Zcu.PerThread = .activate(zcu, tid);
@@ -507,8 +509,8 @@ pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.P
}, self.getString(av_block.code));
const file = self.base.file.?;
- file.setEndPos(f.file_size) catch |err| return diags.fail("failed to allocate file: {s}", .{@errorName(err)});
- var fw = file.writer(&.{});
+ file.setLength(io, f.file_size) catch |err| return diags.fail("failed to allocate file: {t}", .{err});
+ var fw = file.writer(io, &.{});
var w = &fw.interface;
w.writeVecAll(f.all_buffers.items) catch |err| switch (err) {
error.WriteFailed => return diags.fail("failed to write to '{f}': {s}", .{
@@ -763,6 +765,7 @@ pub fn flushEmitH(zcu: *Zcu) !void {
if (true) return; // emit-h is regressed
const emit_h = zcu.emit_h orelse return;
+ const io = zcu.comp.io;
// We collect a list of buffers to write, and write them all at once with pwritev 😎
const num_buffers = emit_h.decl_table.count() + 1;
@@ -790,14 +793,14 @@ pub fn flushEmitH(zcu: *Zcu) !void {
}
const directory = emit_h.loc.directory orelse zcu.comp.local_cache_directory;
- const file = try directory.handle.createFile(emit_h.loc.basename, .{
+ const file = try directory.handle.createFile(io, emit_h.loc.basename, .{
// We set the end position explicitly below; by not truncating the file, we possibly
// make it easier on the file system by doing 1 reallocation instead of two.
.truncate = false,
});
- defer file.close();
+ defer file.close(io);
- try file.setEndPos(file_size);
+ try file.setLength(io, file_size);
try file.pwritevAll(all_buffers.items, 0);
}
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index f33e0ccdea..03b757f5b4 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1,3 +1,23 @@
+const Coff = @This();
+
+const builtin = @import("builtin");
+const native_endian = builtin.cpu.arch.endian();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const log = std.log.scoped(.link);
+
+const codegen = @import("../codegen.zig");
+const Compilation = @import("../Compilation.zig");
+const InternPool = @import("../InternPool.zig");
+const link = @import("../link.zig");
+const MappedFile = @import("MappedFile.zig");
+const target_util = @import("../target.zig");
+const Type = @import("../Type.zig");
+const Value = @import("../Value.zig");
+const Zcu = @import("../Zcu.zig");
+
base: link.File,
mf: MappedFile,
nodes: std.MultiArrayList(Node),
@@ -631,12 +651,14 @@ fn create(
else => return error.UnsupportedCOFFArchitecture,
};
+ const io = comp.io;
+
const coff = try arena.create(Coff);
- const file = try path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{
+ const file = try path.root_dir.handle.createFile(io, path.sub_path, .{
.read = true,
- .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode),
+ .permissions = link.File.determinePermissions(comp.config.output_mode, comp.config.link_mode),
});
- errdefer file.close(comp.io);
+ errdefer file.close(io);
coff.* = .{
.base = .{
.tag = .coff2,
@@ -644,14 +666,14 @@ fn create(
.comp = comp,
.emit = path,
- .file = .adaptFromNewApi(file),
+ .file = file,
.gc_sections = false,
.print_gc_sections = false,
.build_id = .none,
.allow_shlib_undefined = false,
.stack_size = 0,
},
- .mf = try .init(file, comp.gpa),
+ .mf = try .init(file, comp.gpa, io),
.nodes = .empty,
.import_table = .{
.ni = .none,
@@ -1727,22 +1749,20 @@ pub fn flush(
const comp = coff.base.comp;
if (comp.compiler_rt_dyn_lib) |crt_file| {
const gpa = comp.gpa;
+ const io = comp.io;
const compiler_rt_sub_path = try std.fs.path.join(gpa, &.{
std.fs.path.dirname(coff.base.emit.sub_path) orelse "",
std.fs.path.basename(crt_file.full_object_path.sub_path),
});
defer gpa.free(compiler_rt_sub_path);
- crt_file.full_object_path.root_dir.handle.copyFile(
+ std.Io.Dir.copyFile(
+ crt_file.full_object_path.root_dir.handle,
crt_file.full_object_path.sub_path,
coff.base.emit.root_dir.handle,
compiler_rt_sub_path,
+ io,
.{},
- ) catch |err| switch (err) {
- else => |e| return comp.link_diags.fail("Copy '{s}' failed: {s}", .{
- compiler_rt_sub_path,
- @errorName(e),
- }),
- };
+ ) catch |err| return comp.link_diags.fail("copy '{s}' failed: {t}", .{ compiler_rt_sub_path, err });
}
}
@@ -2358,10 +2378,16 @@ pub fn deleteExport(coff: *Coff, exported: Zcu.Exported, name: InternPool.NullTe
_ = name;
}
-pub fn dump(coff: *Coff, tid: Zcu.PerThread.Id) void {
- const w, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- coff.printNode(tid, w, .root, 0) catch {};
+pub fn dump(coff: *Coff, tid: Zcu.PerThread.Id) Io.Cancelable!void {
+ const comp = coff.base.comp;
+ const io = comp.io;
+ var buffer: [512]u8 = undefined;
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ coff.printNode(tid, w, .root, 0) catch |err| switch (err) {
+ error.WriteFailed => return stderr.err.?,
+ };
}
pub fn printNode(
@@ -2459,19 +2485,3 @@ pub fn printNode(
}
}
}
-
-const assert = std.debug.assert;
-const builtin = @import("builtin");
-const codegen = @import("../codegen.zig");
-const Compilation = @import("../Compilation.zig");
-const Coff = @This();
-const InternPool = @import("../InternPool.zig");
-const link = @import("../link.zig");
-const log = std.log.scoped(.link);
-const MappedFile = @import("MappedFile.zig");
-const native_endian = builtin.cpu.arch.endian();
-const std = @import("std");
-const target_util = @import("../target.zig");
-const Type = @import("../Type.zig");
-const Value = @import("../Value.zig");
-const Zcu = @import("../Zcu.zig");
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 95f4ca8bbd..0fda09e385 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -1,3 +1,24 @@
+const Dwarf = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const Allocator = std.mem.Allocator;
+const DW = std.dwarf;
+const Zir = std.zig.Zir;
+const assert = std.debug.assert;
+const log = std.log.scoped(.dwarf);
+const Writer = std.Io.Writer;
+
+const InternPool = @import("../InternPool.zig");
+const Module = @import("../Package.zig").Module;
+const Type = @import("../Type.zig");
+const Value = @import("../Value.zig");
+const Zcu = @import("../Zcu.zig");
+const codegen = @import("../codegen.zig");
+const dev = @import("../dev.zig");
+const link = @import("../link.zig");
+const target_info = @import("../target.zig");
+
gpa: Allocator,
bin_file: *link.File,
format: DW.Format,
@@ -27,18 +48,18 @@ pub const UpdateError = error{
EndOfStream,
Underflow,
UnexpectedEndOfFile,
+ NonResizable,
} ||
codegen.GenerateSymbolError ||
- std.fs.File.OpenError ||
- std.fs.File.SetEndPosError ||
- std.fs.File.CopyRangeError ||
- std.fs.File.PReadError ||
- std.fs.File.PWriteError;
+ Io.File.OpenError ||
+ Io.File.LengthError ||
+ Io.File.ReadPositionalError ||
+ Io.File.WritePositionalError;
pub const FlushError = UpdateError;
pub const RelocError =
- std.fs.File.PWriteError;
+ Io.File.PWriteError;
pub const AddressSize = enum(u8) {
@"32" = 4,
@@ -135,11 +156,14 @@ const DebugInfo = struct {
fn declAbbrevCode(debug_info: *DebugInfo, unit: Unit.Index, entry: Entry.Index) !AbbrevCode {
const dwarf: *Dwarf = @fieldParentPtr("debug_info", debug_info);
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
const unit_ptr = debug_info.section.getUnit(unit);
const entry_ptr = unit_ptr.getEntry(entry);
if (entry_ptr.len < AbbrevCode.decl_bytes) return .null;
var abbrev_code_buf: [AbbrevCode.decl_bytes]u8 = undefined;
- if (try dwarf.getFile().?.preadAll(
+ if (try dwarf.getFile().?.readPositionalAll(
+ io,
&abbrev_code_buf,
debug_info.section.off(dwarf) + unit_ptr.off + unit_ptr.header_len + entry_ptr.off,
) != abbrev_code_buf.len) return error.InputOutput;
@@ -619,13 +643,10 @@ const Unit = struct {
fn move(unit: *Unit, sec: *Section, dwarf: *Dwarf, new_off: u32) UpdateError!void {
if (unit.off == new_off) return;
- const n = try dwarf.getFile().?.copyRangeAll(
- sec.off(dwarf) + unit.off,
- dwarf.getFile().?,
- sec.off(dwarf) + new_off,
- unit.len,
- );
- if (n != unit.len) return error.InputOutput;
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
+ const file = dwarf.getFile().?;
+ try link.File.copyRangeAll2(io, file, file, sec.off(dwarf) + unit.off, sec.off(dwarf) + new_off, unit.len);
unit.off = new_off;
}
@@ -655,10 +676,14 @@ const Unit = struct {
fn replaceHeader(unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
assert(contents.len == unit.header_len);
- try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off);
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
+ try dwarf.getFile().?.writePositionalAll(io, contents, sec.off(dwarf) + unit.off);
}
fn writeTrailer(unit: *Unit, sec: *Section, dwarf: *Dwarf) UpdateError!void {
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
const start = unit.off + unit.header_len + if (unit.last.unwrap()) |last_entry| end: {
const last_entry_ptr = unit.getEntry(last_entry);
break :end last_entry_ptr.off + last_entry_ptr.len;
@@ -688,7 +713,7 @@ const Unit = struct {
assert(fw.end == extended_op_bytes + op_len_bytes);
fw.writeByte(DW.LNE.padding) catch unreachable;
assert(fw.end >= unit.trailer_len and fw.end <= len);
- return dwarf.getFile().?.pwriteAll(fw.buffered(), sec.off(dwarf) + start);
+ return dwarf.getFile().?.writePositionalAll(io, fw.buffered(), sec.off(dwarf) + start);
}
var trailer_aw: Writer.Allocating = try .initCapacity(dwarf.gpa, len);
defer trailer_aw.deinit();
@@ -748,7 +773,7 @@ const Unit = struct {
assert(tw.end == unit.trailer_len);
tw.splatByteAll(fill_byte, len - unit.trailer_len) catch unreachable;
assert(tw.end == len);
- try dwarf.getFile().?.pwriteAll(trailer_aw.written(), sec.off(dwarf) + start);
+ try dwarf.getFile().?.writePositionalAll(io, trailer_aw.written(), sec.off(dwarf) + start);
}
fn resolveRelocs(unit: *Unit, sec: *Section, dwarf: *Dwarf) RelocError!void {
@@ -834,6 +859,8 @@ const Entry = struct {
dwarf: *Dwarf,
) (UpdateError || Writer.Error)!void {
assert(entry.len > 0);
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
const start = entry.off + entry.len;
if (sec == &dwarf.debug_frame.section) {
const len = if (entry.next.unwrap()) |next_entry|
@@ -843,11 +870,11 @@ const Entry = struct {
var unit_len_buf: [8]u8 = undefined;
const unit_len_bytes = unit_len_buf[0..dwarf.sectionOffsetBytes()];
dwarf.writeInt(unit_len_bytes, len - dwarf.unitLengthBytes());
- try dwarf.getFile().?.pwriteAll(unit_len_bytes, sec.off(dwarf) + unit.off + unit.header_len + entry.off);
+ try dwarf.getFile().?.writePositionalAll(io, unit_len_bytes, sec.off(dwarf) + unit.off + unit.header_len + entry.off);
const buf = try dwarf.gpa.alloc(u8, len - entry.len);
defer dwarf.gpa.free(buf);
@memset(buf, DW.CFA.nop);
- try dwarf.getFile().?.pwriteAll(buf, sec.off(dwarf) + unit.off + unit.header_len + start);
+ try dwarf.getFile().?.writePositionalAll(io, buf, sec.off(dwarf) + unit.off + unit.header_len + start);
return;
}
const len = unit.getEntry(entry.next.unwrap() orelse return).off - start;
@@ -906,7 +933,7 @@ const Entry = struct {
},
} else assert(!sec.pad_entries_to_ideal and len == 0);
assert(fw.end <= len);
- try dwarf.getFile().?.pwriteAll(fw.buffered(), sec.off(dwarf) + unit.off + unit.header_len + start);
+ try dwarf.getFile().?.writePositionalAll(io, fw.buffered(), sec.off(dwarf) + unit.off + unit.header_len + start);
}
fn resize(
@@ -949,11 +976,13 @@ const Entry = struct {
fn replace(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
assert(contents.len == entry_ptr.len);
- try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off + unit.header_len + entry_ptr.off);
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
+ try dwarf.getFile().?.writePositionalAll(io, contents, sec.off(dwarf) + unit.off + unit.header_len + entry_ptr.off);
if (false) {
const buf = try dwarf.gpa.alloc(u8, sec.len);
defer dwarf.gpa.free(buf);
- _ = try dwarf.getFile().?.preadAll(buf, sec.off(dwarf));
+ _ = try dwarf.getFile().?.readPositionalAll(io, buf, sec.off(dwarf));
log.info("Section{{ .first = {}, .last = {}, .off = 0x{x}, .len = 0x{x} }}", .{
@intFromEnum(sec.first),
@intFromEnum(sec.last),
@@ -4682,6 +4711,8 @@ fn updateContainerTypeWriterError(
}
pub fn updateLineNumber(dwarf: *Dwarf, zcu: *Zcu, zir_index: InternPool.TrackedInst.Index) UpdateError!void {
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const inst_info = zir_index.resolveFull(ip).?;
@@ -4701,7 +4732,7 @@ pub fn updateLineNumber(dwarf: *Dwarf, zcu: *Zcu, zir_index: InternPool.TrackedI
const unit = dwarf.debug_info.section.getUnit(dwarf.getUnitIfExists(file.mod.?) orelse return);
const entry = unit.getEntry(dwarf.decls.get(zir_index) orelse return);
- try dwarf.getFile().?.pwriteAll(&line_buf, dwarf.debug_info.section.off(dwarf) + unit.off + unit.header_len + entry.off + DebugInfo.declEntryLineOff(dwarf));
+ try dwarf.getFile().?.writePositionalAll(io, &line_buf, dwarf.debug_info.section.off(dwarf) + unit.off + unit.header_len + entry.off + DebugInfo.declEntryLineOff(dwarf));
}
pub fn freeNav(dwarf: *Dwarf, nav_index: InternPool.Nav.Index) void {
@@ -4738,6 +4769,8 @@ pub fn flush(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
fn flushWriterError(dwarf: *Dwarf, pt: Zcu.PerThread) (FlushError || Writer.Error)!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
{
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, .anyerror_type);
@@ -4957,7 +4990,7 @@ fn flushWriterError(dwarf: *Dwarf, pt: Zcu.PerThread) (FlushError || Writer.Erro
if (dwarf.debug_str.section.dirty) {
const contents = dwarf.debug_str.contents.items;
try dwarf.debug_str.section.resize(dwarf, contents.len);
- try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_str.section.off(dwarf));
+ try dwarf.getFile().?.writePositionalAll(io, contents, dwarf.debug_str.section.off(dwarf));
dwarf.debug_str.section.dirty = false;
}
if (dwarf.debug_line.section.dirty) {
@@ -5069,7 +5102,7 @@ fn flushWriterError(dwarf: *Dwarf, pt: Zcu.PerThread) (FlushError || Writer.Erro
if (dwarf.debug_line_str.section.dirty) {
const contents = dwarf.debug_line_str.contents.items;
try dwarf.debug_line_str.section.resize(dwarf, contents.len);
- try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_line_str.section.off(dwarf));
+ try dwarf.getFile().?.writePositionalAll(io, contents, dwarf.debug_line_str.section.off(dwarf));
dwarf.debug_line_str.section.dirty = false;
}
if (dwarf.debug_loclists.section.dirty) {
@@ -6350,7 +6383,7 @@ const AbbrevCode = enum {
});
};
-fn getFile(dwarf: *Dwarf) ?std.fs.File {
+fn getFile(dwarf: *Dwarf) ?Io.File {
if (dwarf.bin_file.cast(.macho)) |macho_file| if (macho_file.d_sym) |*d_sym| return d_sym.file;
return dwarf.bin_file.file;
}
@@ -6391,9 +6424,11 @@ fn writeInt(dwarf: *Dwarf, buf: []u8, int: u64) void {
}
fn resolveReloc(dwarf: *Dwarf, source: u64, target: u64, size: u32) RelocError!void {
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
var buf: [8]u8 = undefined;
dwarf.writeInt(buf[0..size], target);
- try dwarf.getFile().?.pwriteAll(buf[0..size], source);
+ try dwarf.getFile().?.writePositionalAll(io, buf[0..size], source);
}
fn unitLengthBytes(dwarf: *Dwarf) u32 {
@@ -6429,21 +6464,3 @@ const force_incremental = false;
inline fn incremental(dwarf: Dwarf) bool {
return force_incremental or dwarf.bin_file.comp.config.incremental;
}
-
-const Allocator = std.mem.Allocator;
-const DW = std.dwarf;
-const Dwarf = @This();
-const InternPool = @import("../InternPool.zig");
-const Module = @import("../Package.zig").Module;
-const Type = @import("../Type.zig");
-const Value = @import("../Value.zig");
-const Zcu = @import("../Zcu.zig");
-const Zir = std.zig.Zir;
-const assert = std.debug.assert;
-const codegen = @import("../codegen.zig");
-const dev = @import("../dev.zig");
-const link = @import("../link.zig");
-const log = std.log.scoped(.dwarf);
-const std = @import("std");
-const target_info = @import("../target.zig");
-const Writer = std.Io.Writer;
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 69acbe034b..85f37f88ce 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -313,12 +313,14 @@ pub fn createEmpty(
const is_obj = output_mode == .Obj;
const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .static);
+ const io = comp.io;
+
// What path should this ELF linker code output to?
const sub_path = emit.sub_path;
- self.base.file = try emit.root_dir.handle.createFile(sub_path, .{
+ self.base.file = try emit.root_dir.handle.createFile(io, sub_path, .{
.truncate = true,
.read = true,
- .mode = link.File.determineMode(output_mode, link_mode),
+ .permissions = link.File.determinePermissions(output_mode, link_mode),
});
const gpa = comp.gpa;
@@ -406,10 +408,12 @@ pub fn open(
}
pub fn deinit(self: *Elf) void {
- const gpa = self.base.comp.gpa;
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
for (self.file_handles.items) |fh| {
- fh.close();
+ fh.close(io);
}
self.file_handles.deinit(gpa);
@@ -483,6 +487,8 @@ pub fn getUavVAddr(self: *Elf, uav: InternPool.Index, reloc_info: link.File.Relo
/// Returns end pos of collision, if any.
fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 {
+ const comp = self.base.comp;
+ const io = comp.io;
const small_ptr = self.ptr_width == .p32;
const ehdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Ehdr) else @sizeOf(elf.Elf64_Ehdr);
if (start < ehdr_size)
@@ -522,7 +528,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 {
}
}
- if (at_end) try self.base.file.?.setEndPos(end);
+ if (at_end) try self.base.file.?.setLength(io, end);
return null;
}
@@ -552,6 +558,8 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 {
}
pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: u64) !void {
+ const comp = self.base.comp;
+ const io = comp.io;
const shdr = &self.sections.items(.shdr)[shdr_index];
if (shdr.sh_type != elf.SHT_NOBITS) {
@@ -574,18 +582,11 @@ pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment:
new_offset,
});
- const amt = try self.base.file.?.copyRangeAll(
- shdr.sh_offset,
- self.base.file.?,
- new_offset,
- existing_size,
- );
- // TODO figure out what to about this error condition - how to communicate it up.
- if (amt != existing_size) return error.InputOutput;
+ try self.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size);
shdr.sh_offset = new_offset;
} else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
+ try self.base.file.?.setLength(io, shdr.sh_offset + needed_size);
}
}
@@ -737,8 +738,8 @@ pub fn loadInput(self: *Elf, input: link.Input) !void {
.res => unreachable,
.dso_exact => @panic("TODO"),
.object => |obj| try parseObject(self, obj),
- .archive => |obj| try parseArchive(gpa, diags, &self.file_handles, &self.files, target, debug_fmt_strip, default_sym_version, &self.objects, obj, is_static_lib),
- .dso => |dso| try parseDso(gpa, diags, dso, &self.shared_objects, &self.files, target),
+ .archive => |obj| try parseArchive(gpa, io, diags, &self.file_handles, &self.files, target, debug_fmt_strip, default_sym_version, &self.objects, obj, is_static_lib),
+ .dso => |dso| try parseDso(gpa, io, diags, dso, &self.shared_objects, &self.files, target),
}
}
@@ -747,9 +748,10 @@ pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std
defer tracy.end();
const comp = self.base.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
- if (comp.verbose_link) Compilation.dump_argv(self.dump_argv_list.items);
+ if (comp.verbose_link) try Compilation.dumpArgv(io, self.dump_argv_list.items);
const sub_prog_node = prog_node.start("ELF Flush", 0);
defer sub_prog_node.end();
@@ -757,7 +759,7 @@ pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std
return flushInner(self, arena, tid) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
- else => |e| return diags.fail("ELF flush failed: {s}", .{@errorName(e)}),
+ else => |e| return diags.fail("ELF flush failed: {t}", .{e}),
};
}
@@ -1047,9 +1049,11 @@ fn dumpArgvInit(self: *Elf, arena: Allocator) !void {
}
pub fn openParseObjectReportingFailure(self: *Elf, path: Path) void {
- const diags = &self.base.comp.link_diags;
- const obj = link.openObject(path, false, false) catch |err| {
- switch (diags.failParse(path, "failed to open object: {s}", .{@errorName(err)})) {
+ const comp = self.base.comp;
+ const io = comp.io;
+ const diags = &comp.link_diags;
+ const obj = link.openObject(io, path, false, false) catch |err| {
+ switch (diags.failParse(path, "failed to open object: {t}", .{err})) {
error.LinkFailure => return,
}
};
@@ -1057,10 +1061,11 @@ pub fn openParseObjectReportingFailure(self: *Elf, path: Path) void {
}
fn parseObjectReportingFailure(self: *Elf, obj: link.Input.Object) void {
- const diags = &self.base.comp.link_diags;
+ const comp = self.base.comp;
+ const diags = &comp.link_diags;
self.parseObject(obj) catch |err| switch (err) {
error.LinkFailure => return, // already reported
- else => |e| diags.addParseError(obj.path, "failed to parse object: {s}", .{@errorName(e)}),
+ else => |e| diags.addParseError(obj.path, "failed to parse object: {t}", .{e}),
};
}
@@ -1068,10 +1073,12 @@ fn parseObject(self: *Elf, obj: link.Input.Object) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = self.base.comp.gpa;
- const diags = &self.base.comp.link_diags;
- const target = &self.base.comp.root_mod.resolved_target.result;
- const debug_fmt_strip = self.base.comp.config.debug_format == .strip;
+ const comp = self.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
+ const diags = &comp.link_diags;
+ const target = &comp.root_mod.resolved_target.result;
+ const debug_fmt_strip = comp.config.debug_format == .strip;
const default_sym_version = self.default_sym_version;
const file_handles = &self.file_handles;
@@ -1090,14 +1097,15 @@ fn parseObject(self: *Elf, obj: link.Input.Object) !void {
try self.objects.append(gpa, index);
const object = self.file(index).?.object;
- try object.parseCommon(gpa, diags, obj.path, handle, target);
+ try object.parseCommon(gpa, io, diags, obj.path, handle, target);
if (!self.base.isStaticLib()) {
- try object.parse(gpa, diags, obj.path, handle, target, debug_fmt_strip, default_sym_version);
+ try object.parse(gpa, io, diags, obj.path, handle, target, debug_fmt_strip, default_sym_version);
}
}
fn parseArchive(
gpa: Allocator,
+ io: Io,
diags: *Diags,
file_handles: *std.ArrayList(File.Handle),
files: *std.MultiArrayList(File.Entry),
@@ -1112,7 +1120,7 @@ fn parseArchive(
defer tracy.end();
const fh = try addFileHandle(gpa, file_handles, obj.file);
- var archive = try Archive.parse(gpa, diags, file_handles, obj.path, fh);
+ var archive = try Archive.parse(gpa, io, diags, file_handles, obj.path, fh);
defer archive.deinit(gpa);
const init_alive = if (is_static_lib) true else obj.must_link;
@@ -1123,15 +1131,16 @@ fn parseArchive(
const object = &files.items(.data)[index].object;
object.index = index;
object.alive = init_alive;
- try object.parseCommon(gpa, diags, obj.path, obj.file, target);
+ try object.parseCommon(gpa, io, diags, obj.path, obj.file, target);
if (!is_static_lib)
- try object.parse(gpa, diags, obj.path, obj.file, target, debug_fmt_strip, default_sym_version);
+ try object.parse(gpa, io, diags, obj.path, obj.file, target, debug_fmt_strip, default_sym_version);
try objects.append(gpa, index);
}
}
fn parseDso(
gpa: Allocator,
+ io: Io,
diags: *Diags,
dso: link.Input.Dso,
shared_objects: *std.StringArrayHashMapUnmanaged(File.Index),
@@ -1143,8 +1152,8 @@ fn parseDso(
const handle = dso.file;
- const stat = Stat.fromFs(try handle.stat());
- var header = try SharedObject.parseHeader(gpa, diags, dso.path, handle, stat, target);
+ const stat = Stat.fromFs(try handle.stat(io));
+ var header = try SharedObject.parseHeader(gpa, io, diags, dso.path, handle, stat, target);
defer header.deinit(gpa);
const soname = header.soname() orelse dso.path.basename();
@@ -1158,7 +1167,7 @@ fn parseDso(
gop.value_ptr.* = index;
- var parsed = try SharedObject.parse(gpa, &header, handle);
+ var parsed = try SharedObject.parse(gpa, io, &header, handle);
errdefer parsed.deinit(gpa);
const duped_path: Path = .{
@@ -2888,13 +2897,7 @@ pub fn allocateAllocSections(self: *Elf) !void {
if (shdr.sh_offset > 0) {
// Get size actually commited to the output file.
const existing_size = self.sectionSize(shndx);
- const amt = try self.base.file.?.copyRangeAll(
- shdr.sh_offset,
- self.base.file.?,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
+ try self.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size);
}
shdr.sh_offset = new_offset;
@@ -2930,13 +2933,7 @@ pub fn allocateNonAllocSections(self: *Elf) !void {
if (shdr.sh_offset > 0) {
const existing_size = self.sectionSize(@intCast(shndx));
- const amt = try self.base.file.?.copyRangeAll(
- shdr.sh_offset,
- self.base.file.?,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
+ try self.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size);
}
shdr.sh_offset = new_offset;
@@ -3649,7 +3646,7 @@ fn fileLookup(files: std.MultiArrayList(File.Entry), index: File.Index, zig_obje
pub fn addFileHandle(
gpa: Allocator,
file_handles: *std.ArrayList(File.Handle),
- handle: fs.File,
+ handle: Io.File,
) Allocator.Error!File.HandleIndex {
try file_handles.append(gpa, handle);
return @intCast(file_handles.items.len - 1);
@@ -4066,10 +4063,10 @@ fn fmtDumpState(self: *Elf, writer: *std.Io.Writer) std.Io.Writer.Error!void {
}
/// Caller owns the memory.
-pub fn preadAllAlloc(allocator: Allocator, handle: fs.File, offset: u64, size: u64) ![]u8 {
+pub fn preadAllAlloc(allocator: Allocator, io: Io, io_file: Io.File, offset: u64, size: u64) ![]u8 {
const buffer = try allocator.alloc(u8, math.cast(usize, size) orelse return error.Overflow);
errdefer allocator.free(buffer);
- const amt = try handle.preadAll(buffer, offset);
+ const amt = try io_file.readPositionalAll(io, buffer, offset);
if (amt != size) return error.InputOutput;
return buffer;
}
@@ -4435,16 +4432,17 @@ pub fn stringTableLookup(strtab: []const u8, off: u32) [:0]const u8 {
pub fn pwriteAll(elf_file: *Elf, bytes: []const u8, offset: u64) error{LinkFailure}!void {
const comp = elf_file.base.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
- elf_file.base.file.?.pwriteAll(bytes, offset) catch |err| {
- return diags.fail("failed to write: {s}", .{@errorName(err)});
- };
+ elf_file.base.file.?.writePositionalAll(io, bytes, offset) catch |err|
+ return diags.fail("failed to write: {t}", .{err});
}
-pub fn setEndPos(elf_file: *Elf, length: u64) error{LinkFailure}!void {
+pub fn setLength(elf_file: *Elf, length: u64) error{LinkFailure}!void {
const comp = elf_file.base.comp;
+ const io = comp.i;
const diags = &comp.link_diags;
- elf_file.base.file.?.setEndPos(length) catch |err| {
+ elf_file.base.file.?.setLength(io, length) catch |err| {
return diags.fail("failed to set file end pos: {s}", .{@errorName(err)});
};
}
@@ -4458,6 +4456,7 @@ pub fn cast(elf_file: *Elf, comptime T: type, x: anytype) error{LinkFailure}!T {
}
const std = @import("std");
+const Io = std.Io;
const build_options = @import("build_options");
const builtin = @import("builtin");
const assert = std.debug.assert;
diff --git a/src/link/Elf/Archive.zig b/src/link/Elf/Archive.zig
index a9961bf8f9..14f2868956 100644
--- a/src/link/Elf/Archive.zig
+++ b/src/link/Elf/Archive.zig
@@ -1,3 +1,21 @@
+const Archive = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const elf = std.elf;
+const fs = std.fs;
+const log = std.log.scoped(.link);
+const mem = std.mem;
+const Path = std.Build.Cache.Path;
+const Allocator = std.mem.Allocator;
+
+const Diags = @import("../../link.zig").Diags;
+const Elf = @import("../Elf.zig");
+const File = @import("file.zig").File;
+const Object = @import("Object.zig");
+const StringTable = @import("../StringTable.zig");
+
objects: []const Object,
/// '\n'-delimited
strtab: []const u8,
@@ -10,22 +28,23 @@ pub fn deinit(a: *Archive, gpa: Allocator) void {
pub fn parse(
gpa: Allocator,
+ io: Io,
diags: *Diags,
file_handles: *const std.ArrayList(File.Handle),
path: Path,
handle_index: File.HandleIndex,
) !Archive {
- const handle = file_handles.items[handle_index];
+ const file = file_handles.items[handle_index];
var pos: usize = 0;
{
var magic_buffer: [elf.ARMAG.len]u8 = undefined;
- const n = try handle.preadAll(&magic_buffer, pos);
+ const n = try file.readPositionalAll(io, &magic_buffer, pos);
if (n != magic_buffer.len) return error.BadMagic;
if (!mem.eql(u8, &magic_buffer, elf.ARMAG)) return error.BadMagic;
pos += magic_buffer.len;
}
- const size = (try handle.stat()).size;
+ const size = (try file.stat(io)).size;
var objects: std.ArrayList(Object) = .empty;
defer objects.deinit(gpa);
@@ -36,7 +55,7 @@ pub fn parse(
while (pos < size) {
var hdr: elf.ar_hdr = undefined;
{
- const n = try handle.preadAll(mem.asBytes(&hdr), pos);
+ const n = try file.readPositionalAll(io, mem.asBytes(&hdr), pos);
if (n != @sizeOf(elf.ar_hdr)) return error.UnexpectedEndOfFile;
}
pos += @sizeOf(elf.ar_hdr);
@@ -53,7 +72,7 @@ pub fn parse(
if (hdr.isSymtab() or hdr.isSymtab64()) continue;
if (hdr.isStrtab()) {
try strtab.resize(gpa, obj_size);
- const amt = try handle.preadAll(strtab.items, pos);
+ const amt = try file.readPositionalAll(io, strtab.items, pos);
if (amt != obj_size) return error.InputOutput;
continue;
}
@@ -120,7 +139,7 @@ pub fn setArHdr(opts: struct {
@memset(mem.asBytes(&hdr), 0x20);
{
- var writer: std.Io.Writer = .fixed(&hdr.ar_name);
+ var writer: Io.Writer = .fixed(&hdr.ar_name);
switch (opts.name) {
.symtab => writer.print("{s}", .{elf.SYM64NAME}) catch unreachable,
.strtab => writer.print("//", .{}) catch unreachable,
@@ -133,7 +152,7 @@ pub fn setArHdr(opts: struct {
hdr.ar_gid[0] = '0';
hdr.ar_mode[0] = '0';
{
- var writer: std.Io.Writer = .fixed(&hdr.ar_size);
+ var writer: Io.Writer = .fixed(&hdr.ar_size);
writer.print("{d}", .{opts.size}) catch unreachable;
}
hdr.ar_fmag = elf.ARFMAG.*;
@@ -206,7 +225,7 @@ pub const ArSymtab = struct {
ar: ArSymtab,
elf_file: *Elf,
- fn default(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ fn default(f: Format, writer: *Io.Writer) Io.Writer.Error!void {
const ar = f.ar;
const elf_file = f.elf_file;
for (ar.symtab.items, 0..) |entry, i| {
@@ -261,7 +280,7 @@ pub const ArStrtab = struct {
try writer.writeAll(ar.buffer.items);
}
- pub fn format(ar: ArStrtab, writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ pub fn format(ar: ArStrtab, writer: *Io.Writer) Io.Writer.Error!void {
try writer.print("{f}", .{std.ascii.hexEscape(ar.buffer.items, .lower)});
}
};
@@ -277,19 +296,3 @@ pub const ArState = struct {
/// Total size of the contributing object (excludes ar_hdr).
size: u64 = 0,
};
-
-const std = @import("std");
-const assert = std.debug.assert;
-const elf = std.elf;
-const fs = std.fs;
-const log = std.log.scoped(.link);
-const mem = std.mem;
-const Path = std.Build.Cache.Path;
-const Allocator = std.mem.Allocator;
-
-const Diags = @import("../../link.zig").Diags;
-const Archive = @This();
-const Elf = @import("../Elf.zig");
-const File = @import("file.zig").File;
-const Object = @import("Object.zig");
-const StringTable = @import("../StringTable.zig");
diff --git a/src/link/Elf/AtomList.zig b/src/link/Elf/AtomList.zig
index 8fdf555115..9350f1a276 100644
--- a/src/link/Elf/AtomList.zig
+++ b/src/link/Elf/AtomList.zig
@@ -90,7 +90,9 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
}
pub fn write(list: AtomList, buffer: *std.Io.Writer.Allocating, undefs: anytype, elf_file: *Elf) !void {
- const gpa = elf_file.base.comp.gpa;
+ const comp = elf_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const osec = elf_file.sections.items(.shdr)[list.output_section_index];
assert(osec.sh_type != elf.SHT_NOBITS);
assert(!list.dirty);
@@ -121,12 +123,14 @@ pub fn write(list: AtomList, buffer: *std.Io.Writer.Allocating, undefs: anytype,
try atom_ptr.resolveRelocsAlloc(elf_file, out_code);
}
- try elf_file.base.file.?.pwriteAll(buffer.written(), list.offset(elf_file));
+ try elf_file.base.file.?.writePositionalAll(io, buffer.written(), list.offset(elf_file));
buffer.clearRetainingCapacity();
}
pub fn writeRelocatable(list: AtomList, buffer: *std.array_list.Managed(u8), elf_file: *Elf) !void {
- const gpa = elf_file.base.comp.gpa;
+ const comp = elf_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const osec = elf_file.sections.items(.shdr)[list.output_section_index];
assert(osec.sh_type != elf.SHT_NOBITS);
@@ -152,7 +156,7 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.array_list.Managed(u8), elf
@memcpy(out_code, code);
}
- try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file));
+ try elf_file.base.file.?.writePositionalAll(io, buffer.items, list.offset(elf_file));
buffer.clearRetainingCapacity();
}
diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig
index d51a82b266..ebdd1f2098 100644
--- a/src/link/Elf/Object.zig
+++ b/src/link/Elf/Object.zig
@@ -1,3 +1,30 @@
+const Object = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const eh_frame = @import("eh_frame.zig");
+const elf = std.elf;
+const fs = std.fs;
+const log = std.log.scoped(.link);
+const math = std.math;
+const mem = std.mem;
+const Path = std.Build.Cache.Path;
+const Allocator = std.mem.Allocator;
+
+const Diags = @import("../../link.zig").Diags;
+const Archive = @import("Archive.zig");
+const Atom = @import("Atom.zig");
+const AtomList = @import("AtomList.zig");
+const Cie = eh_frame.Cie;
+const Elf = @import("../Elf.zig");
+const Fde = eh_frame.Fde;
+const File = @import("file.zig").File;
+const Merge = @import("Merge.zig");
+const Symbol = @import("Symbol.zig");
+const Alignment = Atom.Alignment;
+const riscv = @import("../riscv.zig");
+
archive: ?InArchive = null,
/// Archive files cannot contain subdirectories, so only the basename is needed
/// for output. However, the full path is kept for error reporting.
@@ -65,10 +92,11 @@ pub fn deinit(self: *Object, gpa: Allocator) void {
pub fn parse(
self: *Object,
gpa: Allocator,
+ io: Io,
diags: *Diags,
/// For error reporting purposes only.
path: Path,
- handle: fs.File,
+ handle: Io.File,
target: *const std.Target,
debug_fmt_strip: bool,
default_sym_version: elf.Versym,
@@ -78,7 +106,7 @@ pub fn parse(
// Allocate atom index 0 to null atom
try self.atoms.append(gpa, .{ .extra_index = try self.addAtomExtra(gpa, .{}) });
- try self.initAtoms(gpa, diags, path, handle, debug_fmt_strip, target);
+ try self.initAtoms(gpa, io, diags, path, handle, debug_fmt_strip, target);
try self.initSymbols(gpa, default_sym_version);
for (self.shdrs.items, 0..) |shdr, i| {
@@ -87,7 +115,7 @@ pub fn parse(
if ((target.cpu.arch == .x86_64 and shdr.sh_type == elf.SHT_X86_64_UNWIND) or
mem.eql(u8, self.getString(atom_ptr.name_offset), ".eh_frame"))
{
- try self.parseEhFrame(gpa, handle, @intCast(i), target);
+ try self.parseEhFrame(gpa, io, handle, @intCast(i), target);
}
}
}
@@ -95,15 +123,16 @@ pub fn parse(
pub fn parseCommon(
self: *Object,
gpa: Allocator,
+ io: Io,
diags: *Diags,
path: Path,
- handle: fs.File,
+ handle: Io.File,
target: *const std.Target,
) !void {
const offset = if (self.archive) |ar| ar.offset else 0;
- const file_size = (try handle.stat()).size;
+ const file_size = (try handle.stat(io)).size;
- const header_buffer = try Elf.preadAllAlloc(gpa, handle, offset, @sizeOf(elf.Elf64_Ehdr));
+ const header_buffer = try Elf.preadAllAlloc(gpa, io, handle, offset, @sizeOf(elf.Elf64_Ehdr));
defer gpa.free(header_buffer);
self.header = @as(*align(1) const elf.Elf64_Ehdr, @ptrCast(header_buffer)).*;
if (!mem.eql(u8, self.header.?.e_ident[0..4], elf.MAGIC)) {
@@ -127,7 +156,7 @@ pub fn parseCommon(
return diags.failParse(path, "corrupt header: section header table extends past the end of file", .{});
}
- const shdrs_buffer = try Elf.preadAllAlloc(gpa, handle, offset + shoff, shsize);
+ const shdrs_buffer = try Elf.preadAllAlloc(gpa, io, handle, offset + shoff, shsize);
defer gpa.free(shdrs_buffer);
const shdrs = @as([*]align(1) const elf.Elf64_Shdr, @ptrCast(shdrs_buffer.ptr))[0..shnum];
try self.shdrs.appendUnalignedSlice(gpa, shdrs);
@@ -140,7 +169,7 @@ pub fn parseCommon(
}
}
- const shstrtab = try self.preadShdrContentsAlloc(gpa, handle, self.header.?.e_shstrndx);
+ const shstrtab = try self.preadShdrContentsAlloc(gpa, io, handle, self.header.?.e_shstrndx);
defer gpa.free(shstrtab);
for (self.shdrs.items) |shdr| {
if (shdr.sh_name >= shstrtab.len) {
@@ -158,7 +187,7 @@ pub fn parseCommon(
const shdr = self.shdrs.items[index];
self.first_global = shdr.sh_info;
- const raw_symtab = try self.preadShdrContentsAlloc(gpa, handle, index);
+ const raw_symtab = try self.preadShdrContentsAlloc(gpa, io, handle, index);
defer gpa.free(raw_symtab);
const nsyms = math.divExact(usize, raw_symtab.len, @sizeOf(elf.Elf64_Sym)) catch {
return diags.failParse(path, "symbol table not evenly divisible", .{});
@@ -166,7 +195,7 @@ pub fn parseCommon(
const symtab = @as([*]align(1) const elf.Elf64_Sym, @ptrCast(raw_symtab.ptr))[0..nsyms];
const strtab_bias = @as(u32, @intCast(self.strtab.items.len));
- const strtab = try self.preadShdrContentsAlloc(gpa, handle, shdr.sh_link);
+ const strtab = try self.preadShdrContentsAlloc(gpa, io, handle, shdr.sh_link);
defer gpa.free(strtab);
try self.strtab.appendSlice(gpa, strtab);
@@ -262,9 +291,10 @@ pub fn validateEFlags(
fn initAtoms(
self: *Object,
gpa: Allocator,
+ io: Io,
diags: *Diags,
path: Path,
- handle: fs.File,
+ handle: Io.File,
debug_fmt_strip: bool,
target: *const std.Target,
) !void {
@@ -297,7 +327,7 @@ fn initAtoms(
};
const shndx: u32 = @intCast(i);
- const group_raw_data = try self.preadShdrContentsAlloc(gpa, handle, shndx);
+ const group_raw_data = try self.preadShdrContentsAlloc(gpa, io, handle, shndx);
defer gpa.free(group_raw_data);
const group_nmembers = math.divExact(usize, group_raw_data.len, @sizeOf(u32)) catch {
return diags.failParse(path, "corrupt section group: not evenly divisible ", .{});
@@ -338,7 +368,7 @@ fn initAtoms(
const shndx: u32 = @intCast(i);
if (self.skipShdr(shndx, debug_fmt_strip)) continue;
const size, const alignment = if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) blk: {
- const data = try self.preadShdrContentsAlloc(gpa, handle, shndx);
+ const data = try self.preadShdrContentsAlloc(gpa, io, handle, shndx);
defer gpa.free(data);
const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
break :blk .{ chdr.ch_size, Alignment.fromNonzeroByteUnits(chdr.ch_addralign) };
@@ -359,7 +389,7 @@ fn initAtoms(
elf.SHT_REL, elf.SHT_RELA => {
const atom_index = self.atoms_indexes.items[shdr.sh_info];
if (self.atom(atom_index)) |atom_ptr| {
- const relocs = try self.preadRelocsAlloc(gpa, handle, @intCast(i));
+ const relocs = try self.preadRelocsAlloc(gpa, io, handle, @intCast(i));
defer gpa.free(relocs);
atom_ptr.relocs_section_index = @intCast(i);
const rel_index: u32 = @intCast(self.relocs.items.len);
@@ -421,7 +451,8 @@ fn initSymbols(
fn parseEhFrame(
self: *Object,
gpa: Allocator,
- handle: fs.File,
+ io: Io,
+ handle: Io.File,
shndx: u32,
target: *const std.Target,
) !void {
@@ -430,12 +461,12 @@ fn parseEhFrame(
else => {},
} else null;
- const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx);
+ const raw = try self.preadShdrContentsAlloc(gpa, io, handle, shndx);
defer gpa.free(raw);
const data_start: u32 = @intCast(self.eh_frame_data.items.len);
try self.eh_frame_data.appendSlice(gpa, raw);
const relocs = if (relocs_shndx) |index|
- try self.preadRelocsAlloc(gpa, handle, index)
+ try self.preadRelocsAlloc(gpa, io, handle, index)
else
&[0]elf.Elf64_Rela{};
defer gpa.free(relocs);
@@ -1095,13 +1126,18 @@ pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, elf_file: *Elf
}
pub fn updateArSize(self: *Object, elf_file: *Elf) !void {
+ const comp = elf_file.base.comp;
+ const io = comp.io;
self.output_ar_state.size = if (self.archive) |ar| ar.size else size: {
const handle = elf_file.fileHandle(self.file_handle);
- break :size (try handle.stat()).size;
+ break :size (try handle.stat(io)).size;
};
}
pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void {
+ const comp = elf_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const size = std.math.cast(usize, self.output_ar_state.size) orelse return error.Overflow;
const offset: u64 = if (self.archive) |ar| ar.offset else 0;
const name = fs.path.basename(self.path.sub_path);
@@ -1114,10 +1150,9 @@ pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void {
});
try writer.writeAll(mem.asBytes(&hdr));
const handle = elf_file.fileHandle(self.file_handle);
- const gpa = elf_file.base.comp.gpa;
const data = try gpa.alloc(u8, size);
defer gpa.free(data);
- const amt = try handle.preadAll(data, offset);
+ const amt = try handle.readPositionalAll(io, data, offset);
if (amt != size) return error.InputOutput;
try writer.writeAll(data);
}
@@ -1190,11 +1225,12 @@ pub fn writeSymtab(self: *Object, elf_file: *Elf) void {
/// Caller owns the memory.
pub fn codeDecompressAlloc(self: *Object, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
const comp = elf_file.base.comp;
+ const io = comp.io;
const gpa = comp.gpa;
const atom_ptr = self.atom(atom_index).?;
const shdr = atom_ptr.inputShdr(elf_file);
const handle = elf_file.fileHandle(self.file_handle);
- const data = try self.preadShdrContentsAlloc(gpa, handle, atom_ptr.input_section_index);
+ const data = try self.preadShdrContentsAlloc(gpa, io, handle, atom_ptr.input_section_index);
defer if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) gpa.free(data);
if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) {
@@ -1310,18 +1346,18 @@ fn addString(self: *Object, gpa: Allocator, str: []const u8) !u32 {
}
/// Caller owns the memory.
-fn preadShdrContentsAlloc(self: Object, gpa: Allocator, handle: fs.File, index: u32) ![]u8 {
+fn preadShdrContentsAlloc(self: Object, gpa: Allocator, io: Io, handle: Io.File, index: u32) ![]u8 {
assert(index < self.shdrs.items.len);
const offset = if (self.archive) |ar| ar.offset else 0;
const shdr = self.shdrs.items[index];
const sh_offset = math.cast(u64, shdr.sh_offset) orelse return error.Overflow;
const sh_size = math.cast(u64, shdr.sh_size) orelse return error.Overflow;
- return Elf.preadAllAlloc(gpa, handle, offset + sh_offset, sh_size);
+ return Elf.preadAllAlloc(gpa, io, handle, offset + sh_offset, sh_size);
}
/// Caller owns the memory.
-fn preadRelocsAlloc(self: Object, gpa: Allocator, handle: fs.File, shndx: u32) ![]align(1) const elf.Elf64_Rela {
- const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx);
+fn preadRelocsAlloc(self: Object, gpa: Allocator, io: Io, handle: Io.File, shndx: u32) ![]align(1) const elf.Elf64_Rela {
+ const raw = try self.preadShdrContentsAlloc(gpa, io, handle, shndx);
const num = @divExact(raw.len, @sizeOf(elf.Elf64_Rela));
return @as([*]align(1) const elf.Elf64_Rela, @ptrCast(raw.ptr))[0..num];
}
@@ -1552,29 +1588,3 @@ const InArchive = struct {
offset: u64,
size: u32,
};
-
-const Object = @This();
-
-const std = @import("std");
-const assert = std.debug.assert;
-const eh_frame = @import("eh_frame.zig");
-const elf = std.elf;
-const fs = std.fs;
-const log = std.log.scoped(.link);
-const math = std.math;
-const mem = std.mem;
-const Path = std.Build.Cache.Path;
-const Allocator = std.mem.Allocator;
-
-const Diags = @import("../../link.zig").Diags;
-const Archive = @import("Archive.zig");
-const Atom = @import("Atom.zig");
-const AtomList = @import("AtomList.zig");
-const Cie = eh_frame.Cie;
-const Elf = @import("../Elf.zig");
-const Fde = eh_frame.Fde;
-const File = @import("file.zig").File;
-const Merge = @import("Merge.zig");
-const Symbol = @import("Symbol.zig");
-const Alignment = Atom.Alignment;
-const riscv = @import("../riscv.zig");
diff --git a/src/link/Elf/SharedObject.zig b/src/link/Elf/SharedObject.zig
index 1e17aa34a8..c97d53a862 100644
--- a/src/link/Elf/SharedObject.zig
+++ b/src/link/Elf/SharedObject.zig
@@ -1,3 +1,20 @@
+const SharedObject = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const elf = std.elf;
+const log = std.log.scoped(.elf);
+const mem = std.mem;
+const Path = std.Build.Cache.Path;
+const Stat = std.Build.Cache.File.Stat;
+const Allocator = mem.Allocator;
+
+const Elf = @import("../Elf.zig");
+const File = @import("file.zig").File;
+const Symbol = @import("Symbol.zig");
+const Diags = @import("../../link.zig").Diags;
+
path: Path,
index: File.Index,
@@ -92,16 +109,17 @@ pub const Parsed = struct {
pub fn parseHeader(
gpa: Allocator,
+ io: Io,
diags: *Diags,
file_path: Path,
- fs_file: std.fs.File,
+ file: Io.File,
stat: Stat,
target: *const std.Target,
) !Header {
var ehdr: elf.Elf64_Ehdr = undefined;
{
const buf = mem.asBytes(&ehdr);
- const amt = try fs_file.preadAll(buf, 0);
+ const amt = try file.readPositionalAll(io, buf, 0);
if (amt != buf.len) return error.UnexpectedEndOfFile;
}
if (!mem.eql(u8, ehdr.e_ident[0..4], "\x7fELF")) return error.BadMagic;
@@ -118,7 +136,7 @@ pub fn parseHeader(
errdefer gpa.free(sections);
{
const buf = mem.sliceAsBytes(sections);
- const amt = try fs_file.preadAll(buf, shoff);
+ const amt = try file.readPositionalAll(io, buf, shoff);
if (amt != buf.len) return error.UnexpectedEndOfFile;
}
@@ -143,7 +161,7 @@ pub fn parseHeader(
const dynamic_table = try gpa.alloc(elf.Elf64_Dyn, n);
errdefer gpa.free(dynamic_table);
const buf = mem.sliceAsBytes(dynamic_table);
- const amt = try fs_file.preadAll(buf, shdr.sh_offset);
+ const amt = try file.readPositionalAll(io, buf, shdr.sh_offset);
if (amt != buf.len) return error.UnexpectedEndOfFile;
break :dt dynamic_table;
} else &.{};
@@ -158,7 +176,7 @@ pub fn parseHeader(
const strtab_shdr = sections[dynsym_shdr.sh_link];
const n = std.math.cast(usize, strtab_shdr.sh_size) orelse return error.Overflow;
const buf = try strtab.addManyAsSlice(gpa, n);
- const amt = try fs_file.preadAll(buf, strtab_shdr.sh_offset);
+ const amt = try file.readPositionalAll(io, buf, strtab_shdr.sh_offset);
if (amt != buf.len) return error.UnexpectedEndOfFile;
}
@@ -190,9 +208,10 @@ pub fn parseHeader(
pub fn parse(
gpa: Allocator,
+ io: Io,
/// Moves resources from header. Caller may unconditionally deinit.
header: *Header,
- fs_file: std.fs.File,
+ file: Io.File,
) !Parsed {
const symtab = if (header.dynsym_sect_index) |index| st: {
const shdr = header.sections[index];
@@ -200,7 +219,7 @@ pub fn parse(
const symtab = try gpa.alloc(elf.Elf64_Sym, n);
errdefer gpa.free(symtab);
const buf = mem.sliceAsBytes(symtab);
- const amt = try fs_file.preadAll(buf, shdr.sh_offset);
+ const amt = try file.readPositionalAll(io, buf, shdr.sh_offset);
if (amt != buf.len) return error.UnexpectedEndOfFile;
break :st symtab;
} else &.{};
@@ -211,7 +230,7 @@ pub fn parse(
if (header.verdef_sect_index) |shndx| {
const shdr = header.sections[shndx];
- const verdefs = try Elf.preadAllAlloc(gpa, fs_file, shdr.sh_offset, shdr.sh_size);
+ const verdefs = try Elf.preadAllAlloc(gpa, io, file, shdr.sh_offset, shdr.sh_size);
defer gpa.free(verdefs);
var offset: u32 = 0;
@@ -237,7 +256,7 @@ pub fn parse(
const versyms = try gpa.alloc(elf.Versym, symtab.len);
errdefer gpa.free(versyms);
const buf = mem.sliceAsBytes(versyms);
- const amt = try fs_file.preadAll(buf, shdr.sh_offset);
+ const amt = try file.readPositionalAll(io, buf, shdr.sh_offset);
if (amt != buf.len) return error.UnexpectedEndOfFile;
break :vs versyms;
} else &.{};
@@ -534,19 +553,3 @@ const Format = struct {
}
}
};
-
-const SharedObject = @This();
-
-const std = @import("std");
-const assert = std.debug.assert;
-const elf = std.elf;
-const log = std.log.scoped(.elf);
-const mem = std.mem;
-const Path = std.Build.Cache.Path;
-const Stat = std.Build.Cache.File.Stat;
-const Allocator = mem.Allocator;
-
-const Elf = @import("../Elf.zig");
-const File = @import("file.zig").File;
-const Symbol = @import("Symbol.zig");
-const Diags = @import("../../link.zig").Diags;
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index 1450e3ab92..588b4e3fc3 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -740,7 +740,9 @@ pub fn checkDuplicates(self: *ZigObject, dupes: anytype, elf_file: *Elf) error{O
/// We need this so that we can write to an archive.
/// TODO implement writing ZigObject data directly to a buffer instead.
pub fn readFileContents(self: *ZigObject, elf_file: *Elf) !void {
- const gpa = elf_file.base.comp.gpa;
+ const comp = elf_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const shsize: u64 = switch (elf_file.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Shdr),
.p64 => @sizeOf(elf.Elf64_Shdr),
@@ -753,7 +755,7 @@ pub fn readFileContents(self: *ZigObject, elf_file: *Elf) !void {
const size = std.math.cast(usize, end_pos) orelse return error.Overflow;
try self.data.resize(gpa, size);
- const amt = try elf_file.base.file.?.preadAll(self.data.items, 0);
+ const amt = try elf_file.base.file.?.readPositionalAll(io, self.data.items, 0);
if (amt != size) return error.InputOutput;
}
@@ -901,13 +903,15 @@ pub fn writeSymtab(self: ZigObject, elf_file: *Elf) void {
/// Returns atom's code.
/// Caller owns the memory.
pub fn codeAlloc(self: *ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
- const gpa = elf_file.base.comp.gpa;
+ const comp = elf_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const atom_ptr = self.atom(atom_index).?;
const file_offset = atom_ptr.offset(elf_file);
const size = std.math.cast(usize, atom_ptr.size) orelse return error.Overflow;
const code = try gpa.alloc(u8, size);
errdefer gpa.free(code);
- const amt = try elf_file.base.file.?.preadAll(code, file_offset);
+ const amt = try elf_file.base.file.?.readPositionalAll(io, code, file_offset);
if (amt != code.len) {
log.err("fetching code for {s} failed", .{atom_ptr.name(elf_file)});
return error.InputOutput;
@@ -1365,6 +1369,8 @@ fn updateNavCode(
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
+ const comp = elf_file.base.comp;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
@@ -1449,8 +1455,8 @@ fn updateNavCode(
const shdr = elf_file.sections.items(.shdr)[shdr_index];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = atom_ptr.offset(elf_file);
- elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
- return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
+ elf_file.base.file.?.writePositionalAll(io, code, file_offset) catch |err|
+ return elf_file.base.cgFail(nav_index, "failed to write to output file: {t}", .{err});
log.debug("writing {f} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len });
}
}
@@ -1467,6 +1473,8 @@ fn updateTlv(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
+ const comp = elf_file.base.comp;
+ const io = comp.io;
const nav = ip.getNav(nav_index);
log.debug("updateTlv {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
@@ -1503,8 +1511,8 @@ fn updateTlv(
const shdr = elf_file.sections.items(.shdr)[shndx];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = atom_ptr.offset(elf_file);
- elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
- return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
+ elf_file.base.file.?.writePositionalAll(io, code, file_offset) catch |err|
+ return elf_file.base.cgFail(nav_index, "failed to write to output file: {t}", .{err});
log.debug("writing TLV {s} from 0x{x} to 0x{x}", .{
atom_ptr.name(elf_file),
file_offset,
@@ -2003,6 +2011,8 @@ fn trampolineSize(cpu_arch: std.Target.Cpu.Arch) u64 {
}
fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
+ const comp = elf_file.base.comp;
+ const io = comp.io;
const atom_ptr = tr_sym.atom(elf_file).?;
const fileoff = atom_ptr.offset(elf_file);
const source_addr = tr_sym.address(.{}, elf_file);
@@ -2012,7 +2022,7 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
.x86_64 => try x86_64.writeTrampolineCode(source_addr, target_addr, &buf),
else => @panic("TODO implement write trampoline for this CPU arch"),
};
- try elf_file.base.file.?.pwriteAll(out, fileoff);
+ try elf_file.base.file.?.writePositionalAll(io, out, fileoff);
if (elf_file.base.child_pid) |pid| {
switch (builtin.os.tag) {
diff --git a/src/link/Elf/file.zig b/src/link/Elf/file.zig
index 50f5159d18..52d3c6e6f0 100644
--- a/src/link/Elf/file.zig
+++ b/src/link/Elf/file.zig
@@ -1,3 +1,20 @@
+const std = @import("std");
+const Io = std.Io;
+const elf = std.elf;
+const log = std.log.scoped(.link);
+const Path = std.Build.Cache.Path;
+const Allocator = std.mem.Allocator;
+
+const Archive = @import("Archive.zig");
+const Atom = @import("Atom.zig");
+const Cie = @import("eh_frame.zig").Cie;
+const Elf = @import("../Elf.zig");
+const LinkerDefined = @import("LinkerDefined.zig");
+const Object = @import("Object.zig");
+const SharedObject = @import("SharedObject.zig");
+const Symbol = @import("Symbol.zig");
+const ZigObject = @import("ZigObject.zig");
+
pub const File = union(enum) {
zig_object: *ZigObject,
linker_defined: *LinkerDefined,
@@ -279,22 +296,6 @@ pub const File = union(enum) {
shared_object: SharedObject,
};
- pub const Handle = std.fs.File;
+ pub const Handle = Io.File;
pub const HandleIndex = Index;
};
-
-const std = @import("std");
-const elf = std.elf;
-const log = std.log.scoped(.link);
-const Path = std.Build.Cache.Path;
-const Allocator = std.mem.Allocator;
-
-const Archive = @import("Archive.zig");
-const Atom = @import("Atom.zig");
-const Cie = @import("eh_frame.zig").Cie;
-const Elf = @import("../Elf.zig");
-const LinkerDefined = @import("LinkerDefined.zig");
-const Object = @import("Object.zig");
-const SharedObject = @import("SharedObject.zig");
-const Symbol = @import("Symbol.zig");
-const ZigObject = @import("ZigObject.zig");
diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig
index 7adeecdcde..ec3ff252fb 100644
--- a/src/link/Elf/relocatable.zig
+++ b/src/link/Elf/relocatable.zig
@@ -1,5 +1,26 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const elf = std.elf;
+const math = std.math;
+const mem = std.mem;
+const Path = std.Build.Cache.Path;
+const log = std.log.scoped(.link);
+const state_log = std.log.scoped(.link_state);
+
+const build_options = @import("build_options");
+
+const eh_frame = @import("eh_frame.zig");
+const link = @import("../../link.zig");
+const Archive = @import("Archive.zig");
+const Compilation = @import("../../Compilation.zig");
+const Elf = @import("../Elf.zig");
+const File = @import("file.zig").File;
+const Object = @import("Object.zig");
+const Symbol = @import("Symbol.zig");
+
pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
const gpa = comp.gpa;
+ const io = comp.io;
const diags = &comp.link_diags;
if (diags.hasErrors()) return error.LinkFailure;
@@ -125,8 +146,8 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
assert(writer.buffered().len == total_size);
- try elf_file.base.file.?.setEndPos(total_size);
- try elf_file.base.file.?.pwriteAll(writer.buffered(), 0);
+ try elf_file.base.file.?.setLength(io, total_size);
+ try elf_file.base.file.?.writePositionalAll(io, writer.buffered(), 0);
if (diags.hasErrors()) return error.LinkFailure;
}
@@ -330,13 +351,7 @@ fn allocateAllocSections(elf_file: *Elf) !void {
if (shdr.sh_offset > 0) {
const existing_size = elf_file.sectionSize(@intCast(shndx));
- const amt = try elf_file.base.file.?.copyRangeAll(
- shdr.sh_offset,
- elf_file.base.file.?,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
+ try elf_file.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size);
}
shdr.sh_offset = new_offset;
@@ -360,7 +375,9 @@ fn writeAtoms(elf_file: *Elf) !void {
}
fn writeSyntheticSections(elf_file: *Elf) !void {
- const gpa = elf_file.base.comp.gpa;
+ const comp = elf_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const slice = elf_file.sections.slice();
const SortRelocs = struct {
@@ -397,7 +414,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
shdr.sh_offset + shdr.sh_size,
});
- try elf_file.base.file.?.pwriteAll(@ptrCast(relocs.items), shdr.sh_offset);
+ try elf_file.base.file.?.writePositionalAll(io, @ptrCast(relocs.items), shdr.sh_offset);
}
if (elf_file.section_indexes.eh_frame) |shndx| {
@@ -417,7 +434,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
shdr.sh_offset + sh_size,
});
assert(writer.buffered().len == sh_size - existing_size);
- try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset + existing_size);
+ try elf_file.base.file.?.writePositionalAll(io, writer.buffered(), shdr.sh_offset + existing_size);
}
if (elf_file.section_indexes.eh_frame_rela) |shndx| {
const shdr = slice.items(.shdr)[shndx];
@@ -435,7 +452,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
shdr.sh_offset,
shdr.sh_offset + shdr.sh_size,
});
- try elf_file.base.file.?.pwriteAll(@ptrCast(relocs.items), shdr.sh_offset);
+ try elf_file.base.file.?.writePositionalAll(io, @ptrCast(relocs.items), shdr.sh_offset);
}
try writeGroups(elf_file);
@@ -444,7 +461,9 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
}
fn writeGroups(elf_file: *Elf) !void {
- const gpa = elf_file.base.comp.gpa;
+ const comp = elf_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
for (elf_file.group_sections.items) |cgs| {
const shdr = elf_file.sections.items(.shdr)[cgs.shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
@@ -457,25 +476,6 @@ fn writeGroups(elf_file: *Elf) !void {
shdr.sh_offset,
shdr.sh_offset + shdr.sh_size,
});
- try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset);
+ try elf_file.base.file.?.writePositionalAll(io, writer.buffered(), shdr.sh_offset);
}
}
-
-const assert = std.debug.assert;
-const build_options = @import("build_options");
-const eh_frame = @import("eh_frame.zig");
-const elf = std.elf;
-const link = @import("../../link.zig");
-const log = std.log.scoped(.link);
-const math = std.math;
-const mem = std.mem;
-const state_log = std.log.scoped(.link_state);
-const Path = std.Build.Cache.Path;
-const std = @import("std");
-
-const Archive = @import("Archive.zig");
-const Compilation = @import("../../Compilation.zig");
-const Elf = @import("../Elf.zig");
-const File = @import("file.zig").File;
-const Object = @import("Object.zig");
-const Symbol = @import("Symbol.zig");
diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig
index 7d12ccedb2..bbdb439385 100644
--- a/src/link/Elf2.zig
+++ b/src/link/Elf2.zig
@@ -1,3 +1,23 @@
+const Elf = @This();
+
+const builtin = @import("builtin");
+const native_endian = builtin.cpu.arch.endian();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const log = std.log.scoped(.link);
+
+const codegen = @import("../codegen.zig");
+const Compilation = @import("../Compilation.zig");
+const InternPool = @import("../InternPool.zig");
+const link = @import("../link.zig");
+const MappedFile = @import("MappedFile.zig");
+const target_util = @import("../target.zig");
+const Type = @import("../Type.zig");
+const Value = @import("../Value.zig");
+const Zcu = @import("../Zcu.zig");
+
base: link.File,
options: link.File.OpenOptions,
mf: MappedFile,
@@ -908,6 +928,7 @@ fn create(
path: std.Build.Cache.Path,
options: link.File.OpenOptions,
) !*Elf {
+ const io = comp.io;
const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .elf);
const class: std.elf.CLASS = switch (target.ptrBitWidth()) {
@@ -953,11 +974,11 @@ fn create(
};
const elf = try arena.create(Elf);
- const file = try path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{
+ const file = try path.root_dir.handle.createFile(io, path.sub_path, .{
.read = true,
- .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode),
+ .permissions = link.File.determinePermissions(comp.config.output_mode, comp.config.link_mode),
});
- errdefer file.close(comp.io);
+ errdefer file.close(io);
elf.* = .{
.base = .{
.tag = .elf2,
@@ -965,7 +986,7 @@ fn create(
.comp = comp,
.emit = path,
- .file = .adaptFromNewApi(file),
+ .file = file,
.gc_sections = false,
.print_gc_sections = false,
.build_id = .none,
@@ -973,7 +994,7 @@ fn create(
.stack_size = 0,
},
.options = options,
- .mf = try .init(file, comp.gpa),
+ .mf = try .init(file, comp.gpa, io),
.ni = .{
.tls = .none,
},
@@ -1973,8 +1994,8 @@ pub fn lazySymbol(elf: *Elf, lazy: link.File.LazySymbol) !Symbol.Index {
return lazy_gop.value_ptr.*;
}
-pub fn loadInput(elf: *Elf, input: link.Input) (std.fs.File.Reader.SizeError ||
- std.Io.File.Reader.Error || MappedFile.Error || error{ EndOfStream, BadMagic, LinkFailure })!void {
+pub fn loadInput(elf: *Elf, input: link.Input) (Io.File.Reader.SizeError ||
+ Io.File.Reader.Error || MappedFile.Error || error{ EndOfStream, BadMagic, LinkFailure })!void {
const io = elf.base.comp.io;
var buf: [4096]u8 = undefined;
switch (input) {
@@ -2007,7 +2028,7 @@ pub fn loadInput(elf: *Elf, input: link.Input) (std.fs.File.Reader.SizeError ||
.dso_exact => |dso_exact| try elf.loadDsoExact(dso_exact.name),
}
}
-fn loadArchive(elf: *Elf, path: std.Build.Cache.Path, fr: *std.Io.File.Reader) !void {
+fn loadArchive(elf: *Elf, path: std.Build.Cache.Path, fr: *Io.File.Reader) !void {
const comp = elf.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
@@ -2067,7 +2088,7 @@ fn loadObject(
elf: *Elf,
path: std.Build.Cache.Path,
member: ?[]const u8,
- fr: *std.Io.File.Reader,
+ fr: *Io.File.Reader,
fl: MappedFile.Node.FileLocation,
) !void {
const comp = elf.base.comp;
@@ -2310,7 +2331,7 @@ fn loadObject(
},
}
}
-fn loadDso(elf: *Elf, path: std.Build.Cache.Path, fr: *std.Io.File.Reader) !void {
+fn loadDso(elf: *Elf, path: std.Build.Cache.Path, fr: *Io.File.Reader) !void {
const comp = elf.base.comp;
const diags = &comp.link_diags;
const r = &fr.interface;
@@ -3305,12 +3326,13 @@ fn flushInputSection(elf: *Elf, isi: Node.InputSectionIndex) !void {
const file_loc = isi.fileLocation(elf);
if (file_loc.size == 0) return;
const comp = elf.base.comp;
+ const io = comp.io;
const gpa = comp.gpa;
const ii = isi.input(elf);
const path = ii.path(elf);
- const file = try path.root_dir.handle.adaptToNewApi().openFile(comp.io, path.sub_path, .{});
- defer file.close(comp.io);
- var fr = file.reader(comp.io, &.{});
+ const file = try path.root_dir.handle.openFile(io, path.sub_path, .{});
+ defer file.close(io);
+ var fr = file.reader(io, &.{});
try fr.seekTo(file_loc.offset);
var nw: MappedFile.Node.Writer = undefined;
const si = isi.symbol(elf);
@@ -3707,10 +3729,16 @@ pub fn deleteExport(elf: *Elf, exported: Zcu.Exported, name: InternPool.NullTerm
_ = name;
}
-pub fn dump(elf: *Elf, tid: Zcu.PerThread.Id) void {
- const w, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- elf.printNode(tid, w, .root, 0) catch {};
+pub fn dump(elf: *Elf, tid: Zcu.PerThread.Id) Io.Cancelable!void {
+ const comp = elf.base.comp;
+ const io = comp.io;
+ var buffer: [512]u8 = undefined;
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.lockStderr();
+ const w = &stderr.file_writer.interface;
+ elf.printNode(tid, w, .root, 0) catch |err| switch (err) {
+ error.WriteFailed => return stderr.err.?,
+ };
}
pub fn printNode(
@@ -3822,19 +3850,3 @@ pub fn printNode(
try w.writeByte('\n');
}
}
-
-const assert = std.debug.assert;
-const builtin = @import("builtin");
-const codegen = @import("../codegen.zig");
-const Compilation = @import("../Compilation.zig");
-const Elf = @This();
-const InternPool = @import("../InternPool.zig");
-const link = @import("../link.zig");
-const log = std.log.scoped(.link);
-const MappedFile = @import("MappedFile.zig");
-const native_endian = builtin.cpu.arch.endian();
-const std = @import("std");
-const target_util = @import("../target.zig");
-const Type = @import("../Type.zig");
-const Value = @import("../Value.zig");
-const Zcu = @import("../Zcu.zig");
diff --git a/src/link/Lld.zig b/src/link/Lld.zig
index 2345090482..b2a0f6e396 100644
--- a/src/link/Lld.zig
+++ b/src/link/Lld.zig
@@ -359,6 +359,7 @@ fn linkAsArchive(lld: *Lld, arena: Allocator) !void {
fn coffLink(lld: *Lld, arena: Allocator) !void {
const comp = lld.base.comp;
const gpa = comp.gpa;
+ const io = comp.io;
const base = &lld.base;
const coff = &lld.ofmt.coff;
@@ -400,11 +401,12 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
// regarding eliding redundant object -> object transformations.
return error.NoObjectsToLink;
};
- try std.fs.Dir.copyFile(
+ try Io.Dir.copyFile(
the_object_path.root_dir.handle,
the_object_path.sub_path,
directory.handle,
base.emit.sub_path,
+ io,
.{},
);
} else {
@@ -718,13 +720,13 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
argv.appendAssumeCapacity(try crt_file.full_object_path.toString(arena));
continue;
}
- if (try findLib(arena, lib_basename, coff.lib_directories)) |full_path| {
+ if (try findLib(arena, io, lib_basename, coff.lib_directories)) |full_path| {
argv.appendAssumeCapacity(full_path);
continue;
}
if (target.abi.isGnu()) {
const fallback_name = try allocPrint(arena, "lib{s}.dll.a", .{key});
- if (try findLib(arena, fallback_name, coff.lib_directories)) |full_path| {
+ if (try findLib(arena, io, fallback_name, coff.lib_directories)) |full_path| {
argv.appendAssumeCapacity(full_path);
continue;
}
@@ -741,9 +743,9 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
try spawnLld(comp, arena, argv.items);
}
}
-fn findLib(arena: Allocator, name: []const u8, lib_directories: []const Cache.Directory) !?[]const u8 {
+fn findLib(arena: Allocator, io: Io, name: []const u8, lib_directories: []const Cache.Directory) !?[]const u8 {
for (lib_directories) |lib_directory| {
- lib_directory.handle.access(name, .{}) catch |err| switch (err) {
+ lib_directory.handle.access(io, name, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| return e,
};
@@ -755,6 +757,7 @@ fn findLib(arena: Allocator, name: []const u8, lib_directories: []const Cache.Di
fn elfLink(lld: *Lld, arena: Allocator) !void {
const comp = lld.base.comp;
const gpa = comp.gpa;
+ const io = comp.io;
const diags = &comp.link_diags;
const base = &lld.base;
const elf = &lld.ofmt.elf;
@@ -816,11 +819,12 @@ fn elfLink(lld: *Lld, arena: Allocator) !void {
// regarding eliding redundant object -> object transformations.
return error.NoObjectsToLink;
};
- try std.fs.Dir.copyFile(
+ try Io.Dir.copyFile(
the_object_path.root_dir.handle,
the_object_path.sub_path,
directory.handle,
base.emit.sub_path,
+ io,
.{},
);
} else {
@@ -1326,6 +1330,7 @@ fn getLDMOption(target: *const std.Target) ?[]const u8 {
}
fn wasmLink(lld: *Lld, arena: Allocator) !void {
const comp = lld.base.comp;
+ const diags = &comp.link_diags;
const shared_memory = comp.config.shared_memory;
const export_memory = comp.config.export_memory;
const import_memory = comp.config.import_memory;
@@ -1334,6 +1339,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
const wasm = &lld.ofmt.wasm;
const gpa = comp.gpa;
+ const io = comp.io;
const directory = base.emit.root_dir; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{base.emit.sub_path});
@@ -1371,11 +1377,12 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
// regarding eliding redundant object -> object transformations.
return error.NoObjectsToLink;
};
- try fs.Dir.copyFile(
+ try Io.Dir.copyFile(
the_object_path.root_dir.handle,
the_object_path.sub_path,
directory.handle,
base.emit.sub_path,
+ io,
.{},
);
} else {
@@ -1565,27 +1572,23 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
// is not the case, it means we will get "exec format error" when trying to run
// it, and then can react to that in the same way as trying to run an ELF file
// from a foreign CPU architecture.
- if (fs.has_executable_bit and target.os.tag == .wasi and
+ if (Io.File.Permissions.has_executable_bit and target.os.tag == .wasi and
comp.config.output_mode == .Exe)
{
- // TODO: what's our strategy for reporting linker errors from this function?
- // report a nice error here with the file path if it fails instead of
- // just returning the error code.
// chmod does not interact with umask, so we use a conservative -rwxr--r-- here.
- std.posix.fchmodat(fs.cwd().fd, full_out_path, 0o744, 0) catch |err| switch (err) {
- error.OperationNotSupported => unreachable, // Not a symlink.
- else => |e| return e,
- };
+ Io.Dir.cwd().setFilePermissions(io, full_out_path, .fromMode(0o744), .{}) catch |err|
+ return diags.fail("{s}: failed to enable executable permissions: {t}", .{ full_out_path, err });
}
}
}
fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !void {
const io = comp.io;
+ const gpa = comp.gpa;
if (comp.verbose_link) {
// Skip over our own name so that the LLD linker name is the first argv item.
- Compilation.dump_argv(argv[1..]);
+ try Compilation.dumpArgv(io, argv[1..]);
}
// If possible, we run LLD as a child process because it does not always
@@ -1599,7 +1602,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
}
var stderr: []u8 = &.{};
- defer comp.gpa.free(stderr);
+ defer gpa.free(stderr);
var child = std.process.Child.init(argv, arena);
const term = (if (comp.clang_passthrough_mode) term: {
@@ -1607,16 +1610,16 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
- break :term child.spawnAndWait();
+ break :term child.spawnAndWait(io);
} else term: {
child.stdin_behavior = .Ignore;
child.stdout_behavior = .Ignore;
child.stderr_behavior = .Pipe;
- child.spawn() catch |err| break :term err;
+ child.spawn(io) catch |err| break :term err;
var stderr_reader = child.stderr.?.readerStreaming(io, &.{});
- stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited);
- break :term child.wait();
+ stderr = try stderr_reader.interface.allocRemaining(gpa, .unlimited);
+ break :term child.wait(io);
}) catch |first_err| term: {
const err = switch (first_err) {
error.NameTooLong => err: {
@@ -1624,13 +1627,13 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
const rand_int = std.crypto.random.int(u64);
const rsp_path = "tmp" ++ s ++ std.fmt.hex(rand_int) ++ ".rsp";
- const rsp_file = try comp.dirs.local_cache.handle.createFile(rsp_path, .{});
- defer comp.dirs.local_cache.handle.deleteFileZ(rsp_path) catch |err|
- log.warn("failed to delete response file {s}: {s}", .{ rsp_path, @errorName(err) });
+ const rsp_file = try comp.dirs.local_cache.handle.createFile(io, rsp_path, .{});
+ defer comp.dirs.local_cache.handle.deleteFile(io, rsp_path) catch |err|
+ log.warn("failed to delete response file {s}: {t}", .{ rsp_path, err });
{
- defer rsp_file.close();
+ defer rsp_file.close(io);
var rsp_file_buffer: [1024]u8 = undefined;
- var rsp_file_writer = rsp_file.writer(&rsp_file_buffer);
+ var rsp_file_writer = rsp_file.writer(io, &rsp_file_buffer);
const rsp_writer = &rsp_file_writer.interface;
for (argv[2..]) |arg| {
try rsp_writer.writeByte('"');
@@ -1657,16 +1660,16 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
rsp_child.stdout_behavior = .Inherit;
rsp_child.stderr_behavior = .Inherit;
- break :term rsp_child.spawnAndWait() catch |err| break :err err;
+ break :term rsp_child.spawnAndWait(io) catch |err| break :err err;
} else {
rsp_child.stdin_behavior = .Ignore;
rsp_child.stdout_behavior = .Ignore;
rsp_child.stderr_behavior = .Pipe;
- rsp_child.spawn() catch |err| break :err err;
+ rsp_child.spawn(io) catch |err| break :err err;
var stderr_reader = rsp_child.stderr.?.readerStreaming(io, &.{});
- stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited);
- break :term rsp_child.wait() catch |err| break :err err;
+ stderr = try stderr_reader.interface.allocRemaining(gpa, .unlimited);
+ break :term rsp_child.wait(io) catch |err| break :err err;
}
},
else => first_err,
@@ -1692,6 +1695,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
}
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
const allocPrint = std.fmt.allocPrint;
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 2c4ffd6632..b747b3de56 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -219,10 +219,12 @@ pub fn createEmpty(
};
errdefer self.base.destroy();
- self.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
+ const io = comp.io;
+
+ self.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{
.truncate = true,
.read = true,
- .mode = link.File.determineMode(output_mode, link_mode),
+ .permissions = link.File.determinePermissions(output_mode, link_mode),
});
// Append null file
@@ -267,14 +269,16 @@ pub fn open(
}
pub fn deinit(self: *MachO) void {
- const gpa = self.base.comp.gpa;
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
if (self.d_sym) |*d_sym| {
d_sym.deinit();
}
for (self.file_handles.items) |handle| {
- handle.close();
+ handle.close(io);
}
self.file_handles.deinit(gpa);
@@ -343,7 +347,8 @@ pub fn flush(
const comp = self.base.comp;
const gpa = comp.gpa;
- const diags = &self.base.comp.link_diags;
+ const io = comp.io;
+ const diags = &comp.link_diags;
const sub_prog_node = prog_node.start("MachO Flush", 0);
defer sub_prog_node.end();
@@ -376,26 +381,26 @@ pub fn flush(
// in this set.
try positionals.ensureUnusedCapacity(comp.c_object_table.keys().len);
for (comp.c_object_table.keys()) |key| {
- positionals.appendAssumeCapacity(try link.openObjectInput(diags, key.status.success.object_path));
+ positionals.appendAssumeCapacity(try link.openObjectInput(io, diags, key.status.success.object_path));
}
- if (zcu_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
+ if (zcu_obj_path) |path| try positionals.append(try link.openObjectInput(io, diags, path));
if (comp.config.any_sanitize_thread) {
- try positionals.append(try link.openObjectInput(diags, comp.tsan_lib.?.full_object_path));
+ try positionals.append(try link.openObjectInput(io, diags, comp.tsan_lib.?.full_object_path));
}
if (comp.config.any_fuzz) {
- try positionals.append(try link.openArchiveInput(diags, comp.fuzzer_lib.?.full_object_path, false, false));
+ try positionals.append(try link.openArchiveInput(io, diags, comp.fuzzer_lib.?.full_object_path, false, false));
}
if (comp.ubsan_rt_lib) |crt_file| {
const path = crt_file.full_object_path;
- self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
+ self.classifyInputFile(try link.openArchiveInput(io, diags, path, false, false)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
} else if (comp.ubsan_rt_obj) |crt_file| {
const path = crt_file.full_object_path;
- self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err|
+ self.classifyInputFile(try link.openObjectInput(io, diags, path)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
}
@@ -430,7 +435,7 @@ pub fn flush(
if (comp.config.link_libc and is_exe_or_dyn_lib) {
if (comp.zigc_static_lib) |zigc| {
const path = zigc.full_object_path;
- self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
+ self.classifyInputFile(try link.openArchiveInput(io, diags, path, false, false)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
}
}
@@ -453,12 +458,12 @@ pub fn flush(
for (system_libs.items) |lib| {
switch (Compilation.classifyFileExt(lib.path.sub_path)) {
.shared_library => {
- const dso_input = try link.openDsoInput(diags, lib.path, lib.needed, lib.weak, lib.reexport);
+ const dso_input = try link.openDsoInput(io, diags, lib.path, lib.needed, lib.weak, lib.reexport);
self.classifyInputFile(dso_input) catch |err|
diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
},
.static_library => {
- const archive_input = try link.openArchiveInput(diags, lib.path, lib.must_link, lib.hidden);
+ const archive_input = try link.openArchiveInput(io, diags, lib.path, lib.must_link, lib.hidden);
self.classifyInputFile(archive_input) catch |err|
diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
},
@@ -469,11 +474,11 @@ pub fn flush(
// Finally, link against compiler_rt.
if (comp.compiler_rt_lib) |crt_file| {
const path = crt_file.full_object_path;
- self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
+ self.classifyInputFile(try link.openArchiveInput(io, diags, path, false, false)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
} else if (comp.compiler_rt_obj) |crt_file| {
const path = crt_file.full_object_path;
- self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err|
+ self.classifyInputFile(try link.openObjectInput(io, diags, path)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
}
@@ -564,7 +569,7 @@ pub fn flush(
self.writeLinkeditSectionsToFile() catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
- else => |e| return diags.fail("failed to write linkedit sections to file: {s}", .{@errorName(e)}),
+ else => |e| return diags.fail("failed to write linkedit sections to file: {t}", .{e}),
};
var codesig: ?CodeSignature = if (self.requiresCodeSig()) blk: {
@@ -575,8 +580,8 @@ pub fn flush(
// where the code signature goes into.
var codesig = CodeSignature.init(self.getPageSize());
codesig.code_directory.ident = fs.path.basename(self.base.emit.sub_path);
- if (self.entitlements) |path| codesig.addEntitlements(gpa, path) catch |err|
- return diags.fail("failed to add entitlements from {s}: {s}", .{ path, @errorName(err) });
+ if (self.entitlements) |path| codesig.addEntitlements(gpa, io, path) catch |err|
+ return diags.fail("failed to add entitlements from {s}: {t}", .{ path, err });
try self.writeCodeSignaturePadding(&codesig);
break :blk codesig;
} else null;
@@ -612,15 +617,17 @@ pub fn flush(
else => |e| return diags.fail("failed to write code signature: {s}", .{@errorName(e)}),
};
const emit = self.base.emit;
- invalidateKernelCache(emit.root_dir.handle, emit.sub_path) catch |err| switch (err) {
- else => |e| return diags.fail("failed to invalidate kernel cache: {s}", .{@errorName(e)}),
+ invalidateKernelCache(io, emit.root_dir.handle, emit.sub_path) catch |err| switch (err) {
+ else => |e| return diags.fail("failed to invalidate kernel cache: {t}", .{e}),
};
}
}
/// --verbose-link output
fn dumpArgv(self: *MachO, comp: *Compilation) !void {
- const gpa = self.base.comp.gpa;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
@@ -815,7 +822,7 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
if (comp.ubsan_rt_obj) |obj| try argv.append(try obj.full_object_path.toString(arena));
}
- Compilation.dump_argv(argv.items);
+ try Compilation.dumpArgv(io, argv.items);
}
/// TODO delete this, libsystem must be resolved when setting up the compilation pipeline
@@ -825,7 +832,8 @@ pub fn resolveLibSystem(
comp: *Compilation,
out_libs: anytype,
) !void {
- const diags = &self.base.comp.link_diags;
+ const io = comp.io;
+ const diags = &comp.link_diags;
var test_path = std.array_list.Managed(u8).init(arena);
var checked_paths = std.array_list.Managed([]const u8).init(arena);
@@ -834,16 +842,16 @@ pub fn resolveLibSystem(
if (self.sdk_layout) |sdk_layout| switch (sdk_layout) {
.sdk => {
const dir = try fs.path.join(arena, &.{ comp.sysroot.?, "usr", "lib" });
- if (try accessLibPath(arena, &test_path, &checked_paths, dir, "System")) break :success;
+ if (try accessLibPath(arena, io, &test_path, &checked_paths, dir, "System")) break :success;
},
.vendored => {
const dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "darwin" });
- if (try accessLibPath(arena, &test_path, &checked_paths, dir, "System")) break :success;
+ if (try accessLibPath(arena, io, &test_path, &checked_paths, dir, "System")) break :success;
},
};
for (self.lib_directories) |directory| {
- if (try accessLibPath(arena, &test_path, &checked_paths, directory.path orelse ".", "System")) break :success;
+ if (try accessLibPath(arena, io, &test_path, &checked_paths, directory.path orelse ".", "System")) break :success;
}
diags.addMissingLibraryError(checked_paths.items, "unable to find libSystem system library", .{});
@@ -861,6 +869,9 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
const tracy = trace(@src());
defer tracy.end();
+ const comp = self.base.comp;
+ const io = comp.io;
+
const path, const file = input.pathAndFile().?;
// TODO don't classify now, it's too late. The input file has already been classified
log.debug("classifying input file {f}", .{path});
@@ -871,7 +882,7 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
const fat_arch: ?fat.Arch = try self.parseFatFile(file, path);
const offset = if (fat_arch) |fa| fa.offset else 0;
- if (readMachHeader(file, offset) catch null) |h| blk: {
+ if (readMachHeader(io, file, offset) catch null) |h| blk: {
if (h.magic != macho.MH_MAGIC_64) break :blk;
switch (h.filetype) {
macho.MH_OBJECT => try self.addObject(path, fh, offset),
@@ -880,7 +891,7 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
}
return;
}
- if (readArMagic(file, offset, &buffer) catch null) |ar_magic| blk: {
+ if (readArMagic(io, file, offset, &buffer) catch null) |ar_magic| blk: {
if (!mem.eql(u8, ar_magic, Archive.ARMAG)) break :blk;
try self.addArchive(input.archive, fh, fat_arch);
return;
@@ -888,12 +899,14 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
_ = try self.addTbd(.fromLinkInput(input), true, fh);
}
-fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch {
- const diags = &self.base.comp.link_diags;
- const fat_h = fat.readFatHeader(file) catch return null;
+fn parseFatFile(self: *MachO, file: Io.File, path: Path) !?fat.Arch {
+ const comp = self.base.comp;
+ const io = comp.io;
+ const diags = &comp.link_diags;
+ const fat_h = fat.readFatHeader(io, file) catch return null;
if (fat_h.magic != macho.FAT_MAGIC and fat_h.magic != macho.FAT_MAGIC_64) return null;
var fat_archs_buffer: [2]fat.Arch = undefined;
- const fat_archs = try fat.parseArchs(file, fat_h, &fat_archs_buffer);
+ const fat_archs = try fat.parseArchs(io, file, fat_h, &fat_archs_buffer);
const cpu_arch = self.getTarget().cpu.arch;
for (fat_archs) |arch| {
if (arch.tag == cpu_arch) return arch;
@@ -901,16 +914,16 @@ fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch {
return diags.failParse(path, "missing arch in universal file: expected {s}", .{@tagName(cpu_arch)});
}
-pub fn readMachHeader(file: std.fs.File, offset: usize) !macho.mach_header_64 {
+pub fn readMachHeader(io: Io, file: Io.File, offset: usize) !macho.mach_header_64 {
var buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
- const nread = try file.preadAll(&buffer, offset);
+ const nread = try file.readPositionalAll(io, &buffer, offset);
if (nread != buffer.len) return error.InputOutput;
const hdr = @as(*align(1) const macho.mach_header_64, @ptrCast(&buffer)).*;
return hdr;
}
-pub fn readArMagic(file: std.fs.File, offset: usize, buffer: *[Archive.SARMAG]u8) ![]const u8 {
- const nread = try file.preadAll(buffer, offset);
+pub fn readArMagic(io: Io, file: Io.File, offset: usize, buffer: *[Archive.SARMAG]u8) ![]const u8 {
+ const nread = try file.readPositionalAll(io, buffer, offset);
if (nread != buffer.len) return error.InputOutput;
return buffer[0..Archive.SARMAG];
}
@@ -921,6 +934,7 @@ fn addObject(self: *MachO, path: Path, handle_index: File.HandleIndex, offset: u
const comp = self.base.comp;
const gpa = comp.gpa;
+ const io = comp.io;
const abs_path = try std.fs.path.resolvePosix(gpa, &.{
comp.dirs.cwd,
@@ -930,7 +944,7 @@ fn addObject(self: *MachO, path: Path, handle_index: File.HandleIndex, offset: u
errdefer gpa.free(abs_path);
const file = self.getFileHandle(handle_index);
- const stat = try file.stat();
+ const stat = try file.stat(io);
const mtime = stat.mtime.toSeconds();
const index: File.Index = @intCast(try self.files.addOne(gpa));
self.files.set(index, .{ .object = .{
@@ -1069,6 +1083,7 @@ fn isHoisted(self: *MachO, install_name: []const u8) bool {
/// TODO delete this, libraries must be instead resolved when instantiating the compilation pipeline
fn accessLibPath(
arena: Allocator,
+ io: Io,
test_path: *std.array_list.Managed(u8),
checked_paths: *std.array_list.Managed([]const u8),
search_dir: []const u8,
@@ -1080,7 +1095,7 @@ fn accessLibPath(
test_path.clearRetainingCapacity();
try test_path.print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext });
try checked_paths.append(try arena.dupe(u8, test_path.items));
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
+ Io.Dir.cwd().access(io, test_path.items, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| return e,
};
@@ -1092,6 +1107,7 @@ fn accessLibPath(
fn accessFrameworkPath(
arena: Allocator,
+ io: Io,
test_path: *std.array_list.Managed(u8),
checked_paths: *std.array_list.Managed([]const u8),
search_dir: []const u8,
@@ -1108,7 +1124,7 @@ fn accessFrameworkPath(
ext,
});
try checked_paths.append(try arena.dupe(u8, test_path.items));
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
+ Io.Dir.cwd().access(io, test_path.items, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| return e,
};
@@ -1124,7 +1140,9 @@ fn parseDependentDylibs(self: *MachO) !void {
if (self.dylibs.items.len == 0) return;
- const gpa = self.base.comp.gpa;
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const framework_dirs = self.framework_dirs;
// TODO delete this, directories must instead be resolved by the frontend
@@ -1165,14 +1183,14 @@ fn parseDependentDylibs(self: *MachO) !void {
// Framework
for (framework_dirs) |dir| {
test_path.clearRetainingCapacity();
- if (try accessFrameworkPath(arena, &test_path, &checked_paths, dir, stem)) break :full_path test_path.items;
+ if (try accessFrameworkPath(arena, io, &test_path, &checked_paths, dir, stem)) break :full_path test_path.items;
}
// Library
const lib_name = eatPrefix(stem, "lib") orelse stem;
for (lib_directories) |lib_directory| {
test_path.clearRetainingCapacity();
- if (try accessLibPath(arena, &test_path, &checked_paths, lib_directory.path orelse ".", lib_name)) break :full_path test_path.items;
+ if (try accessLibPath(arena, io, &test_path, &checked_paths, lib_directory.path orelse ".", lib_name)) break :full_path test_path.items;
}
}
@@ -1181,13 +1199,13 @@ fn parseDependentDylibs(self: *MachO) !void {
const path = if (existing_ext.len > 0) id.name[0 .. id.name.len - existing_ext.len] else id.name;
for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
test_path.clearRetainingCapacity();
- if (self.base.comp.sysroot) |root| {
+ if (comp.sysroot) |root| {
try test_path.print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext });
} else {
try test_path.print("{s}{s}", .{ path, ext });
}
try checked_paths.append(try arena.dupe(u8, test_path.items));
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
+ Io.Dir.cwd().access(io, test_path.items, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| return e,
};
@@ -1202,7 +1220,8 @@ fn parseDependentDylibs(self: *MachO) !void {
const rel_path = try fs.path.join(arena, &.{ prefix, path });
try checked_paths.append(rel_path);
var buffer: [fs.max_path_bytes]u8 = undefined;
- const full_path = fs.realpath(rel_path, &buffer) catch continue;
+ // TODO don't use realpath
+ const full_path = buffer[0 .. Io.Dir.realPathFileAbsolute(io, rel_path, &buffer) catch continue];
break :full_path try arena.dupe(u8, full_path);
}
} else if (eatPrefix(id.name, "@loader_path/")) |_| {
@@ -1215,8 +1234,9 @@ fn parseDependentDylibs(self: *MachO) !void {
try checked_paths.append(try arena.dupe(u8, id.name));
var buffer: [fs.max_path_bytes]u8 = undefined;
- if (fs.realpath(id.name, &buffer)) |full_path| {
- break :full_path try arena.dupe(u8, full_path);
+ // TODO don't use realpath
+ if (Io.Dir.realPathFileAbsolute(io, id.name, &buffer)) |full_path_n| {
+ break :full_path try arena.dupe(u8, buffer[0..full_path_n]);
} else |_| {
try self.reportMissingDependencyError(
self.getFile(dylib_index).?.dylib.getUmbrella(self).index,
@@ -1233,12 +1253,12 @@ fn parseDependentDylibs(self: *MachO) !void {
.path = Path.initCwd(full_path),
.weak = is_weak,
};
- const file = try lib.path.root_dir.handle.openFile(lib.path.sub_path, .{});
+ const file = try lib.path.root_dir.handle.openFile(io, lib.path.sub_path, .{});
const fh = try self.addFileHandle(file);
const fat_arch = try self.parseFatFile(file, lib.path);
const offset = if (fat_arch) |fa| fa.offset else 0;
const file_index = file_index: {
- if (readMachHeader(file, offset) catch null) |h| blk: {
+ if (readMachHeader(io, file, offset) catch null) |h| blk: {
if (h.magic != macho.MH_MAGIC_64) break :blk;
switch (h.filetype) {
macho.MH_DYLIB => break :file_index try self.addDylib(lib, false, fh, offset),
@@ -3147,7 +3167,9 @@ fn detectAllocCollision(self: *MachO, start: u64, size: u64) !?u64 {
}
}
- if (at_end) try self.base.file.?.setEndPos(end);
+ const comp = self.base.comp;
+ const io = comp.io;
+ if (at_end) try self.base.file.?.setLength(io, end);
return null;
}
@@ -3232,21 +3254,36 @@ pub fn findFreeSpaceVirtual(self: *MachO, object_size: u64, min_alignment: u32)
}
pub fn copyRangeAll(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
- const file = self.base.file.?;
- const amt = try file.copyRangeAll(old_offset, file, new_offset, size);
- if (amt != size) return error.InputOutput;
+ return self.base.copyRangeAll(old_offset, new_offset, size);
}
-/// Like File.copyRangeAll but also ensures the source region is zeroed out after copy.
+/// Like copyRangeAll but also ensures the source region is zeroed out after copy.
/// This is so that we guarantee zeroed out regions for mapping of zerofill sections by the loader.
fn copyRangeAllZeroOut(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
- const gpa = self.base.comp.gpa;
- try self.copyRangeAll(old_offset, new_offset, size);
+ const comp = self.base.comp;
+ const io = comp.io;
+ const file = self.base.file.?;
+ var write_buffer: [2048]u8 = undefined;
+ var file_reader = file.reader(io, &.{});
+ file_reader.pos = old_offset;
+ var file_writer = file.writer(io, &write_buffer);
+ file_writer.pos = new_offset;
const size_u = math.cast(usize, size) orelse return error.Overflow;
- const zeroes = try gpa.alloc(u8, size_u); // TODO no need to allocate here.
- defer gpa.free(zeroes);
- @memset(zeroes, 0);
- try self.base.file.?.pwriteAll(zeroes, old_offset);
+ const n = file_writer.interface.sendFileAll(&file_reader, .limited(size_u)) catch |err| switch (err) {
+ error.ReadFailed => return file_reader.err.?,
+ error.WriteFailed => return file_writer.err.?,
+ };
+ assert(n == size_u);
+ file_writer.seekTo(old_offset) catch |err| switch (err) {
+ error.WriteFailed => return file_writer.err.?,
+ else => |e| return e,
+ };
+ file_writer.interface.splatByteAll(0, size_u) catch |err| switch (err) {
+ error.WriteFailed => return file_writer.err.?,
+ };
+ file_writer.interface.flush() catch |err| switch (err) {
+ error.WriteFailed => return file_writer.err.?,
+ };
}
const InitMetadataOptions = struct {
@@ -3257,8 +3294,10 @@ const InitMetadataOptions = struct {
};
pub fn closeDebugInfo(self: *MachO) bool {
+ const comp = self.base.comp;
+ const io = comp.io;
const d_sym = &(self.d_sym orelse return false);
- d_sym.file.?.close();
+ d_sym.file.?.close(io);
d_sym.file = null;
return true;
}
@@ -3269,7 +3308,9 @@ pub fn reopenDebugInfo(self: *MachO) !void {
assert(!self.base.comp.config.use_llvm);
assert(self.base.comp.config.debug_format == .dwarf);
- const gpa = self.base.comp.gpa;
+ const comp = self.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const sep = fs.path.sep_str;
const d_sym_path = try std.fmt.allocPrint(
gpa,
@@ -3278,10 +3319,10 @@ pub fn reopenDebugInfo(self: *MachO) !void {
);
defer gpa.free(d_sym_path);
- var d_sym_bundle = try self.base.emit.root_dir.handle.makeOpenPath(d_sym_path, .{});
- defer d_sym_bundle.close();
+ var d_sym_bundle = try self.base.emit.root_dir.handle.createDirPathOpen(io, d_sym_path, .{});
+ defer d_sym_bundle.close(io);
- self.d_sym.?.file = try d_sym_bundle.createFile(fs.path.basename(self.base.emit.sub_path), .{
+ self.d_sym.?.file = try d_sym_bundle.createFile(io, fs.path.basename(self.base.emit.sub_path), .{
.truncate = false,
.read = true,
});
@@ -3289,6 +3330,10 @@ pub fn reopenDebugInfo(self: *MachO) !void {
// TODO: move to ZigObject
fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
if (!self.base.isRelocatable()) {
const base_vmaddr = blk: {
const pagezero_size = self.pagezero_size orelse default_pagezero_size;
@@ -3343,7 +3388,11 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
if (options.zo.dwarf) |*dwarf| {
// Create dSYM bundle.
log.debug("creating {s}.dSYM bundle", .{options.emit.sub_path});
- self.d_sym = .{ .allocator = self.base.comp.gpa, .file = null };
+ self.d_sym = .{
+ .io = io,
+ .allocator = gpa,
+ .file = null,
+ };
try self.reopenDebugInfo();
try self.d_sym.?.initMetadata(self);
try dwarf.initMetadata();
@@ -3463,6 +3512,9 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
const seg_id = self.sections.items(.segment_id)[sect_index];
const seg = &self.segments.items[seg_id];
+ const comp = self.base.comp;
+ const io = comp.io;
+
if (!sect.isZerofill()) {
const allocated_size = self.allocatedSize(sect.offset);
if (needed_size > allocated_size) {
@@ -3484,7 +3536,7 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
sect.offset = @intCast(new_offset);
} else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(sect.offset + needed_size);
+ try self.base.file.?.setLength(io, sect.offset + needed_size);
}
seg.filesize = needed_size;
}
@@ -3506,6 +3558,8 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
}
fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void {
+ const comp = self.base.comp;
+ const io = comp.io;
const sect = &self.sections.items(.header)[sect_index];
if (!sect.isZerofill()) {
@@ -3533,7 +3587,7 @@ fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void
sect.offset = @intCast(new_offset);
sect.addr = new_addr;
} else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(sect.offset + needed_size);
+ try self.base.file.?.setLength(io, sect.offset + needed_size);
}
}
sect.size = needed_size;
@@ -3567,11 +3621,11 @@ pub fn getTarget(self: *const MachO) *const std.Target {
/// into a new inode, remove the original file, and rename the copy to match
/// the original file. This is super messy, but there doesn't seem any other
/// way to please the XNU.
-pub fn invalidateKernelCache(dir: fs.Dir, sub_path: []const u8) !void {
+pub fn invalidateKernelCache(io: Io, dir: Io.Dir, sub_path: []const u8) !void {
const tracy = trace(@src());
defer tracy.end();
if (builtin.target.os.tag.isDarwin() and builtin.target.cpu.arch == .aarch64) {
- try dir.copyFile(sub_path, dir, sub_path, .{});
+ try dir.copyFile(sub_path, dir, sub_path, io, .{});
}
}
@@ -3762,7 +3816,7 @@ pub fn getInternalObject(self: *MachO) ?*InternalObject {
return self.getFile(index).?.internal;
}
-pub fn addFileHandle(self: *MachO, file: fs.File) !File.HandleIndex {
+pub fn addFileHandle(self: *MachO, file: Io.File) !File.HandleIndex {
const gpa = self.base.comp.gpa;
const index: File.HandleIndex = @intCast(self.file_handles.items.len);
const fh = try self.file_handles.addOne(gpa);
@@ -4333,11 +4387,13 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
+ const io = comp.io;
+
const sdk_dir = switch (sdk_layout) {
.sdk => comp.sysroot.?,
.vendored => fs.path.join(arena, &.{ comp.dirs.zig_lib.path.?, "libc", "darwin" }) catch return null,
};
- if (readSdkVersionFromSettings(arena, sdk_dir)) |ver| {
+ if (readSdkVersionFromSettings(arena, io, sdk_dir)) |ver| {
return parseSdkVersion(ver);
} else |_| {
// Read from settings should always succeed when vendored.
@@ -4360,9 +4416,9 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi
// Official Apple SDKs ship with a `SDKSettings.json` located at the top of SDK fs layout.
// Use property `MinimalDisplayName` to determine version.
// The file/property is also available with vendored libc.
-fn readSdkVersionFromSettings(arena: Allocator, dir: []const u8) ![]const u8 {
+fn readSdkVersionFromSettings(arena: Allocator, io: Io, dir: []const u8) ![]const u8 {
const sdk_path = try fs.path.join(arena, &.{ dir, "SDKSettings.json" });
- const contents = try fs.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16)));
+ const contents = try Io.Dir.cwd().readFileAlloc(io, sdk_path, arena, .limited(std.math.maxInt(u16)));
const parsed = try std.json.parseFromSlice(std.json.Value, arena, contents, .{});
if (parsed.value.object.get("MinimalDisplayName")) |ver| return ver.string;
return error.SdkVersionFailure;
@@ -5324,18 +5380,18 @@ fn isReachable(atom: *const Atom, rel: Relocation, macho_file: *MachO) bool {
pub fn pwriteAll(macho_file: *MachO, bytes: []const u8, offset: u64) error{LinkFailure}!void {
const comp = macho_file.base.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
- macho_file.base.file.?.pwriteAll(bytes, offset) catch |err| {
- return diags.fail("failed to write: {s}", .{@errorName(err)});
- };
+ macho_file.base.file.?.writePositionalAll(io, bytes, offset) catch |err|
+ return diags.fail("failed to write: {t}", .{err});
}
-pub fn setEndPos(macho_file: *MachO, length: u64) error{LinkFailure}!void {
+pub fn setLength(macho_file: *MachO, length: u64) error{LinkFailure}!void {
const comp = macho_file.base.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
- macho_file.base.file.?.setEndPos(length) catch |err| {
- return diags.fail("failed to set file end pos: {s}", .{@errorName(err)});
- };
+ macho_file.base.file.?.setLength(io, length) catch |err|
+ return diags.fail("failed to set file end pos: {t}", .{err});
}
pub fn cast(macho_file: *MachO, comptime T: type, x: anytype) error{LinkFailure}!T {
@@ -5367,10 +5423,11 @@ const max_distance = (1 << (jump_bits - 1));
const max_allowed_distance = max_distance - 0x500_000;
const MachO = @This();
-
-const std = @import("std");
const build_options = @import("build_options");
const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const fs = std.fs;
const log = std.log.scoped(.link);
@@ -5380,6 +5437,11 @@ const math = std.math;
const mem = std.mem;
const meta = std.meta;
const Writer = std.Io.Writer;
+const AtomicBool = std.atomic.Value(bool);
+const Cache = std.Build.Cache;
+const Hash = std.hash.Wyhash;
+const Md5 = std.crypto.hash.Md5;
+const Allocator = std.mem.Allocator;
const aarch64 = codegen.aarch64.encoding;
const bind = @import("MachO/dyld_info/bind.zig");
@@ -5397,11 +5459,8 @@ const trace = @import("../tracy.zig").trace;
const synthetic = @import("MachO/synthetic.zig");
const Alignment = Atom.Alignment;
-const Allocator = mem.Allocator;
const Archive = @import("MachO/Archive.zig");
-const AtomicBool = std.atomic.Value(bool);
const Bind = bind.Bind;
-const Cache = std.Build.Cache;
const CodeSignature = @import("MachO/CodeSignature.zig");
const Compilation = @import("../Compilation.zig");
const DataInCode = synthetic.DataInCode;
@@ -5411,14 +5470,12 @@ const ExportTrie = @import("MachO/dyld_info/Trie.zig");
const Path = Cache.Path;
const File = @import("MachO/file.zig").File;
const GotSection = synthetic.GotSection;
-const Hash = std.hash.Wyhash;
const Indsymtab = synthetic.Indsymtab;
const InternalObject = @import("MachO/InternalObject.zig");
const ObjcStubsSection = synthetic.ObjcStubsSection;
const Object = @import("MachO/Object.zig");
const LazyBind = bind.LazyBind;
const LaSymbolPtrSection = synthetic.LaSymbolPtrSection;
-const Md5 = std.crypto.hash.Md5;
const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");
const Rebase = @import("MachO/dyld_info/Rebase.zig");
diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig
index d1962412c4..54c00e33ee 100644
--- a/src/link/MachO/Archive.zig
+++ b/src/link/MachO/Archive.zig
@@ -6,6 +6,7 @@ pub fn deinit(self: *Archive, allocator: Allocator) void {
pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File.HandleIndex, fat_arch: ?fat.Arch) !void {
const comp = macho_file.base.comp;
+ const io = comp.io;
const gpa = comp.gpa;
const diags = &comp.link_diags;
@@ -14,7 +15,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File
const handle = macho_file.getFileHandle(handle_index);
const offset = if (fat_arch) |ar| ar.offset else 0;
- const end_pos = if (fat_arch) |ar| offset + ar.size else (try handle.stat()).size;
+ const end_pos = if (fat_arch) |ar| offset + ar.size else (try handle.stat(io)).size;
var pos: usize = offset + SARMAG;
while (true) {
@@ -23,7 +24,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File
var hdr_buffer: [@sizeOf(ar_hdr)]u8 = undefined;
{
- const amt = try handle.preadAll(&hdr_buffer, pos);
+ const amt = try handle.readPositionalAll(io, &hdr_buffer, pos);
if (amt != @sizeOf(ar_hdr)) return error.InputOutput;
}
const hdr = @as(*align(1) const ar_hdr, @ptrCast(&hdr_buffer)).*;
@@ -41,7 +42,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File
if (try hdr.nameLength()) |len| {
hdr_size -= len;
const buf = try arena.allocator().alloc(u8, len);
- const amt = try handle.preadAll(buf, pos);
+ const amt = try handle.readPositionalAll(io, buf, pos);
if (amt != len) return error.InputOutput;
pos += len;
const actual_len = mem.indexOfScalar(u8, buf, @as(u8, 0)) orelse len;
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 5bded3b9e3..0955c823b8 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -1,20 +1,28 @@
const CodeSignature = @This();
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const fs = std.fs;
const log = std.log.scoped(.link);
const macho = std.macho;
const mem = std.mem;
const testing = std.testing;
+const Sha256 = std.crypto.hash.sha2.Sha256;
+const Allocator = std.mem.Allocator;
+
const trace = @import("../../tracy.zig").trace;
-const Allocator = mem.Allocator;
-const Hasher = @import("hasher.zig").ParallelHasher;
+const ParallelHasher = @import("hasher.zig").ParallelHasher;
const MachO = @import("../MachO.zig");
-const Sha256 = std.crypto.hash.sha2.Sha256;
const hash_size = Sha256.digest_length;
+page_size: u16,
+code_directory: CodeDirectory,
+requirements: ?Requirements = null,
+entitlements: ?Entitlements = null,
+signature: ?Signature = null,
+
const Blob = union(enum) {
code_directory: *CodeDirectory,
requirements: *Requirements,
@@ -218,12 +226,6 @@ const Signature = struct {
}
};
-page_size: u16,
-code_directory: CodeDirectory,
-requirements: ?Requirements = null,
-entitlements: ?Entitlements = null,
-signature: ?Signature = null,
-
pub fn init(page_size: u16) CodeSignature {
return .{
.page_size = page_size,
@@ -244,13 +246,13 @@ pub fn deinit(self: *CodeSignature, allocator: Allocator) void {
}
}
-pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, path: []const u8) !void {
- const inner = try fs.cwd().readFileAlloc(path, allocator, .limited(std.math.maxInt(u32)));
+pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, io: Io, path: []const u8) !void {
+ const inner = try Io.Dir.cwd().readFileAlloc(io, path, allocator, .limited(std.math.maxInt(u32)));
self.entitlements = .{ .inner = inner };
}
pub const WriteOpts = struct {
- file: fs.File,
+ file: Io.File,
exec_seg_base: u64,
exec_seg_limit: u64,
file_size: u32,
@@ -266,7 +268,9 @@ pub fn writeAdhocSignature(
const tracy = trace(@src());
defer tracy.end();
- const allocator = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
var header: macho.SuperBlob = .{
.magic = macho.CSMAGIC_EMBEDDED_SIGNATURE,
@@ -274,7 +278,7 @@ pub fn writeAdhocSignature(
.count = 0,
};
- var blobs = std.array_list.Managed(Blob).init(allocator);
+ var blobs = std.array_list.Managed(Blob).init(gpa);
defer blobs.deinit();
self.code_directory.inner.execSegBase = opts.exec_seg_base;
@@ -284,13 +288,12 @@ pub fn writeAdhocSignature(
const total_pages = @as(u32, @intCast(mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size));
- try self.code_directory.code_slots.ensureTotalCapacityPrecise(allocator, total_pages);
+ try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages);
self.code_directory.code_slots.items.len = total_pages;
self.code_directory.inner.nCodeSlots = total_pages;
// Calculate hash for each page (in file) and write it to the buffer
- var hasher = Hasher(Sha256){ .allocator = allocator, .io = macho_file.base.comp.io };
- try hasher.hash(opts.file, self.code_directory.code_slots.items, .{
+ try ParallelHasher(Sha256).hash(gpa, io, opts.file, self.code_directory.code_slots.items, .{
.chunk_size = self.page_size,
.max_file_size = opts.file_size,
});
@@ -302,7 +305,7 @@ pub fn writeAdhocSignature(
var hash: [hash_size]u8 = undefined;
if (self.requirements) |*req| {
- var a: std.Io.Writer.Allocating = .init(allocator);
+ var a: std.Io.Writer.Allocating = .init(gpa);
defer a.deinit();
try req.write(&a.writer);
Sha256.hash(a.written(), &hash, .{});
@@ -314,7 +317,7 @@ pub fn writeAdhocSignature(
}
if (self.entitlements) |*ents| {
- var a: std.Io.Writer.Allocating = .init(allocator);
+ var a: std.Io.Writer.Allocating = .init(gpa);
defer a.deinit();
try ents.write(&a.writer);
Sha256.hash(a.written(), &hash, .{});
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 5d7b9b88c3..3e723bd9d7 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -1,5 +1,28 @@
+const DebugSymbols = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const fs = std.fs;
+const log = std.log.scoped(.link_dsym);
+const macho = std.macho;
+const makeStaticString = MachO.makeStaticString;
+const math = std.math;
+const mem = std.mem;
+const Writer = std.Io.Writer;
+const Allocator = std.mem.Allocator;
+
+const link = @import("../../link.zig");
+const MachO = @import("../MachO.zig");
+const StringTable = @import("../StringTable.zig");
+const Type = @import("../../Type.zig");
+const trace = @import("../../tracy.zig").trace;
+const load_commands = @import("load_commands.zig");
+const padToIdeal = MachO.padToIdeal;
+
+io: Io,
allocator: Allocator,
-file: ?fs.File,
+file: ?Io.File,
symtab_cmd: macho.symtab_command = .{},
uuid_cmd: macho.uuid_command = .{ .uuid = [_]u8{0} ** 16 },
@@ -102,6 +125,7 @@ pub fn growSection(
requires_file_copy: bool,
macho_file: *MachO,
) !void {
+ const io = self.io;
const sect = self.getSectionPtr(sect_index);
const allocated_size = self.allocatedSize(sect.offset);
@@ -111,25 +135,17 @@ pub fn growSection(
const new_offset = try self.findFreeSpace(needed_size, 1);
log.debug("moving {s} section: {} bytes from 0x{x} to 0x{x}", .{
- sect.sectName(),
- existing_size,
- sect.offset,
- new_offset,
+ sect.sectName(), existing_size, sect.offset, new_offset,
});
if (requires_file_copy) {
- const amt = try self.file.?.copyRangeAll(
- sect.offset,
- self.file.?,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
+ const file = self.file.?;
+ try link.File.copyRangeAll2(io, file, file, sect.offset, new_offset, existing_size);
}
sect.offset = @intCast(new_offset);
} else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
- try self.file.?.setEndPos(sect.offset + needed_size);
+ try self.file.?.setLength(io, sect.offset + needed_size);
}
sect.size = needed_size;
@@ -153,6 +169,7 @@ pub fn markDirty(self: *DebugSymbols, sect_index: u8, macho_file: *MachO) void {
}
fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) !?u64 {
+ const io = self.io;
var at_end = true;
const end = start + padToIdeal(size);
@@ -165,7 +182,7 @@ fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) !?u64 {
}
}
- if (at_end) try self.file.?.setEndPos(end);
+ if (at_end) try self.file.?.setLength(io, end);
return null;
}
@@ -179,6 +196,7 @@ fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) !u64
}
pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void {
+ const io = self.io;
const zo = macho_file.getZigObject().?;
for (self.relocs.items) |*reloc| {
const sym = zo.symbols.items[reloc.target];
@@ -190,12 +208,9 @@ pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void {
const sect = &self.sections.items[self.debug_info_section_index.?];
const file_offset = sect.offset + reloc.offset;
log.debug("resolving relocation: {d}@{x} ('{s}') at offset {x}", .{
- reloc.target,
- addr,
- sym_name,
- file_offset,
+ reloc.target, addr, sym_name, file_offset,
});
- try self.file.?.pwriteAll(mem.asBytes(&addr), file_offset);
+ try self.file.?.writePositionalAll(io, mem.asBytes(&addr), file_offset);
}
self.finalizeDwarfSegment(macho_file);
@@ -208,7 +223,8 @@ pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void {
pub fn deinit(self: *DebugSymbols) void {
const gpa = self.allocator;
- if (self.file) |file| file.close();
+ const io = self.io;
+ if (self.file) |file| file.close(io);
self.segments.deinit(gpa);
self.sections.deinit(gpa);
self.relocs.deinit(gpa);
@@ -268,6 +284,7 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void {
}
fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, usize } {
+ const io = self.io;
const gpa = self.allocator;
const needed_size = load_commands.calcLoadCommandsSizeDsym(macho_file, self);
const buffer = try gpa.alloc(u8, needed_size);
@@ -319,12 +336,13 @@ fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, u
assert(writer.end == needed_size);
- try self.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
+ try self.file.?.writePositionalAll(io, buffer, @sizeOf(macho.mach_header_64));
return .{ ncmds, buffer.len };
}
fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: usize, sizeofcmds: usize) !void {
+ const io = self.io;
var header: macho.mach_header_64 = .{};
header.filetype = macho.MH_DSYM;
@@ -345,7 +363,7 @@ fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: usize, sizeofcmds
log.debug("writing Mach-O header {}", .{header});
- try self.file.?.pwriteAll(mem.asBytes(&header), 0);
+ try self.file.?.writePositionalAll(io, mem.asBytes(&header), 0);
}
fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
@@ -380,6 +398,8 @@ fn writeLinkeditSegmentData(self: *DebugSymbols, macho_file: *MachO) !void {
pub fn writeSymtab(self: *DebugSymbols, off: u32, macho_file: *MachO) !u32 {
const tracy = trace(@src());
defer tracy.end();
+
+ const io = self.io;
const gpa = self.allocator;
const cmd = &self.symtab_cmd;
cmd.nsyms = macho_file.symtab_cmd.nsyms;
@@ -403,15 +423,16 @@ pub fn writeSymtab(self: *DebugSymbols, off: u32, macho_file: *MachO) !u32 {
internal.writeSymtab(macho_file, self);
}
- try self.file.?.pwriteAll(@ptrCast(self.symtab.items), cmd.symoff);
+ try self.file.?.writePositionalAll(io, @ptrCast(self.symtab.items), cmd.symoff);
return off + cmd.nsyms * @sizeOf(macho.nlist_64);
}
pub fn writeStrtab(self: *DebugSymbols, off: u32) !u32 {
+ const io = self.io;
const cmd = &self.symtab_cmd;
cmd.stroff = off;
- try self.file.?.pwriteAll(self.strtab.items, cmd.stroff);
+ try self.file.?.writePositionalAll(io, self.strtab.items, cmd.stroff);
return off + cmd.strsize;
}
@@ -443,25 +464,3 @@ pub fn getSection(self: DebugSymbols, sect: u8) macho.section_64 {
assert(sect < self.sections.items.len);
return self.sections.items[sect];
}
-
-const DebugSymbols = @This();
-
-const std = @import("std");
-const build_options = @import("build_options");
-const assert = std.debug.assert;
-const fs = std.fs;
-const link = @import("../../link.zig");
-const load_commands = @import("load_commands.zig");
-const log = std.log.scoped(.link_dsym);
-const macho = std.macho;
-const makeStaticString = MachO.makeStaticString;
-const math = std.math;
-const mem = std.mem;
-const padToIdeal = MachO.padToIdeal;
-const trace = @import("../../tracy.zig").trace;
-const Writer = std.Io.Writer;
-
-const Allocator = mem.Allocator;
-const MachO = @import("../MachO.zig");
-const StringTable = @import("../StringTable.zig");
-const Type = @import("../../Type.zig");
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index 69c64b6717..638630b608 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -57,7 +57,9 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const file = macho_file.getFileHandle(self.file_handle);
const offset = self.offset;
@@ -65,7 +67,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
{
- const amt = try file.preadAll(&header_buffer, offset);
+ const amt = try file.readPositionalAll(io, &header_buffer, offset);
if (amt != @sizeOf(macho.mach_header_64)) return error.InputOutput;
}
const header = @as(*align(1) const macho.mach_header_64, @ptrCast(&header_buffer)).*;
@@ -86,7 +88,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
const lc_buffer = try gpa.alloc(u8, header.sizeofcmds);
defer gpa.free(lc_buffer);
{
- const amt = try file.preadAll(lc_buffer, offset + @sizeOf(macho.mach_header_64));
+ const amt = try file.readPositionalAll(io, lc_buffer, offset + @sizeOf(macho.mach_header_64));
if (amt != lc_buffer.len) return error.InputOutput;
}
@@ -103,7 +105,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
const dyld_cmd = cmd.cast(macho.dyld_info_command).?;
const data = try gpa.alloc(u8, dyld_cmd.export_size);
defer gpa.free(data);
- const amt = try file.preadAll(data, dyld_cmd.export_off + offset);
+ const amt = try file.readPositionalAll(io, data, dyld_cmd.export_off + offset);
if (amt != data.len) return error.InputOutput;
try self.parseTrie(data, macho_file);
},
@@ -111,7 +113,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
const ld_cmd = cmd.cast(macho.linkedit_data_command).?;
const data = try gpa.alloc(u8, ld_cmd.datasize);
defer gpa.free(data);
- const amt = try file.preadAll(data, ld_cmd.dataoff + offset);
+ const amt = try file.readPositionalAll(io, data, ld_cmd.dataoff + offset);
if (amt != data.len) return error.InputOutput;
try self.parseTrie(data, macho_file);
},
@@ -238,13 +240,15 @@ fn parseTbd(self: *Dylib, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
log.debug("parsing dylib from stub: {f}", .{self.path});
const file = macho_file.getFileHandle(self.file_handle);
- var lib_stub = LibStub.loadFromFile(gpa, file) catch |err| {
- try macho_file.reportParseError2(self.index, "failed to parse TBD file: {s}", .{@errorName(err)});
+ var lib_stub = LibStub.loadFromFile(gpa, io, file) catch |err| {
+ try macho_file.reportParseError2(self.index, "failed to parse TBD file: {t}", .{err});
return error.MalformedTbd;
};
defer lib_stub.deinit();
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index 5fc77fe763..b9def4568d 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -1,3 +1,30 @@
+const Object = @This();
+
+const trace = @import("../../tracy.zig").trace;
+const Archive = @import("Archive.zig");
+const Atom = @import("Atom.zig");
+const Dwarf = @import("Dwarf.zig");
+const File = @import("file.zig").File;
+const MachO = @import("../MachO.zig");
+const Relocation = @import("Relocation.zig");
+const Symbol = @import("Symbol.zig");
+const UnwindInfo = @import("UnwindInfo.zig");
+
+const std = @import("std");
+const Io = std.Io;
+const Writer = std.Io.Writer;
+const assert = std.debug.assert;
+const log = std.log.scoped(.link);
+const macho = std.macho;
+const LoadCommandIterator = macho.LoadCommandIterator;
+const math = std.math;
+const mem = std.mem;
+const Allocator = std.mem.Allocator;
+
+const eh_frame = @import("eh_frame.zig");
+const Cie = eh_frame.Cie;
+const Fde = eh_frame.Fde;
+
/// Non-zero for fat object files or archives
offset: u64,
/// If `in_archive` is not `null`, this is the basename of the object in the archive. Otherwise,
@@ -75,7 +102,9 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
log.debug("parsing {f}", .{self.fmtPath()});
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const handle = macho_file.getFileHandle(self.file_handle);
const cpu_arch = macho_file.getTarget().cpu.arch;
@@ -84,7 +113,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
{
- const amt = try handle.preadAll(&header_buffer, self.offset);
+ const amt = try handle.readPositionalAll(io, &header_buffer, self.offset);
if (amt != @sizeOf(macho.mach_header_64)) return error.InputOutput;
}
self.header = @as(*align(1) const macho.mach_header_64, @ptrCast(&header_buffer)).*;
@@ -105,7 +134,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
const lc_buffer = try gpa.alloc(u8, self.header.?.sizeofcmds);
defer gpa.free(lc_buffer);
{
- const amt = try handle.preadAll(lc_buffer, self.offset + @sizeOf(macho.mach_header_64));
+ const amt = try handle.readPositionalAll(io, lc_buffer, self.offset + @sizeOf(macho.mach_header_64));
if (amt != self.header.?.sizeofcmds) return error.InputOutput;
}
@@ -129,14 +158,14 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
const cmd = lc.cast(macho.symtab_command).?;
try self.strtab.resize(gpa, cmd.strsize);
{
- const amt = try handle.preadAll(self.strtab.items, cmd.stroff + self.offset);
+ const amt = try handle.readPositionalAll(io, self.strtab.items, cmd.stroff + self.offset);
if (amt != self.strtab.items.len) return error.InputOutput;
}
const symtab_buffer = try gpa.alloc(u8, cmd.nsyms * @sizeOf(macho.nlist_64));
defer gpa.free(symtab_buffer);
{
- const amt = try handle.preadAll(symtab_buffer, cmd.symoff + self.offset);
+ const amt = try handle.readPositionalAll(io, symtab_buffer, cmd.symoff + self.offset);
if (amt != symtab_buffer.len) return error.InputOutput;
}
const symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(symtab_buffer.ptr))[0..cmd.nsyms];
@@ -154,7 +183,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
const buffer = try gpa.alloc(u8, cmd.datasize);
defer gpa.free(buffer);
{
- const amt = try handle.preadAll(buffer, self.offset + cmd.dataoff);
+ const amt = try handle.readPositionalAll(io, buffer, self.offset + cmd.dataoff);
if (amt != buffer.len) return error.InputOutput;
}
const ndice = @divExact(cmd.datasize, @sizeOf(macho.data_in_code_entry));
@@ -440,12 +469,14 @@ fn initCstringLiterals(self: *Object, allocator: Allocator, file: File.Handle, m
const tracy = trace(@src());
defer tracy.end();
+ const comp = macho_file.base.comp;
+ const io = comp.io;
const slice = self.sections.slice();
for (slice.items(.header), 0..) |sect, n_sect| {
if (!isCstringLiteral(sect)) continue;
- const data = try self.readSectionData(allocator, file, @intCast(n_sect));
+ const data = try self.readSectionData(allocator, io, file, @intCast(n_sect));
defer allocator.free(data);
var count: u32 = 0;
@@ -628,7 +659,9 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const file = macho_file.getFileHandle(self.file_handle);
var buffer = std.array_list.Managed(u8).init(gpa);
@@ -647,7 +680,7 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
const slice = self.sections.slice();
for (slice.items(.header), slice.items(.subsections), 0..) |header, subs, n_sect| {
if (isCstringLiteral(header) or isFixedSizeLiteral(header)) {
- const data = try self.readSectionData(gpa, file, @intCast(n_sect));
+ const data = try self.readSectionData(gpa, io, file, @intCast(n_sect));
defer gpa.free(data);
for (subs.items) |sub| {
@@ -682,7 +715,7 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
buffer.resize(target_size) catch unreachable;
const gop = try sections_data.getOrPut(target.n_sect);
if (!gop.found_existing) {
- gop.value_ptr.* = try self.readSectionData(gpa, file, @intCast(target.n_sect));
+ gop.value_ptr.* = try self.readSectionData(gpa, io, file, @intCast(target.n_sect));
}
const data = gop.value_ptr.*;
const target_off = try macho_file.cast(usize, target.off);
@@ -1037,9 +1070,11 @@ fn initEhFrameRecords(self: *Object, allocator: Allocator, sect_id: u8, file: Fi
const sect = slice.items(.header)[sect_id];
const relocs = slice.items(.relocs)[sect_id];
+ const comp = macho_file.base.comp;
+ const io = comp.io;
const size = try macho_file.cast(usize, sect.size);
try self.eh_frame_data.resize(allocator, size);
- const amt = try file.preadAll(self.eh_frame_data.items, sect.offset + self.offset);
+ const amt = try file.readPositionalAll(io, self.eh_frame_data.items, sect.offset + self.offset);
if (amt != self.eh_frame_data.items.len) return error.InputOutput;
// Check for non-personality relocs in FDEs and apply them
@@ -1138,8 +1173,10 @@ fn initUnwindRecords(self: *Object, allocator: Allocator, sect_id: u8, file: Fil
}
};
+ const comp = macho_file.base.comp;
+ const io = comp.io;
const header = self.sections.items(.header)[sect_id];
- const data = try self.readSectionData(allocator, file, sect_id);
+ const data = try self.readSectionData(allocator, io, file, sect_id);
defer allocator.free(data);
const nrecs = @divExact(data.len, @sizeOf(macho.compact_unwind_entry));
@@ -1348,7 +1385,9 @@ fn parseDebugInfo(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const file = macho_file.getFileHandle(self.file_handle);
var dwarf: Dwarf = .{};
@@ -1358,18 +1397,18 @@ fn parseDebugInfo(self: *Object, macho_file: *MachO) !void {
const n_sect: u8 = @intCast(index);
if (sect.attrs() & macho.S_ATTR_DEBUG == 0) continue;
if (mem.eql(u8, sect.sectName(), "__debug_info")) {
- dwarf.debug_info = try self.readSectionData(gpa, file, n_sect);
+ dwarf.debug_info = try self.readSectionData(gpa, io, file, n_sect);
}
if (mem.eql(u8, sect.sectName(), "__debug_abbrev")) {
- dwarf.debug_abbrev = try self.readSectionData(gpa, file, n_sect);
+ dwarf.debug_abbrev = try self.readSectionData(gpa, io, file, n_sect);
}
if (mem.eql(u8, sect.sectName(), "__debug_str")) {
- dwarf.debug_str = try self.readSectionData(gpa, file, n_sect);
+ dwarf.debug_str = try self.readSectionData(gpa, io, file, n_sect);
}
// __debug_str_offs[ets] section is a new addition in DWARFv5 and is generally
// required in order to correctly parse strings.
if (mem.eql(u8, sect.sectName(), "__debug_str_offs")) {
- dwarf.debug_str_offsets = try self.readSectionData(gpa, file, n_sect);
+ dwarf.debug_str_offsets = try self.readSectionData(gpa, io, file, n_sect);
}
}
@@ -1611,12 +1650,14 @@ pub fn parseAr(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const handle = macho_file.getFileHandle(self.file_handle);
var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
{
- const amt = try handle.preadAll(&header_buffer, self.offset);
+ const amt = try handle.readPositionalAll(io, &header_buffer, self.offset);
if (amt != @sizeOf(macho.mach_header_64)) return error.InputOutput;
}
self.header = @as(*align(1) const macho.mach_header_64, @ptrCast(&header_buffer)).*;
@@ -1637,7 +1678,7 @@ pub fn parseAr(self: *Object, macho_file: *MachO) !void {
const lc_buffer = try gpa.alloc(u8, self.header.?.sizeofcmds);
defer gpa.free(lc_buffer);
{
- const amt = try handle.preadAll(lc_buffer, self.offset + @sizeOf(macho.mach_header_64));
+ const amt = try handle.readPositionalAll(io, lc_buffer, self.offset + @sizeOf(macho.mach_header_64));
if (amt != self.header.?.sizeofcmds) return error.InputOutput;
}
@@ -1647,14 +1688,14 @@ pub fn parseAr(self: *Object, macho_file: *MachO) !void {
const cmd = lc.cast(macho.symtab_command).?;
try self.strtab.resize(gpa, cmd.strsize);
{
- const amt = try handle.preadAll(self.strtab.items, cmd.stroff + self.offset);
+ const amt = try handle.readPositionalAll(io, self.strtab.items, cmd.stroff + self.offset);
if (amt != self.strtab.items.len) return error.InputOutput;
}
const symtab_buffer = try gpa.alloc(u8, cmd.nsyms * @sizeOf(macho.nlist_64));
defer gpa.free(symtab_buffer);
{
- const amt = try handle.preadAll(symtab_buffer, cmd.symoff + self.offset);
+ const amt = try handle.readPositionalAll(io, symtab_buffer, cmd.symoff + self.offset);
if (amt != symtab_buffer.len) return error.InputOutput;
}
const symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(symtab_buffer.ptr))[0..cmd.nsyms];
@@ -1689,13 +1730,15 @@ pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, macho_file: *M
}
pub fn updateArSize(self: *Object, macho_file: *MachO) !void {
+ const comp = macho_file.base.comp;
+ const io = comp.io;
self.output_ar_state.size = if (self.in_archive) |ar| ar.size else size: {
const file = macho_file.getFileHandle(self.file_handle);
- break :size (try file.stat()).size;
+ break :size (try file.stat(io)).size;
};
}
-pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: anytype) !void {
+pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: *Writer) !void {
// Header
const size = try macho_file.cast(usize, self.output_ar_state.size);
const basename = std.fs.path.basename(self.path);
@@ -1703,10 +1746,12 @@ pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writ
// Data
const file = macho_file.getFileHandle(self.file_handle);
// TODO try using copyRangeAll
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const data = try gpa.alloc(u8, size);
defer gpa.free(data);
- const amt = try file.preadAll(data, self.offset);
+ const amt = try file.readPositionalAll(io, data, self.offset);
if (amt != size) return error.InputOutput;
try writer.writeAll(data);
}
@@ -1811,7 +1856,9 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const headers = self.sections.items(.header);
const sections_data = try gpa.alloc([]const u8, headers.len);
defer {
@@ -1827,7 +1874,7 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void {
if (header.isZerofill()) continue;
const size = try macho_file.cast(usize, header.size);
const data = try gpa.alloc(u8, size);
- const amt = try file.preadAll(data, header.offset + self.offset);
+ const amt = try file.readPositionalAll(io, data, header.offset + self.offset);
if (amt != data.len) return error.InputOutput;
sections_data[n_sect] = data;
}
@@ -1850,7 +1897,9 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const headers = self.sections.items(.header);
const sections_data = try gpa.alloc([]const u8, headers.len);
defer {
@@ -1866,7 +1915,7 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void {
if (header.isZerofill()) continue;
const size = try macho_file.cast(usize, header.size);
const data = try gpa.alloc(u8, size);
- const amt = try file.preadAll(data, header.offset + self.offset);
+ const amt = try file.readPositionalAll(io, data, header.offset + self.offset);
if (amt != data.len) return error.InputOutput;
sections_data[n_sect] = data;
}
@@ -2482,11 +2531,11 @@ pub fn getUnwindRecord(self: *Object, index: UnwindInfo.Record.Index) *UnwindInf
}
/// Caller owns the memory.
-pub fn readSectionData(self: Object, allocator: Allocator, file: File.Handle, n_sect: u8) ![]u8 {
+pub fn readSectionData(self: Object, allocator: Allocator, io: Io, file: File.Handle, n_sect: u8) ![]u8 {
const header = self.sections.items(.header)[n_sect];
const size = math.cast(usize, header.size) orelse return error.Overflow;
const data = try allocator.alloc(u8, size);
- const amt = try file.preadAll(data, header.offset + self.offset);
+ const amt = try file.readPositionalAll(io, data, header.offset + self.offset);
errdefer allocator.free(data);
if (amt != data.len) return error.InputOutput;
return data;
@@ -2710,15 +2759,17 @@ const x86_64 = struct {
handle: File.Handle,
macho_file: *MachO,
) !void {
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const relocs_buffer = try gpa.alloc(u8, sect.nreloc * @sizeOf(macho.relocation_info));
defer gpa.free(relocs_buffer);
- const amt = try handle.preadAll(relocs_buffer, sect.reloff + self.offset);
+ const amt = try handle.readPositionalAll(io, relocs_buffer, sect.reloff + self.offset);
if (amt != relocs_buffer.len) return error.InputOutput;
const relocs = @as([*]align(1) const macho.relocation_info, @ptrCast(relocs_buffer.ptr))[0..sect.nreloc];
- const code = try self.readSectionData(gpa, handle, n_sect);
+ const code = try self.readSectionData(gpa, io, handle, n_sect);
defer gpa.free(code);
try out.ensureTotalCapacityPrecise(gpa, relocs.len);
@@ -2877,15 +2928,17 @@ const aarch64 = struct {
handle: File.Handle,
macho_file: *MachO,
) !void {
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const relocs_buffer = try gpa.alloc(u8, sect.nreloc * @sizeOf(macho.relocation_info));
defer gpa.free(relocs_buffer);
- const amt = try handle.preadAll(relocs_buffer, sect.reloff + self.offset);
+ const amt = try handle.readPositionalAll(io, relocs_buffer, sect.reloff + self.offset);
if (amt != relocs_buffer.len) return error.InputOutput;
const relocs = @as([*]align(1) const macho.relocation_info, @ptrCast(relocs_buffer.ptr))[0..sect.nreloc];
- const code = try self.readSectionData(gpa, handle, n_sect);
+ const code = try self.readSectionData(gpa, io, handle, n_sect);
defer gpa.free(code);
try out.ensureTotalCapacityPrecise(gpa, relocs.len);
@@ -3061,27 +3114,3 @@ const aarch64 = struct {
}
}
};
-
-const std = @import("std");
-const assert = std.debug.assert;
-const log = std.log.scoped(.link);
-const macho = std.macho;
-const math = std.math;
-const mem = std.mem;
-const Allocator = std.mem.Allocator;
-const Writer = std.Io.Writer;
-
-const eh_frame = @import("eh_frame.zig");
-const trace = @import("../../tracy.zig").trace;
-const Archive = @import("Archive.zig");
-const Atom = @import("Atom.zig");
-const Cie = eh_frame.Cie;
-const Dwarf = @import("Dwarf.zig");
-const Fde = eh_frame.Fde;
-const File = @import("file.zig").File;
-const LoadCommandIterator = macho.LoadCommandIterator;
-const MachO = @import("../MachO.zig");
-const Object = @This();
-const Relocation = @import("Relocation.zig");
-const Symbol = @import("Symbol.zig");
-const UnwindInfo = @import("UnwindInfo.zig");
diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig
index 5a4ea65790..49555c2746 100644
--- a/src/link/MachO/ZigObject.zig
+++ b/src/link/MachO/ZigObject.zig
@@ -171,6 +171,9 @@ pub fn getAtomData(self: ZigObject, macho_file: *MachO, atom: Atom, buffer: []u8
const isec = atom.getInputSection(macho_file);
assert(!isec.isZerofill());
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+
switch (isec.type()) {
macho.S_THREAD_LOCAL_REGULAR => {
const tlv = self.tlv_initializers.get(atom.atom_index).?;
@@ -182,7 +185,7 @@ pub fn getAtomData(self: ZigObject, macho_file: *MachO, atom: Atom, buffer: []u8
else => {
const sect = macho_file.sections.items(.header)[atom.out_n_sect];
const file_offset = sect.offset + atom.value;
- const amt = try macho_file.base.file.?.preadAll(buffer, file_offset);
+ const amt = try macho_file.base.file.?.readPositionalAll(io, buffer, file_offset);
if (amt != buffer.len) return error.InputOutput;
},
}
@@ -290,12 +293,14 @@ pub fn dedupLiterals(self: *ZigObject, lp: MachO.LiteralPool, macho_file: *MachO
/// We need this so that we can write to an archive.
/// TODO implement writing ZigObject data directly to a buffer instead.
pub fn readFileContents(self: *ZigObject, macho_file: *MachO) !void {
- const diags = &macho_file.base.comp.link_diags;
+ const comp = macho_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const diags = &comp.link_diags;
// Size of the output object file is always the offset + size of the strtab
const size = macho_file.symtab_cmd.stroff + macho_file.symtab_cmd.strsize;
- const gpa = macho_file.base.comp.gpa;
try self.data.resize(gpa, size);
- const amt = macho_file.base.file.?.preadAll(self.data.items, 0) catch |err|
+ const amt = macho_file.base.file.?.readPositionalAll(io, self.data.items, 0) catch |err|
return diags.fail("failed to read output file: {s}", .{@errorName(err)});
if (amt != size)
return diags.fail("unexpected EOF reading from output file", .{});
@@ -945,6 +950,8 @@ fn updateNavCode(
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
@@ -1012,8 +1019,8 @@ fn updateNavCode(
if (!sect.isZerofill()) {
const file_offset = sect.offset + atom.value;
- macho_file.base.file.?.pwriteAll(code, file_offset) catch |err|
- return macho_file.base.cgFail(nav_index, "failed to write output file: {s}", .{@errorName(err)});
+ macho_file.base.file.?.writePositionalAll(io, code, file_offset) catch |err|
+ return macho_file.base.cgFail(nav_index, "failed to write output file: {t}", .{err});
}
}
@@ -1493,7 +1500,7 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, macho_file: *MachO) !void {
.x86_64 => try x86_64.writeTrampolineCode(source_addr, target_addr, &buf),
else => @panic("TODO implement write trampoline for this CPU arch"),
};
- try macho_file.base.file.?.pwriteAll(out, fileoff);
+ return macho_file.pwriteAll(out, fileoff);
}
pub fn getOrCreateMetadataForNav(
diff --git a/src/link/MachO/fat.zig b/src/link/MachO/fat.zig
index 7772f7a4de..73b9c626e8 100644
--- a/src/link/MachO/fat.zig
+++ b/src/link/MachO/fat.zig
@@ -1,20 +1,22 @@
+const builtin = @import("builtin");
+const native_endian = builtin.target.cpu.arch.endian();
+
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
-const builtin = @import("builtin");
const log = std.log.scoped(.macho);
const macho = std.macho;
const mem = std.mem;
-const native_endian = builtin.target.cpu.arch.endian();
const MachO = @import("../MachO.zig");
-pub fn readFatHeader(file: std.fs.File) !macho.fat_header {
- return readFatHeaderGeneric(macho.fat_header, file, 0);
+pub fn readFatHeader(io: Io, file: Io.File) !macho.fat_header {
+ return readFatHeaderGeneric(io, macho.fat_header, file, 0);
}
-fn readFatHeaderGeneric(comptime Hdr: type, file: std.fs.File, offset: usize) !Hdr {
+fn readFatHeaderGeneric(io: Io, comptime Hdr: type, file: Io.File, offset: usize) !Hdr {
var buffer: [@sizeOf(Hdr)]u8 = undefined;
- const nread = try file.preadAll(&buffer, offset);
+ const nread = try file.readPositionalAll(io, &buffer, offset);
if (nread != buffer.len) return error.InputOutput;
var hdr = @as(*align(1) const Hdr, @ptrCast(&buffer)).*;
mem.byteSwapAllFields(Hdr, &hdr);
@@ -27,12 +29,12 @@ pub const Arch = struct {
size: u32,
};
-pub fn parseArchs(file: std.fs.File, fat_header: macho.fat_header, out: *[2]Arch) ![]const Arch {
+pub fn parseArchs(io: Io, file: Io.File, fat_header: macho.fat_header, out: *[2]Arch) ![]const Arch {
var count: usize = 0;
var fat_arch_index: u32 = 0;
while (fat_arch_index < fat_header.nfat_arch and count < out.len) : (fat_arch_index += 1) {
const offset = @sizeOf(macho.fat_header) + @sizeOf(macho.fat_arch) * fat_arch_index;
- const fat_arch = try readFatHeaderGeneric(macho.fat_arch, file, offset);
+ const fat_arch = try readFatHeaderGeneric(io, macho.fat_arch, file, offset);
// If we come across an architecture that we do not know how to handle, that's
// fine because we can keep looking for one that might match.
const arch: std.Target.Cpu.Arch = switch (fat_arch.cputype) {
diff --git a/src/link/MachO/file.zig b/src/link/MachO/file.zig
index 05b43de181..cd687a4941 100644
--- a/src/link/MachO/file.zig
+++ b/src/link/MachO/file.zig
@@ -355,11 +355,12 @@ pub const File = union(enum) {
dylib: Dylib,
};
- pub const Handle = std.fs.File;
+ pub const Handle = Io.File;
pub const HandleIndex = Index;
};
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const macho = std.macho;
diff --git a/src/link/MachO/hasher.zig b/src/link/MachO/hasher.zig
index 78cd847c40..822474e3e1 100644
--- a/src/link/MachO/hasher.zig
+++ b/src/link/MachO/hasher.zig
@@ -1,34 +1,36 @@
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+
+const trace = @import("../../tracy.zig").trace;
+
pub fn ParallelHasher(comptime Hasher: type) type {
const hash_size = Hasher.digest_length;
return struct {
- allocator: Allocator,
- io: std.Io,
-
- pub fn hash(self: Self, file: fs.File, out: [][hash_size]u8, opts: struct {
+ pub fn hash(gpa: Allocator, io: Io, file: Io.File, out: [][hash_size]u8, opts: struct {
chunk_size: u64 = 0x4000,
max_file_size: ?u64 = null,
}) !void {
const tracy = trace(@src());
defer tracy.end();
- const io = self.io;
-
const file_size = blk: {
- const file_size = opts.max_file_size orelse try file.getEndPos();
+ const file_size = opts.max_file_size orelse try file.length(io);
break :blk std.math.cast(usize, file_size) orelse return error.Overflow;
};
const chunk_size = std.math.cast(usize, opts.chunk_size) orelse return error.Overflow;
- const buffer = try self.allocator.alloc(u8, chunk_size * out.len);
- defer self.allocator.free(buffer);
+ const buffer = try gpa.alloc(u8, chunk_size * out.len);
+ defer gpa.free(buffer);
- const results = try self.allocator.alloc(fs.File.PReadError!usize, out.len);
- defer self.allocator.free(results);
+ const results = try gpa.alloc(Io.File.ReadPositionalError!usize, out.len);
+ defer gpa.free(results);
{
- var group: std.Io.Group = .init;
- errdefer group.cancel(io);
+ var group: Io.Group = .init;
+ defer group.cancel(io);
for (out, results, 0..) |*out_buf, *result, i| {
const fstart = i * chunk_size;
@@ -37,6 +39,7 @@ pub fn ParallelHasher(comptime Hasher: type) type {
else
chunk_size;
group.async(io, worker, .{
+ io,
file,
fstart,
buffer[fstart..][0..fsize],
@@ -51,26 +54,15 @@ pub fn ParallelHasher(comptime Hasher: type) type {
}
fn worker(
- file: fs.File,
+ io: Io,
+ file: Io.File,
fstart: usize,
buffer: []u8,
out: *[hash_size]u8,
- err: *fs.File.PReadError!usize,
+ err: *Io.File.ReadPositionalError!usize,
) void {
- const tracy = trace(@src());
- defer tracy.end();
- err.* = file.preadAll(buffer, fstart);
+ err.* = file.readPositionalAll(io, buffer, fstart);
Hasher.hash(buffer, out, .{});
}
-
- const Self = @This();
};
}
-
-const assert = std.debug.assert;
-const fs = std.fs;
-const mem = std.mem;
-const std = @import("std");
-const trace = @import("../../tracy.zig").trace;
-
-const Allocator = mem.Allocator;
diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig
index d2a6c2a3ab..13dd35a558 100644
--- a/src/link/MachO/relocatable.zig
+++ b/src/link/MachO/relocatable.zig
@@ -1,6 +1,7 @@
pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
- const gpa = macho_file.base.comp.gpa;
- const diags = &macho_file.base.comp.link_diags;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const diags = &comp.link_diags;
// TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list.
var positionals = std.array_list.Managed(link.Input).init(gpa);
@@ -9,24 +10,22 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
positionals.appendSliceAssumeCapacity(comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
- try positionals.append(try link.openObjectInput(diags, key.status.success.object_path));
+ try positionals.append(try link.openObjectInput(io, diags, key.status.success.object_path));
}
- if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
+ if (module_obj_path) |path| try positionals.append(try link.openObjectInput(io, diags, path));
if (macho_file.getZigObject() == null and positionals.items.len == 1) {
// Instead of invoking a full-blown `-r` mode on the input which sadly will strip all
// debug info segments/sections (this is apparently by design by Apple), we copy
// the *only* input file over.
const path = positionals.items[0].path().?;
- const in_file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err|
+ const in_file = path.root_dir.handle.openFile(io, path.sub_path, .{}) catch |err|
return diags.fail("failed to open {f}: {s}", .{ path, @errorName(err) });
- const stat = in_file.stat() catch |err|
+ const stat = in_file.stat(io) catch |err|
return diags.fail("failed to stat {f}: {s}", .{ path, @errorName(err) });
- const amt = in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size) catch |err|
- return diags.fail("failed to copy range of file {f}: {s}", .{ path, @errorName(err) });
- if (amt != stat.size)
- return diags.fail("unexpected short write in copy range of file {f}", .{path});
+ link.File.copyRangeAll2(io, in_file, macho_file.base.file.?, 0, 0, stat.size) catch |err|
+ return diags.fail("failed to copy range of file {f}: {t}", .{ path, err });
return;
}
@@ -79,6 +78,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
const gpa = comp.gpa;
+ const io = comp.io;
const diags = &macho_file.base.comp.link_diags;
var positionals = std.array_list.Managed(link.Input).init(gpa);
@@ -88,17 +88,17 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
positionals.appendSliceAssumeCapacity(comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
- try positionals.append(try link.openObjectInput(diags, key.status.success.object_path));
+ try positionals.append(try link.openObjectInput(io, diags, key.status.success.object_path));
}
- if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
+ if (module_obj_path) |path| try positionals.append(try link.openObjectInput(io, diags, path));
if (comp.compiler_rt_strat == .obj) {
- try positionals.append(try link.openObjectInput(diags, comp.compiler_rt_obj.?.full_object_path));
+ try positionals.append(try link.openObjectInput(io, diags, comp.compiler_rt_obj.?.full_object_path));
}
if (comp.ubsan_rt_strat == .obj) {
- try positionals.append(try link.openObjectInput(diags, comp.ubsan_rt_obj.?.full_object_path));
+ try positionals.append(try link.openObjectInput(io, diags, comp.ubsan_rt_obj.?.full_object_path));
}
for (positionals.items) |link_input| {
@@ -229,7 +229,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
assert(writer.end == total_size);
- try macho_file.setEndPos(total_size);
+ try macho_file.setLength(total_size);
try macho_file.pwriteAll(writer.buffered(), 0);
if (diags.hasErrors()) return error.LinkFailure;
diff --git a/src/link/MachO/uuid.zig b/src/link/MachO/uuid.zig
index d08ac0c5b8..a75799d01e 100644
--- a/src/link/MachO/uuid.zig
+++ b/src/link/MachO/uuid.zig
@@ -1,28 +1,38 @@
+const std = @import("std");
+const Io = std.Io;
+const Md5 = std.crypto.hash.Md5;
+
+const trace = @import("../../tracy.zig").trace;
+const Compilation = @import("../../Compilation.zig");
+const ParallelHasher = @import("hasher.zig").ParallelHasher;
+
/// Calculates Md5 hash of each chunk in parallel and then hashes all Md5 hashes to produce
/// the final digest.
/// While this is NOT a correct MD5 hash of the contents, this methodology is used by LLVM/LLD
/// and we will use it too as it seems accepted by Apple OSes.
/// TODO LLD also hashes the output filename to disambiguate between same builds with different
/// output files. Should we also do that?
-pub fn calcUuid(comp: *const Compilation, file: fs.File, file_size: u64, out: *[Md5.digest_length]u8) !void {
+pub fn calcUuid(comp: *const Compilation, file: Io.File, file_size: u64, out: *[Md5.digest_length]u8) !void {
const tracy = trace(@src());
defer tracy.end();
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const chunk_size: usize = 1024 * 1024;
const num_chunks: usize = std.math.cast(usize, @divTrunc(file_size, chunk_size)) orelse return error.Overflow;
const actual_num_chunks = if (@rem(file_size, chunk_size) > 0) num_chunks + 1 else num_chunks;
- const hashes = try comp.gpa.alloc([Md5.digest_length]u8, actual_num_chunks);
- defer comp.gpa.free(hashes);
+ const hashes = try gpa.alloc([Md5.digest_length]u8, actual_num_chunks);
+ defer gpa.free(hashes);
- var hasher = Hasher(Md5){ .allocator = comp.gpa, .io = comp.io };
- try hasher.hash(file, hashes, .{
+ try ParallelHasher(Md5).hash(gpa, io, file, hashes, .{
.chunk_size = chunk_size,
.max_file_size = file_size,
});
- const final_buffer = try comp.gpa.alloc(u8, actual_num_chunks * Md5.digest_length);
- defer comp.gpa.free(final_buffer);
+ const final_buffer = try gpa.alloc(u8, actual_num_chunks * Md5.digest_length);
+ defer gpa.free(final_buffer);
for (hashes, 0..) |hash, i| {
@memcpy(final_buffer[i * Md5.digest_length ..][0..Md5.digest_length], &hash);
@@ -37,12 +47,3 @@ inline fn conform(out: *[Md5.digest_length]u8) void {
out[6] = (out[6] & 0x0F) | (3 << 4);
out[8] = (out[8] & 0x3F) | 0x80;
}
-
-const fs = std.fs;
-const mem = std.mem;
-const std = @import("std");
-const trace = @import("../../tracy.zig").trace;
-
-const Compilation = @import("../../Compilation.zig");
-const Md5 = std.crypto.hash.Md5;
-const Hasher = @import("hasher.zig").ParallelHasher;
diff --git a/src/link/MappedFile.zig b/src/link/MappedFile.zig
index 975b94578b..2986e27e24 100644
--- a/src/link/MappedFile.zig
+++ b/src/link/MappedFile.zig
@@ -1,3 +1,17 @@
+/// TODO add a mapped file abstraction to std.Io
+const MappedFile = @This();
+
+const builtin = @import("builtin");
+const is_linux = builtin.os.tag == .linux;
+const is_windows = builtin.os.tag == .windows;
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const linux = std.os.linux;
+const windows = std.os.windows;
+
+io: Io,
file: std.Io.File,
flags: packed struct {
block_size: std.mem.Alignment,
@@ -16,16 +30,22 @@ writers: std.SinglyLinkedList,
pub const growth_factor = 4;
-pub const Error = std.posix.MMapError || std.posix.MRemapError || std.fs.File.SetEndPosError || error{
+pub const Error = std.posix.MMapError || std.posix.MRemapError || Io.File.LengthError || error{
NotFile,
SystemResources,
IsDir,
Unseekable,
NoSpaceLeft,
+
+ InputOutput,
+ FileTooBig,
+ FileBusy,
+ NonResizable,
};
-pub fn init(file: std.Io.File, gpa: std.mem.Allocator) !MappedFile {
+pub fn init(file: std.Io.File, gpa: std.mem.Allocator, io: Io) !MappedFile {
var mf: MappedFile = .{
+ .io = io,
.file = file,
.flags = undefined,
.section = if (is_windows) windows.INVALID_HANDLE_VALUE else {},
@@ -55,18 +75,41 @@ pub fn init(file: std.Io.File, gpa: std.mem.Allocator) !MappedFile {
};
}
if (is_linux) {
- const statx = try linux.wrapped.statx(
- mf.file.handle,
- "",
- std.posix.AT.EMPTY_PATH,
- .{ .TYPE = true, .SIZE = true, .BLOCKS = true },
- );
- assert(statx.mask.TYPE);
- assert(statx.mask.SIZE);
- assert(statx.mask.BLOCKS);
-
- if (!std.posix.S.ISREG(statx.mode)) return error.PathAlreadyExists;
- break :stat .{ statx.size, @max(std.heap.pageSize(), statx.blksize) };
+ const use_c = std.c.versionCheck(if (builtin.abi.isAndroid())
+ .{ .major = 30, .minor = 0, .patch = 0 }
+ else
+ .{ .major = 2, .minor = 28, .patch = 0 });
+ const sys = if (use_c) std.c else std.os.linux;
+ while (true) {
+ var statx = std.mem.zeroes(linux.Statx);
+ const rc = sys.statx(
+ mf.file.handle,
+ "",
+ std.posix.AT.EMPTY_PATH,
+ .{ .TYPE = true, .SIZE = true, .BLOCKS = true },
+ &statx,
+ );
+ switch (sys.errno(rc)) {
+ .SUCCESS => {
+ assert(statx.mask.TYPE);
+ assert(statx.mask.SIZE);
+ assert(statx.mask.BLOCKS);
+ if (!std.posix.S.ISREG(statx.mode)) return error.PathAlreadyExists;
+ break :stat .{ statx.size, @max(std.heap.pageSize(), statx.blksize) };
+ },
+ .INTR => continue,
+ .ACCES => return error.AccessDenied,
+ .BADF => if (std.debug.runtime_safety) unreachable else return error.Unexpected,
+ .FAULT => if (std.debug.runtime_safety) unreachable else return error.Unexpected,
+ .INVAL => if (std.debug.runtime_safety) unreachable else return error.Unexpected,
+ .LOOP => return error.SymLinkLoop,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOTDIR => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ else => |err| return std.posix.unexpectedErrno(err),
+ }
+ }
}
const stat = try std.posix.fstat(mf.file.handle);
if (!std.posix.S.ISREG(stat.mode)) return error.PathAlreadyExists;
@@ -433,8 +476,8 @@ pub const Node = extern struct {
return n;
},
.streaming,
- .streaming_reading,
- .positional_reading,
+ .streaming_simple,
+ .positional_simple,
.failure,
=> {
const dest = limit.slice(interface.unusedCapacitySlice());
@@ -612,13 +655,14 @@ pub fn addNodeAfter(
}
fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested_size: u64) !void {
+ const io = mf.io;
const node = ni.get(mf);
const old_offset, const old_size = node.location().resolve(mf);
const new_size = node.flags.alignment.forward(@intCast(requested_size));
// Resize the entire file
if (ni == Node.Index.root) {
try mf.ensureCapacityForSetLocation(gpa);
- try std.fs.File.adaptFromNewApi(mf.file).setEndPos(new_size);
+ try mf.file.setLength(io, new_size);
try mf.ensureTotalCapacity(@intCast(new_size));
ni.setLocationAssumeCapacity(mf, old_offset, new_size);
return;
@@ -1059,12 +1103,3 @@ fn verifyNode(mf: *MappedFile, parent_ni: Node.Index) void {
ni = node.next;
}
}
-
-const assert = std.debug.assert;
-const builtin = @import("builtin");
-const is_linux = builtin.os.tag == .linux;
-const is_windows = builtin.os.tag == .windows;
-const linux = std.os.linux;
-const MappedFile = @This();
-const std = @import("std");
-const windows = std.os.windows;
diff --git a/src/link/Queue.zig b/src/link/Queue.zig
index e8e7700695..b716800bae 100644
--- a/src/link/Queue.zig
+++ b/src/link/Queue.zig
@@ -121,7 +121,7 @@ pub fn enqueueZcu(
link.doZcuTask(comp, tid, task);
}
-pub fn finishPrelinkQueue(q: *Queue, comp: *Compilation) void {
+pub fn finishPrelinkQueue(q: *Queue, comp: *Compilation) Io.Cancelable!void {
if (q.future != null) {
q.prelink_queue.close(comp.io);
return;
@@ -136,6 +136,7 @@ pub fn finishPrelinkQueue(q: *Queue, comp: *Compilation) void {
} else |err| switch (err) {
error.OutOfMemory => comp.link_diags.setAllocFailure(),
error.LinkFailure => {},
+ error.Canceled => |e| return e,
}
}
}
@@ -175,6 +176,7 @@ fn runLinkTasks(q: *Queue, comp: *Compilation) void {
lf.post_prelink = true;
} else |err| switch (err) {
error.OutOfMemory => comp.link_diags.setAllocFailure(),
+ error.Canceled => @panic("TODO"),
error.LinkFailure => {},
}
}
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index 7e28dc0a8b..4dbdd5c089 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -33,6 +33,7 @@ pub fn createEmpty(
options: link.File.OpenOptions,
) !*Linker {
const gpa = comp.gpa;
+ const io = comp.io;
const target = &comp.root_mod.resolved_target.result;
assert(!comp.config.use_lld); // Caught by Compilation.Config.resolve
@@ -78,7 +79,7 @@ pub fn createEmpty(
};
errdefer linker.deinit();
- linker.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
+ linker.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{
.truncate = true,
.read = true,
});
@@ -245,6 +246,7 @@ pub fn flush(
const comp = linker.base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
+ const io = comp.io;
// We need to export the list of error names somewhere so that we can pretty-print them in the
// executor. This is not really an important thing though, so we can just dump it in any old
@@ -286,8 +288,8 @@ pub fn flush(
};
// TODO endianness bug. use file writer and call writeSliceEndian instead
- linker.base.file.?.writeAll(@ptrCast(linked_module)) catch |err|
- return diags.fail("failed to write: {s}", .{@errorName(err)});
+ linker.base.file.?.writeStreamingAll(io, @ptrCast(linked_module)) catch |err|
+ return diags.fail("failed to write: {t}", .{err});
}
fn linkModule(arena: Allocator, module: []Word, progress: std.Progress.Node) ![]Word {
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 92307ec40c..af800d77d2 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -20,6 +20,7 @@ const native_endian = builtin.cpu.arch.endian();
const build_options = @import("build_options");
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
const Path = Cache.Path;
@@ -428,7 +429,11 @@ pub const OutputFunctionIndex = enum(u32) {
pub fn fromSymbolName(wasm: *const Wasm, name: String) OutputFunctionIndex {
if (wasm.flush_buffer.function_imports.getIndex(name)) |i| return @enumFromInt(i);
- return fromFunctionIndex(wasm, FunctionIndex.fromSymbolName(wasm, name).?);
+ return fromFunctionIndex(wasm, FunctionIndex.fromSymbolName(wasm, name) orelse {
+ if (std.debug.runtime_safety) {
+ std.debug.panic("function index for symbol not found: {s}", .{name.slice(wasm)});
+ } else unreachable;
+ });
}
};
@@ -2996,16 +3001,18 @@ pub fn createEmpty(
.named => |name| (try wasm.internString(name)).toOptional(),
};
- wasm.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
+ const io = comp.io;
+
+ wasm.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{
.truncate = true,
.read = true,
- .mode = if (fs.has_executable_bit)
+ .permissions = if (Io.File.Permissions.has_executable_bit)
if (target.os.tag == .wasi and output_mode == .Exe)
- fs.File.default_mode | 0b001_000_000
+ .executable_file
else
- fs.File.default_mode
+ .default_file
else
- 0,
+ .default_file,
});
wasm.name = emit.sub_path;
@@ -3013,14 +3020,16 @@ pub fn createEmpty(
}
fn openParseObjectReportingFailure(wasm: *Wasm, path: Path) void {
- const diags = &wasm.base.comp.link_diags;
- const obj = link.openObject(path, false, false) catch |err| {
- switch (diags.failParse(path, "failed to open object: {s}", .{@errorName(err)})) {
+ const comp = wasm.base.comp;
+ const io = comp.io;
+ const diags = &comp.link_diags;
+ const obj = link.openObject(io, path, false, false) catch |err| {
+ switch (diags.failParse(path, "failed to open object: {t}", .{err})) {
error.LinkFailure => return,
}
};
wasm.parseObject(obj) catch |err| {
- switch (diags.failParse(path, "failed to parse object: {s}", .{@errorName(err)})) {
+ switch (diags.failParse(path, "failed to parse object: {t}", .{err})) {
error.LinkFailure => return,
}
};
@@ -3032,7 +3041,7 @@ fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void {
const io = wasm.base.comp.io;
const gc_sections = wasm.base.gc_sections;
- defer obj.file.close();
+ defer obj.file.close(io);
var file_reader = obj.file.reader(io, &.{});
@@ -3060,7 +3069,7 @@ fn parseArchive(wasm: *Wasm, obj: link.Input.Object) !void {
const io = wasm.base.comp.io;
const gc_sections = wasm.base.gc_sections;
- defer obj.file.close();
+ defer obj.file.close(io);
var file_reader = obj.file.reader(io, &.{});
@@ -3529,7 +3538,10 @@ pub fn markFunctionImport(
import: *FunctionImport,
func_index: FunctionImport.Index,
) link.File.FlushError!void {
- if (import.flags.alive) return;
+ // import.flags.alive might be already true from a previous update. In such
+ // case, we must still run the logic in this function, in case the item
+ // being marked was reverted by the `flush` logic that resets the hash
+ // table watermarks.
import.flags.alive = true;
const comp = wasm.base.comp;
@@ -3549,8 +3561,9 @@ pub fn markFunctionImport(
} else {
try wasm.function_imports.put(gpa, name, .fromObject(func_index, wasm));
}
- } else {
- try markFunction(wasm, import.resolution.unpack(wasm).object_function, import.flags.exported);
+ } else switch (import.resolution.unpack(wasm)) {
+ .object_function => try markFunction(wasm, import.resolution.unpack(wasm).object_function, import.flags.exported),
+ else => return,
}
}
@@ -3589,7 +3602,10 @@ fn markGlobalImport(
import: *GlobalImport,
global_index: GlobalImport.Index,
) link.File.FlushError!void {
- if (import.flags.alive) return;
+ // import.flags.alive might be already true from a previous update. In such
+ // case, we must still run the logic in this function, in case the item
+ // being marked was reverted by the `flush` logic that resets the hash
+ // table watermarks.
import.flags.alive = true;
const comp = wasm.base.comp;
@@ -3619,8 +3635,9 @@ fn markGlobalImport(
} else {
try wasm.global_imports.put(gpa, name, .fromObject(global_index, wasm));
}
- } else {
- try markGlobal(wasm, import.resolution.unpack(wasm).object_global, import.flags.exported);
+ } else switch (import.resolution.unpack(wasm)) {
+ .object_global => try markGlobal(wasm, import.resolution.unpack(wasm).object_global, import.flags.exported),
+ else => return,
}
}
@@ -3823,8 +3840,9 @@ pub fn flush(
const comp = wasm.base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
+ const io = comp.io;
- if (comp.verbose_link) Compilation.dump_argv(wasm.dump_argv_list.items);
+ if (comp.verbose_link) try Compilation.dumpArgv(io, wasm.dump_argv_list.items);
if (wasm.base.zcu_object_basename) |raw| {
const zcu_obj_path: Path = try comp.resolveEmitPathFlush(arena, .temp, raw);
@@ -4037,7 +4055,7 @@ pub fn tagNameSymbolIndex(wasm: *Wasm, ip_index: InternPool.Index) Allocator.Err
const comp = wasm.base.comp;
assert(comp.config.output_mode == .Obj);
const gpa = comp.gpa;
- const name = try wasm.internStringFmt("__zig_tag_name_{d}", .{@intFromEnum(ip_index)});
+ const name = try wasm.internStringFmt("__zig_tag_name_{d}", .{ip_index});
const gop = try wasm.symbol_table.getOrPut(gpa, name);
gop.value_ptr.* = {};
return @enumFromInt(gop.index);
diff --git a/src/link/Wasm/Flush.zig b/src/link/Wasm/Flush.zig
index 6f7792f473..5bd18a1936 100644
--- a/src/link/Wasm/Flush.zig
+++ b/src/link/Wasm/Flush.zig
@@ -108,6 +108,7 @@ pub fn deinit(f: *Flush, gpa: Allocator) void {
pub fn finish(f: *Flush, wasm: *Wasm) !void {
const comp = wasm.base.comp;
+ const io = comp.io;
const shared_memory = comp.config.shared_memory;
const diags = &comp.link_diags;
const gpa = comp.gpa;
@@ -127,17 +128,20 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
if (comp.zcu) |zcu| {
const ip: *const InternPool = &zcu.intern_pool; // No mutations allowed!
+ log.debug("total MIR instructions: {d}", .{wasm.mir_instructions.len});
+
// Detect any intrinsics that were called; they need to have dependencies on the symbols marked.
// Likewise detect `@tagName` calls so those functions can be included in the output and synthesized.
for (wasm.mir_instructions.items(.tag), wasm.mir_instructions.items(.data)) |tag, *data| switch (tag) {
.call_intrinsic => {
const symbol_name = try wasm.internString(@tagName(data.intrinsic));
const i: Wasm.FunctionImport.Index = @enumFromInt(wasm.object_function_imports.getIndex(symbol_name) orelse {
- return diags.fail("missing compiler runtime intrinsic '{s}' (undefined linker symbol)", .{
- @tagName(data.intrinsic),
+ return diags.fail("missing compiler runtime intrinsic '{t}' (undefined linker symbol)", .{
+ data.intrinsic,
});
});
try wasm.markFunctionImport(symbol_name, i.value(wasm), i);
+ log.debug("markFunctionImport intrinsic {d}={t}", .{ i, data.intrinsic });
},
.call_tag_name => {
assert(ip.indexToKey(data.ip_index) == .enum_type);
@@ -146,11 +150,10 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
wasm.tag_name_table_ref_count += 1;
const int_tag_ty = Zcu.Type.fromInterned(data.ip_index).intTagType(zcu);
gop.value_ptr.* = .{ .tag_name = .{
- .symbol_name = try wasm.internStringFmt("__zig_tag_name_{d}", .{@intFromEnum(data.ip_index)}),
+ .symbol_name = try wasm.internStringFmt("__zig_tag_name_{d}", .{data.ip_index}),
.type_index = try wasm.internFunctionType(.auto, &.{int_tag_ty.ip_index}, .slice_const_u8_sentinel_0, target),
.table_index = @intCast(wasm.tag_name_offs.items.len),
} };
- try wasm.functions.put(gpa, .fromZcuFunc(wasm, @enumFromInt(gop.index)), {});
const tag_names = ip.loadEnumType(data.ip_index).names;
for (tag_names.get(ip)) |tag_name| {
const slice = tag_name.toSlice(ip);
@@ -158,6 +161,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
try wasm.tag_name_bytes.appendSlice(gpa, slice[0 .. slice.len + 1]);
}
}
+ try wasm.functions.put(gpa, .fromZcuFunc(wasm, @enumFromInt(gop.index)), {});
},
else => continue,
};
@@ -1067,7 +1071,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
}
// Finally, write the entire binary into the file.
- var file_writer = wasm.base.file.?.writer(&.{});
+ var file_writer = wasm.base.file.?.writer(io, &.{});
file_writer.interface.writeAll(binary_bytes.items) catch |err| switch (err) {
error.WriteFailed => return file_writer.err.?,
};
diff --git a/src/link/tapi.zig b/src/link/tapi.zig
index 4c1471a6b4..33c31a8415 100644
--- a/src/link/tapi.zig
+++ b/src/link/tapi.zig
@@ -1,10 +1,10 @@
const std = @import("std");
-const fs = std.fs;
+const Io = std.Io;
const mem = std.mem;
const log = std.log.scoped(.tapi);
-const yaml = @import("tapi/yaml.zig");
+const Allocator = std.mem.Allocator;
-const Allocator = mem.Allocator;
+const yaml = @import("tapi/yaml.zig");
const Yaml = yaml.Yaml;
const VersionField = union(enum) {
@@ -130,7 +130,7 @@ pub const Tbd = union(enum) {
pub const TapiError = error{
NotLibStub,
InputOutput,
-} || yaml.YamlError || std.fs.File.PReadError;
+} || yaml.YamlError || Io.File.ReadPositionalError;
pub const LibStub = struct {
/// Underlying memory for stub's contents.
@@ -139,14 +139,14 @@ pub const LibStub = struct {
/// Typed contents of the tbd file.
inner: []Tbd,
- pub fn loadFromFile(allocator: Allocator, file: fs.File) TapiError!LibStub {
+ pub fn loadFromFile(allocator: Allocator, io: Io, file: Io.File) TapiError!LibStub {
const filesize = blk: {
- const stat = file.stat() catch break :blk std.math.maxInt(u32);
+ const stat = file.stat(io) catch break :blk std.math.maxInt(u32);
break :blk @min(stat.size, std.math.maxInt(u32));
};
const source = try allocator.alloc(u8, filesize);
defer allocator.free(source);
- const amt = try file.preadAll(source, 0);
+ const amt = try file.readPositionalAll(io, source, 0);
if (amt != filesize) return error.InputOutput;
var lib_stub = LibStub{
diff --git a/src/main.zig b/src/main.zig
index a897f2a847..628d91017a 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -162,17 +162,20 @@ var debug_allocator: std.heap.DebugAllocator(.{
.stack_trace_frames = build_options.mem_leak_frames,
}) = .init;
+const use_debug_allocator = build_options.debug_gpa or
+ (native_os != .wasi and !builtin.link_libc and switch (builtin.mode) {
+ .Debug, .ReleaseSafe => true,
+ .ReleaseFast, .ReleaseSmall => false,
+ });
+
pub fn main() anyerror!void {
- const gpa, const is_debug = gpa: {
- if (build_options.debug_gpa) break :gpa .{ debug_allocator.allocator(), true };
- if (native_os == .wasi) break :gpa .{ std.heap.wasm_allocator, false };
- if (builtin.link_libc) break :gpa .{ std.heap.c_allocator, false };
- break :gpa switch (builtin.mode) {
- .Debug, .ReleaseSafe => .{ debug_allocator.allocator(), true },
- .ReleaseFast, .ReleaseSmall => .{ std.heap.smp_allocator, false },
- };
+ const gpa = gpa: {
+ if (use_debug_allocator) break :gpa debug_allocator.allocator();
+ if (native_os == .wasi) break :gpa std.heap.wasm_allocator;
+ if (builtin.link_libc) break :gpa std.heap.c_allocator;
+ break :gpa std.heap.smp_allocator;
};
- defer if (is_debug) {
+ defer if (use_debug_allocator) {
_ = debug_allocator.deinit();
};
var arena_instance = std.heap.ArenaAllocator.init(gpa);
@@ -238,7 +241,7 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
}
}
- var threaded: Io.Threaded = .init(gpa);
+ var threaded: Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
threaded_impl_ptr = &threaded;
threaded.stack_size = thread_stack_size;
@@ -328,23 +331,24 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.prepend_global_cache_path = true,
});
} else if (mem.eql(u8, cmd, "init")) {
- return cmdInit(gpa, arena, cmd_args);
+ return cmdInit(gpa, arena, io, cmd_args);
} else if (mem.eql(u8, cmd, "targets")) {
dev.check(.targets_command);
const host = std.zig.resolveTargetQueryOrFatal(io, .{});
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
- try @import("print_targets.zig").cmdTargets(arena, cmd_args, &stdout_writer.interface, &host);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
+ try @import("print_targets.zig").cmdTargets(arena, io, cmd_args, &stdout_writer.interface, &host);
return stdout_writer.interface.flush();
} else if (mem.eql(u8, cmd, "version")) {
dev.check(.version_command);
- try fs.File.stdout().writeAll(build_options.version ++ "\n");
+ try Io.File.stdout().writeStreamingAll(io, build_options.version ++ "\n");
return;
} else if (mem.eql(u8, cmd, "env")) {
dev.check(.env_command);
const host = std.zig.resolveTargetQueryOrFatal(io, .{});
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
try @import("print_env.zig").cmdEnv(
arena,
+ io,
&stdout_writer.interface,
args,
if (native_os == .wasi) wasi_preopens,
@@ -358,10 +362,10 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
});
} else if (mem.eql(u8, cmd, "zen")) {
dev.check(.zen_command);
- return fs.File.stdout().writeAll(info_zen);
+ return Io.File.stdout().writeStreamingAll(io, info_zen);
} else if (mem.eql(u8, cmd, "help") or mem.eql(u8, cmd, "-h") or mem.eql(u8, cmd, "--help")) {
dev.check(.help_command);
- return fs.File.stdout().writeAll(usage);
+ return Io.File.stdout().writeStreamingAll(io, usage);
} else if (mem.eql(u8, cmd, "ast-check")) {
return cmdAstCheck(arena, io, cmd_args);
} else if (mem.eql(u8, cmd, "detect-cpu")) {
@@ -371,7 +375,7 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
} else if (build_options.enable_debug_extensions and mem.eql(u8, cmd, "dump-zir")) {
return cmdDumpZir(arena, io, cmd_args);
} else if (build_options.enable_debug_extensions and mem.eql(u8, cmd, "llvm-ints")) {
- return cmdDumpLlvmInts(gpa, arena, cmd_args);
+ return cmdDumpLlvmInts(gpa, arena, io, cmd_args);
} else {
std.log.info("{s}", .{usage});
fatal("unknown command: {s}", .{args[1]});
@@ -698,7 +702,7 @@ const Emit = union(enum) {
yes: []const u8,
const OutputToCacheReason = enum { listen, @"zig run", @"zig test" };
- fn resolve(emit: Emit, default_basename: []const u8, output_to_cache: ?OutputToCacheReason) Compilation.CreateOptions.Emit {
+ fn resolve(emit: Emit, io: Io, default_basename: []const u8, output_to_cache: ?OutputToCacheReason) Compilation.CreateOptions.Emit {
return switch (emit) {
.no => .no,
.yes_default_path => if (output_to_cache != null) .yes_cache else .{ .yes_path = default_basename },
@@ -713,10 +717,10 @@ const Emit = union(enum) {
} else e: {
// If there's a dirname, check that dir exists. This will give a more descriptive error than `Compilation` otherwise would.
if (fs.path.dirname(path)) |dir_path| {
- var dir = fs.cwd().openDir(dir_path, .{}) catch |err| {
+ var dir = Io.Dir.cwd().openDir(io, dir_path, .{}) catch |err| {
fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) });
};
- dir.close();
+ dir.close(io);
}
break :e .{ .yes_path = path };
},
@@ -1029,13 +1033,12 @@ fn buildOutputType(
if (mem.cutPrefix(u8, arg, "@")) |resp_file_path| {
// This is a "compiler response file". We must parse the file and treat its
// contents as command line parameters.
- args_iter.resp_file = initArgIteratorResponseFile(arena, resp_file_path) catch |err| {
- fatal("unable to read response file '{s}': {s}", .{ resp_file_path, @errorName(err) });
- };
+ args_iter.resp_file = initArgIteratorResponseFile(arena, io, resp_file_path) catch |err|
+ fatal("unable to read response file '{s}': {t}", .{ resp_file_path, err });
} else if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_build_generic);
- return cleanExit();
+ try Io.File.stdout().writeStreamingAll(io, usage_build_generic);
+ return cleanExit(io);
} else if (mem.eql(u8, arg, "--")) {
if (arg_mode == .run) {
// args_iter.i is 1, referring the next arg after "--" in ["--", ...]
@@ -1856,9 +1859,7 @@ fn buildOutputType(
var must_link = false;
var file_ext: ?Compilation.FileExt = null;
while (it.has_next) {
- it.next() catch |err| {
- fatal("unable to parse command line parameters: {s}", .{@errorName(err)});
- };
+ it.next(io) catch |err| fatal("unable to parse command line parameters: {t}", .{err});
switch (it.zig_equivalent) {
.target => target_arch_os_abi = it.only_arg, // example: -target riscv64-linux-unknown
.o => {
@@ -2834,9 +2835,9 @@ fn buildOutputType(
} else if (mem.eql(u8, arg, "-V")) {
warn("ignoring request for supported emulations: unimplemented", .{});
} else if (mem.eql(u8, arg, "-v")) {
- try fs.File.stdout().writeAll("zig ld " ++ build_options.version ++ "\n");
+ try Io.File.stdout().writeStreamingAll(io, "zig ld " ++ build_options.version ++ "\n");
} else if (mem.eql(u8, arg, "--version")) {
- try fs.File.stdout().writeAll("zig ld " ++ build_options.version ++ "\n");
+ try Io.File.stdout().writeStreamingAll(io, "zig ld " ++ build_options.version ++ "\n");
process.exit(0);
} else {
fatal("unsupported linker arg: {s}", .{arg});
@@ -3075,14 +3076,13 @@ fn buildOutputType(
const self_exe_path = switch (native_os) {
.wasi => {},
- else => fs.selfExePathAlloc(arena) catch |err| {
- fatal("unable to find zig self exe path: {s}", .{@errorName(err)});
- },
+ else => process.executablePathAlloc(io, arena) catch |err| fatal("unable to find zig self exe path: {t}", .{err}),
};
// This `init` calls `fatal` on error.
var dirs: Compilation.Directories = .init(
arena,
+ io,
override_lib_dir,
override_global_cache_dir,
s: {
@@ -3095,11 +3095,9 @@ fn buildOutputType(
if (native_os == .wasi) wasi_preopens,
self_exe_path,
);
- defer dirs.deinit();
+ defer dirs.deinit(io);
- if (linker_optimization) |o| {
- warn("ignoring deprecated linker optimization setting '{s}'", .{o});
- }
+ if (linker_optimization) |o| warn("ignoring deprecated linker optimization setting '{s}'", .{o});
create_module.dirs = dirs;
create_module.opts.emit_llvm_ir = emit_llvm_ir != .no;
@@ -3208,6 +3206,7 @@ fn buildOutputType(
for (create_module.framework_dirs.items) |framework_dir_path| {
if (try accessFrameworkPath(
+ io,
&test_path,
&checked_paths,
framework_dir_path,
@@ -3251,8 +3250,8 @@ fn buildOutputType(
}
}
- var cleanup_emit_bin_dir: ?fs.Dir = null;
- defer if (cleanup_emit_bin_dir) |*dir| dir.close();
+ var cleanup_emit_bin_dir: ?Io.Dir = null;
+ defer if (cleanup_emit_bin_dir) |*dir| dir.close(io);
// For `zig run` and `zig test`, we don't want to put the binary in the cwd by default. So, if
// the binary is requested with no explicit path (as is the default), we emit to the cache.
@@ -3304,10 +3303,10 @@ fn buildOutputType(
} else emit: {
// If there's a dirname, check that dir exists. This will give a more descriptive error than `Compilation` otherwise would.
if (fs.path.dirname(path)) |dir_path| {
- var dir = fs.cwd().openDir(dir_path, .{}) catch |err| {
+ var dir = Io.Dir.cwd().openDir(io, dir_path, .{}) catch |err| {
fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) });
};
- dir.close();
+ dir.close(io);
}
break :emit .{ .yes_path = path };
},
@@ -3321,18 +3320,18 @@ fn buildOutputType(
};
const default_h_basename = try std.fmt.allocPrint(arena, "{s}.h", .{root_name});
- const emit_h_resolved = emit_h.resolve(default_h_basename, output_to_cache);
+ const emit_h_resolved = emit_h.resolve(io, default_h_basename, output_to_cache);
const default_asm_basename = try std.fmt.allocPrint(arena, "{s}.s", .{root_name});
- const emit_asm_resolved = emit_asm.resolve(default_asm_basename, output_to_cache);
+ const emit_asm_resolved = emit_asm.resolve(io, default_asm_basename, output_to_cache);
const default_llvm_ir_basename = try std.fmt.allocPrint(arena, "{s}.ll", .{root_name});
- const emit_llvm_ir_resolved = emit_llvm_ir.resolve(default_llvm_ir_basename, output_to_cache);
+ const emit_llvm_ir_resolved = emit_llvm_ir.resolve(io, default_llvm_ir_basename, output_to_cache);
const default_llvm_bc_basename = try std.fmt.allocPrint(arena, "{s}.bc", .{root_name});
- const emit_llvm_bc_resolved = emit_llvm_bc.resolve(default_llvm_bc_basename, output_to_cache);
+ const emit_llvm_bc_resolved = emit_llvm_bc.resolve(io, default_llvm_bc_basename, output_to_cache);
- const emit_docs_resolved = emit_docs.resolve("docs", output_to_cache);
+ const emit_docs_resolved = emit_docs.resolve(io, "docs", output_to_cache);
const is_exe_or_dyn_lib = switch (create_module.resolved_options.output_mode) {
.Obj => false,
@@ -3353,7 +3352,7 @@ fn buildOutputType(
const default_implib_basename = try std.fmt.allocPrint(arena, "{s}.lib", .{root_name});
const emit_implib_resolved: Compilation.CreateOptions.Emit = switch (emit_implib) {
.no => .no,
- .yes => emit_implib.resolve(default_implib_basename, output_to_cache),
+ .yes => emit_implib.resolve(io, default_implib_basename, output_to_cache),
.yes_default_path => emit: {
if (output_to_cache != null) break :emit .yes_cache;
const p = try fs.path.join(arena, &.{
@@ -3382,24 +3381,24 @@ fn buildOutputType(
const dump_path = try std.fmt.allocPrint(arena, "tmp" ++ sep ++ "{x}-dump-stdin{s}", .{
std.crypto.random.int(u64), ext.canonicalName(target),
});
- try dirs.local_cache.handle.makePath("tmp");
+ try dirs.local_cache.handle.createDirPath(io, "tmp");
// Note that in one of the happy paths, execve() is used to switch to
// clang in which case any cleanup logic that exists for this temporary
// file will not run and this temp file will be leaked. The filename
// will be a hash of its contents — so multiple invocations of
// `zig cc -` will result in the same temp file name.
- var f = try dirs.local_cache.handle.createFile(dump_path, .{});
- defer f.close();
+ var f = try dirs.local_cache.handle.createFile(io, dump_path, .{});
+ defer f.close(io);
// Re-using the hasher from Cache, since the functional requirements
// for the hashing algorithm here and in the cache are the same.
// We are providing our own cache key, because this file has nothing
// to do with the cache manifest.
- var file_writer = f.writer(&.{});
+ var file_writer = f.writer(io, &.{});
var buffer: [1000]u8 = undefined;
var hasher = file_writer.interface.hashed(Cache.Hasher.init("0123456789abcdef"), &buffer);
- var stdin_reader = fs.File.stdin().readerStreaming(io, &.{});
+ var stdin_reader = Io.File.stdin().readerStreaming(io, &.{});
_ = hasher.writer.sendFileAll(&stdin_reader, .unlimited) catch |err| switch (err) {
error.WriteFailed => fatal("failed to write {s}: {t}", .{ dump_path, file_writer.err.? }),
else => fatal("failed to pipe stdin to {s}: {t}", .{ dump_path, err }),
@@ -3411,7 +3410,7 @@ fn buildOutputType(
const sub_path = try std.fmt.allocPrint(arena, "tmp" ++ sep ++ "{x}-stdin{s}", .{
&bin_digest, ext.canonicalName(target),
});
- try dirs.local_cache.handle.rename(dump_path, sub_path);
+ try dirs.local_cache.handle.rename(dump_path, dirs.local_cache.handle, sub_path, io);
// Convert `sub_path` to be relative to current working directory.
src.src_path = try dirs.local_cache.join(arena, &.{sub_path});
@@ -3630,13 +3629,13 @@ fn buildOutputType(
if (show_builtin) {
const builtin_opts = comp.root_mod.getBuiltinOptions(comp.config);
const source = try builtin_opts.generate(arena);
- return fs.File.stdout().writeAll(source);
+ return Io.File.stdout().writeStreamingAll(io, source);
}
switch (listen) {
.none => {},
.stdio => {
- var stdin_reader = fs.File.stdin().reader(io, &stdin_buffer);
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var stdin_reader = Io.File.stdin().reader(io, &stdin_buffer);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
try serve(
comp,
&stdin_reader.interface,
@@ -3647,7 +3646,7 @@ fn buildOutputType(
all_args,
runtime_args_start,
);
- return cleanExit();
+ return cleanExit(io);
},
.ip4 => |ip4_addr| {
const addr: Io.net.IpAddress = .{ .ip4 = ip4_addr };
@@ -3673,12 +3672,12 @@ fn buildOutputType(
all_args,
runtime_args_start,
);
- return cleanExit();
+ return cleanExit(io);
},
}
{
- const root_prog_node = std.Progress.start(.{
+ const root_prog_node = std.Progress.start(io, .{
.disable_printing = (color == .off),
});
defer root_prog_node.end();
@@ -3756,7 +3755,7 @@ fn buildOutputType(
}
// Skip resource deallocation in release builds; let the OS do it.
- return cleanExit();
+ return cleanExit(io);
}
const CreateModule = struct {
@@ -3927,11 +3926,8 @@ fn createModule(
}
if (target.isMinGW()) {
- const exists = mingw.libExists(arena, target, create_module.dirs.zig_lib, lib_name) catch |err| {
- fatal("failed to check zig installation for DLL import libs: {s}", .{
- @errorName(err),
- });
- };
+ const exists = mingw.libExists(arena, io, target, create_module.dirs.zig_lib, lib_name) catch |err|
+ fatal("failed to check zig installation for DLL import libs: {t}", .{err});
if (exists) {
try create_module.windows_libs.put(arena, lib_name, {});
continue;
@@ -3959,14 +3955,14 @@ fn createModule(
if (fs.path.isAbsolute(lib_dir_arg)) {
const stripped_dir = lib_dir_arg[fs.path.parsePath(lib_dir_arg).root.len..];
const full_path = try fs.path.join(arena, &[_][]const u8{ root, stripped_dir });
- addLibDirectoryWarn(&create_module.lib_directories, full_path);
+ addLibDirectoryWarn(io, &create_module.lib_directories, full_path);
} else {
- addLibDirectoryWarn(&create_module.lib_directories, lib_dir_arg);
+ addLibDirectoryWarn(io, &create_module.lib_directories, lib_dir_arg);
}
}
} else {
for (create_module.lib_dir_args.items) |lib_dir_arg| {
- addLibDirectoryWarn(&create_module.lib_directories, lib_dir_arg);
+ addLibDirectoryWarn(io, &create_module.lib_directories, lib_dir_arg);
}
}
create_module.lib_dir_args = undefined; // From here we use lib_directories instead.
@@ -3989,9 +3985,8 @@ fn createModule(
resolved_target.is_native_os and resolved_target.is_native_abi and
create_module.want_native_include_dirs)
{
- var paths = std.zig.system.NativePaths.detect(arena, target) catch |err| {
- fatal("unable to detect native system paths: {s}", .{@errorName(err)});
- };
+ var paths = std.zig.system.NativePaths.detect(arena, io, target) catch |err|
+ fatal("unable to detect native system paths: {t}", .{err});
for (paths.warnings.items) |warning| {
warn("{s}", .{warning});
}
@@ -4002,38 +3997,35 @@ fn createModule(
try create_module.rpath_list.appendSlice(arena, paths.rpaths.items);
try create_module.lib_directories.ensureUnusedCapacity(arena, paths.lib_dirs.items.len);
- for (paths.lib_dirs.items) |path| addLibDirectoryWarn2(&create_module.lib_directories, path, true);
+ for (paths.lib_dirs.items) |path| addLibDirectoryWarn2(io, &create_module.lib_directories, path, true);
}
if (create_module.libc_paths_file) |paths_file| {
- create_module.libc_installation = LibCInstallation.parse(arena, paths_file, target) catch |err| {
- fatal("unable to parse libc paths file at path {s}: {s}", .{
- paths_file, @errorName(err),
- });
- };
+ create_module.libc_installation = LibCInstallation.parse(arena, io, paths_file, target) catch |err|
+ fatal("unable to parse libc paths file at path {s}: {t}", .{ paths_file, err });
}
if (target.os.tag == .windows and (target.abi == .msvc or target.abi == .itanium) and
any_name_queries_remaining)
{
if (create_module.libc_installation == null) {
- create_module.libc_installation = LibCInstallation.findNative(.{
- .allocator = arena,
+ create_module.libc_installation = LibCInstallation.findNative(arena, io, .{
.verbose = true,
.target = target,
}) catch |err| {
- fatal("unable to find native libc installation: {s}", .{@errorName(err)});
+ fatal("unable to find native libc installation: {t}", .{err});
};
}
try create_module.lib_directories.ensureUnusedCapacity(arena, 2);
- addLibDirectoryWarn(&create_module.lib_directories, create_module.libc_installation.?.msvc_lib_dir.?);
- addLibDirectoryWarn(&create_module.lib_directories, create_module.libc_installation.?.kernel32_lib_dir.?);
+ addLibDirectoryWarn(io, &create_module.lib_directories, create_module.libc_installation.?.msvc_lib_dir.?);
+ addLibDirectoryWarn(io, &create_module.lib_directories, create_module.libc_installation.?.kernel32_lib_dir.?);
}
// Destructively mutates but does not transfer ownership of `unresolved_link_inputs`.
link.resolveInputs(
gpa,
arena,
+ io,
target,
&unresolved_link_inputs,
&create_module.link_inputs,
@@ -4160,7 +4152,9 @@ fn serve(
var child_pid: ?std.process.Child.Id = null;
- const main_progress_node = std.Progress.start(.{});
+ const main_progress_node = std.Progress.start(io, .{});
+ defer main_progress_node.end();
+
const file_system_inputs = comp.file_system_inputs.?;
const IncrementalDebugServer = if (build_options.enable_debug_extensions and !builtin.single_threaded)
@@ -4183,7 +4177,7 @@ fn serve(
defer if (comp.debugIncremental()) ids.mutex.unlock(io);
switch (hdr.tag) {
- .exit => return cleanExit(),
+ .exit => return cleanExit(io),
.update => {
tracy.frameMark();
file_system_inputs.clearRetainingCapacity();
@@ -4436,12 +4430,12 @@ fn runOrTest(
// the error message and invocation below.
if (process.can_execv and arg_mode == .run) {
// execv releases the locks; no need to destroy the Compilation here.
- std.debug.lockStdErr();
+ _ = try io.lockStderr(&.{}, .no_color);
const err = process.execve(gpa, argv.items, &env_map);
- std.debug.unlockStdErr();
+ io.unlockStderr();
try warnAboutForeignBinaries(io, arena, arg_mode, target, link_libc);
const cmd = try std.mem.join(arena, " ", argv.items);
- fatal("the following command failed to execve with '{s}':\n{s}", .{ @errorName(err), cmd });
+ fatal("the following command failed to execve with '{t}':\n{s}", .{ err, cmd });
} else if (process.can_spawn) {
var child = std.process.Child.init(argv.items, gpa);
child.env_map = &env_map;
@@ -4455,9 +4449,9 @@ fn runOrTest(
comp_destroyed.* = true;
const term_result = t: {
- std.debug.lockStdErr();
- defer std.debug.unlockStdErr();
- break :t child.spawnAndWait();
+ _ = try io.lockStderr(&.{}, .no_color);
+ defer io.unlockStderr();
+ break :t child.spawnAndWait(io);
};
const term = term_result catch |err| {
try warnAboutForeignBinaries(io, arena, arg_mode, target, link_libc);
@@ -4469,7 +4463,7 @@ fn runOrTest(
switch (term) {
.Exited => |code| {
if (code == 0) {
- return cleanExit();
+ return cleanExit(io);
} else {
process.exit(code);
}
@@ -4483,7 +4477,7 @@ fn runOrTest(
switch (term) {
.Exited => |code| {
if (code == 0) {
- return cleanExit();
+ return cleanExit(io);
} else {
const cmd = try std.mem.join(arena, " ", argv.items);
fatal("the following test command failed with exit code {d}:\n{s}", .{ code, cmd });
@@ -4512,6 +4506,7 @@ fn runOrTestHotSwap(
all_args: []const []const u8,
runtime_args_start: ?usize,
) !std.process.Child.Id {
+ const io = comp.io;
const lf = comp.bin_file.?;
const exe_path = switch (builtin.target.os.tag) {
@@ -4520,7 +4515,7 @@ fn runOrTestHotSwap(
// tmp zig-cache and use it to spawn the child process. This way we are free to update
// the binary with each requested hot update.
.windows => blk: {
- try lf.emit.root_dir.handle.copyFile(lf.emit.sub_path, comp.dirs.local_cache.handle, lf.emit.sub_path, .{});
+ try lf.emit.root_dir.handle.copyFile(lf.emit.sub_path, comp.dirs.local_cache.handle, lf.emit.sub_path, io, .{});
break :blk try fs.path.join(gpa, &.{ comp.dirs.local_cache.path orelse ".", lf.emit.sub_path });
},
@@ -4593,7 +4588,7 @@ fn runOrTestHotSwap(
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
- try child.spawn();
+ try child.spawn(io);
return child.id;
},
@@ -4604,6 +4599,8 @@ const UpdateModuleError = Compilation.UpdateError || error{
/// The update caused compile errors. The error bundle has already been
/// reported to the user by being rendered to stderr.
CompileErrorsReported,
+ /// Error occurred printing compilation errors to stderr.
+ PrintingErrorsFailed,
};
fn updateModule(comp: *Compilation, color: Color, prog_node: std.Progress.Node) UpdateModuleError!void {
try comp.update(prog_node);
@@ -4612,7 +4609,11 @@ fn updateModule(comp: *Compilation, color: Color, prog_node: std.Progress.Node)
defer errors.deinit(comp.gpa);
if (errors.errorMessageCount() > 0) {
- errors.renderToStdErr(.{}, color);
+ const io = comp.io;
+ errors.renderToStderr(io, .{}, color) catch |err| switch (err) {
+ error.Canceled => |e| return e,
+ else => return error.PrintingErrorsFailed,
+ };
return error.CompileErrorsReported;
}
}
@@ -4665,7 +4666,7 @@ fn cmdTranslateC(
return;
} else {
const color: Color = .auto;
- result.errors.renderToStdErr(.{}, color);
+ result.errors.renderToStderr(io, .{}, color) catch {};
process.exit(1);
}
}
@@ -4680,7 +4681,7 @@ fn cmdTranslateC(
} else {
const hex_digest = Cache.binToHex(result.digest);
const out_zig_path = try fs.path.join(arena, &.{ "o", &hex_digest, translated_basename });
- const zig_file = comp.dirs.local_cache.handle.openFile(out_zig_path, .{}) catch |err| {
+ const zig_file = comp.dirs.local_cache.handle.openFile(io, out_zig_path, .{}) catch |err| {
const path = comp.dirs.local_cache.path orelse ".";
fatal("unable to open cached translated zig file '{s}{s}{s}': {s}", .{
path,
@@ -4689,12 +4690,12 @@ fn cmdTranslateC(
@errorName(err),
});
};
- defer zig_file.close();
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ defer zig_file.close(io);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
var file_reader = zig_file.reader(io, &.{});
_ = try stdout_writer.interface.sendFileAll(&file_reader, .unlimited);
try stdout_writer.interface.flush();
- return cleanExit();
+ return cleanExit(io);
}
}
@@ -4728,7 +4729,7 @@ const usage_init =
\\
;
-fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
+fn cmdInit(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !void {
dev.check(.init_command);
var template: enum { example, minimal } = .example;
@@ -4740,8 +4741,8 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
if (mem.eql(u8, arg, "-m") or mem.eql(u8, arg, "--minimal")) {
template = .minimal;
} else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_init);
- return cleanExit();
+ try Io.File.stdout().writeStreamingAll(io, usage_init);
+ return cleanExit(io);
} else {
fatal("unrecognized parameter: '{s}'", .{arg});
}
@@ -4759,8 +4760,8 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
switch (template) {
.example => {
- var templates = findTemplates(gpa, arena);
- defer templates.deinit();
+ var templates = findTemplates(gpa, arena, io);
+ defer templates.deinit(io);
const s = fs.path.sep_str;
const template_paths = [_][]const u8{
@@ -4772,7 +4773,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var ok_count: usize = 0;
for (template_paths) |template_path| {
- if (templates.write(arena, fs.cwd(), sanitized_root_name, template_path, fingerprint)) |_| {
+ if (templates.write(arena, io, Io.Dir.cwd(), sanitized_root_name, template_path, fingerprint)) |_| {
std.log.info("created {s}", .{template_path});
ok_count += 1;
} else |err| switch (err) {
@@ -4786,10 +4787,10 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
if (ok_count == template_paths.len) {
std.log.info("see `zig build --help` for a menu of options", .{});
}
- return cleanExit();
+ return cleanExit(io);
},
.minimal => {
- writeSimpleTemplateFile(Package.Manifest.basename,
+ writeSimpleTemplateFile(io, Package.Manifest.basename,
\\.{{
\\ .name = .{s},
\\ .version = "0.0.1",
@@ -4806,7 +4807,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
else => fatal("failed to create '{s}': {s}", .{ Package.Manifest.basename, @errorName(err) }),
error.PathAlreadyExists => fatal("refusing to overwrite '{s}'", .{Package.Manifest.basename}),
};
- writeSimpleTemplateFile(Package.build_zig_basename,
+ writeSimpleTemplateFile(io, Package.build_zig_basename,
\\const std = @import("std");
\\
\\pub fn build(b: *std.Build) void {{
@@ -4819,11 +4820,11 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
// their `build.zig.zon` *after* writing their `build.zig`. So this one isn't fatal.
error.PathAlreadyExists => {
std.log.info("successfully populated '{s}', preserving existing '{s}'", .{ Package.Manifest.basename, Package.build_zig_basename });
- return cleanExit();
+ return cleanExit(io);
},
};
std.log.info("successfully populated '{s}' and '{s}'", .{ Package.Manifest.basename, Package.build_zig_basename });
- return cleanExit();
+ return cleanExit(io);
},
}
}
@@ -4894,7 +4895,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
const argv_index_exe = child_argv.items.len;
_ = try child_argv.addOne();
- const self_exe_path = try fs.selfExePathAlloc(arena);
+ const self_exe_path = try process.executablePathAlloc(io, arena);
try child_argv.append(self_exe_path);
const argv_index_zig_lib_dir = child_argv.items.len;
@@ -5075,7 +5076,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
const work_around_btrfs_bug = native_os == .linux and
EnvVar.ZIG_BTRFS_WORKAROUND.isSet();
- const root_prog_node = std.Progress.start(.{
+ const root_prog_node = std.Progress.start(io, .{
.disable_printing = (color == .off),
.root_name = "Compile Build Script",
});
@@ -5110,14 +5111,14 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
const paths_file = debug_libc_paths_file orelse break :lci null;
if (!build_options.enable_debug_extensions) unreachable;
const lci = try arena.create(LibCInstallation);
- lci.* = try .parse(arena, paths_file, &resolved_target.result);
+ lci.* = try .parse(arena, io, paths_file, &resolved_target.result);
break :lci lci;
};
process.raiseFileDescriptorLimit();
const cwd_path = try introspect.getResolvedCwd(arena);
- const build_root = try findBuildRoot(arena, .{
+ const build_root = try findBuildRoot(arena, io, .{
.cwd_path = cwd_path,
.build_file = build_file,
});
@@ -5125,6 +5126,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
// This `init` calls `fatal` on error.
var dirs: Compilation.Directories = .init(
arena,
+ io,
override_lib_dir,
override_global_cache_dir,
.{ .override = path: {
@@ -5134,7 +5136,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
{},
self_exe_path,
);
- defer dirs.deinit();
+ defer dirs.deinit(io);
child_argv.items[argv_index_zig_lib_dir] = dirs.zig_lib.path orelse cwd_path;
child_argv.items[argv_index_build_file] = build_root.directory.path orelse cwd_path;
@@ -5203,8 +5205,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
.parent = root_mod,
});
- var cleanup_build_dir: ?fs.Dir = null;
- defer if (cleanup_build_dir) |*dir| dir.close();
+ var cleanup_build_dir: ?Io.Dir = null;
+ defer if (cleanup_build_dir) |*dir| dir.close(io);
if (dev.env.supports(.fetch_command)) {
const fetch_prog_node = root_prog_node.start("Fetch Packages", 0);
@@ -5226,7 +5228,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
if (system_pkg_dir_path) |p| {
job_queue.global_cache = .{
.path = p,
- .handle = fs.cwd().openDir(p, .{}) catch |err| {
+ .handle = Io.Dir.cwd().openDir(io, p, .{}) catch |err| {
fatal("unable to open system package directory '{s}': {s}", .{
p, @errorName(err),
});
@@ -5285,17 +5287,18 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
if (fetch.error_bundle.root_list.items.len > 0) {
var errors = try fetch.error_bundle.toOwnedBundle("");
- errors.renderToStdErr(.{}, color);
+ errors.renderToStderr(io, .{}, color) catch {};
process.exit(1);
}
- if (fetch_only) return cleanExit();
+ if (fetch_only) return cleanExit(io);
var source_buf = std.array_list.Managed(u8).init(gpa);
defer source_buf.deinit();
try job_queue.createDependenciesSource(&source_buf);
const deps_mod = try createDependenciesModule(
arena,
+ io,
source_buf.items,
root_mod,
dirs,
@@ -5357,6 +5360,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
}
} else try createEmptyDependenciesModule(
arena,
+ io,
root_mod,
dirs,
config,
@@ -5415,16 +5419,15 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
child.stderr_behavior = .Inherit;
const term = t: {
- std.debug.lockStdErr();
- defer std.debug.unlockStdErr();
- break :t child.spawnAndWait() catch |err| {
- fatal("failed to spawn build runner {s}: {s}", .{ child_argv.items[0], @errorName(err) });
- };
+ _ = try io.lockStderr(&.{}, .no_color);
+ defer io.unlockStderr();
+ break :t child.spawnAndWait(io) catch |err|
+ fatal("failed to spawn build runner {s}: {t}", .{ child_argv.items[0], err });
};
switch (term) {
.Exited => |code| {
- if (code == 0) return cleanExit();
+ if (code == 0) return cleanExit(io);
// Indicates that the build runner has reported compile errors
// and this parent process does not need to report any further
// diagnostics.
@@ -5437,12 +5440,12 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
// that are missing.
const s = fs.path.sep_str;
const tmp_sub_path = "tmp" ++ s ++ results_tmp_file_nonce;
- const stdout = dirs.local_cache.handle.readFileAlloc(tmp_sub_path, arena, .limited(50 * 1024 * 1024)) catch |err| {
+ const stdout = dirs.local_cache.handle.readFileAlloc(io, tmp_sub_path, arena, .limited(50 * 1024 * 1024)) catch |err| {
fatal("unable to read results of configure phase from '{f}{s}': {s}", .{
dirs.local_cache, tmp_sub_path, @errorName(err),
});
};
- dirs.local_cache.handle.deleteFile(tmp_sub_path) catch {};
+ dirs.local_cache.handle.deleteFile(io, tmp_sub_path) catch {};
var it = mem.splitScalar(u8, stdout, '\n');
var any_errors = false;
@@ -5511,9 +5514,10 @@ fn jitCmd(
dev.check(.jit_command);
const color: Color = .auto;
- const root_prog_node = if (options.progress_node) |node| node else std.Progress.start(.{
+ const root_prog_node = if (options.progress_node) |node| node else std.Progress.start(io, .{
.disable_printing = (color == .off),
});
+ defer root_prog_node.end();
const target_query: std.Target.Query = .{};
const resolved_target: Package.Module.ResolvedTarget = .{
@@ -5523,9 +5527,8 @@ fn jitCmd(
.is_explicit_dynamic_linker = false,
};
- const self_exe_path = fs.selfExePathAlloc(arena) catch |err| {
- fatal("unable to find self exe path: {s}", .{@errorName(err)});
- };
+ const self_exe_path = process.executablePathAlloc(io, arena) catch |err|
+ fatal("unable to find self exe path: {t}", .{err});
const optimize_mode: std.builtin.OptimizeMode = if (EnvVar.ZIG_DEBUG_CMD.isSet())
.Debug
@@ -5538,13 +5541,14 @@ fn jitCmd(
// This `init` calls `fatal` on error.
var dirs: Compilation.Directories = .init(
arena,
+ io,
override_lib_dir,
override_global_cache_dir,
.global,
if (native_os == .wasi) wasi_preopens,
self_exe_path,
);
- defer dirs.deinit();
+ defer dirs.deinit(io);
const thread_limit = @min(
@max(std.Thread.getCpuCount() catch 1, 1),
@@ -5623,7 +5627,7 @@ fn jitCmd(
defer comp.destroy();
if (options.server) {
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
var server: std.zig.Server = .{
.out = &stdout_writer.interface,
.in = undefined, // won't be receiving messages
@@ -5683,19 +5687,23 @@ fn jitCmd(
child.stdout_behavior = if (options.capture == null) .Inherit else .Pipe;
child.stderr_behavior = .Inherit;
- try child.spawn();
+ const term = t: {
+ _ = try io.lockStderr(&.{}, .no_color);
+ defer io.unlockStderr();
+ try child.spawn(io);
- if (options.capture) |ptr| {
- var stdout_reader = child.stdout.?.readerStreaming(io, &.{});
- ptr.* = try stdout_reader.interface.allocRemaining(arena, .limited(std.math.maxInt(u32)));
- }
+ if (options.capture) |ptr| {
+ var stdout_reader = child.stdout.?.readerStreaming(io, &.{});
+ ptr.* = try stdout_reader.interface.allocRemaining(arena, .limited(std.math.maxInt(u32)));
+ }
- const term = try child.wait();
+ break :t try child.wait(io);
+ };
switch (term) {
.Exited => |code| {
if (code == 0) {
if (options.capture != null) return;
- return cleanExit();
+ return cleanExit(io);
}
const cmd = try std.mem.join(arena, " ", child_argv.items);
fatal("the following build command failed with exit code {d}:\n{s}", .{ code, cmd });
@@ -5818,9 +5826,9 @@ pub fn lldMain(
const ArgIteratorResponseFile = process.ArgIteratorGeneral(.{ .comments = true, .single_quotes = true });
/// Initialize the arguments from a Response File. "*.rsp"
-fn initArgIteratorResponseFile(allocator: Allocator, resp_file_path: []const u8) !ArgIteratorResponseFile {
+fn initArgIteratorResponseFile(allocator: Allocator, io: Io, resp_file_path: []const u8) !ArgIteratorResponseFile {
const max_bytes = 10 * 1024 * 1024; // 10 MiB of command line arguments is a reasonable limit
- const cmd_line = try fs.cwd().readFileAlloc(resp_file_path, allocator, .limited(max_bytes));
+ const cmd_line = try Io.Dir.cwd().readFileAlloc(io, resp_file_path, allocator, .limited(max_bytes));
errdefer allocator.free(cmd_line);
return ArgIteratorResponseFile.initTakeOwnership(allocator, cmd_line);
@@ -5948,7 +5956,7 @@ pub const ClangArgIterator = struct {
};
}
- fn next(self: *ClangArgIterator) !void {
+ fn next(self: *ClangArgIterator, io: Io) !void {
assert(self.has_next);
assert(self.next_index < self.argv.len);
// In this state we know that the parameter we are looking at is a root parameter
@@ -5966,10 +5974,8 @@ pub const ClangArgIterator = struct {
const arena = self.arena;
const resp_file_path = arg[1..];
- self.arg_iterator_response_file =
- initArgIteratorResponseFile(arena, resp_file_path) catch |err| {
- fatal("unable to read response file '{s}': {s}", .{ resp_file_path, @errorName(err) });
- };
+ self.arg_iterator_response_file = initArgIteratorResponseFile(arena, io, resp_file_path) catch |err|
+ fatal("unable to read response file '{s}': {t}", .{ resp_file_path, err });
// NOTE: The ArgIteratorResponseFile returns tokens from next() that are slices of an
// internal buffer. This internal buffer is arena allocated, so it is not cleaned up here.
@@ -6156,8 +6162,8 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_ast_check);
- return cleanExit();
+ try Io.File.stdout().writeStreamingAll(io, usage_ast_check);
+ return cleanExit(io);
} else if (mem.eql(u8, arg, "-t")) {
want_output_text = true;
} else if (mem.eql(u8, arg, "--zon")) {
@@ -6184,12 +6190,12 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
const display_path = zig_source_path orelse "<stdin>";
const source: [:0]const u8 = s: {
var f = if (zig_source_path) |p| file: {
- break :file fs.cwd().openFile(p, .{}) catch |err| {
+ break :file Io.Dir.cwd().openFile(io, p, .{}) catch |err| {
fatal("unable to open file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) });
};
- } else fs.File.stdin();
- defer if (zig_source_path != null) f.close();
- var file_reader: fs.File.Reader = f.reader(io, &stdin_buffer);
+ } else Io.File.stdin();
+ defer if (zig_source_path != null) f.close(io);
+ var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer);
break :s std.zig.readSourceFileToEndAlloc(arena, &file_reader) catch |err| {
fatal("unable to load file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) });
};
@@ -6207,7 +6213,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
const tree = try Ast.parse(arena, source, mode);
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout_bw = &stdout_writer.interface;
switch (mode) {
.zig => {
@@ -6218,7 +6224,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
try wip_errors.init(arena);
try wip_errors.addZirErrorMessages(zir, tree, source, display_path);
var error_bundle = try wip_errors.toOwnedBundle("");
- error_bundle.renderToStdErr(.{}, color);
+ try error_bundle.renderToStderr(io, .{}, color);
if (zir.loweringFailed()) {
process.exit(1);
}
@@ -6228,7 +6234,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
if (zir.hasCompileErrors()) {
process.exit(1);
} else {
- return cleanExit();
+ return cleanExit(io);
}
}
if (!build_options.enable_debug_extensions) {
@@ -6279,7 +6285,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
if (zir.hasCompileErrors()) {
process.exit(1);
} else {
- return cleanExit();
+ return cleanExit(io);
}
},
.zon => {
@@ -6289,12 +6295,12 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
try wip_errors.init(arena);
try wip_errors.addZoirErrorMessages(zoir, tree, source, display_path);
var error_bundle = try wip_errors.toOwnedBundle("");
- error_bundle.renderToStdErr(.{}, color);
+ error_bundle.renderToStderr(io, .{}, color) catch {};
process.exit(1);
}
if (!want_output_text) {
- return cleanExit();
+ return cleanExit(io);
}
if (!build_options.enable_debug_extensions) {
@@ -6303,7 +6309,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
try @import("print_zoir.zig").renderToWriter(zoir, arena, stdout_bw);
try stdout_bw.flush();
- return cleanExit();
+ return cleanExit(io);
},
}
}
@@ -6330,8 +6336,8 @@ fn cmdDetectCpu(io: Io, args: []const []const u8) !void {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(detect_cpu_usage);
- return cleanExit();
+ try Io.File.stdout().writeStreamingAll(io, detect_cpu_usage);
+ return cleanExit(io);
} else if (mem.eql(u8, arg, "--llvm")) {
use_llvm = true;
} else {
@@ -6351,10 +6357,10 @@ fn cmdDetectCpu(io: Io, args: []const []const u8) !void {
const name = llvm.GetHostCPUName() orelse fatal("LLVM could not figure out the host cpu name", .{});
const features = llvm.GetHostCPUFeatures() orelse fatal("LLVM could not figure out the host cpu feature set", .{});
const cpu = try detectNativeCpuWithLLVM(builtin.cpu.arch, name, features);
- try printCpu(cpu);
+ try printCpu(io, cpu);
} else {
const host_target = std.zig.resolveTargetQueryOrFatal(io, .{});
- try printCpu(host_target.cpu);
+ try printCpu(io, host_target.cpu);
}
}
@@ -6421,8 +6427,8 @@ fn detectNativeCpuWithLLVM(
return result;
}
-fn printCpu(cpu: std.Target.Cpu) !void {
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+fn printCpu(io: Io, cpu: std.Target.Cpu) !void {
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout_bw = &stdout_writer.interface;
if (cpu.model.llvm_name) |llvm_name| {
@@ -6444,6 +6450,7 @@ fn printCpu(cpu: std.Target.Cpu) !void {
fn cmdDumpLlvmInts(
gpa: Allocator,
arena: Allocator,
+ io: Io,
args: []const []const u8,
) !void {
dev.check(.llvm_ints_command);
@@ -6471,7 +6478,7 @@ fn cmdDumpLlvmInts(
const dl = tm.createTargetDataLayout();
const context = llvm.Context.create();
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout_bw = &stdout_writer.interface;
for ([_]u16{ 1, 8, 16, 32, 64, 128, 256 }) |bits| {
const int_type = context.intType(bits);
@@ -6480,7 +6487,7 @@ fn cmdDumpLlvmInts(
}
try stdout_bw.flush();
- return cleanExit();
+ return cleanExit(io);
}
/// This is only enabled for debug builds.
@@ -6491,13 +6498,13 @@ fn cmdDumpZir(arena: Allocator, io: Io, args: []const []const u8) !void {
const cache_file = args[0];
- var f = fs.cwd().openFile(cache_file, .{}) catch |err| {
+ var f = Io.Dir.cwd().openFile(io, cache_file, .{}) catch |err| {
fatal("unable to open zir cache file for dumping '{s}': {s}", .{ cache_file, @errorName(err) });
};
- defer f.close();
+ defer f.close(io);
const zir = try Zcu.loadZirCache(arena, io, f);
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout_bw = &stdout_writer.interface;
{
const instruction_bytes = zir.instructions.len *
@@ -6538,18 +6545,18 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void {
const new_source_path = args[1];
const old_source = source: {
- var f = fs.cwd().openFile(old_source_path, .{}) catch |err|
+ var f = Io.Dir.cwd().openFile(io, old_source_path, .{}) catch |err|
fatal("unable to open old source file '{s}': {s}", .{ old_source_path, @errorName(err) });
- defer f.close();
- var file_reader: fs.File.Reader = f.reader(io, &stdin_buffer);
+ defer f.close(io);
+ var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer);
break :source std.zig.readSourceFileToEndAlloc(arena, &file_reader) catch |err|
fatal("unable to read old source file '{s}': {s}", .{ old_source_path, @errorName(err) });
};
const new_source = source: {
- var f = fs.cwd().openFile(new_source_path, .{}) catch |err|
+ var f = Io.Dir.cwd().openFile(io, new_source_path, .{}) catch |err|
fatal("unable to open new source file '{s}': {s}", .{ new_source_path, @errorName(err) });
- defer f.close();
- var file_reader: fs.File.Reader = f.reader(io, &stdin_buffer);
+ defer f.close(io);
+ var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer);
break :source std.zig.readSourceFileToEndAlloc(arena, &file_reader) catch |err|
fatal("unable to read new source file '{s}': {s}", .{ new_source_path, @errorName(err) });
};
@@ -6562,7 +6569,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void {
try wip_errors.init(arena);
try wip_errors.addZirErrorMessages(old_zir, old_tree, old_source, old_source_path);
var error_bundle = try wip_errors.toOwnedBundle("");
- error_bundle.renderToStdErr(.{}, color);
+ error_bundle.renderToStderr(io, .{}, color) catch {};
process.exit(1);
}
@@ -6574,14 +6581,14 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void {
try wip_errors.init(arena);
try wip_errors.addZirErrorMessages(new_zir, new_tree, new_source, new_source_path);
var error_bundle = try wip_errors.toOwnedBundle("");
- error_bundle.renderToStdErr(.{}, color);
+ error_bundle.renderToStderr(io, .{}, color) catch {};
process.exit(1);
}
var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .empty;
try Zcu.mapOldZirToNew(arena, old_zir, new_zir, &inst_map);
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout_bw = &stdout_writer.interface;
{
try stdout_bw.print("Instruction mappings:\n", .{});
@@ -6623,7 +6630,7 @@ fn warnAboutForeignBinaries(
const host_query: std.Target.Query = .{};
const host_target = std.zig.resolveTargetQueryOrFatal(io, host_query);
- switch (std.zig.system.getExternalExecutor(&host_target, target, .{ .link_libc = link_libc })) {
+ switch (std.zig.system.getExternalExecutor(io, &host_target, target, .{ .link_libc = link_libc })) {
.native => return,
.rosetta => {
const host_name = try host_target.zigTriple(arena);
@@ -6829,6 +6836,7 @@ const ClangSearchSanitizer = struct {
};
fn accessFrameworkPath(
+ io: Io,
test_path: *std.array_list.Managed(u8),
checked_paths: *std.array_list.Managed(u8),
framework_dir_path: []const u8,
@@ -6842,7 +6850,7 @@ fn accessFrameworkPath(
framework_dir_path, framework_name, framework_name, ext,
});
try checked_paths.print("\n {s}", .{test_path.items});
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
+ Io.Dir.cwd().access(io, test_path.items, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| fatal("unable to search for {s} framework '{s}': {s}", .{
ext, test_path.items, @errorName(e),
@@ -6912,8 +6920,8 @@ fn cmdFetch(
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_fetch);
- return cleanExit();
+ try Io.File.stdout().writeStreamingAll(io, usage_fetch);
+ return cleanExit(io);
} else if (mem.eql(u8, arg, "--global-cache-dir")) {
if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
i += 1;
@@ -6946,7 +6954,7 @@ fn cmdFetch(
try http_client.initDefaultProxies(arena);
- var root_prog_node = std.Progress.start(.{
+ var root_prog_node = std.Progress.start(io, .{
.root_name = "Fetch",
});
defer root_prog_node.end();
@@ -6954,11 +6962,11 @@ fn cmdFetch(
var global_cache_directory: Directory = l: {
const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena);
break :l .{
- .handle = try fs.cwd().makeOpenPath(p, .{}),
+ .handle = try Io.Dir.cwd().createDirPathOpen(io, p, .{}),
.path = p,
};
};
- defer global_cache_directory.handle.close();
+ defer global_cache_directory.handle.close(io);
var job_queue: Package.Fetch.JobQueue = .{
.io = io,
@@ -7009,7 +7017,7 @@ fn cmdFetch(
if (fetch.error_bundle.root_list.items.len > 0) {
var errors = try fetch.error_bundle.toOwnedBundle("");
- errors.renderToStdErr(.{}, color);
+ errors.renderToStderr(io, .{}, color) catch {};
process.exit(1);
}
@@ -7021,10 +7029,10 @@ fn cmdFetch(
const name = switch (save) {
.no => {
- var stdout = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout = Io.File.stdout().writerStreaming(io, &stdout_buffer);
try stdout.interface.print("{s}\n", .{package_hash_slice});
try stdout.interface.flush();
- return cleanExit();
+ return cleanExit(io);
},
.yes, .exact => |name| name: {
if (name) |n| break :name n;
@@ -7036,14 +7044,14 @@ fn cmdFetch(
const cwd_path = try introspect.getResolvedCwd(arena);
- var build_root = try findBuildRoot(arena, .{
+ var build_root = try findBuildRoot(arena, io, .{
.cwd_path = cwd_path,
});
- defer build_root.deinit();
+ defer build_root.deinit(io);
// The name to use in case the manifest file needs to be created now.
const init_root_name = fs.path.basename(build_root.directory.path orelse cwd_path);
- var manifest, var ast = try loadManifest(gpa, arena, .{
+ var manifest, var ast = try loadManifest(gpa, arena, io, .{
.root_name = try sanitizeExampleName(arena, init_root_name),
.dir = build_root.directory.handle,
.color = color,
@@ -7159,15 +7167,16 @@ fn cmdFetch(
try ast.render(gpa, &aw.writer, fixups);
const rendered = aw.written();
- build_root.directory.handle.writeFile(.{ .sub_path = Package.Manifest.basename, .data = rendered }) catch |err| {
+ build_root.directory.handle.writeFile(io, .{ .sub_path = Package.Manifest.basename, .data = rendered }) catch |err| {
fatal("unable to write {s} file: {t}", .{ Package.Manifest.basename, err });
};
- return cleanExit();
+ return cleanExit(io);
}
fn createEmptyDependenciesModule(
arena: Allocator,
+ io: Io,
main_mod: *Package.Module,
dirs: Compilation.Directories,
global_options: Compilation.Config,
@@ -7176,6 +7185,7 @@ fn createEmptyDependenciesModule(
try Package.Fetch.JobQueue.createEmptyDependenciesSource(&source);
_ = try createDependenciesModule(
arena,
+ io,
source.items,
main_mod,
dirs,
@@ -7187,6 +7197,7 @@ fn createEmptyDependenciesModule(
/// build runner to obtain via `@import("@dependencies")`.
fn createDependenciesModule(
arena: Allocator,
+ io: Io,
source: []const u8,
main_mod: *Package.Module,
dirs: Compilation.Directories,
@@ -7197,9 +7208,9 @@ fn createDependenciesModule(
const rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
{
- var tmp_dir = try dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{});
- defer tmp_dir.close();
- try tmp_dir.writeFile(.{ .sub_path = basename, .data = source });
+ var tmp_dir = try dirs.local_cache.handle.createDirPathOpen(io, tmp_dir_sub_path, .{});
+ defer tmp_dir.close(io);
+ try tmp_dir.writeFile(io, .{ .sub_path = basename, .data = source });
}
var hh: Cache.HashHelper = .{};
@@ -7208,11 +7219,7 @@ fn createDependenciesModule(
const hex_digest = hh.final();
const o_dir_sub_path = try arena.dupe(u8, "o" ++ fs.path.sep_str ++ hex_digest);
- try Package.Fetch.renameTmpIntoCache(
- dirs.local_cache.handle,
- tmp_dir_sub_path,
- o_dir_sub_path,
- );
+ try Package.Fetch.renameTmpIntoCache(io, dirs.local_cache.handle, tmp_dir_sub_path, o_dir_sub_path);
const deps_mod = try Package.Module.create(arena, .{
.paths = .{
@@ -7232,10 +7239,10 @@ fn createDependenciesModule(
const BuildRoot = struct {
directory: Cache.Directory,
build_zig_basename: []const u8,
- cleanup_build_dir: ?fs.Dir,
+ cleanup_build_dir: ?Io.Dir,
- fn deinit(br: *BuildRoot) void {
- if (br.cleanup_build_dir) |*dir| dir.close();
+ fn deinit(br: *BuildRoot, io: Io) void {
+ if (br.cleanup_build_dir) |*dir| dir.close(io);
br.* = undefined;
}
};
@@ -7245,7 +7252,7 @@ const FindBuildRootOptions = struct {
cwd_path: ?[]const u8 = null,
};
-fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
+fn findBuildRoot(arena: Allocator, io: Io, options: FindBuildRootOptions) !BuildRoot {
const cwd_path = options.cwd_path orelse try introspect.getResolvedCwd(arena);
const build_zig_basename = if (options.build_file) |bf|
fs.path.basename(bf)
@@ -7254,7 +7261,7 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
if (options.build_file) |bf| {
if (fs.path.dirname(bf)) |dirname| {
- const dir = fs.cwd().openDir(dirname, .{}) catch |err| {
+ const dir = Io.Dir.cwd().openDir(io, dirname, .{}) catch |err| {
fatal("unable to open directory to build file from argument 'build-file', '{s}': {s}", .{ dirname, @errorName(err) });
};
return .{
@@ -7266,7 +7273,7 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
return .{
.build_zig_basename = build_zig_basename,
- .directory = .{ .path = null, .handle = fs.cwd() },
+ .directory = .{ .path = null, .handle = Io.Dir.cwd() },
.cleanup_build_dir = null,
};
}
@@ -7274,8 +7281,8 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
var dirname: []const u8 = cwd_path;
while (true) {
const joined_path = try fs.path.join(arena, &[_][]const u8{ dirname, build_zig_basename });
- if (fs.cwd().access(joined_path, .{})) |_| {
- const dir = fs.cwd().openDir(dirname, .{}) catch |err| {
+ if (Io.Dir.cwd().access(io, joined_path, .{})) |_| {
+ const dir = Io.Dir.cwd().openDir(io, dirname, .{}) catch |err| {
fatal("unable to open directory while searching for build.zig file, '{s}': {s}", .{ dirname, @errorName(err) });
};
return .{
@@ -7304,17 +7311,19 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
const LoadManifestOptions = struct {
root_name: []const u8,
- dir: fs.Dir,
+ dir: Io.Dir,
color: Color,
};
fn loadManifest(
gpa: Allocator,
arena: Allocator,
+ io: Io,
options: LoadManifestOptions,
) !struct { Package.Manifest, Ast } {
const manifest_bytes = while (true) {
break options.dir.readFileAllocOptions(
+ io,
Package.Manifest.basename,
arena,
.limited(Package.Manifest.max_bytes),
@@ -7322,7 +7331,7 @@ fn loadManifest(
0,
) catch |err| switch (err) {
error.FileNotFound => {
- writeSimpleTemplateFile(Package.Manifest.basename,
+ writeSimpleTemplateFile(io, Package.Manifest.basename,
\\.{{
\\ .name = .{s},
\\ .version = "{s}",
@@ -7348,7 +7357,7 @@ fn loadManifest(
errdefer ast.deinit(gpa);
if (ast.errors.len > 0) {
- try std.zig.printAstErrorsToStderr(gpa, ast, Package.Manifest.basename, options.color);
+ try std.zig.printAstErrorsToStderr(gpa, io, ast, Package.Manifest.basename, options.color);
process.exit(2);
}
@@ -7365,7 +7374,7 @@ fn loadManifest(
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, options.color);
+ error_bundle.renderToStderr(io, .{}, options.color) catch {};
process.exit(2);
}
@@ -7374,12 +7383,12 @@ fn loadManifest(
const Templates = struct {
zig_lib_directory: Cache.Directory,
- dir: fs.Dir,
+ dir: Io.Dir,
buffer: std.array_list.Managed(u8),
- fn deinit(templates: *Templates) void {
- templates.zig_lib_directory.handle.close();
- templates.dir.close();
+ fn deinit(templates: *Templates, io: Io) void {
+ templates.zig_lib_directory.handle.close(io);
+ templates.dir.close(io);
templates.buffer.deinit();
templates.* = undefined;
}
@@ -7387,20 +7396,21 @@ const Templates = struct {
fn write(
templates: *Templates,
arena: Allocator,
- out_dir: fs.Dir,
+ io: Io,
+ out_dir: Io.Dir,
root_name: []const u8,
template_path: []const u8,
fingerprint: Package.Fingerprint,
) !void {
if (fs.path.dirname(template_path)) |dirname| {
- out_dir.makePath(dirname) catch |err| {
- fatal("unable to make path '{s}': {s}", .{ dirname, @errorName(err) });
+ out_dir.createDirPath(io, dirname) catch |err| {
+ fatal("unable to make path '{s}': {t}", .{ dirname, err });
};
}
const max_bytes = 10 * 1024 * 1024;
- const contents = templates.dir.readFileAlloc(template_path, arena, .limited(max_bytes)) catch |err| {
- fatal("unable to read template file '{s}': {s}", .{ template_path, @errorName(err) });
+ const contents = templates.dir.readFileAlloc(io, template_path, arena, .limited(max_bytes)) catch |err| {
+ fatal("unable to read template file '{s}': {t}", .{ template_path, err });
};
templates.buffer.clearRetainingCapacity();
try templates.buffer.ensureUnusedCapacity(contents.len);
@@ -7428,39 +7438,39 @@ const Templates = struct {
i += 1;
}
- return out_dir.writeFile(.{
+ return out_dir.writeFile(io, .{
.sub_path = template_path,
.data = templates.buffer.items,
.flags = .{ .exclusive = true },
});
}
};
-fn writeSimpleTemplateFile(file_name: []const u8, comptime fmt: []const u8, args: anytype) !void {
- const f = try fs.cwd().createFile(file_name, .{ .exclusive = true });
- defer f.close();
+fn writeSimpleTemplateFile(io: Io, file_name: []const u8, comptime fmt: []const u8, args: anytype) !void {
+ const f = try Io.Dir.cwd().createFile(io, file_name, .{ .exclusive = true });
+ defer f.close(io);
var buf: [4096]u8 = undefined;
- var fw = f.writer(&buf);
+ var fw = f.writer(io, &buf);
try fw.interface.print(fmt, args);
try fw.interface.flush();
}
-fn findTemplates(gpa: Allocator, arena: Allocator) Templates {
+fn findTemplates(gpa: Allocator, arena: Allocator, io: Io) Templates {
const cwd_path = introspect.getResolvedCwd(arena) catch |err| {
- fatal("unable to get cwd: {s}", .{@errorName(err)});
+ fatal("unable to get cwd: {t}", .{err});
};
- const self_exe_path = fs.selfExePathAlloc(arena) catch |err| {
- fatal("unable to find self exe path: {s}", .{@errorName(err)});
+ const self_exe_path = process.executablePathAlloc(io, arena) catch |err| {
+ fatal("unable to find self exe path: {t}", .{err});
};
- var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, cwd_path, self_exe_path) catch |err| {
- fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) });
+ var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, io, cwd_path, self_exe_path) catch |err| {
+ fatal("unable to find zig installation directory '{s}': {t}", .{ self_exe_path, err });
};
const s = fs.path.sep_str;
const template_sub_path = "init";
- const template_dir = zig_lib_directory.handle.openDir(template_sub_path, .{}) catch |err| {
+ const template_dir = zig_lib_directory.handle.openDir(io, template_sub_path, .{}) catch |err| {
const path = zig_lib_directory.path orelse ".";
- fatal("unable to open zig project template directory '{s}{s}{s}': {s}", .{
- path, s, template_sub_path, @errorName(err),
+ fatal("unable to open zig project template directory '{s}{s}{s}': {t}", .{
+ path, s, template_sub_path, err,
});
};
@@ -7574,17 +7584,18 @@ fn anyObjectLinkInputs(link_inputs: []const link.UnresolvedInput) bool {
return false;
}
-fn addLibDirectoryWarn(lib_directories: *std.ArrayList(Directory), path: []const u8) void {
- return addLibDirectoryWarn2(lib_directories, path, false);
+fn addLibDirectoryWarn(io: Io, lib_directories: *std.ArrayList(Directory), path: []const u8) void {
+ return addLibDirectoryWarn2(io, lib_directories, path, false);
}
fn addLibDirectoryWarn2(
+ io: Io,
lib_directories: *std.ArrayList(Directory),
path: []const u8,
ignore_not_found: bool,
) void {
lib_directories.appendAssumeCapacity(.{
- .handle = fs.cwd().openDir(path, .{}) catch |err| {
+ .handle = Io.Dir.cwd().openDir(io, path, .{}) catch |err| {
if (err == error.FileNotFound and ignore_not_found) return;
warn("unable to open library directory '{s}': {s}", .{ path, @errorName(err) });
return;
diff --git a/src/print_env.zig b/src/print_env.zig
index e1847688ad..3540a58d18 100644
--- a/src/print_env.zig
+++ b/src/print_env.zig
@@ -1,13 +1,17 @@
-const std = @import("std");
const builtin = @import("builtin");
-const build_options = @import("build_options");
-const Compilation = @import("Compilation.zig");
+
+const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const EnvVar = std.zig.EnvVar;
const fatal = std.process.fatal;
+const build_options = @import("build_options");
+const Compilation = @import("Compilation.zig");
+
pub fn cmdEnv(
arena: Allocator,
+ io: Io,
out: *std.Io.Writer,
args: []const []const u8,
wasi_preopens: switch (builtin.target.os.tag) {
@@ -21,20 +25,21 @@ pub fn cmdEnv(
const self_exe_path = switch (builtin.target.os.tag) {
.wasi => args[0],
- else => std.fs.selfExePathAlloc(arena) catch |err| {
- fatal("unable to find zig self exe path: {s}", .{@errorName(err)});
+ else => std.process.executablePathAlloc(io, arena) catch |err| {
+ fatal("unable to find zig self exe path: {t}", .{err});
},
};
var dirs: Compilation.Directories = .init(
arena,
+ io,
override_lib_dir,
override_global_cache_dir,
.global,
if (builtin.target.os.tag == .wasi) wasi_preopens,
if (builtin.target.os.tag != .wasi) self_exe_path,
);
- defer dirs.deinit();
+ defer dirs.deinit(io);
const zig_lib_dir = dirs.zig_lib.path orelse "";
const zig_std_dir = try dirs.zig_lib.join(arena, &.{"std"});
diff --git a/src/print_targets.zig b/src/print_targets.zig
index d9118b901b..a5e89506ad 100644
--- a/src/print_targets.zig
+++ b/src/print_targets.zig
@@ -1,35 +1,38 @@
const std = @import("std");
+const Io = std.Io;
const fs = std.fs;
const mem = std.mem;
const meta = std.meta;
const fatal = std.process.fatal;
const Allocator = std.mem.Allocator;
const Target = std.Target;
-const target = @import("target.zig");
const assert = std.debug.assert;
+
const glibc = @import("libs/glibc.zig");
const introspect = @import("introspect.zig");
+const target = @import("target.zig");
pub fn cmdTargets(
allocator: Allocator,
+ io: Io,
args: []const []const u8,
out: *std.Io.Writer,
native_target: *const Target,
) !void {
_ = args;
- var zig_lib_directory = introspect.findZigLibDir(allocator) catch |err| {
- fatal("unable to find zig installation directory: {s}\n", .{@errorName(err)});
- };
- defer zig_lib_directory.handle.close();
+ var zig_lib_directory = introspect.findZigLibDir(allocator, io) catch |err|
+ fatal("unable to find zig installation directory: {t}", .{err});
+ defer zig_lib_directory.handle.close(io);
defer allocator.free(zig_lib_directory.path.?);
const abilists_contents = zig_lib_directory.handle.readFileAlloc(
+ io,
glibc.abilists_path,
allocator,
.limited(glibc.abilists_max_size),
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
- else => fatal("unable to read " ++ glibc.abilists_path ++ ": {s}", .{@errorName(err)}),
+ else => fatal("unable to read " ++ glibc.abilists_path ++ ": {t}", .{err}),
};
defer allocator.free(abilists_contents);
@@ -48,9 +51,7 @@ pub fn cmdTargets(
{
var libc_obj = try root_obj.beginTupleField("libc", .{});
for (std.zig.target.available_libcs) |libc| {
- const tmp = try std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{
- @tagName(libc.arch), @tagName(libc.os), @tagName(libc.abi),
- });
+ const tmp = try std.fmt.allocPrint(allocator, "{t}-{t}-{t}", .{ libc.arch, libc.os, libc.abi });
defer allocator.free(tmp);
try libc_obj.field(tmp, .{});
}
diff --git a/stage1/wasi.c b/stage1/wasi.c
index 83240d39b4..e4772735d9 100644
--- a/stage1/wasi.c
+++ b/stage1/wasi.c
@@ -939,6 +939,58 @@ uint32_t wasi_snapshot_preview1_path_remove_directory(uint32_t fd, uint32_t path
return wasi_errno_success;
}
+uint32_t wasi_snapshot_preview1_path_symlink(uint32_t old_path, uint32_t old_path_len, uint32_t fd, uint32_t new_path, uint32_t new_path_len) {
+ uint8_t *const m = *wasm_memory;
+ const char *old_path_ptr = (const char *)&m[old_path];
+ const char *new_path_ptr = (const char *)&m[new_path];
+#if LOG_TRACE
+ fprintf(stderr, "wasi_snapshot_preview1_path_symlink(\"%.*s\", %u, \"%.*s\")\n", (int)old_path_len, old_path_ptr, fd, (int)new_path_len, new_path_ptr);
+#endif
+ (void)old_path_ptr;
+ (void)old_path_len;
+ (void)fd;
+ (void)new_path_ptr;
+ (void)new_path_len;
+ panic("unimplemented: path_symlink");
+ return wasi_errno_success;
+}
+
+uint32_t wasi_snapshot_preview1_path_readlink(uint32_t fd, uint32_t path, uint32_t path_len, uint32_t buf, uint32_t buf_len, uint32_t out_len) {
+ uint8_t *const m = *wasm_memory;
+ const char *path_ptr = (const char *)&m[path];
+ char *buf_ptr = (char *)&m[buf];
+ uint32_t *out_len_ptr = (uint32_t *)&m[out_len];
+#if LOG_TRACE
+ fprintf(stderr, "wasi_snapshot_preview1_path_readlink(%u, \"%.*s\", 0x%X, %u, 0x%X)\n", fd, (int)path_len, path_ptr, buf, buf_len, out_len);
+#endif
+ (void)fd;
+ (void)path_ptr;
+ (void)path_len;
+ (void)buf_ptr;
+ (void)buf_len;
+ (void)out_len_ptr;
+ panic("unimplemented: path_readlink");
+ return wasi_errno_success;
+}
+
+uint32_t wasi_snapshot_preview1_path_link(uint32_t old_fd, uint32_t old_flags, uint32_t old_path, uint32_t old_path_len, uint32_t new_fd, uint32_t new_path, uint32_t new_path_len) {
+ uint8_t *const m = *wasm_memory;
+ const char *old_path_ptr = (const char *)&m[old_path];
+ const char *new_path_ptr = (const char *)&m[new_path];
+#if LOG_TRACE
+ fprintf(stderr, "wasi_snapshot_preview1_path_link(%u, 0x%X, \"%.*s\", %u, \"%.*s\")\n", old_fd, old_flags, (int)old_path_len, old_path_ptr, new_fd, (int)new_path_len, new_path_ptr);
+#endif
+ (void)old_fd;
+ (void)old_flags;
+ (void)old_path_ptr;
+ (void)old_path_len;
+ (void)new_fd;
+ (void)new_path_ptr;
+ (void)new_path_len;
+ panic("unimplemented: path_link");
+ return wasi_errno_success;
+}
+
uint32_t wasi_snapshot_preview1_path_unlink_file(uint32_t fd, uint32_t path, uint32_t path_len) {
uint8_t *const m = *wasm_memory;
const char *path_ptr = (const char *)&m[path];
@@ -1038,6 +1090,15 @@ uint32_t wasi_snapshot_preview1_fd_seek(uint32_t fd, uint64_t in_offset, uint32_
return wasi_errno_success;
}
+uint32_t wasi_snapshot_preview1_fd_sync(uint32_t fd) {
+#if LOG_TRACE
+ fprintf(stderr, "wasi_snapshot_preview1_fd_sync(%u)\n", fd);
+#endif
+ (void)fd;
+ panic("unimplemented: fd_sync");
+ return wasi_errno_success;
+}
+
uint32_t wasi_snapshot_preview1_poll_oneoff(uint32_t in, uint32_t out, uint32_t nsubscriptions, uint32_t res_nevents) {
(void)in;
(void)out;
diff --git a/test/cases/disable_stack_tracing.zig b/test/cases/disable_stack_tracing.zig
index 044eaf7012..36620130c9 100644
--- a/test/cases/disable_stack_tracing.zig
+++ b/test/cases/disable_stack_tracing.zig
@@ -5,16 +5,16 @@ pub const std_options: std.Options = .{
pub fn main() !void {
var st_buf: [8]usize = undefined;
var buf: [1024]u8 = undefined;
- var stdout = std.fs.File.stdout().writer(&buf);
+ var stdout = std.Io.File.stdout().writer(std.Options.debug_io, &buf);
const captured_st = try foo(&stdout.interface, &st_buf);
- try std.debug.writeStackTrace(&captured_st, &stdout.interface, .no_color);
+ try std.debug.writeStackTrace(&captured_st, .{ .writer = &stdout.interface, .mode = .no_color });
try stdout.interface.print("stack trace index: {d}\n", .{captured_st.index});
try stdout.interface.flush();
}
fn foo(w: *std.Io.Writer, st_buf: []usize) !std.builtin.StackTrace {
- try std.debug.writeCurrentStackTrace(.{}, w, .no_color);
+ try std.debug.writeCurrentStackTrace(.{}, .{ .writer = w, .mode = .no_color });
return std.debug.captureCurrentStackTrace(.{}, st_buf);
}
diff --git a/test/incremental/add_decl b/test/incremental/add_decl
index 9efd274b9e..99f6b2d34f 100644
--- a/test/incremental/add_decl
+++ b/test/incremental/add_decl
@@ -7,57 +7,63 @@
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(foo);
+ try std.Io.File.stdout().writeStreamingAll(io, foo);
}
const foo = "good morning\n";
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="good morning\n"
#update=add new declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(foo);
+ try std.Io.File.stdout().writeStreamingAll(io, foo);
}
const foo = "good morning\n";
const bar = "good evening\n";
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="good morning\n"
#update=reference new declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(bar);
+ try std.Io.File.stdout().writeStreamingAll(io, bar);
}
const foo = "good morning\n";
const bar = "good evening\n";
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="good evening\n"
#update=reference missing declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(qux);
+ try std.Io.File.stdout().writeStreamingAll(io, qux);
}
const foo = "good morning\n";
const bar = "good evening\n";
-#expect_error=main.zig:3:39: error: use of undeclared identifier 'qux'
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
+#expect_error=main.zig:3:52: error: use of undeclared identifier 'qux'
#update=add missing declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(qux);
+ try std.Io.File.stdout().writeStreamingAll(io, qux);
}
const foo = "good morning\n";
const bar = "good evening\n";
const qux = "good night\n";
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="good night\n"
#update=remove unused declarations
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(qux);
+ try std.Io.File.stdout().writeStreamingAll(io, qux);
}
const qux = "good night\n";
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="good night\n"
diff --git a/test/incremental/add_decl_namespaced b/test/incremental/add_decl_namespaced
index 1025ae24e1..6fc22f10b9 100644
--- a/test/incremental/add_decl_namespaced
+++ b/test/incremental/add_decl_namespaced
@@ -7,58 +7,64 @@
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(@This().foo);
+ try std.Io.File.stdout().writeStreamingAll(io, @This().foo);
}
const foo = "good morning\n";
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="good morning\n"
#update=add new declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(@This().foo);
+ try std.Io.File.stdout().writeStreamingAll(io, @This().foo);
}
const foo = "good morning\n";
const bar = "good evening\n";
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="good morning\n"
#update=reference new declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(@This().bar);
+ try std.Io.File.stdout().writeStreamingAll(io, @This().bar);
}
const foo = "good morning\n";
const bar = "good evening\n";
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="good evening\n"
#update=reference missing declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(@This().qux);
+ try std.Io.File.stdout().writeStreamingAll(io, @This().qux);
}
const foo = "good morning\n";
const bar = "good evening\n";
-#expect_error=main.zig:3:46: error: root source file struct 'main' has no member named 'qux'
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
+#expect_error=main.zig:3:59: error: root source file struct 'main' has no member named 'qux'
#expect_error=main.zig:1:1: note: struct declared here
#update=add missing declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(@This().qux);
+ try std.Io.File.stdout().writeStreamingAll(io, @This().qux);
}
const foo = "good morning\n";
const bar = "good evening\n";
const qux = "good night\n";
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="good night\n"
#update=remove unused declarations
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(@This().qux);
+ try std.Io.File.stdout().writeStreamingAll(io, @This().qux);
}
const qux = "good night\n";
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="good night\n"
diff --git a/test/incremental/bad_import b/test/incremental/bad_import
index 9b6be8b176..20bdb9ae82 100644
--- a/test/incremental/bad_import
+++ b/test/incremental/bad_import
@@ -8,9 +8,10 @@
#file=main.zig
pub fn main() !void {
_ = @import("foo.zig");
- try std.fs.File.stdout().writeAll("success\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "success\n");
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#file=foo.zig
comptime {
_ = @import("bad.zig");
@@ -30,7 +31,8 @@ comptime {
#file=main.zig
pub fn main() !void {
//_ = @import("foo.zig");
- try std.fs.File.stdout().writeAll("success\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "success\n");
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="success\n"
diff --git a/test/incremental/change_embed_file b/test/incremental/change_embed_file
index 85d861ab93..84c9c334c7 100644
--- a/test/incremental/change_embed_file
+++ b/test/incremental/change_embed_file
@@ -8,8 +8,9 @@
const std = @import("std");
const string = @embedFile("string.txt");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(string);
+ try std.Io.File.stdout().writeStreamingAll(io, string);
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#file=string.txt
Hello, World!
#expect_stdout="Hello, World!\n"
@@ -28,8 +29,9 @@ Hello again, World!
const std = @import("std");
const string = @embedFile("string.txt");
pub fn main() !void {
- try std.fs.File.stdout().writeAll("a hardcoded string\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "a hardcoded string\n");
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="a hardcoded string\n"
#update=re-introduce reference to file
@@ -37,8 +39,9 @@ pub fn main() !void {
const std = @import("std");
const string = @embedFile("string.txt");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(string);
+ try std.Io.File.stdout().writeStreamingAll(io, string);
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_error=main.zig:2:27: error: unable to open 'string.txt': FileNotFound
#update=recreate file
diff --git a/test/incremental/change_enum_tag_type b/test/incremental/change_enum_tag_type
index 906f910271..06b80ac04d 100644
--- a/test/incremental/change_enum_tag_type
+++ b/test/incremental/change_enum_tag_type
@@ -15,10 +15,11 @@ const Foo = enum(Tag) {
pub fn main() !void {
var val: Foo = undefined;
val = .a;
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{s}\n", .{@tagName(val)});
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="a\n"
#update=too many enum fields
#file=main.zig
@@ -33,7 +34,7 @@ const Foo = enum(Tag) {
pub fn main() !void {
var val: Foo = undefined;
val = .a;
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{s}\n", .{@tagName(val)});
}
comptime {
@@ -42,6 +43,7 @@ comptime {
std.debug.assert(@TypeOf(@intFromEnum(Foo.e)) == Tag);
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_error=main.zig:7:5: error: enumeration value '4' too large for type 'u2'
#update=increase tag size
#file=main.zig
@@ -56,8 +58,9 @@ const Foo = enum(Tag) {
pub fn main() !void {
var val: Foo = undefined;
val = .a;
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{s}\n", .{@tagName(val)});
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="a\n"
diff --git a/test/incremental/change_exports b/test/incremental/change_exports
index b0850626d6..a36afb4ee1 100644
--- a/test/incremental/change_exports
+++ b/test/incremental/change_exports
@@ -17,10 +17,11 @@ pub fn main() !void {
extern const bar: u32;
};
S.foo();
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{}\n", .{S.bar});
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="123\n"
#update=add conflict
@@ -39,10 +40,11 @@ pub fn main() !void {
extern const other: u32;
};
S.foo();
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{} {}\n", .{ S.bar, S.other });
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_error=main.zig:6:5: error: exported symbol collision: foo
#expect_error=main.zig:1:1: note: other symbol here
@@ -62,10 +64,11 @@ pub fn main() !void {
extern const other: u32;
};
S.foo();
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{} {}\n", .{ S.bar, S.other });
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="123 456\n"
#update=put exports in decl
@@ -87,10 +90,11 @@ pub fn main() !void {
extern const other: u32;
};
S.foo();
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{} {}\n", .{ S.bar, S.other });
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="123 456\n"
#update=remove reference to exporting decl
@@ -133,10 +137,11 @@ pub fn main() !void {
extern const other: u32;
};
S.foo();
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{} {}\n", .{ S.bar, S.other });
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="123 456\n"
#update=reintroduce reference to exporting decl, introducing conflict
@@ -158,10 +163,11 @@ pub fn main() !void {
extern const other: u32;
};
S.foo();
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{} {}\n", .{ S.bar, S.other });
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_error=main.zig:5:5: error: exported symbol collision: bar
#expect_error=main.zig:2:1: note: other symbol here
#expect_error=main.zig:6:5: error: exported symbol collision: other
diff --git a/test/incremental/change_fn_type b/test/incremental/change_fn_type
index df788684cd..b4286545e3 100644
--- a/test/incremental/change_fn_type
+++ b/test/incremental/change_fn_type
@@ -8,10 +8,11 @@ pub fn main() !void {
try foo(123);
}
fn foo(x: u8) !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
return stdout_writer.interface.print("{d}\n", .{x});
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="123\n"
#update=change function type
@@ -20,10 +21,11 @@ pub fn main() !void {
try foo(123);
}
fn foo(x: i64) !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
return stdout_writer.interface.print("{d}\n", .{x});
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="123\n"
#update=change function argument
@@ -32,8 +34,9 @@ pub fn main() !void {
try foo(-42);
}
fn foo(x: i64) !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
return stdout_writer.interface.print("{d}\n", .{x});
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="-42\n"
diff --git a/test/incremental/change_generic_line_number b/test/incremental/change_generic_line_number
index f1920c67e6..2d731b071c 100644
--- a/test/incremental/change_generic_line_number
+++ b/test/incremental/change_generic_line_number
@@ -4,10 +4,11 @@
#update=initial version
#file=main.zig
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
fn Printer(message: []const u8) type {
return struct {
fn print() !void {
- try std.fs.File.stdout().writeAll(message);
+ try std.Io.File.stdout().writeStreamingAll(io, message);
}
};
}
@@ -19,11 +20,12 @@ pub fn main() !void {
#update=change line number
#file=main.zig
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
fn Printer(message: []const u8) type {
return struct {
fn print() !void {
- try std.fs.File.stdout().writeAll(message);
+ try std.Io.File.stdout().writeStreamingAll(io, message);
}
};
}
diff --git a/test/incremental/change_line_number b/test/incremental/change_line_number
index 5c809b8fa9..7bafbbfbdd 100644
--- a/test/incremental/change_line_number
+++ b/test/incremental/change_line_number
@@ -5,14 +5,16 @@
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll("foo\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "foo\n");
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="foo\n"
#update=change line number
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll("foo\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "foo\n");
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="foo\n"
diff --git a/test/incremental/change_panic_handler b/test/incremental/change_panic_handler
index 070384887a..ebce3bc312 100644
--- a/test/incremental/change_panic_handler
+++ b/test/incremental/change_panic_handler
@@ -12,11 +12,12 @@ pub fn main() !u8 {
}
pub const panic = std.debug.FullPanic(myPanic);
fn myPanic(msg: []const u8, _: ?usize) noreturn {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
stdout_writer.interface.print("panic message: {s}\n", .{msg}) catch {};
std.process.exit(0);
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="panic message: integer overflow\n"
#update=change the panic handler body
@@ -29,11 +30,12 @@ pub fn main() !u8 {
}
pub const panic = std.debug.FullPanic(myPanic);
fn myPanic(msg: []const u8, _: ?usize) noreturn {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
stdout_writer.interface.print("new panic message: {s}\n", .{msg}) catch {};
std.process.exit(0);
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="new panic message: integer overflow\n"
#update=change the panic handler function value
@@ -46,9 +48,10 @@ pub fn main() !u8 {
}
pub const panic = std.debug.FullPanic(myPanicNew);
fn myPanicNew(msg: []const u8, _: ?usize) noreturn {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
stdout_writer.interface.print("third panic message: {s}\n", .{msg}) catch {};
std.process.exit(0);
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="third panic message: integer overflow\n"
diff --git a/test/incremental/change_panic_handler_explicit b/test/incremental/change_panic_handler_explicit
index 774b18bbfd..366bffca45 100644
--- a/test/incremental/change_panic_handler_explicit
+++ b/test/incremental/change_panic_handler_explicit
@@ -42,11 +42,12 @@ pub const panic = struct {
pub const noreturnReturned = no_panic.noreturnReturned;
};
fn myPanic(msg: []const u8, _: ?usize) noreturn {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
stdout_writer.interface.print("panic message: {s}\n", .{msg}) catch {};
std.process.exit(0);
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="panic message: integer overflow\n"
#update=change the panic handler body
@@ -89,11 +90,12 @@ pub const panic = struct {
pub const noreturnReturned = no_panic.noreturnReturned;
};
fn myPanic(msg: []const u8, _: ?usize) noreturn {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
stdout_writer.interface.print("new panic message: {s}\n", .{msg}) catch {};
std.process.exit(0);
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="new panic message: integer overflow\n"
#update=change the panic handler function value
@@ -136,9 +138,10 @@ pub const panic = struct {
pub const noreturnReturned = no_panic.noreturnReturned;
};
fn myPanicNew(msg: []const u8, _: ?usize) noreturn {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
stdout_writer.interface.print("third panic message: {s}\n", .{msg}) catch {};
std.process.exit(0);
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="third panic message: integer overflow\n"
diff --git a/test/incremental/change_shift_op b/test/incremental/change_shift_op
index 41b5d19266..af849c0d5b 100644
--- a/test/incremental/change_shift_op
+++ b/test/incremental/change_shift_op
@@ -9,10 +9,11 @@ pub fn main() !void {
try foo(0x1300);
}
fn foo(x: u16) !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("0x{x}\n", .{x << 4});
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="0x3000\n"
#update=change to right shift
#file=main.zig
@@ -20,8 +21,9 @@ pub fn main() !void {
try foo(0x1300);
}
fn foo(x: u16) !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("0x{x}\n", .{x >> 4});
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="0x130\n"
diff --git a/test/incremental/change_struct_same_fields b/test/incremental/change_struct_same_fields
index 7af1161326..3ba715f906 100644
--- a/test/incremental/change_struct_same_fields
+++ b/test/incremental/change_struct_same_fields
@@ -11,13 +11,14 @@ pub fn main() !void {
try foo(&val);
}
fn foo(val: *const S) !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print(
"{d} {d}\n",
.{ val.x, val.y },
);
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="100 200\n"
#update=change struct layout
@@ -28,13 +29,14 @@ pub fn main() !void {
try foo(&val);
}
fn foo(val: *const S) !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print(
"{d} {d}\n",
.{ val.x, val.y },
);
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="100 200\n"
#update=change values
@@ -45,11 +47,12 @@ pub fn main() !void {
try foo(&val);
}
fn foo(val: *const S) !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print(
"{d} {d}\n",
.{ val.x, val.y },
);
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="1234 5678\n"
diff --git a/test/incremental/change_zon_file b/test/incremental/change_zon_file
index 62f73dd3bf..a966df5471 100644
--- a/test/incremental/change_zon_file
+++ b/test/incremental/change_zon_file
@@ -8,8 +8,9 @@
const std = @import("std");
const message: []const u8 = @import("message.zon");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(message);
+ try std.Io.File.stdout().writeStreamingAll(io, message);
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#file=message.zon
"Hello, World!\n"
#expect_stdout="Hello, World!\n"
@@ -29,8 +30,9 @@ pub fn main() !void {
const std = @import("std");
const message: []const u8 = @import("message.zon");
pub fn main() !void {
- try std.fs.File.stdout().writeAll("a hardcoded string\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "a hardcoded string\n");
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_error=message.zon:1:1: error: unable to load 'message.zon': FileNotFound
#expect_error=main.zig:2:37: note: file imported here
@@ -44,6 +46,7 @@ pub fn main() !void {
const std = @import("std");
const message: []const u8 = @import("message.zon");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(message);
+ try std.Io.File.stdout().writeStreamingAll(io, message);
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="We're back, World!\n"
diff --git a/test/incremental/change_zon_file_no_result_type b/test/incremental/change_zon_file_no_result_type
index 498543e4f1..6b3aa73dc6 100644
--- a/test/incremental/change_zon_file_no_result_type
+++ b/test/incremental/change_zon_file_no_result_type
@@ -6,8 +6,9 @@
#update=initial version
#file=main.zig
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
pub fn main() !void {
- try std.fs.File.stdout().writeAll(@import("foo.zon").message);
+ try std.Io.File.stdout().writeStreamingAll(io, @import("foo.zon").message);
}
#file=foo.zon
.{
diff --git a/test/incremental/compile_log b/test/incremental/compile_log
index 697bb26569..19ff7237f2 100644
--- a/test/incremental/compile_log
+++ b/test/incremental/compile_log
@@ -8,17 +8,19 @@
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll("Hello, World!\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "Hello, World!\n");
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="Hello, World!\n"
#update=add compile log
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll("Hello, World!\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "Hello, World!\n");
@compileLog("this is a log");
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_error=main.zig:4:5: error: found compile log statement
#expect_compile_log=@as(*const [13:0]u8, "this is a log")
@@ -26,6 +28,7 @@ pub fn main() !void {
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll("Hello, World!\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "Hello, World!\n");
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="Hello, World!\n"
diff --git a/test/incremental/fix_astgen_failure b/test/incremental/fix_astgen_failure
index 8b1b3adbf7..dca371f521 100644
--- a/test/incremental/fix_astgen_failure
+++ b/test/incremental/fix_astgen_failure
@@ -10,28 +10,31 @@ pub fn main() !void {
}
#file=foo.zig
pub fn hello() !void {
- try std.fs.File.stdout().writeAll("Hello, World!\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "Hello, World!\n");
}
#expect_error=foo.zig:2:9: error: use of undeclared identifier 'std'
#update=fix the error
#file=foo.zig
const std = @import("std");
pub fn hello() !void {
- try std.fs.File.stdout().writeAll("Hello, World!\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "Hello, World!\n");
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="Hello, World!\n"
#update=add new error
#file=foo.zig
const std = @import("std");
pub fn hello() !void {
- try std.fs.File.stdout().writeAll(hello_str);
+ try std.Io.File.stdout().writeStreamingAll(io, hello_str);
}
-#expect_error=foo.zig:3:39: error: use of undeclared identifier 'hello_str'
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
+#expect_error=foo.zig:3:52: error: use of undeclared identifier 'hello_str'
#update=fix the new error
#file=foo.zig
const std = @import("std");
const hello_str = "Hello, World! Again!\n";
pub fn hello() !void {
- try std.fs.File.stdout().writeAll(hello_str);
+ try std.Io.File.stdout().writeStreamingAll(io, hello_str);
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="Hello, World! Again!\n"
diff --git a/test/incremental/function_becomes_inline b/test/incremental/function_becomes_inline
index 240d7a54af..4021575842 100644
--- a/test/incremental/function_becomes_inline
+++ b/test/incremental/function_becomes_inline
@@ -8,9 +8,10 @@ pub fn main() !void {
try foo();
}
fn foo() !void {
- try std.fs.File.stdout().writeAll("Hello, World!\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "Hello, World!\n");
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="Hello, World!\n"
#update=make function inline
@@ -19,9 +20,10 @@ pub fn main() !void {
try foo();
}
inline fn foo() !void {
- try std.fs.File.stdout().writeAll("Hello, World!\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "Hello, World!\n");
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="Hello, World!\n"
#update=change string
@@ -30,7 +32,8 @@ pub fn main() !void {
try foo();
}
inline fn foo() !void {
- try std.fs.File.stdout().writeAll("Hello, `inline` World!\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "Hello, `inline` World!\n");
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="Hello, `inline` World!\n"
diff --git a/test/incremental/hello b/test/incremental/hello
index dc6f02177f..48659e1879 100644
--- a/test/incremental/hello
+++ b/test/incremental/hello
@@ -6,14 +6,16 @@
#update=initial version
#file=main.zig
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
pub fn main() !void {
- try std.fs.File.stdout().writeAll("good morning\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "good morning\n");
}
#expect_stdout="good morning\n"
#update=change the string
#file=main.zig
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
pub fn main() !void {
- try std.fs.File.stdout().writeAll("おはようございます\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "おはようございます\n");
}
#expect_stdout="おはようございます\n"
diff --git a/test/incremental/make_decl_pub b/test/incremental/make_decl_pub
index b25b117160..b193deb68c 100644
--- a/test/incremental/make_decl_pub
+++ b/test/incremental/make_decl_pub
@@ -12,8 +12,9 @@ pub fn main() !void {
#file=foo.zig
const std = @import("std");
fn hello() !void {
- try std.fs.File.stdout().writeAll("Hello, World!\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "Hello, World!\n");
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_error=main.zig:3:12: error: 'hello' is not marked 'pub'
#expect_error=foo.zig:2:1: note: declared here
@@ -21,6 +22,7 @@ fn hello() !void {
#file=foo.zig
const std = @import("std");
pub fn hello() !void {
- try std.fs.File.stdout().writeAll("Hello, World!\n");
+ try std.Io.File.stdout().writeStreamingAll(io, "Hello, World!\n");
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="Hello, World!\n"
diff --git a/test/incremental/modify_inline_fn b/test/incremental/modify_inline_fn
index d485d8ffd5..19e201f1d9 100644
--- a/test/incremental/modify_inline_fn
+++ b/test/incremental/modify_inline_fn
@@ -8,20 +8,22 @@
const std = @import("std");
pub fn main() !void {
const str = getStr();
- try std.fs.File.stdout().writeAll(str);
+ try std.Io.File.stdout().writeStreamingAll(io, str);
}
inline fn getStr() []const u8 {
return "foo\n";
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="foo\n"
#update=change the string
#file=main.zig
const std = @import("std");
pub fn main() !void {
const str = getStr();
- try std.fs.File.stdout().writeAll(str);
+ try std.Io.File.stdout().writeStreamingAll(io, str);
}
inline fn getStr() []const u8 {
return "bar\n";
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="bar\n"
diff --git a/test/incremental/move_src b/test/incremental/move_src
index 4f43e8ea6a..b79a25df8d 100644
--- a/test/incremental/move_src
+++ b/test/incremental/move_src
@@ -7,7 +7,7 @@
#file=main.zig
const std = @import("std");
pub fn main() !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{d} {d}\n", .{ foo(), bar() });
}
fn foo() u32 {
@@ -16,13 +16,14 @@ fn foo() u32 {
fn bar() u32 {
return 123;
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="7 123\n"
#update=add newline
#file=main.zig
const std = @import("std");
pub fn main() !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{d} {d}\n", .{ foo(), bar() });
}
@@ -32,4 +33,5 @@ fn foo() u32 {
fn bar() u32 {
return 123;
}
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="8 123\n"
diff --git a/test/incremental/no_change_preserves_tag_names b/test/incremental/no_change_preserves_tag_names
index 623496119d..dc89face50 100644
--- a/test/incremental/no_change_preserves_tag_names
+++ b/test/incremental/no_change_preserves_tag_names
@@ -7,15 +7,17 @@
#file=main.zig
const std = @import("std");
var some_enum: enum { first, second } = .first;
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
pub fn main() !void {
- try std.fs.File.stdout().writeAll(@tagName(some_enum));
+ try std.Io.File.stdout().writeStreamingAll(io, @tagName(some_enum));
}
#expect_stdout="first"
#update=no change
#file=main.zig
const std = @import("std");
var some_enum: enum { first, second } = .first;
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
pub fn main() !void {
- try std.fs.File.stdout().writeAll(@tagName(some_enum));
+ try std.Io.File.stdout().writeStreamingAll(io, @tagName(some_enum));
}
#expect_stdout="first"
diff --git a/test/incremental/recursive_function_becomes_non_recursive b/test/incremental/recursive_function_becomes_non_recursive
index a5a03749b8..5cee1bfbcf 100644
--- a/test/incremental/recursive_function_becomes_non_recursive
+++ b/test/incremental/recursive_function_becomes_non_recursive
@@ -9,11 +9,12 @@ pub fn main() !void {
try foo(false);
}
fn foo(recurse: bool) !void {
- const stdout = std.fs.File.stdout();
+ const stdout = std.Io.File.stdout();
if (recurse) return foo(true);
- try stdout.writeAll("non-recursive path\n");
+ try stdout.writeStreamingAll(io, "non-recursive path\n");
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="non-recursive path\n"
#update=eliminate recursion and change argument
@@ -22,9 +23,10 @@ pub fn main() !void {
try foo(true);
}
fn foo(recurse: bool) !void {
- const stdout = std.fs.File.stdout();
- if (recurse) return stdout.writeAll("x==1\n");
- try stdout.writeAll("non-recursive path\n");
+ const stdout = std.Io.File.stdout();
+ if (recurse) return stdout.writeStreamingAll(io, "x==1\n");
+ try stdout.writeStreamingAll(io, "non-recursive path\n");
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="x==1\n"
diff --git a/test/incremental/remove_enum_field b/test/incremental/remove_enum_field
index 02daf2a0fb..c964285707 100644
--- a/test/incremental/remove_enum_field
+++ b/test/incremental/remove_enum_field
@@ -10,10 +10,11 @@ const MyEnum = enum(u8) {
bar = 2,
};
pub fn main() !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{}\n", .{@intFromEnum(MyEnum.foo)});
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="1\n"
#update=remove enum field
#file=main.zig
@@ -22,9 +23,10 @@ const MyEnum = enum(u8) {
bar = 2,
};
pub fn main() !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
try stdout_writer.interface.print("{}\n", .{@intFromEnum(MyEnum.foo)});
}
const std = @import("std");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_error=main.zig:7:69: error: enum 'main.MyEnum' has no member named 'foo'
#expect_error=main.zig:1:16: note: enum declared here
diff --git a/test/incremental/unreferenced_error b/test/incremental/unreferenced_error
index 505fb3d5f4..c9a3277487 100644
--- a/test/incremental/unreferenced_error
+++ b/test/incremental/unreferenced_error
@@ -7,36 +7,40 @@
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(a);
+ try std.Io.File.stdout().writeStreamingAll(io, a);
}
const a = "Hello, World!\n";
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="Hello, World!\n"
#update=introduce compile error
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(a);
+ try std.Io.File.stdout().writeStreamingAll(io, a);
}
const a = @compileError("bad a");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_error=main.zig:5:11: error: bad a
#update=remove error reference
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(b);
+ try std.Io.File.stdout().writeStreamingAll(io, b);
}
const a = @compileError("bad a");
const b = "Hi there!\n";
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="Hi there!\n"
#update=introduce and remove reference to error
#file=main.zig
const std = @import("std");
pub fn main() !void {
- try std.fs.File.stdout().writeAll(a);
+ try std.Io.File.stdout().writeStreamingAll(io, a);
}
const a = "Back to a\n";
const b = @compileError("bad b");
+const io = std.Io.Threaded.global_single_threaded.ioBasic();
#expect_stdout="Back to a\n"
diff --git a/test/link/bss/main.zig b/test/link/bss/main.zig
index 2785a8360f..0d69f97450 100644
--- a/test/link/bss/main.zig
+++ b/test/link/bss/main.zig
@@ -4,7 +4,7 @@ const std = @import("std");
var buffer: [0x1000000]u64 = [1]u64{0} ** 0x1000000;
pub fn main() anyerror!void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(std.Options.debug_io, &.{});
buffer[0x10] = 1;
diff --git a/test/link/elf.zig b/test/link/elf.zig
index 824fc76e92..d5b0eb7519 100644
--- a/test/link/elf.zig
+++ b/test/link/elf.zig
@@ -1323,7 +1323,7 @@ fn testGcSectionsZig(b: *Build, opts: Options) *Step {
\\extern var live_var2: i32;
\\extern fn live_fn2() void;
\\pub fn main() void {
- \\ var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ \\ var stdout_writer = std.Io.File.stdout().writerStreaming(std.Options.debug_io, &.{});
\\ stdout_writer.interface.print("{d} {d}\n", .{ live_var1, live_var2 }) catch @panic("fail");
\\ live_fn2();
\\}
@@ -1365,7 +1365,7 @@ fn testGcSectionsZig(b: *Build, opts: Options) *Step {
\\extern var live_var2: i32;
\\extern fn live_fn2() void;
\\pub fn main() void {
- \\ var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ \\ var stdout_writer = std.Io.File.stdout().writerStreaming(std.Options.debug_io, &.{});
\\ stdout_writer.interface.print("{d} {d}\n", .{ live_var1, live_var2 }) catch @panic("fail");
\\ live_fn2();
\\}
diff --git a/test/link/macho.zig b/test/link/macho.zig
index 4fc0cad0ee..ccfecefa44 100644
--- a/test/link/macho.zig
+++ b/test/link/macho.zig
@@ -716,7 +716,7 @@ fn testHelloZig(b: *Build, opts: Options) *Step {
const exe = addExecutable(b, opts, .{ .name = "main", .zig_source_bytes =
\\const std = @import("std");
\\pub fn main() void {
- \\ std.fs.File.stdout().writeAll("Hello world!\n") catch @panic("fail");
+ \\ std.Io.File.stdout().writeStreamingAll(std.Options.debug_io, "Hello world!\n") catch @panic("fail");
\\}
});
@@ -868,9 +868,10 @@ fn testLayout(b: *Build, opts: Options) *Step {
}
fn testLinkDirectlyCppTbd(b: *Build, opts: Options) *Step {
+ const io = b.graph.io;
const test_step = addTestStep(b, "link-directly-cpp-tbd", opts);
- const sdk = std.zig.system.darwin.getSdk(b.allocator, &opts.target.result) orelse
+ const sdk = std.zig.system.darwin.getSdk(b.allocator, io, &opts.target.result) orelse
@panic("macOS SDK is required to run the test");
const exe = addExecutable(b, opts, .{
@@ -2371,7 +2372,7 @@ fn testTlsZig(b: *Build, opts: Options) *Step {
\\threadlocal var x: i32 = 0;
\\threadlocal var y: i32 = -1;
\\pub fn main() void {
- \\ var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ \\ var stdout_writer = std.Io.File.stdout().writerStreaming(std.Options.debug_io, &.{});
\\ stdout_writer.interface.print("{d} {d}\n", .{x, y}) catch unreachable;
\\ x -= 1;
\\ y += 1;
diff --git a/test/link/wasm/extern/main.zig b/test/link/wasm/extern/main.zig
index 9635f64a40..51c5f84181 100644
--- a/test/link/wasm/extern/main.zig
+++ b/test/link/wasm/extern/main.zig
@@ -3,6 +3,6 @@ const std = @import("std");
extern const foo: u32;
pub fn main() void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(std.Options.debug_io, &.{});
stdout_writer.interface.print("Result: {d}", .{foo}) catch {};
}
diff --git a/test/src/Cases.zig b/test/src/Cases.zig
index b1fece44d9..230b100b68 100644
--- a/test/src/Cases.zig
+++ b/test/src/Cases.zig
@@ -1,6 +1,8 @@
const Cases = @This();
const builtin = @import("builtin");
+
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const getExternalExecutor = std.zig.system.getExternalExecutor;
@@ -8,6 +10,7 @@ const ArrayList = std.ArrayList;
gpa: Allocator,
arena: Allocator,
+io: Io,
cases: std.array_list.Managed(Case),
pub const IncrementalCase = struct {
@@ -313,7 +316,7 @@ pub fn addCompile(
/// Each file should include a test manifest as a contiguous block of comments at
/// the end of the file. The first line should be the test type, followed by a set of
/// key-value config values, followed by a blank line, then the expected output.
-pub fn addFromDir(ctx: *Cases, dir: std.fs.Dir, b: *std.Build) void {
+pub fn addFromDir(ctx: *Cases, dir: Io.Dir, b: *std.Build) void {
var current_file: []const u8 = "none";
ctx.addFromDirInner(dir, &current_file, b) catch |err| {
std.debug.panicExtra(
@@ -326,16 +329,17 @@ pub fn addFromDir(ctx: *Cases, dir: std.fs.Dir, b: *std.Build) void {
fn addFromDirInner(
ctx: *Cases,
- iterable_dir: std.fs.Dir,
+ iterable_dir: Io.Dir,
/// This is kept up to date with the currently being processed file so
/// that if any errors occur the caller knows it happened during this file.
current_file: *[]const u8,
b: *std.Build,
) !void {
+ const io = ctx.io;
var it = try iterable_dir.walk(ctx.arena);
var filenames: ArrayList([]const u8) = .empty;
- while (try it.next()) |entry| {
+ while (try it.next(io)) |entry| {
if (entry.kind != .file) continue;
// Ignore stuff such as .swp files
@@ -347,7 +351,7 @@ fn addFromDirInner(
current_file.* = filename;
const max_file_size = 10 * 1024 * 1024;
- const src = try iterable_dir.readFileAllocOptions(filename, ctx.arena, .limited(max_file_size), .@"1", 0);
+ const src = try iterable_dir.readFileAllocOptions(io, filename, ctx.arena, .limited(max_file_size), .@"1", 0);
// Parse the manifest
var manifest = try TestManifest.parse(ctx.arena, src);
@@ -376,6 +380,12 @@ fn addFromDirInner(
// Other backends don't support new liveness format
continue;
}
+
+ if (backend == .selfhosted and target.cpu.arch == .aarch64) {
+ // https://codeberg.org/ziglang/zig/pulls/30232#issuecomment-9182045
+ continue;
+ }
+
if (backend == .selfhosted and target.os.tag == .macos and
target.cpu.arch == .x86_64 and builtin.cpu.arch == .aarch64)
{
@@ -427,9 +437,10 @@ fn addFromDirInner(
}
}
-pub fn init(gpa: Allocator, arena: Allocator) Cases {
+pub fn init(gpa: Allocator, arena: Allocator, io: Io) Cases {
return .{
.gpa = gpa,
+ .io = io,
.cases = .init(gpa),
.arena = arena,
};
@@ -457,6 +468,7 @@ pub fn lowerToBuildSteps(
parent_step: *std.Build.Step,
options: CaseTestOptions,
) void {
+ const io = self.io;
const host = b.resolveTargetQuery(.{});
const cases_dir_path = b.build_root.join(b.allocator, &.{ "test", "cases" }) catch @panic("OOM");
@@ -591,7 +603,7 @@ pub fn lowerToBuildSteps(
},
.Execution => |expected_stdout| no_exec: {
const run = if (case.target.result.ofmt == .c) run_step: {
- if (getExternalExecutor(&host.result, &case.target.result, .{ .link_libc = true }) != .native) {
+ if (getExternalExecutor(io, &host.result, &case.target.result, .{ .link_libc = true }) != .native) {
// We wouldn't be able to run the compiled C code.
break :no_exec;
}
diff --git a/test/src/convert-stack-trace.zig b/test/src/convert-stack-trace.zig
index 91be53a8e5..d23623c396 100644
--- a/test/src/convert-stack-trace.zig
+++ b/test/src/convert-stack-trace.zig
@@ -34,20 +34,20 @@ pub fn main() !void {
const gpa = arena;
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
var read_buf: [1024]u8 = undefined;
var write_buf: [1024]u8 = undefined;
- const in_file = try std.fs.cwd().openFile(args[1], .{});
- defer in_file.close();
+ const in_file = try std.Io.Dir.cwd().openFile(io, args[1], .{});
+ defer in_file.close(io);
- const out_file: std.fs.File = .stdout();
+ const out_file: std.Io.File = .stdout();
var in_fr = in_file.reader(io, &read_buf);
- var out_fw = out_file.writer(&write_buf);
+ var out_fw = out_file.writer(io, &write_buf);
const w = &out_fw.interface;
diff --git a/test/standalone/child_process/child.zig b/test/standalone/child_process/child.zig
index 2e74f30882..80e2edaa7f 100644
--- a/test/standalone/child_process/child.zig
+++ b/test/standalone/child_process/child.zig
@@ -8,7 +8,7 @@ pub fn main() !void {
var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const arena = arena_state.allocator();
- var threaded: std.Io.Threaded = .init(arena);
+ var threaded: std.Io.Threaded = .init(arena, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -26,28 +26,28 @@ fn run(allocator: std.mem.Allocator, io: Io) !void {
const hello_arg = "hello arg";
const a1 = args.next() orelse unreachable;
if (!std.mem.eql(u8, a1, hello_arg)) {
- testError("first arg: '{s}'; want '{s}'", .{ a1, hello_arg });
+ testError(io, "first arg: '{s}'; want '{s}'", .{ a1, hello_arg });
}
if (args.next()) |a2| {
- testError("expected only one arg; got more: {s}", .{a2});
+ testError(io, "expected only one arg; got more: {s}", .{a2});
}
// test stdout pipe; parent verifies
- try std.fs.File.stdout().writeAll("hello from stdout");
+ try std.Io.File.stdout().writeStreamingAll(io, "hello from stdout");
// test stdin pipe from parent
const hello_stdin = "hello from stdin";
var buf: [hello_stdin.len]u8 = undefined;
- const stdin: std.fs.File = .stdin();
+ const stdin: std.Io.File = .stdin();
var reader = stdin.reader(io, &.{});
const n = try reader.interface.readSliceShort(&buf);
if (!std.mem.eql(u8, buf[0..n], hello_stdin)) {
- testError("stdin: '{s}'; want '{s}'", .{ buf[0..n], hello_stdin });
+ testError(io, "stdin: '{s}'; want '{s}'", .{ buf[0..n], hello_stdin });
}
}
-fn testError(comptime fmt: []const u8, args: anytype) void {
- var stderr_writer = std.fs.File.stderr().writer(&.{});
+fn testError(io: Io, comptime fmt: []const u8, args: anytype) void {
+ var stderr_writer = std.Io.File.stderr().writer(io, &.{});
const stderr = &stderr_writer.interface;
stderr.print("CHILD TEST ERROR: ", .{}) catch {};
stderr.print(fmt, args) catch {};
diff --git a/test/standalone/child_process/main.zig b/test/standalone/child_process/main.zig
index 5970cdd952..98d38bdee3 100644
--- a/test/standalone/child_process/main.zig
+++ b/test/standalone/child_process/main.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
pub fn main() !void {
// make sure safety checks are enabled even in release modes
@@ -20,7 +21,7 @@ pub fn main() !void {
};
defer if (needs_free) gpa.free(child_path);
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -28,10 +29,10 @@ pub fn main() !void {
child.stdin_behavior = .Pipe;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Inherit;
- try child.spawn();
+ try child.spawn(io);
const child_stdin = child.stdin.?;
- try child_stdin.writeAll("hello from stdin"); // verified in child
- child_stdin.close();
+ try child_stdin.writeStreamingAll(io, "hello from stdin"); // verified in child
+ child_stdin.close(io);
child.stdin = null;
const hello_stdout = "hello from stdout";
@@ -39,30 +40,30 @@ pub fn main() !void {
var stdout_reader = child.stdout.?.readerStreaming(io, &.{});
const n = try stdout_reader.interface.readSliceShort(&buf);
if (!std.mem.eql(u8, buf[0..n], hello_stdout)) {
- testError("child stdout: '{s}'; want '{s}'", .{ buf[0..n], hello_stdout });
+ testError(io, "child stdout: '{s}'; want '{s}'", .{ buf[0..n], hello_stdout });
}
- switch (try child.wait()) {
+ switch (try child.wait(io)) {
.Exited => |code| {
const child_ok_code = 42; // set by child if no test errors
if (code != child_ok_code) {
- testError("child exit code: {d}; want {d}", .{ code, child_ok_code });
+ testError(io, "child exit code: {d}; want {d}", .{ code, child_ok_code });
}
},
- else => |term| testError("abnormal child exit: {}", .{term}),
+ else => |term| testError(io, "abnormal child exit: {}", .{term}),
}
if (parent_test_error) return error.ParentTestError;
// Check that FileNotFound is consistent across platforms when trying to spawn an executable that doesn't exist
const missing_child_path = try std.mem.concat(gpa, u8, &.{ child_path, "_intentionally_missing" });
defer gpa.free(missing_child_path);
- try std.testing.expectError(error.FileNotFound, std.process.Child.run(.{ .allocator = gpa, .argv = &.{missing_child_path} }));
+ try std.testing.expectError(error.FileNotFound, std.process.Child.run(gpa, io, .{ .argv = &.{missing_child_path} }));
}
var parent_test_error = false;
-fn testError(comptime fmt: []const u8, args: anytype) void {
- var stderr_writer = std.fs.File.stderr().writer(&.{});
+fn testError(io: Io, comptime fmt: []const u8, args: anytype) void {
+ var stderr_writer = Io.File.stderr().writer(io, &.{});
const stderr = &stderr_writer.interface;
stderr.print("PARENT TEST ERROR: ", .{}) catch {};
stderr.print(fmt, args) catch {};
diff --git a/test/standalone/cmakedefine/check.zig b/test/standalone/cmakedefine/check.zig
index 782e7f4dc3..c2f89ad112 100644
--- a/test/standalone/cmakedefine/check.zig
+++ b/test/standalone/cmakedefine/check.zig
@@ -9,8 +9,10 @@ pub fn main() !void {
const actual_path = args[1];
const expected_path = args[2];
- const actual = try std.fs.cwd().readFileAlloc(actual_path, arena, .limited(1024 * 1024));
- const expected = try std.fs.cwd().readFileAlloc(expected_path, arena, .limited(1024 * 1024));
+ const io = std.Io.Threaded.global_single_threaded.ioBasic();
+
+ const actual = try std.Io.Dir.cwd().readFileAlloc(io, actual_path, arena, .limited(1024 * 1024));
+ const expected = try std.Io.Dir.cwd().readFileAlloc(io, expected_path, arena, .limited(1024 * 1024));
// The actual output starts with a comment which we should strip out before comparing.
const comment_str = "/* This file was generated by ConfigHeader using the Zig Build System. */\n";
diff --git a/test/standalone/coff_dwarf/main.zig b/test/standalone/coff_dwarf/main.zig
index e7590f3f07..7e314d5b28 100644
--- a/test/standalone/coff_dwarf/main.zig
+++ b/test/standalone/coff_dwarf/main.zig
@@ -11,7 +11,7 @@ pub fn main() void {
var di: std.debug.SelfInfo = .init;
defer di.deinit(gpa);
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
diff --git a/test/standalone/dirname/build.zig b/test/standalone/dirname/build.zig
index 0da85e2923..b850680ba9 100644
--- a/test/standalone/dirname/build.zig
+++ b/test/standalone/dirname/build.zig
@@ -59,13 +59,15 @@ pub fn build(b: *std.Build) void {
// Absolute path:
const abs_path = setup_abspath: {
+ // TODO this is a bad pattern, don't do this
+ const io = b.graph.io;
const temp_dir = b.makeTempPath();
- var dir = std.fs.cwd().openDir(temp_dir, .{}) catch @panic("failed to open temp dir");
- defer dir.close();
+ var dir = std.Io.Dir.cwd().openDir(io, temp_dir, .{}) catch @panic("failed to open temp dir");
+ defer dir.close(io);
- var file = dir.createFile("foo.txt", .{}) catch @panic("failed to create file");
- file.close();
+ var file = dir.createFile(io, "foo.txt", .{}) catch @panic("failed to create file");
+ file.close(io);
break :setup_abspath std.Build.LazyPath{ .cwd_relative = temp_dir };
};
diff --git a/test/standalone/dirname/exists_in.zig b/test/standalone/dirname/exists_in.zig
index 7aec1f423d..ba2de2777f 100644
--- a/test/standalone/dirname/exists_in.zig
+++ b/test/standalone/dirname/exists_in.zig
@@ -34,8 +34,10 @@ fn run(allocator: std.mem.Allocator) !void {
return error.BadUsage;
};
- var dir = try std.fs.cwd().openDir(dir_path, .{});
- defer dir.close();
+ const io = std.Io.Threaded.global_single_threaded.ioBasic();
- _ = try dir.statFile(relpath);
+ var dir = try std.Io.Dir.cwd().openDir(io, dir_path, .{});
+ defer dir.close(io);
+
+ _ = try dir.statFile(io, relpath, .{});
}
diff --git a/test/standalone/dirname/touch.zig b/test/standalone/dirname/touch.zig
index 43fcabf91e..134d53d2fc 100644
--- a/test/standalone/dirname/touch.zig
+++ b/test/standalone/dirname/touch.zig
@@ -26,14 +26,16 @@ fn run(allocator: std.mem.Allocator) !void {
return error.BadUsage;
};
- const dir_path = std.fs.path.dirname(path) orelse unreachable;
- const basename = std.fs.path.basename(path);
+ const dir_path = std.Io.Dir.path.dirname(path) orelse unreachable;
+ const basename = std.Io.Dir.path.basename(path);
- var dir = try std.fs.cwd().openDir(dir_path, .{});
- defer dir.close();
+ const io = std.Io.Threaded.global_single_threaded.ioBasic();
- _ = dir.statFile(basename) catch {
- var file = try dir.createFile(basename, .{});
- file.close();
+ var dir = try std.Io.Dir.cwd().openDir(io, dir_path, .{});
+ defer dir.close(io);
+
+ _ = dir.statFile(io, basename, .{}) catch {
+ var file = try dir.createFile(io, basename, .{});
+ file.close(io);
};
}
diff --git a/test/standalone/entry_point/check_differ.zig b/test/standalone/entry_point/check_differ.zig
index 63d1ec0294..29b333632f 100644
--- a/test/standalone/entry_point/check_differ.zig
+++ b/test/standalone/entry_point/check_differ.zig
@@ -6,8 +6,10 @@ pub fn main() !void {
const args = try std.process.argsAlloc(arena);
if (args.len != 3) return error.BadUsage; // usage: 'check_differ <path a> <path b>'
- const contents_1 = try std.fs.cwd().readFileAlloc(args[1], arena, .limited(1024 * 1024 * 64)); // 64 MiB ought to be plenty
- const contents_2 = try std.fs.cwd().readFileAlloc(args[2], arena, .limited(1024 * 1024 * 64)); // 64 MiB ought to be plenty
+ const io = std.Io.Threaded.global_single_threaded.ioBasic();
+
+ const contents_1 = try std.Io.Dir.cwd().readFileAlloc(io, args[1], arena, .limited(1024 * 1024 * 64)); // 64 MiB ought to be plenty
+ const contents_2 = try std.Io.Dir.cwd().readFileAlloc(io, args[2], arena, .limited(1024 * 1024 * 64)); // 64 MiB ought to be plenty
if (std.mem.eql(u8, contents_1, contents_2)) {
return error.FilesMatch;
diff --git a/test/standalone/env_vars/main.zig b/test/standalone/env_vars/main.zig
index 12b911404a..b85105642e 100644
--- a/test/standalone/env_vars/main.zig
+++ b/test/standalone/env_vars/main.zig
@@ -3,6 +3,8 @@ const builtin = @import("builtin");
// Note: the environment variables under test are set by the build.zig
pub fn main() !void {
+ @setEvalBranchQuota(10000);
+
var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
defer _ = gpa.deinit();
const allocator = gpa.allocator();
diff --git a/test/standalone/glibc_compat/glibc_runtime_check.zig b/test/standalone/glibc_compat/glibc_runtime_check.zig
index 82f4a54ee1..78b1f7efb3 100644
--- a/test/standalone/glibc_compat/glibc_runtime_check.zig
+++ b/test/standalone/glibc_compat/glibc_runtime_check.zig
@@ -28,10 +28,10 @@ extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: [*]const u8) c_int;
// PR #17034 - fstat moved between libc_nonshared and libc
fn checkStat() !void {
- const cwdFd = std.fs.cwd().fd;
+ const cwd_fd = std.Io.Dir.cwd().handle;
var buf: [256]u8 = @splat(0);
- var result = fstatat(cwdFd, "a_file_that_definitely_does_not_exist", &buf, 0);
+ var result = fstatat(cwd_fd, "a_file_that_definitely_does_not_exist", &buf, 0);
assert(result == -1);
assert(std.posix.errno(result) == .NOENT);
diff --git a/test/standalone/install_headers/check_exists.zig b/test/standalone/install_headers/check_exists.zig
index 62706749aa..50ad4d0818 100644
--- a/test/standalone/install_headers/check_exists.zig
+++ b/test/standalone/install_headers/check_exists.zig
@@ -11,8 +11,10 @@ pub fn main() !void {
var arg_it = try std.process.argsWithAllocator(arena);
_ = arg_it.next();
- const cwd = std.fs.cwd();
- const cwd_realpath = try cwd.realpathAlloc(arena, ".");
+ const io = std.Io.Threaded.global_single_threaded.ioBasic();
+
+ const cwd = std.Io.Dir.cwd();
+ const cwd_realpath = try cwd.realPathFileAlloc(io, ".", arena);
while (arg_it.next()) |file_path| {
if (file_path.len > 0 and file_path[0] == '!') {
@@ -20,7 +22,7 @@ pub fn main() !void {
"exclusive file check '{s}{c}{s}' failed",
.{ cwd_realpath, std.fs.path.sep, file_path[1..] },
);
- if (std.fs.cwd().statFile(file_path[1..])) |_| {
+ if (cwd.statFile(io, file_path[1..], .{})) |_| {
return error.FileFound;
} else |err| switch (err) {
error.FileNotFound => {},
@@ -31,7 +33,7 @@ pub fn main() !void {
"inclusive file check '{s}{c}{s}' failed",
.{ cwd_realpath, std.fs.path.sep, file_path },
);
- _ = try std.fs.cwd().statFile(file_path);
+ _ = try cwd.statFile(io, file_path, .{});
}
}
}
diff --git a/test/standalone/ios/build.zig b/test/standalone/ios/build.zig
index b07b5b17ea..b87d55993b 100644
--- a/test/standalone/ios/build.zig
+++ b/test/standalone/ios/build.zig
@@ -23,7 +23,9 @@ pub fn build(b: *std.Build) void {
}),
});
- if (std.zig.system.darwin.getSdk(b.allocator, &target.result)) |sdk| {
+ const io = b.graph.io;
+
+ if (std.zig.system.darwin.getSdk(b.allocator, io, &target.result)) |sdk| {
b.sysroot = sdk;
exe.root_module.addSystemIncludePath(.{ .cwd_relative = b.pathJoin(&.{ sdk, "/usr/include" }) });
exe.root_module.addSystemFrameworkPath(.{ .cwd_relative = b.pathJoin(&.{ sdk, "/System/Library/Frameworks" }) });
diff --git a/test/standalone/libfuzzer/main.zig b/test/standalone/libfuzzer/main.zig
index b275b6d593..0bc093d870 100644
--- a/test/standalone/libfuzzer/main.zig
+++ b/test/standalone/libfuzzer/main.zig
@@ -15,13 +15,13 @@ pub fn main() !void {
defer args.deinit();
_ = args.skip(); // executable name
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
const cache_dir_path = args.next() orelse @panic("expected cache directory path argument");
- var cache_dir = try std.fs.cwd().openDir(cache_dir_path, .{});
- defer cache_dir.close();
+ var cache_dir = try std.Io.Dir.cwd().openDir(io, cache_dir_path, .{});
+ defer cache_dir.close(io);
abi.fuzzer_init(.fromSlice(cache_dir_path));
abi.fuzzer_init_test(testOne, .fromSlice("test"));
@@ -30,8 +30,8 @@ pub fn main() !void {
const pc_digest = abi.fuzzer_coverage().id;
const coverage_file_path = "v/" ++ std.fmt.hex(pc_digest);
- const coverage_file = try cache_dir.openFile(coverage_file_path, .{});
- defer coverage_file.close();
+ const coverage_file = try cache_dir.openFile(io, coverage_file_path, .{});
+ defer coverage_file.close(io);
var read_buf: [@sizeOf(abi.SeenPcsHeader)]u8 = undefined;
var r = coverage_file.reader(io, &read_buf);
@@ -42,6 +42,6 @@ pub fn main() !void {
const expected_len = @sizeOf(abi.SeenPcsHeader) +
try std.math.divCeil(usize, pcs_header.pcs_len, @bitSizeOf(usize)) * @sizeOf(usize) +
pcs_header.pcs_len * @sizeOf(usize);
- if (try coverage_file.getEndPos() != expected_len)
+ if (try coverage_file.length(io) != expected_len)
return error.WrongEnd;
}
diff --git a/test/standalone/posix/cwd.zig b/test/standalone/posix/cwd.zig
index 43dcc63bfe..3bd1ac066a 100644
--- a/test/standalone/posix/cwd.zig
+++ b/test/standalone/posix/cwd.zig
@@ -1,21 +1,30 @@
-const std = @import("std");
const builtin = @import("builtin");
+const std = @import("std");
+const Io = std.Io;
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+
const path_max = std.fs.max_path_bytes;
pub fn main() !void {
- if (builtin.target.os.tag == .wasi) {
- // WASI doesn't support changing the working directory at all.
- return;
+ switch (builtin.target.os.tag) {
+ .wasi => return, // WASI doesn't support changing the working directory at all.
+ .windows => return, // POSIX is not implemented by Windows
+ else => {},
}
- var Allocator = std.heap.DebugAllocator(.{}){};
- const a = Allocator.allocator();
- defer std.debug.assert(Allocator.deinit() == .ok);
+ var debug_allocator: std.heap.DebugAllocator(.{}) = .{};
+ defer assert(debug_allocator.deinit() == .ok);
+ const gpa = debug_allocator.allocator();
+
+ var threaded: std.Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
try test_chdir_self();
try test_chdir_absolute();
- try test_chdir_relative(a);
+ try test_chdir_relative(gpa, io);
}
// get current working directory and expect it to match given path
@@ -46,20 +55,20 @@ fn test_chdir_absolute() !void {
try expect_cwd(parent);
}
-fn test_chdir_relative(a: std.mem.Allocator) !void {
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
+fn test_chdir_relative(gpa: Allocator, io: Io) !void {
+ var tmp = tmpDir(io, .{});
+ defer tmp.cleanup(io);
// Use the tmpDir parent_dir as the "base" for the test. Then cd into the child
- try tmp.parent_dir.setAsCwd();
+ try std.process.setCurrentDir(io, tmp.parent_dir);
// Capture base working directory path, to build expected full path
var base_cwd_buf: [path_max]u8 = undefined;
const base_cwd = try std.posix.getcwd(base_cwd_buf[0..]);
const relative_dir_name = &tmp.sub_path;
- const expected_path = try std.fs.path.resolve(a, &.{ base_cwd, relative_dir_name });
- defer a.free(expected_path);
+ const expected_path = try std.fs.path.resolve(gpa, &.{ base_cwd, relative_dir_name });
+ defer gpa.free(expected_path);
// change current working directory to new test directory
try std.posix.chdir(relative_dir_name);
@@ -68,8 +77,46 @@ fn test_chdir_relative(a: std.mem.Allocator) !void {
const new_cwd = try std.posix.getcwd(new_cwd_buf[0..]);
// On Windows, fs.path.resolve returns an uppercase drive letter, but the drive letter returned by getcwd may be lowercase
- const resolved_cwd = try std.fs.path.resolve(a, &.{new_cwd});
- defer a.free(resolved_cwd);
+ const resolved_cwd = try std.fs.path.resolve(gpa, &.{new_cwd});
+ defer gpa.free(resolved_cwd);
try std.testing.expectEqualStrings(expected_path, resolved_cwd);
}
+
+pub fn tmpDir(io: Io, opts: Io.Dir.OpenOptions) TmpDir {
+ var random_bytes: [TmpDir.random_bytes_count]u8 = undefined;
+ std.crypto.random.bytes(&random_bytes);
+ var sub_path: [TmpDir.sub_path_len]u8 = undefined;
+ _ = std.fs.base64_encoder.encode(&sub_path, &random_bytes);
+
+ const cwd = Io.Dir.cwd();
+ var cache_dir = cwd.createDirPathOpen(io, ".zig-cache", .{}) catch
+ @panic("unable to make tmp dir for testing: unable to make and open .zig-cache dir");
+ defer cache_dir.close(io);
+ const parent_dir = cache_dir.createDirPathOpen(io, "tmp", .{}) catch
+ @panic("unable to make tmp dir for testing: unable to make and open .zig-cache/tmp dir");
+ const dir = parent_dir.createDirPathOpen(io, &sub_path, .{ .open_options = opts }) catch
+ @panic("unable to make tmp dir for testing: unable to make and open the tmp dir");
+
+ return .{
+ .dir = dir,
+ .parent_dir = parent_dir,
+ .sub_path = sub_path,
+ };
+}
+
+pub const TmpDir = struct {
+ dir: Io.Dir,
+ parent_dir: Io.Dir,
+ sub_path: [sub_path_len]u8,
+
+ const random_bytes_count = 12;
+ const sub_path_len = std.fs.base64_encoder.calcSize(random_bytes_count);
+
+ pub fn cleanup(self: *TmpDir, io: Io) void {
+ self.dir.close(io);
+ self.parent_dir.deleteTree(io, &self.sub_path) catch {};
+ self.parent_dir.close(io);
+ self.* = undefined;
+ }
+};
diff --git a/test/standalone/posix/relpaths.zig b/test/standalone/posix/relpaths.zig
index 40e2e09464..b5b2bb5f61 100644
--- a/test/standalone/posix/relpaths.zig
+++ b/test/standalone/posix/relpaths.zig
@@ -1,71 +1,32 @@
// Test relative paths through POSIX APIS. These tests have to change the cwd, so
// they shouldn't be Zig unit tests.
-const std = @import("std");
const builtin = @import("builtin");
+const std = @import("std");
+const Io = std.Io;
+
pub fn main() !void {
if (builtin.target.os.tag == .wasi) return; // Can link, but can't change into tmpDir
- var Allocator = std.heap.DebugAllocator(.{}){};
- const a = Allocator.allocator();
- defer std.debug.assert(Allocator.deinit() == .ok);
+ var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
+ const gpa = debug_allocator.allocator();
+ defer std.debug.assert(debug_allocator.deinit() == .ok);
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
-
- // Want to test relative paths, so cd into the tmpdir for these tests
- try tmp.dir.setAsCwd();
-
- try test_symlink(a, tmp);
- try test_link(tmp);
-}
-
-fn test_symlink(a: std.mem.Allocator, tmp: std.testing.TmpDir) !void {
- const target_name = "symlink-target";
- const symlink_name = "symlinker";
-
- // Create the target file
- try tmp.dir.writeFile(.{ .sub_path = target_name, .data = "nonsense" });
-
- if (builtin.target.os.tag == .windows) {
- const wtarget_name = try std.unicode.wtf8ToWtf16LeAllocZ(a, target_name);
- const wsymlink_name = try std.unicode.wtf8ToWtf16LeAllocZ(a, symlink_name);
- defer a.free(wtarget_name);
- defer a.free(wsymlink_name);
-
- std.os.windows.CreateSymbolicLink(tmp.dir.fd, wsymlink_name, wtarget_name, false) catch |err| switch (err) {
- // Symlink requires admin privileges on windows, so this test can legitimately fail.
- error.AccessDenied => return,
- else => return err,
- };
- } else {
- try std.posix.symlink(target_name, symlink_name);
- }
+ var threaded: std.Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
- var buffer: [std.fs.max_path_bytes]u8 = undefined;
- const given = try std.posix.readlink(symlink_name, buffer[0..]);
- try std.testing.expectEqualStrings(target_name, given);
-}
+ var tmp = tmpDir(io, .{});
+ defer tmp.cleanup(io);
-fn getLinkInfo(fd: std.posix.fd_t) !struct { std.posix.ino_t, std.posix.nlink_t } {
- if (builtin.target.os.tag == .linux) {
- const stx = try std.os.linux.wrapped.statx(
- fd,
- "",
- std.posix.AT.EMPTY_PATH,
- .{ .INO = true, .NLINK = true },
- );
- std.debug.assert(stx.mask.INO);
- std.debug.assert(stx.mask.NLINK);
- return .{ stx.ino, stx.nlink };
- }
+ // Want to test relative paths, so cd into the tmpdir for these tests
+ try std.process.setCurrentDir(io, tmp.dir);
- const st = try std.posix.fstat(fd);
- return .{ st.ino, st.nlink };
+ try test_link(io, tmp);
}
-fn test_link(tmp: std.testing.TmpDir) !void {
+fn test_link(io: Io, tmp: TmpDir) !void {
switch (builtin.target.os.tag) {
.linux, .illumos => {},
else => return,
@@ -74,29 +35,67 @@ fn test_link(tmp: std.testing.TmpDir) !void {
const target_name = "link-target";
const link_name = "newlink";
- try tmp.dir.writeFile(.{ .sub_path = target_name, .data = "example" });
+ try tmp.dir.writeFile(io, .{ .sub_path = target_name, .data = "example" });
// Test 1: create the relative link from inside tmp
- try std.posix.link(target_name, link_name);
+ try Io.Dir.hardLink(.cwd(), target_name, .cwd(), link_name, io, .{});
// Verify
- const efd = try tmp.dir.openFile(target_name, .{});
- defer efd.close();
+ const efd = try tmp.dir.openFile(io, target_name, .{});
+ defer efd.close(io);
- const nfd = try tmp.dir.openFile(link_name, .{});
- defer nfd.close();
+ const nfd = try tmp.dir.openFile(io, link_name, .{});
+ defer nfd.close(io);
{
- const eino, _ = try getLinkInfo(efd.handle);
- const nino, const nlink = try getLinkInfo(nfd.handle);
- try std.testing.expectEqual(eino, nino);
- try std.testing.expectEqual(@as(std.posix.nlink_t, 2), nlink);
+ const e_stat = try efd.stat(io);
+ const n_stat = try nfd.stat(io);
+ try std.testing.expectEqual(e_stat.inode, n_stat.inode);
+ try std.testing.expectEqual(2, n_stat.nlink);
}
// Test 2: Remove the link and see the stats update
- try std.posix.unlink(link_name);
+ try Io.Dir.cwd().deleteFile(io, link_name);
{
- _, const elink = try getLinkInfo(efd.handle);
- try std.testing.expectEqual(@as(std.posix.nlink_t, 1), elink);
+ const e_stat = try efd.stat(io);
+ try std.testing.expectEqual(1, e_stat.nlink);
}
}
+
+pub fn tmpDir(io: Io, opts: Io.Dir.OpenOptions) TmpDir {
+ var random_bytes: [TmpDir.random_bytes_count]u8 = undefined;
+ std.crypto.random.bytes(&random_bytes);
+ var sub_path: [TmpDir.sub_path_len]u8 = undefined;
+ _ = std.fs.base64_encoder.encode(&sub_path, &random_bytes);
+
+ const cwd = Io.Dir.cwd();
+ var cache_dir = cwd.createDirPathOpen(io, ".zig-cache", .{}) catch
+ @panic("unable to make tmp dir for testing: unable to make and open .zig-cache dir");
+ defer cache_dir.close(io);
+ const parent_dir = cache_dir.createDirPathOpen(io, "tmp", .{}) catch
+ @panic("unable to make tmp dir for testing: unable to make and open .zig-cache/tmp dir");
+ const dir = parent_dir.createDirPathOpen(io, &sub_path, .{ .open_options = opts }) catch
+ @panic("unable to make tmp dir for testing: unable to make and open the tmp dir");
+
+ return .{
+ .dir = dir,
+ .parent_dir = parent_dir,
+ .sub_path = sub_path,
+ };
+}
+
+pub const TmpDir = struct {
+ dir: Io.Dir,
+ parent_dir: Io.Dir,
+ sub_path: [sub_path_len]u8,
+
+ const random_bytes_count = 12;
+ const sub_path_len = std.fs.base64_encoder.calcSize(random_bytes_count);
+
+ pub fn cleanup(self: *TmpDir, io: Io) void {
+ self.dir.close(io);
+ self.parent_dir.deleteTree(io, &self.sub_path) catch {};
+ self.parent_dir.close(io);
+ self.* = undefined;
+ }
+};
diff --git a/test/standalone/run_cwd/check_file_exists.zig b/test/standalone/run_cwd/check_file_exists.zig
index 640fc99a7a..a885c7dafd 100644
--- a/test/standalone/run_cwd/check_file_exists.zig
+++ b/test/standalone/run_cwd/check_file_exists.zig
@@ -8,7 +8,9 @@ pub fn main() !void {
if (args.len != 2) return error.BadUsage;
const path = args[1];
- std.fs.cwd().access(path, .{}) catch return error.AccessFailed;
+ const io = std.Io.Threaded.global_single_threaded.ioBasic();
+
+ std.Io.Dir.cwd().access(io, path, .{}) catch return error.AccessFailed;
}
const std = @import("std");
diff --git a/test/standalone/run_output_caching/main.zig b/test/standalone/run_output_caching/main.zig
index e4e6332f11..9786101d32 100644
--- a/test/standalone/run_output_caching/main.zig
+++ b/test/standalone/run_output_caching/main.zig
@@ -1,10 +1,11 @@
const std = @import("std");
pub fn main() !void {
+ const io = std.Io.Threaded.global_single_threaded.ioBasic();
var args = try std.process.argsWithAllocator(std.heap.page_allocator);
_ = args.skip();
const filename = args.next().?;
- const file = try std.fs.cwd().createFile(filename, .{});
- defer file.close();
- try file.writeAll(filename);
+ const file = try std.Io.Dir.cwd().createFile(io, filename, .{});
+ defer file.close(io);
+ try file.writeStreamingAll(io, filename);
}
diff --git a/test/standalone/run_output_paths/create_file.zig b/test/standalone/run_output_paths/create_file.zig
index 260c36f10c..7efa8e051a 100644
--- a/test/standalone/run_output_paths/create_file.zig
+++ b/test/standalone/run_output_paths/create_file.zig
@@ -1,16 +1,17 @@
const std = @import("std");
pub fn main() !void {
+ const io = std.Io.Threaded.global_single_threaded.ioBasic();
var args = try std.process.argsWithAllocator(std.heap.page_allocator);
_ = args.skip();
const dir_name = args.next().?;
- const dir = try std.fs.cwd().openDir(if (std.mem.startsWith(u8, dir_name, "--dir="))
+ const dir = try std.Io.Dir.cwd().openDir(io, if (std.mem.startsWith(u8, dir_name, "--dir="))
dir_name["--dir=".len..]
else
dir_name, .{});
const file_name = args.next().?;
- const file = try dir.createFile(file_name, .{});
- var file_writer = file.writer(&.{});
+ const file = try dir.createFile(io, file_name, .{});
+ var file_writer = file.writer(io, &.{});
try file_writer.interface.print(
\\{s}
\\{s}
diff --git a/test/standalone/self_exe_symlink/build.zig b/test/standalone/self_exe_symlink/build.zig
index 651740c04b..137848b953 100644
--- a/test/standalone/self_exe_symlink/build.zig
+++ b/test/standalone/self_exe_symlink/build.zig
@@ -9,10 +9,6 @@ pub fn build(b: *std.Build) void {
const optimize: std.builtin.OptimizeMode = .Debug;
const target = b.graph.host;
- // The test requires getFdPath in order to to get the path of the
- // File returned by openSelfExe
- if (!std.os.isGetFdPathSupportedOnTarget(target.result.os)) return;
-
const main = b.addExecutable(.{
.name = "main",
.root_module = b.createModule(.{
diff --git a/test/standalone/self_exe_symlink/create-symlink.zig b/test/standalone/self_exe_symlink/create-symlink.zig
index dac5891ba8..d725207320 100644
--- a/test/standalone/self_exe_symlink/create-symlink.zig
+++ b/test/standalone/self_exe_symlink/create-symlink.zig
@@ -14,5 +14,8 @@ pub fn main() anyerror!void {
// If `exe_path` is relative to our cwd, we need to convert it to be relative to the dirname of `symlink_path`.
const exe_rel_path = try std.fs.path.relative(allocator, std.fs.path.dirname(symlink_path) orelse ".", exe_path);
defer allocator.free(exe_rel_path);
- try std.fs.cwd().symLink(exe_rel_path, symlink_path, .{});
+
+ const io = std.Io.Threaded.global_single_threaded.ioBasic();
+
+ try std.Io.Dir.cwd().symLink(io, exe_rel_path, symlink_path, .{});
}
diff --git a/test/standalone/self_exe_symlink/main.zig b/test/standalone/self_exe_symlink/main.zig
index b74c4c7f95..fa2c3380b5 100644
--- a/test/standalone/self_exe_symlink/main.zig
+++ b/test/standalone/self_exe_symlink/main.zig
@@ -1,17 +1,22 @@
const std = @import("std");
pub fn main() !void {
- var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
- defer std.debug.assert(gpa.deinit() == .ok);
- const allocator = gpa.allocator();
+ var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
+ defer if (debug_allocator.deinit() == .leak) @panic("found memory leaks");
+ const gpa = debug_allocator.allocator();
- const self_path = try std.fs.selfExePathAlloc(allocator);
- defer allocator.free(self_path);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
+ const self_path = try std.process.executablePathAlloc(io, gpa);
+ defer gpa.free(self_path);
+
+ var self_exe = try std.process.openExecutable(io, .{});
+ defer self_exe.close(io);
- var self_exe = try std.fs.openSelfExe(.{});
- defer self_exe.close();
var buf: [std.fs.max_path_bytes]u8 = undefined;
- const self_exe_path = try std.os.getFdPath(self_exe.handle, &buf);
+ const self_exe_path = buf[0..try self_exe.realPath(io, &buf)];
try std.testing.expectEqualStrings(self_exe_path, self_path);
}
diff --git a/test/standalone/simple/cat/main.zig b/test/standalone/simple/cat/main.zig
index 9ea980aecc..0135ac4b50 100644
--- a/test/standalone/simple/cat/main.zig
+++ b/test/standalone/simple/cat/main.zig
@@ -1,5 +1,5 @@
const std = @import("std");
-const fs = std.fs;
+const Io = std.Io;
const mem = std.mem;
const warn = std.log.warn;
const fatal = std.process.fatal;
@@ -9,7 +9,7 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
- var threaded: std.Io.Threaded = .init(arena);
+ var threaded: std.Io.Threaded = .init(arena, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -18,11 +18,11 @@ pub fn main() !void {
const exe = args[0];
var catted_anything = false;
var stdout_buffer: [4096]u8 = undefined;
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout = &stdout_writer.interface;
- var stdin_reader = fs.File.stdin().readerStreaming(io, &.{});
+ var stdin_reader = Io.File.stdin().readerStreaming(io, &.{});
- const cwd = fs.cwd();
+ const cwd = Io.Dir.cwd();
for (args[1..]) |arg| {
if (mem.eql(u8, arg, "-")) {
@@ -32,8 +32,8 @@ pub fn main() !void {
} else if (mem.startsWith(u8, arg, "-")) {
return usage(exe);
} else {
- const file = cwd.openFile(arg, .{}) catch |err| fatal("unable to open file: {t}\n", .{err});
- defer file.close();
+ const file = cwd.openFile(io, arg, .{}) catch |err| fatal("unable to open file: {t}\n", .{err});
+ defer file.close(io);
catted_anything = true;
var file_reader = file.reader(io, &.{});
diff --git a/test/standalone/simple/guess_number/main.zig b/test/standalone/simple/guess_number/main.zig
index d477de2b78..b98d109f21 100644
--- a/test/standalone/simple/guess_number/main.zig
+++ b/test/standalone/simple/guess_number/main.zig
@@ -1,10 +1,23 @@
const builtin = @import("builtin");
const std = @import("std");
+// See https://github.com/ziglang/zig/issues/24510
+// for the plan to simplify this code.
pub fn main() !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
+ defer _ = debug_allocator.deinit();
+ const gpa = debug_allocator.allocator();
+
+ var threaded: std.Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
const out = &stdout_writer.interface;
- const stdin: std.fs.File = .stdin();
+
+ var line_buffer: [20]u8 = undefined;
+ var stdin_reader: std.Io.File.Reader = .init(.stdin(), io, &line_buffer);
+ const in = &stdin_reader.interface;
try out.writeAll("Welcome to the Guess Number Game in Zig.\n");
@@ -12,13 +25,15 @@ pub fn main() !void {
while (true) {
try out.writeAll("\nGuess a number between 1 and 100: ");
- var line_buf: [20]u8 = undefined;
- const amt = try stdin.read(&line_buf);
- if (amt == line_buf.len) {
- try out.writeAll("Input too long.\n");
- continue;
- }
- const line = std.mem.trimEnd(u8, line_buf[0..amt], "\r\n");
+ const untrimmed_line = in.takeSentinel('\n') catch |err| switch (err) {
+ error.StreamTooLong => {
+ try out.writeAll("Line too long.\n");
+ _ = try in.discardDelimiterInclusive('\n');
+ continue;
+ },
+ else => |e| return e,
+ };
+ const line = std.mem.trimEnd(u8, untrimmed_line, "\r\n");
const guess = std.fmt.parseUnsigned(u8, line, 10) catch {
try out.writeAll("Invalid number.\n");
diff --git a/test/standalone/simple/hello_world/hello.zig b/test/standalone/simple/hello_world/hello.zig
index 3b2b910687..a031d6c6f0 100644
--- a/test/standalone/simple/hello_world/hello.zig
+++ b/test/standalone/simple/hello_world/hello.zig
@@ -1,5 +1,15 @@
const std = @import("std");
+// See https://github.com/ziglang/zig/issues/24510
+// for the plan to simplify this code.
pub fn main() !void {
- try std.fs.File.stdout().writeAll("Hello, World!\n");
+ var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
+ defer _ = debug_allocator.deinit();
+ const gpa = debug_allocator.allocator();
+
+ var threaded: std.Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
+ try std.Io.File.stdout().writeStreamingAll(io, "Hello, World!\n");
}
diff --git a/test/standalone/windows_argv/build.zig b/test/standalone/windows_argv/build.zig
index df988d2371..afe6dd80e5 100644
--- a/test/standalone/windows_argv/build.zig
+++ b/test/standalone/windows_argv/build.zig
@@ -67,7 +67,7 @@ pub fn build(b: *std.Build) !void {
// Only target the MSVC ABI if MSVC/Windows SDK is available
const has_msvc = has_msvc: {
- const sdk = std.zig.WindowsSdk.find(b.allocator, builtin.cpu.arch) catch |err| switch (err) {
+ const sdk = std.zig.WindowsSdk.find(b.allocator, b.graph.io, builtin.cpu.arch) catch |err| switch (err) {
error.OutOfMemory => @panic("oom"),
else => break :has_msvc false,
};
diff --git a/test/standalone/windows_bat_args/echo-args.zig b/test/standalone/windows_bat_args/echo-args.zig
index 054c4a6975..6aeb43d56c 100644
--- a/test/standalone/windows_bat_args/echo-args.zig
+++ b/test/standalone/windows_bat_args/echo-args.zig
@@ -5,7 +5,9 @@ pub fn main() !void {
defer arena_state.deinit();
const arena = arena_state.allocator();
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ const io = std.Options.debug_io;
+
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
const stdout = &stdout_writer.interface;
var args = try std.process.argsAlloc(arena);
for (args[1..], 1..) |arg, i| {
diff --git a/test/standalone/windows_bat_args/fuzz.zig b/test/standalone/windows_bat_args/fuzz.zig
index 8b9895b52d..28749259f7 100644
--- a/test/standalone/windows_bat_args/fuzz.zig
+++ b/test/standalone/windows_bat_args/fuzz.zig
@@ -1,5 +1,7 @@
-const std = @import("std");
const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
pub fn main() anyerror!void {
@@ -7,6 +9,10 @@ pub fn main() anyerror!void {
defer std.debug.assert(debug_alloc_inst.deinit() == .ok);
const gpa = debug_alloc_inst.allocator();
+ var threaded: Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
var it = try std.process.argsWithAllocator(gpa);
defer it.deinit();
_ = it.next() orelse unreachable; // skip binary name
@@ -36,11 +42,11 @@ pub fn main() anyerror!void {
std.debug.print("rand seed: {}\n", .{seed});
}
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
+ var tmp = tmpDir(io, .{});
+ defer tmp.cleanup(io);
- try tmp.dir.setAsCwd();
- defer tmp.parent_dir.setAsCwd() catch {};
+ try std.process.setCurrentDir(io, tmp.dir);
+ defer std.process.setCurrentDir(io, tmp.parent_dir) catch {};
// `child_exe_path_orig` might be relative; make it relative to our new cwd.
const child_exe_path = try std.fs.path.resolve(gpa, &.{ "..\\..\\..", child_exe_path_orig });
@@ -56,15 +62,15 @@ pub fn main() anyerror!void {
const preamble_len = buf.items.len;
try buf.appendSlice(gpa, " %*");
- try tmp.dir.writeFile(.{ .sub_path = "args1.bat", .data = buf.items });
+ try tmp.dir.writeFile(io, .{ .sub_path = "args1.bat", .data = buf.items });
buf.shrinkRetainingCapacity(preamble_len);
try buf.appendSlice(gpa, " %1 %2 %3 %4 %5 %6 %7 %8 %9");
- try tmp.dir.writeFile(.{ .sub_path = "args2.bat", .data = buf.items });
+ try tmp.dir.writeFile(io, .{ .sub_path = "args2.bat", .data = buf.items });
buf.shrinkRetainingCapacity(preamble_len);
try buf.appendSlice(gpa, " \"%~1\" \"%~2\" \"%~3\" \"%~4\" \"%~5\" \"%~6\" \"%~7\" \"%~8\" \"%~9\"");
- try tmp.dir.writeFile(.{ .sub_path = "args3.bat", .data = buf.items });
+ try tmp.dir.writeFile(io, .{ .sub_path = "args3.bat", .data = buf.items });
buf.shrinkRetainingCapacity(preamble_len);
var i: u64 = 0;
@@ -72,19 +78,19 @@ pub fn main() anyerror!void {
const rand_arg = try randomArg(gpa, rand);
defer gpa.free(rand_arg);
- try testExec(gpa, &.{rand_arg}, null);
+ try testExec(gpa, io, &.{rand_arg}, null);
i += 1;
}
}
-fn testExec(gpa: std.mem.Allocator, args: []const []const u8, env: ?*std.process.EnvMap) !void {
- try testExecBat(gpa, "args1.bat", args, env);
- try testExecBat(gpa, "args2.bat", args, env);
- try testExecBat(gpa, "args3.bat", args, env);
+fn testExec(gpa: Allocator, io: Io, args: []const []const u8, env: ?*std.process.EnvMap) !void {
+ try testExecBat(gpa, io, "args1.bat", args, env);
+ try testExecBat(gpa, io, "args2.bat", args, env);
+ try testExecBat(gpa, io, "args3.bat", args, env);
}
-fn testExecBat(gpa: std.mem.Allocator, bat: []const u8, args: []const []const u8, env: ?*std.process.EnvMap) !void {
+fn testExecBat(gpa: Allocator, io: Io, bat: []const u8, args: []const []const u8, env: ?*std.process.EnvMap) !void {
const argv = try gpa.alloc([]const u8, 1 + args.len);
defer gpa.free(argv);
argv[0] = bat;
@@ -92,8 +98,7 @@ fn testExecBat(gpa: std.mem.Allocator, bat: []const u8, args: []const []const u8
const can_have_trailing_empty_args = std.mem.eql(u8, bat, "args3.bat");
- const result = try std.process.Child.run(.{
- .allocator = gpa,
+ const result = try std.process.Child.run(gpa, io, .{
.env_map = env,
.argv = argv,
});
@@ -163,3 +168,41 @@ fn randomArg(gpa: Allocator, rand: std.Random) ![]const u8 {
return buf.toOwnedSlice(gpa);
}
+
+pub fn tmpDir(io: Io, opts: Io.Dir.OpenOptions) TmpDir {
+ var random_bytes: [TmpDir.random_bytes_count]u8 = undefined;
+ std.crypto.random.bytes(&random_bytes);
+ var sub_path: [TmpDir.sub_path_len]u8 = undefined;
+ _ = std.fs.base64_encoder.encode(&sub_path, &random_bytes);
+
+ const cwd = Io.Dir.cwd();
+ var cache_dir = cwd.createDirPathOpen(io, ".zig-cache", .{}) catch
+ @panic("unable to make tmp dir for testing: unable to make and open .zig-cache dir");
+ defer cache_dir.close(io);
+ const parent_dir = cache_dir.createDirPathOpen(io, "tmp", .{}) catch
+ @panic("unable to make tmp dir for testing: unable to make and open .zig-cache/tmp dir");
+ const dir = parent_dir.createDirPathOpen(io, &sub_path, .{ .open_options = opts }) catch
+ @panic("unable to make tmp dir for testing: unable to make and open the tmp dir");
+
+ return .{
+ .dir = dir,
+ .parent_dir = parent_dir,
+ .sub_path = sub_path,
+ };
+}
+
+pub const TmpDir = struct {
+ dir: Io.Dir,
+ parent_dir: Io.Dir,
+ sub_path: [sub_path_len]u8,
+
+ const random_bytes_count = 12;
+ const sub_path_len = std.fs.base64_encoder.calcSize(random_bytes_count);
+
+ pub fn cleanup(self: *TmpDir, io: Io) void {
+ self.dir.close(io);
+ self.parent_dir.deleteTree(io, &self.sub_path) catch {};
+ self.parent_dir.close(io);
+ self.* = undefined;
+ }
+};
diff --git a/test/standalone/windows_bat_args/test.zig b/test/standalone/windows_bat_args/test.zig
index 4690d983f3..e0d1abe806 100644
--- a/test/standalone/windows_bat_args/test.zig
+++ b/test/standalone/windows_bat_args/test.zig
@@ -1,20 +1,25 @@
const std = @import("std");
+const Io = std.Io;
+const Allocator = std.mem.Allocator;
pub fn main() anyerror!void {
var debug_alloc_inst: std.heap.DebugAllocator(.{}) = .init;
defer std.debug.assert(debug_alloc_inst.deinit() == .ok);
const gpa = debug_alloc_inst.allocator();
+ var threaded: Io.Threaded = .init(gpa, .{});
+ const io = threaded.io();
+
var it = try std.process.argsWithAllocator(gpa);
defer it.deinit();
_ = it.next() orelse unreachable; // skip binary name
const child_exe_path_orig = it.next() orelse unreachable;
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
+ var tmp = tmpDir(io, .{});
+ defer tmp.cleanup(io);
- try tmp.dir.setAsCwd();
- defer tmp.parent_dir.setAsCwd() catch {};
+ try std.process.setCurrentDir(io, tmp.dir);
+ defer std.process.setCurrentDir(io, tmp.parent_dir) catch {};
// `child_exe_path_orig` might be relative; make it relative to our new cwd.
const child_exe_path = try std.fs.path.resolve(gpa, &.{ "..\\..\\..", child_exe_path_orig });
@@ -30,53 +35,53 @@ pub fn main() anyerror!void {
const preamble_len = buf.items.len;
try buf.appendSlice(gpa, " %*");
- try tmp.dir.writeFile(.{ .sub_path = "args1.bat", .data = buf.items });
+ try tmp.dir.writeFile(io, .{ .sub_path = "args1.bat", .data = buf.items });
buf.shrinkRetainingCapacity(preamble_len);
try buf.appendSlice(gpa, " %1 %2 %3 %4 %5 %6 %7 %8 %9");
- try tmp.dir.writeFile(.{ .sub_path = "args2.bat", .data = buf.items });
+ try tmp.dir.writeFile(io, .{ .sub_path = "args2.bat", .data = buf.items });
buf.shrinkRetainingCapacity(preamble_len);
try buf.appendSlice(gpa, " \"%~1\" \"%~2\" \"%~3\" \"%~4\" \"%~5\" \"%~6\" \"%~7\" \"%~8\" \"%~9\"");
- try tmp.dir.writeFile(.{ .sub_path = "args3.bat", .data = buf.items });
+ try tmp.dir.writeFile(io, .{ .sub_path = "args3.bat", .data = buf.items });
buf.shrinkRetainingCapacity(preamble_len);
// Test cases are from https://github.com/rust-lang/rust/blob/master/tests/ui/std/windows-bat-args.rs
- try testExecError(error.InvalidBatchScriptArg, gpa, &.{"\x00"});
- try testExecError(error.InvalidBatchScriptArg, gpa, &.{"\n"});
- try testExecError(error.InvalidBatchScriptArg, gpa, &.{"\r"});
- try testExec(gpa, &.{ "a", "b" }, null);
- try testExec(gpa, &.{ "c is for cat", "d is for dog" }, null);
- try testExec(gpa, &.{ "\"", " \"" }, null);
- try testExec(gpa, &.{ "\\", "\\" }, null);
- try testExec(gpa, &.{">file.txt"}, null);
- try testExec(gpa, &.{"whoami.exe"}, null);
- try testExec(gpa, &.{"&a.exe"}, null);
- try testExec(gpa, &.{"&echo hello "}, null);
- try testExec(gpa, &.{ "&echo hello", "&whoami", ">file.txt" }, null);
- try testExec(gpa, &.{"!TMP!"}, null);
- try testExec(gpa, &.{"key=value"}, null);
- try testExec(gpa, &.{"\"key=value\""}, null);
- try testExec(gpa, &.{"key = value"}, null);
- try testExec(gpa, &.{"key=[\"value\"]"}, null);
- try testExec(gpa, &.{ "", "a=b" }, null);
- try testExec(gpa, &.{"key=\"foo bar\""}, null);
- try testExec(gpa, &.{"key=[\"my_value]"}, null);
- try testExec(gpa, &.{"key=[\"my_value\",\"other-value\"]"}, null);
- try testExec(gpa, &.{"key\\=value"}, null);
- try testExec(gpa, &.{"key=\"&whoami\""}, null);
- try testExec(gpa, &.{"key=\"value\"=5"}, null);
- try testExec(gpa, &.{"key=[\">file.txt\"]"}, null);
- try testExec(gpa, &.{"%hello"}, null);
- try testExec(gpa, &.{"%PATH%"}, null);
- try testExec(gpa, &.{"%%cd:~,%"}, null);
- try testExec(gpa, &.{"%PATH%PATH%"}, null);
- try testExec(gpa, &.{"\">file.txt"}, null);
- try testExec(gpa, &.{"abc\"&echo hello"}, null);
- try testExec(gpa, &.{"123\">file.txt"}, null);
- try testExec(gpa, &.{"\"&echo hello&whoami.exe"}, null);
- try testExec(gpa, &.{ "\"hello^\"world\"", "hello &echo oh no >file.txt" }, null);
- try testExec(gpa, &.{"&whoami.exe"}, null);
+ try testExecError(error.InvalidBatchScriptArg, gpa, io, &.{"\x00"});
+ try testExecError(error.InvalidBatchScriptArg, gpa, io, &.{"\n"});
+ try testExecError(error.InvalidBatchScriptArg, gpa, io, &.{"\r"});
+ try testExec(gpa, io, &.{ "a", "b" }, null);
+ try testExec(gpa, io, &.{ "c is for cat", "d is for dog" }, null);
+ try testExec(gpa, io, &.{ "\"", " \"" }, null);
+ try testExec(gpa, io, &.{ "\\", "\\" }, null);
+ try testExec(gpa, io, &.{">file.txt"}, null);
+ try testExec(gpa, io, &.{"whoami.exe"}, null);
+ try testExec(gpa, io, &.{"&a.exe"}, null);
+ try testExec(gpa, io, &.{"&echo hello "}, null);
+ try testExec(gpa, io, &.{ "&echo hello", "&whoami", ">file.txt" }, null);
+ try testExec(gpa, io, &.{"!TMP!"}, null);
+ try testExec(gpa, io, &.{"key=value"}, null);
+ try testExec(gpa, io, &.{"\"key=value\""}, null);
+ try testExec(gpa, io, &.{"key = value"}, null);
+ try testExec(gpa, io, &.{"key=[\"value\"]"}, null);
+ try testExec(gpa, io, &.{ "", "a=b" }, null);
+ try testExec(gpa, io, &.{"key=\"foo bar\""}, null);
+ try testExec(gpa, io, &.{"key=[\"my_value]"}, null);
+ try testExec(gpa, io, &.{"key=[\"my_value\",\"other-value\"]"}, null);
+ try testExec(gpa, io, &.{"key\\=value"}, null);
+ try testExec(gpa, io, &.{"key=\"&whoami\""}, null);
+ try testExec(gpa, io, &.{"key=\"value\"=5"}, null);
+ try testExec(gpa, io, &.{"key=[\">file.txt\"]"}, null);
+ try testExec(gpa, io, &.{"%hello"}, null);
+ try testExec(gpa, io, &.{"%PATH%"}, null);
+ try testExec(gpa, io, &.{"%%cd:~,%"}, null);
+ try testExec(gpa, io, &.{"%PATH%PATH%"}, null);
+ try testExec(gpa, io, &.{"\">file.txt"}, null);
+ try testExec(gpa, io, &.{"abc\"&echo hello"}, null);
+ try testExec(gpa, io, &.{"123\">file.txt"}, null);
+ try testExec(gpa, io, &.{"\"&echo hello&whoami.exe"}, null);
+ try testExec(gpa, io, &.{ "\"hello^\"world\"", "hello &echo oh no >file.txt" }, null);
+ try testExec(gpa, io, &.{"&whoami.exe"}, null);
// Ensure that trailing space and . characters can't lead to unexpected bat/cmd script execution.
// In many Windows APIs (including CreateProcess), trailing space and . characters are stripped
@@ -94,14 +99,14 @@ pub fn main() anyerror!void {
// > "args1.bat .. "
// '"args1.bat .. "' is not recognized as an internal or external command,
// operable program or batch file.
- try std.testing.expectError(error.FileNotFound, testExecBat(gpa, "args1.bat .. ", &.{"abc"}, null));
+ try std.testing.expectError(error.FileNotFound, testExecBat(gpa, io, "args1.bat .. ", &.{"abc"}, null));
const absolute_with_trailing = blk: {
- const absolute_path = try std.fs.realpathAlloc(gpa, "args1.bat");
+ const absolute_path = try Io.Dir.cwd().realPathFileAlloc(io, "args1.bat", gpa);
defer gpa.free(absolute_path);
break :blk try std.mem.concat(gpa, u8, &.{ absolute_path, " .. " });
};
defer gpa.free(absolute_with_trailing);
- try std.testing.expectError(error.FileNotFound, testExecBat(gpa, absolute_with_trailing, &.{"abc"}, null));
+ try std.testing.expectError(error.FileNotFound, testExecBat(gpa, io, absolute_with_trailing, &.{"abc"}, null));
var env = env: {
var env = try std.process.getEnvMap(gpa);
@@ -115,23 +120,23 @@ pub fn main() anyerror!void {
break :env env;
};
defer env.deinit();
- try testExec(gpa, &.{"%FOO%"}, &env);
+ try testExec(gpa, io, &.{"%FOO%"}, &env);
// Ensure that none of the `>file.txt`s have caused file.txt to be created
- try std.testing.expectError(error.FileNotFound, tmp.dir.access("file.txt", .{}));
+ try std.testing.expectError(error.FileNotFound, tmp.dir.access(io, "file.txt", .{}));
}
-fn testExecError(err: anyerror, gpa: std.mem.Allocator, args: []const []const u8) !void {
- return std.testing.expectError(err, testExec(gpa, args, null));
+fn testExecError(err: anyerror, gpa: Allocator, io: Io, args: []const []const u8) !void {
+ return std.testing.expectError(err, testExec(gpa, io, args, null));
}
-fn testExec(gpa: std.mem.Allocator, args: []const []const u8, env: ?*std.process.EnvMap) !void {
- try testExecBat(gpa, "args1.bat", args, env);
- try testExecBat(gpa, "args2.bat", args, env);
- try testExecBat(gpa, "args3.bat", args, env);
+fn testExec(gpa: Allocator, io: Io, args: []const []const u8, env: ?*std.process.EnvMap) !void {
+ try testExecBat(gpa, io, "args1.bat", args, env);
+ try testExecBat(gpa, io, "args2.bat", args, env);
+ try testExecBat(gpa, io, "args3.bat", args, env);
}
-fn testExecBat(gpa: std.mem.Allocator, bat: []const u8, args: []const []const u8, env: ?*std.process.EnvMap) !void {
+fn testExecBat(gpa: Allocator, io: Io, bat: []const u8, args: []const []const u8, env: ?*std.process.EnvMap) !void {
const argv = try gpa.alloc([]const u8, 1 + args.len);
defer gpa.free(argv);
argv[0] = bat;
@@ -139,8 +144,7 @@ fn testExecBat(gpa: std.mem.Allocator, bat: []const u8, args: []const []const u8
const can_have_trailing_empty_args = std.mem.eql(u8, bat, "args3.bat");
- const result = try std.process.Child.run(.{
- .allocator = gpa,
+ const result = try std.process.Child.run(gpa, io, .{
.env_map = env,
.argv = argv,
});
@@ -160,3 +164,41 @@ fn testExecBat(gpa: std.mem.Allocator, bat: []const u8, args: []const []const u8
i += 1;
}
}
+
+pub fn tmpDir(io: Io, opts: Io.Dir.OpenOptions) TmpDir {
+ var random_bytes: [TmpDir.random_bytes_count]u8 = undefined;
+ std.crypto.random.bytes(&random_bytes);
+ var sub_path: [TmpDir.sub_path_len]u8 = undefined;
+ _ = std.fs.base64_encoder.encode(&sub_path, &random_bytes);
+
+ const cwd = Io.Dir.cwd();
+ var cache_dir = cwd.createDirPathOpen(io, ".zig-cache", .{}) catch
+ @panic("unable to make tmp dir for testing: unable to make and open .zig-cache dir");
+ defer cache_dir.close(io);
+ const parent_dir = cache_dir.createDirPathOpen(io, "tmp", .{}) catch
+ @panic("unable to make tmp dir for testing: unable to make and open .zig-cache/tmp dir");
+ const dir = parent_dir.createDirPathOpen(io, &sub_path, .{ .open_options = opts }) catch
+ @panic("unable to make tmp dir for testing: unable to make and open the tmp dir");
+
+ return .{
+ .dir = dir,
+ .parent_dir = parent_dir,
+ .sub_path = sub_path,
+ };
+}
+
+pub const TmpDir = struct {
+ dir: Io.Dir,
+ parent_dir: Io.Dir,
+ sub_path: [sub_path_len]u8,
+
+ const random_bytes_count = 12;
+ const sub_path_len = std.fs.base64_encoder.calcSize(random_bytes_count);
+
+ pub fn cleanup(self: *TmpDir, io: Io) void {
+ self.dir.close(io);
+ self.parent_dir.deleteTree(io, &self.sub_path) catch {};
+ self.parent_dir.close(io);
+ self.* = undefined;
+ }
+};
diff --git a/test/standalone/windows_paths/relative.zig b/test/standalone/windows_paths/relative.zig
index 8301549667..7b6a51b283 100644
--- a/test/standalone/windows_paths/relative.zig
+++ b/test/standalone/windows_paths/relative.zig
@@ -10,10 +10,14 @@ pub fn main() !void {
if (args.len < 3) return error.MissingArgs;
+ var threaded: std.Io.Threaded = .init(allocator, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const relative = try std.fs.path.relative(allocator, args[1], args[2]);
defer allocator.free(relative);
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
const stdout = &stdout_writer.interface;
try stdout.writeAll(relative);
}
diff --git a/test/standalone/windows_paths/test.zig b/test/standalone/windows_paths/test.zig
index 2ec23417e6..ed4069dc61 100644
--- a/test/standalone/windows_paths/test.zig
+++ b/test/standalone/windows_paths/test.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
pub fn main() anyerror!void {
var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
@@ -9,6 +10,8 @@ pub fn main() anyerror!void {
if (args.len < 2) return error.MissingArgs;
+ const io = std.Io.Threaded.global_single_threaded.ioBasic();
+
const exe_path = args[1];
const cwd_path = try std.process.getCwdAlloc(arena);
@@ -33,39 +36,39 @@ pub fn main() anyerror!void {
// With the special =X: environment variable set, drive-relative paths that
// don't match the CWD's drive letter are resolved against that env var.
- try checkRelative(arena, "..\\..\\bar", &.{ exe_path, drive_rel, drive_abs }, null, &alt_drive_env_map);
- try checkRelative(arena, "..\\baz\\foo", &.{ exe_path, drive_abs, drive_rel }, null, &alt_drive_env_map);
+ try checkRelative(arena, io, "..\\..\\bar", &.{ exe_path, drive_rel, drive_abs }, null, &alt_drive_env_map);
+ try checkRelative(arena, io, "..\\baz\\foo", &.{ exe_path, drive_abs, drive_rel }, null, &alt_drive_env_map);
// Without that environment variable set, drive-relative paths that don't match the
// CWD's drive letter are resolved against the root of the drive.
- try checkRelative(arena, "..\\bar", &.{ exe_path, drive_rel, drive_abs }, null, &empty_env);
- try checkRelative(arena, "..\\foo", &.{ exe_path, drive_abs, drive_rel }, null, &empty_env);
+ try checkRelative(arena, io, "..\\bar", &.{ exe_path, drive_rel, drive_abs }, null, &empty_env);
+ try checkRelative(arena, io, "..\\foo", &.{ exe_path, drive_abs, drive_rel }, null, &empty_env);
// Bare drive-relative path with no components
- try checkRelative(arena, "bar", &.{ exe_path, drive_rel[0..2], drive_abs }, null, &empty_env);
- try checkRelative(arena, "..", &.{ exe_path, drive_abs, drive_rel[0..2] }, null, &empty_env);
+ try checkRelative(arena, io, "bar", &.{ exe_path, drive_rel[0..2], drive_abs }, null, &empty_env);
+ try checkRelative(arena, io, "..", &.{ exe_path, drive_abs, drive_rel[0..2] }, null, &empty_env);
// Bare drive-relative path with no components, drive-CWD set
- try checkRelative(arena, "..\\bar", &.{ exe_path, drive_rel[0..2], drive_abs }, null, &alt_drive_env_map);
- try checkRelative(arena, "..\\baz", &.{ exe_path, drive_abs, drive_rel[0..2] }, null, &alt_drive_env_map);
+ try checkRelative(arena, io, "..\\bar", &.{ exe_path, drive_rel[0..2], drive_abs }, null, &alt_drive_env_map);
+ try checkRelative(arena, io, "..\\baz", &.{ exe_path, drive_abs, drive_rel[0..2] }, null, &alt_drive_env_map);
// Bare drive-relative path relative to the CWD should be equivalent if drive-CWD is set
- try checkRelative(arena, "", &.{ exe_path, alt_drive_cwd, drive_rel[0..2] }, null, &alt_drive_env_map);
- try checkRelative(arena, "", &.{ exe_path, drive_rel[0..2], alt_drive_cwd }, null, &alt_drive_env_map);
+ try checkRelative(arena, io, "", &.{ exe_path, alt_drive_cwd, drive_rel[0..2] }, null, &alt_drive_env_map);
+ try checkRelative(arena, io, "", &.{ exe_path, drive_rel[0..2], alt_drive_cwd }, null, &alt_drive_env_map);
// Bare drive-relative should always be equivalent to itself
- try checkRelative(arena, "", &.{ exe_path, drive_rel[0..2], drive_rel[0..2] }, null, &alt_drive_env_map);
- try checkRelative(arena, "", &.{ exe_path, drive_rel[0..2], drive_rel[0..2] }, null, &alt_drive_env_map);
- try checkRelative(arena, "", &.{ exe_path, drive_rel[0..2], drive_rel[0..2] }, null, &empty_env);
- try checkRelative(arena, "", &.{ exe_path, drive_rel[0..2], drive_rel[0..2] }, null, &empty_env);
+ try checkRelative(arena, io, "", &.{ exe_path, drive_rel[0..2], drive_rel[0..2] }, null, &alt_drive_env_map);
+ try checkRelative(arena, io, "", &.{ exe_path, drive_rel[0..2], drive_rel[0..2] }, null, &alt_drive_env_map);
+ try checkRelative(arena, io, "", &.{ exe_path, drive_rel[0..2], drive_rel[0..2] }, null, &empty_env);
+ try checkRelative(arena, io, "", &.{ exe_path, drive_rel[0..2], drive_rel[0..2] }, null, &empty_env);
}
if (parsed_cwd_path.kind == .unc_absolute) {
const drive_abs_path = try std.fmt.allocPrint(arena, "{c}:\\foo\\bar", .{alt_drive_letter});
{
- try checkRelative(arena, drive_abs_path, &.{ exe_path, cwd_path, drive_abs_path }, null, &empty_env);
- try checkRelative(arena, cwd_path, &.{ exe_path, drive_abs_path, cwd_path }, null, &empty_env);
+ try checkRelative(arena, io, drive_abs_path, &.{ exe_path, cwd_path, drive_abs_path }, null, &empty_env);
+ try checkRelative(arena, io, cwd_path, &.{ exe_path, drive_abs_path, cwd_path }, null, &empty_env);
}
} else if (parsed_cwd_path.kind == .drive_absolute) {
const cur_drive_letter = parsed_cwd_path.root[0];
@@ -73,14 +76,14 @@ pub fn main() anyerror!void {
const unc_cwd = try std.fmt.allocPrint(arena, "\\\\127.0.0.1\\{c}$\\{s}", .{ cur_drive_letter, path_beyond_root });
{
- try checkRelative(arena, cwd_path, &.{ exe_path, unc_cwd, cwd_path }, null, &empty_env);
- try checkRelative(arena, unc_cwd, &.{ exe_path, cwd_path, unc_cwd }, null, &empty_env);
+ try checkRelative(arena, io, cwd_path, &.{ exe_path, unc_cwd, cwd_path }, null, &empty_env);
+ try checkRelative(arena, io, unc_cwd, &.{ exe_path, cwd_path, unc_cwd }, null, &empty_env);
}
{
const drive_abs = cwd_path;
const drive_rel = parsed_cwd_path.root[0..2];
- try checkRelative(arena, "", &.{ exe_path, drive_abs, drive_rel }, null, &empty_env);
- try checkRelative(arena, "", &.{ exe_path, drive_rel, drive_abs }, null, &empty_env);
+ try checkRelative(arena, io, "", &.{ exe_path, drive_abs, drive_rel }, null, &empty_env);
+ try checkRelative(arena, io, "", &.{ exe_path, drive_rel, drive_abs }, null, &empty_env);
}
} else {
return error.UnexpectedPathType;
@@ -89,13 +92,13 @@ pub fn main() anyerror!void {
fn checkRelative(
allocator: std.mem.Allocator,
+ io: Io,
expected_stdout: []const u8,
argv: []const []const u8,
cwd: ?[]const u8,
env_map: ?*const std.process.EnvMap,
) !void {
- const result = try std.process.Child.run(.{
- .allocator = allocator,
+ const result = try std.process.Child.run(allocator, io, .{
.argv = argv,
.cwd = cwd,
.env_map = env_map,
diff --git a/test/standalone/windows_spawn/hello.zig b/test/standalone/windows_spawn/hello.zig
index fb4a827e23..51e6aaf8bc 100644
--- a/test/standalone/windows_spawn/hello.zig
+++ b/test/standalone/windows_spawn/hello.zig
@@ -1,7 +1,8 @@
const std = @import("std");
pub fn main() !void {
- var stdout_writer = std.fs.File.stdout().writerStreaming(&.{});
+ const io = std.Options.debug_io;
+ var stdout_writer = std.Io.File.stdout().writerStreaming(io, &.{});
const stdout = &stdout_writer.interface;
try stdout.writeAll("hello from exe\n");
}
diff --git a/test/standalone/windows_spawn/main.zig b/test/standalone/windows_spawn/main.zig
index 10ee35f4df..c9522bf4de 100644
--- a/test/standalone/windows_spawn/main.zig
+++ b/test/standalone/windows_spawn/main.zig
@@ -1,29 +1,35 @@
const std = @import("std");
+const Io = std.Io;
+const Allocator = std.mem.Allocator;
const windows = std.os.windows;
const utf16Literal = std.unicode.utf8ToUtf16LeStringLiteral;
pub fn main() anyerror!void {
- var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
- defer if (gpa.deinit() == .leak) @panic("found memory leaks");
- const allocator = gpa.allocator();
+ var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
+ defer if (debug_allocator.deinit() == .leak) @panic("found memory leaks");
+ const gpa = debug_allocator.allocator();
- var it = try std.process.argsWithAllocator(allocator);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
+ var it = try std.process.argsWithAllocator(gpa);
defer it.deinit();
_ = it.next() orelse unreachable; // skip binary name
const hello_exe_cache_path = it.next() orelse unreachable;
- var tmp = std.testing.tmpDir(.{});
- defer tmp.cleanup();
+ var tmp = tmpDir(io, .{});
+ defer tmp.cleanup(io);
- const tmp_absolute_path = try tmp.dir.realpathAlloc(allocator, ".");
- defer allocator.free(tmp_absolute_path);
- const tmp_absolute_path_w = try std.unicode.utf8ToUtf16LeAllocZ(allocator, tmp_absolute_path);
- defer allocator.free(tmp_absolute_path_w);
- const cwd_absolute_path = try std.fs.cwd().realpathAlloc(allocator, ".");
- defer allocator.free(cwd_absolute_path);
- const tmp_relative_path = try std.fs.path.relative(allocator, cwd_absolute_path, tmp_absolute_path);
- defer allocator.free(tmp_relative_path);
+ const tmp_absolute_path = try tmp.dir.realPathFileAlloc(io, ".", gpa);
+ defer gpa.free(tmp_absolute_path);
+ const tmp_absolute_path_w = try std.unicode.utf8ToUtf16LeAllocZ(gpa, tmp_absolute_path);
+ defer gpa.free(tmp_absolute_path_w);
+ const cwd_absolute_path = try Io.Dir.cwd().realPathFileAlloc(io, ".", gpa);
+ defer gpa.free(cwd_absolute_path);
+ const tmp_relative_path = try std.fs.path.relative(gpa, cwd_absolute_path, tmp_absolute_path);
+ defer gpa.free(tmp_relative_path);
// Clear PATH
std.debug.assert(windows.kernel32.SetEnvironmentVariableW(
@@ -38,10 +44,10 @@ pub fn main() anyerror!void {
) == windows.TRUE);
// No PATH, so it should fail to find anything not in the cwd
- try testExecError(error.FileNotFound, allocator, "something_missing");
+ try testExecError(error.FileNotFound, gpa, io, "something_missing");
// make sure we don't get error.BadPath traversing out of cwd with a relative path
- try testExecError(error.FileNotFound, allocator, "..\\.\\.\\.\\\\..\\more_missing");
+ try testExecError(error.FileNotFound, gpa, io, "..\\.\\.\\.\\\\..\\more_missing");
std.debug.assert(windows.kernel32.SetEnvironmentVariableW(
utf16Literal("PATH"),
@@ -49,82 +55,82 @@ pub fn main() anyerror!void {
) == windows.TRUE);
// Move hello.exe into the tmp dir which is now added to the path
- try std.fs.cwd().copyFile(hello_exe_cache_path, tmp.dir, "hello.exe", .{});
+ try Io.Dir.cwd().copyFile(hello_exe_cache_path, tmp.dir, "hello.exe", io, .{});
// with extension should find the .exe (case insensitive)
- try testExec(allocator, "HeLLo.exe", "hello from exe\n");
+ try testExec(gpa, io, "HeLLo.exe", "hello from exe\n");
// without extension should find the .exe (case insensitive)
- try testExec(allocator, "heLLo", "hello from exe\n");
+ try testExec(gpa, io, "heLLo", "hello from exe\n");
// with invalid cwd
- try std.testing.expectError(error.FileNotFound, testExecWithCwd(allocator, "hello.exe", "missing_dir", ""));
+ try std.testing.expectError(error.FileNotFound, testExecWithCwd(gpa, io, "hello.exe", "missing_dir", ""));
// now add a .bat
- try tmp.dir.writeFile(.{ .sub_path = "hello.bat", .data = "@echo hello from bat" });
+ try tmp.dir.writeFile(io, .{ .sub_path = "hello.bat", .data = "@echo hello from bat" });
// and a .cmd
- try tmp.dir.writeFile(.{ .sub_path = "hello.cmd", .data = "@echo hello from cmd" });
+ try tmp.dir.writeFile(io, .{ .sub_path = "hello.cmd", .data = "@echo hello from cmd" });
// with extension should find the .bat (case insensitive)
- try testExec(allocator, "heLLo.bat", "hello from bat\r\n");
+ try testExec(gpa, io, "heLLo.bat", "hello from bat\r\n");
// with extension should find the .cmd (case insensitive)
- try testExec(allocator, "heLLo.cmd", "hello from cmd\r\n");
+ try testExec(gpa, io, "heLLo.cmd", "hello from cmd\r\n");
// without extension should find the .exe (since its first in PATHEXT)
- try testExec(allocator, "heLLo", "hello from exe\n");
+ try testExec(gpa, io, "heLLo", "hello from exe\n");
// now rename the exe to not have an extension
- try renameExe(tmp.dir, "hello.exe", "hello");
+ try renameExe(tmp.dir, io, "hello.exe", "hello");
// with extension should now fail
- try testExecError(error.FileNotFound, allocator, "hello.exe");
+ try testExecError(error.FileNotFound, gpa, io, "hello.exe");
// without extension should succeed (case insensitive)
- try testExec(allocator, "heLLo", "hello from exe\n");
+ try testExec(gpa, io, "heLLo", "hello from exe\n");
- try tmp.dir.makeDir("something");
- try renameExe(tmp.dir, "hello", "something/hello.exe");
+ try tmp.dir.createDir(io, "something", .default_dir);
+ try renameExe(tmp.dir, io, "hello", "something/hello.exe");
- const relative_path_no_ext = try std.fs.path.join(allocator, &.{ tmp_relative_path, "something/hello" });
- defer allocator.free(relative_path_no_ext);
+ const relative_path_no_ext = try std.fs.path.join(gpa, &.{ tmp_relative_path, "something/hello" });
+ defer gpa.free(relative_path_no_ext);
// Giving a full relative path to something/hello should work
- try testExec(allocator, relative_path_no_ext, "hello from exe\n");
+ try testExec(gpa, io, relative_path_no_ext, "hello from exe\n");
// But commands with path separators get excluded from PATH searching, so this will fail
- try testExecError(error.FileNotFound, allocator, "something/hello");
+ try testExecError(error.FileNotFound, gpa, io, "something/hello");
// Now that .BAT is the first PATHEXT that should be found, this should succeed
- try testExec(allocator, "heLLo", "hello from bat\r\n");
+ try testExec(gpa, io, "heLLo", "hello from bat\r\n");
// Add a hello.exe that is not a valid executable
- try tmp.dir.writeFile(.{ .sub_path = "hello.exe", .data = "invalid" });
+ try tmp.dir.writeFile(io, .{ .sub_path = "hello.exe", .data = "invalid" });
// Trying to execute it with extension will give InvalidExe. This is a special
// case for .EXE extensions, where if they ever try to get executed but they are
// invalid, that gets treated as a fatal error wherever they are found and InvalidExe
// is returned immediately.
- try testExecError(error.InvalidExe, allocator, "hello.exe");
+ try testExecError(error.InvalidExe, gpa, io, "hello.exe");
// Same thing applies to the command with no extension--even though there is a
// hello.bat that could be executed, it should stop after it tries executing
// hello.exe and getting InvalidExe.
- try testExecError(error.InvalidExe, allocator, "hello");
+ try testExecError(error.InvalidExe, gpa, io, "hello");
// If we now rename hello.exe to have no extension, it will behave differently
- try renameExe(tmp.dir, "hello.exe", "hello");
+ try renameExe(tmp.dir, io, "hello.exe", "hello");
// Now, trying to execute it without an extension should treat InvalidExe as recoverable
// and skip over it and find hello.bat and execute that
- try testExec(allocator, "hello", "hello from bat\r\n");
+ try testExec(gpa, io, "hello", "hello from bat\r\n");
// If we rename the invalid exe to something else
- try renameExe(tmp.dir, "hello", "goodbye");
+ try renameExe(tmp.dir, io, "hello", "goodbye");
// Then we should now get FileNotFound when trying to execute 'goodbye',
// since that is what the original error will be after searching for 'goodbye'
// in the cwd. It will try to execute 'goodbye' from the PATH but the InvalidExe error
// should be ignored in this case.
- try testExecError(error.FileNotFound, allocator, "goodbye");
+ try testExecError(error.FileNotFound, gpa, io, "goodbye");
// Now let's set the tmp dir as the cwd and set the path only include the "something" sub dir
- try tmp.dir.setAsCwd();
- defer tmp.parent_dir.setAsCwd() catch {};
- const something_subdir_abs_path = try std.mem.concatWithSentinel(allocator, u16, &.{ tmp_absolute_path_w, utf16Literal("\\something") }, 0);
- defer allocator.free(something_subdir_abs_path);
+ try std.process.setCurrentDir(io, tmp.dir);
+ defer std.process.setCurrentDir(io, tmp.parent_dir) catch {};
+ const something_subdir_abs_path = try std.mem.concatWithSentinel(gpa, u16, &.{ tmp_absolute_path_w, utf16Literal("\\something") }, 0);
+ defer gpa.free(something_subdir_abs_path);
std.debug.assert(windows.kernel32.SetEnvironmentVariableW(
utf16Literal("PATH"),
@@ -133,37 +139,37 @@ pub fn main() anyerror!void {
// Now trying to execute goodbye should give error.InvalidExe since it's the original
// error that we got when trying within the cwd
- try testExecError(error.InvalidExe, allocator, "goodbye");
+ try testExecError(error.InvalidExe, gpa, io, "goodbye");
// hello should still find the .bat
- try testExec(allocator, "hello", "hello from bat\r\n");
+ try testExec(gpa, io, "hello", "hello from bat\r\n");
// If we rename something/hello.exe to something/goodbye.exe
- try renameExe(tmp.dir, "something/hello.exe", "something/goodbye.exe");
+ try renameExe(tmp.dir, io, "something/hello.exe", "something/goodbye.exe");
// And try to execute goodbye, then the one in something should be found
// since the one in cwd is an invalid executable
- try testExec(allocator, "goodbye", "hello from exe\n");
+ try testExec(gpa, io, "goodbye", "hello from exe\n");
// If we use an absolute path to execute the invalid goodbye
- const goodbye_abs_path = try std.mem.join(allocator, "\\", &.{ tmp_absolute_path, "goodbye" });
- defer allocator.free(goodbye_abs_path);
+ const goodbye_abs_path = try std.mem.join(gpa, "\\", &.{ tmp_absolute_path, "goodbye" });
+ defer gpa.free(goodbye_abs_path);
// then the PATH should not be searched and we should get InvalidExe
- try testExecError(error.InvalidExe, allocator, goodbye_abs_path);
+ try testExecError(error.InvalidExe, gpa, io, goodbye_abs_path);
// If we try to exec but provide a cwd that is an absolute path, the PATH
// should still be searched and the goodbye.exe in something should be found.
- try testExecWithCwd(allocator, "goodbye", tmp_absolute_path, "hello from exe\n");
+ try testExecWithCwd(gpa, io, "goodbye", tmp_absolute_path, "hello from exe\n");
// introduce some extra path separators into the path which is dealt with inside the spawn call.
const denormed_something_subdir_size = std.mem.replacementSize(u16, something_subdir_abs_path, utf16Literal("\\"), utf16Literal("\\\\\\\\"));
- const denormed_something_subdir_abs_path = try allocator.allocSentinel(u16, denormed_something_subdir_size, 0);
- defer allocator.free(denormed_something_subdir_abs_path);
+ const denormed_something_subdir_abs_path = try gpa.allocSentinel(u16, denormed_something_subdir_size, 0);
+ defer gpa.free(denormed_something_subdir_abs_path);
_ = std.mem.replace(u16, something_subdir_abs_path, utf16Literal("\\"), utf16Literal("\\\\\\\\"), denormed_something_subdir_abs_path);
- const denormed_something_subdir_wtf8 = try std.unicode.wtf16LeToWtf8Alloc(allocator, denormed_something_subdir_abs_path);
- defer allocator.free(denormed_something_subdir_wtf8);
+ const denormed_something_subdir_wtf8 = try std.unicode.wtf16LeToWtf8Alloc(gpa, denormed_something_subdir_abs_path);
+ defer gpa.free(denormed_something_subdir_wtf8);
// clear the path to ensure that the match comes from the cwd
std.debug.assert(windows.kernel32.SetEnvironmentVariableW(
@@ -171,21 +177,21 @@ pub fn main() anyerror!void {
null,
) == windows.TRUE);
- try testExecWithCwd(allocator, "goodbye", denormed_something_subdir_wtf8, "hello from exe\n");
+ try testExecWithCwd(gpa, io, "goodbye", denormed_something_subdir_wtf8, "hello from exe\n");
// normalization should also work if the non-normalized path is found in the PATH var.
std.debug.assert(windows.kernel32.SetEnvironmentVariableW(
utf16Literal("PATH"),
denormed_something_subdir_abs_path,
) == windows.TRUE);
- try testExec(allocator, "goodbye", "hello from exe\n");
+ try testExec(gpa, io, "goodbye", "hello from exe\n");
// now make sure we can launch executables "outside" of the cwd
- var subdir_cwd = try tmp.dir.openDir(denormed_something_subdir_wtf8, .{});
- defer subdir_cwd.close();
+ var subdir_cwd = try tmp.dir.openDir(io, denormed_something_subdir_wtf8, .{});
+ defer subdir_cwd.close(io);
- try renameExe(tmp.dir, "something/goodbye.exe", "hello.exe");
- try subdir_cwd.setAsCwd();
+ try renameExe(tmp.dir, io, "something/goodbye.exe", "hello.exe");
+ try std.process.setCurrentDir(io, subdir_cwd);
// clear the PATH again
std.debug.assert(windows.kernel32.SetEnvironmentVariableW(
@@ -194,33 +200,32 @@ pub fn main() anyerror!void {
) == windows.TRUE);
// while we're at it make sure non-windows separators work fine
- try testExec(allocator, "../hello", "hello from exe\n");
+ try testExec(gpa, io, "../hello", "hello from exe\n");
}
-fn testExecError(err: anyerror, allocator: std.mem.Allocator, command: []const u8) !void {
- return std.testing.expectError(err, testExec(allocator, command, ""));
+fn testExecError(err: anyerror, gpa: Allocator, io: Io, command: []const u8) !void {
+ return std.testing.expectError(err, testExec(gpa, io, command, ""));
}
-fn testExec(allocator: std.mem.Allocator, command: []const u8, expected_stdout: []const u8) !void {
- return testExecWithCwd(allocator, command, null, expected_stdout);
+fn testExec(gpa: Allocator, io: Io, command: []const u8, expected_stdout: []const u8) !void {
+ return testExecWithCwd(gpa, io, command, null, expected_stdout);
}
-fn testExecWithCwd(allocator: std.mem.Allocator, command: []const u8, cwd: ?[]const u8, expected_stdout: []const u8) !void {
- const result = try std.process.Child.run(.{
- .allocator = allocator,
+fn testExecWithCwd(gpa: Allocator, io: Io, command: []const u8, cwd: ?[]const u8, expected_stdout: []const u8) !void {
+ const result = try std.process.Child.run(gpa, io, .{
.argv = &[_][]const u8{command},
.cwd = cwd,
});
- defer allocator.free(result.stdout);
- defer allocator.free(result.stderr);
+ defer gpa.free(result.stdout);
+ defer gpa.free(result.stderr);
try std.testing.expectEqualStrings("", result.stderr);
try std.testing.expectEqualStrings(expected_stdout, result.stdout);
}
-fn renameExe(dir: std.fs.Dir, old_sub_path: []const u8, new_sub_path: []const u8) !void {
+fn renameExe(dir: Io.Dir, io: Io, old_sub_path: []const u8, new_sub_path: []const u8) !void {
var attempt: u5 = 0;
- while (true) break dir.rename(old_sub_path, new_sub_path) catch |err| switch (err) {
+ while (true) break dir.rename(old_sub_path, dir, new_sub_path, io) catch |err| switch (err) {
error.AccessDenied => {
if (attempt == 13) return error.AccessDenied;
// give the kernel a chance to finish closing the executable handle
@@ -231,3 +236,41 @@ fn renameExe(dir: std.fs.Dir, old_sub_path: []const u8, new_sub_path: []const u8
else => |e| return e,
};
}
+
+pub fn tmpDir(io: Io, opts: Io.Dir.OpenOptions) TmpDir {
+ var random_bytes: [TmpDir.random_bytes_count]u8 = undefined;
+ std.crypto.random.bytes(&random_bytes);
+ var sub_path: [TmpDir.sub_path_len]u8 = undefined;
+ _ = std.fs.base64_encoder.encode(&sub_path, &random_bytes);
+
+ const cwd = Io.Dir.cwd();
+ var cache_dir = cwd.createDirPathOpen(io, ".zig-cache", .{}) catch
+ @panic("unable to make tmp dir for testing: unable to make and open .zig-cache dir");
+ defer cache_dir.close(io);
+ const parent_dir = cache_dir.createDirPathOpen(io, "tmp", .{}) catch
+ @panic("unable to make tmp dir for testing: unable to make and open .zig-cache/tmp dir");
+ const dir = parent_dir.createDirPathOpen(io, &sub_path, .{ .open_options = opts }) catch
+ @panic("unable to make tmp dir for testing: unable to make and open the tmp dir");
+
+ return .{
+ .dir = dir,
+ .parent_dir = parent_dir,
+ .sub_path = sub_path,
+ };
+}
+
+pub const TmpDir = struct {
+ dir: Io.Dir,
+ parent_dir: Io.Dir,
+ sub_path: [sub_path_len]u8,
+
+ const random_bytes_count = 12;
+ const sub_path_len = std.fs.base64_encoder.calcSize(random_bytes_count);
+
+ pub fn cleanup(self: *TmpDir, io: Io) void {
+ self.dir.close(io);
+ self.parent_dir.deleteTree(io, &self.sub_path) catch {};
+ self.parent_dir.close(io);
+ self.* = undefined;
+ }
+};
diff --git a/test/tests.zig b/test/tests.zig
index 0911ae2e46..aa3c018a62 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -187,29 +187,30 @@ const test_targets = blk: {
.link_libc = true,
},
- .{
- .target = .{
- .cpu_arch = .aarch64,
- .os_tag = .linux,
- .abi = .none,
- },
- .use_llvm = false,
- .use_lld = false,
- .optimize_mode = .ReleaseFast,
- .strip = true,
- },
- .{
- .target = .{
- .cpu_arch = .aarch64,
- .cpu_model = .{ .explicit = &std.Target.aarch64.cpu.neoverse_n1 },
- .os_tag = .linux,
- .abi = .none,
- },
- .use_llvm = false,
- .use_lld = false,
- .optimize_mode = .ReleaseFast,
- .strip = true,
- },
+ // Disabled due to https://codeberg.org/ziglang/zig/pulls/30232#issuecomment-9203351
+ //.{
+ // .target = .{
+ // .cpu_arch = .aarch64,
+ // .os_tag = .linux,
+ // .abi = .none,
+ // },
+ // .use_llvm = false,
+ // .use_lld = false,
+ // .optimize_mode = .ReleaseFast,
+ // .strip = true,
+ //},
+ //.{
+ // .target = .{
+ // .cpu_arch = .aarch64,
+ // .cpu_model = .{ .explicit = &std.Target.aarch64.cpu.neoverse_n1 },
+ // .os_tag = .linux,
+ // .abi = .none,
+ // },
+ // .use_llvm = false,
+ // .use_lld = false,
+ // .optimize_mode = .ReleaseFast,
+ // .strip = true,
+ //},
.{
.target = .{
@@ -1204,17 +1205,18 @@ const test_targets = blk: {
},
},
- .{
- .target = .{
- .cpu_arch = .aarch64,
- .os_tag = .macos,
- .abi = .none,
- },
- .use_llvm = false,
- .use_lld = false,
- .optimize_mode = .ReleaseFast,
- .strip = true,
- },
+ // Disabled due to https://codeberg.org/ziglang/zig/pulls/30232#issuecomment-9203351
+ //.{
+ // .target = .{
+ // .cpu_arch = .aarch64,
+ // .os_tag = .macos,
+ // .abi = .none,
+ // },
+ // .use_llvm = false,
+ // .use_lld = false,
+ // .optimize_mode = .ReleaseFast,
+ // .strip = true,
+ //},
.{
.target = .{
@@ -2024,6 +2026,7 @@ pub fn addLinkTests(
pub fn addCliTests(b: *std.Build) *Step {
const step = b.step("test-cli", "Test the command line interface");
const s = std.fs.path.sep_str;
+ const io = b.graph.io;
{
// Test `zig init`.
@@ -2132,14 +2135,14 @@ pub fn addCliTests(b: *std.Build) *Step {
const tmp_path = b.makeTempPath();
const unformatted_code = " // no reason for indent";
- var dir = std.fs.cwd().openDir(tmp_path, .{}) catch @panic("unhandled");
- defer dir.close();
- dir.writeFile(.{ .sub_path = "fmt1.zig", .data = unformatted_code }) catch @panic("unhandled");
- dir.writeFile(.{ .sub_path = "fmt2.zig", .data = unformatted_code }) catch @panic("unhandled");
- dir.makeDir("subdir") catch @panic("unhandled");
- var subdir = dir.openDir("subdir", .{}) catch @panic("unhandled");
- defer subdir.close();
- subdir.writeFile(.{ .sub_path = "fmt3.zig", .data = unformatted_code }) catch @panic("unhandled");
+ var dir = std.Io.Dir.cwd().openDir(io, tmp_path, .{}) catch @panic("unhandled");
+ defer dir.close(io);
+ dir.writeFile(io, .{ .sub_path = "fmt1.zig", .data = unformatted_code }) catch @panic("unhandled");
+ dir.writeFile(io, .{ .sub_path = "fmt2.zig", .data = unformatted_code }) catch @panic("unhandled");
+ dir.createDir(io, "subdir", .default_dir) catch @panic("unhandled");
+ var subdir = dir.openDir(io, "subdir", .{}) catch @panic("unhandled");
+ defer subdir.close(io);
+ subdir.writeFile(io, .{ .sub_path = "fmt3.zig", .data = unformatted_code }) catch @panic("unhandled");
// Test zig fmt affecting only the appropriate files.
const run1 = b.addSystemCommand(&.{ b.graph.zig_exe, "fmt", "fmt1.zig" });
@@ -2629,11 +2632,12 @@ pub fn addCases(
) !void {
const arena = b.allocator;
const gpa = b.allocator;
+ const io = b.graph.io;
- var cases = @import("src/Cases.zig").init(gpa, arena);
+ var cases = @import("src/Cases.zig").init(gpa, arena, io);
- var dir = try b.build_root.handle.openDir("test/cases", .{ .iterate = true });
- defer dir.close();
+ var dir = try b.build_root.handle.openDir(io, "test/cases", .{ .iterate = true });
+ defer dir.close(io);
cases.addFromDir(dir, b);
try @import("cases.zig").addCases(&cases, build_options, b);
@@ -2678,7 +2682,9 @@ pub fn addDebuggerTests(b: *std.Build, options: DebuggerContext.Options) ?*Step
return step;
}
-pub fn addIncrementalTests(b: *std.Build, test_step: *Step) !void {
+pub fn addIncrementalTests(b: *std.Build, test_step: *Step, test_filters: []const []const u8) !void {
+ const io = b.graph.io;
+
const incr_check = b.addExecutable(.{
.name = "incr-check",
.root_module = b.createModule(.{
@@ -2688,12 +2694,17 @@ pub fn addIncrementalTests(b: *std.Build, test_step: *Step) !void {
}),
});
- var dir = try b.build_root.handle.openDir("test/incremental", .{ .iterate = true });
- defer dir.close();
+ var dir = try b.build_root.handle.openDir(io, "test/incremental", .{ .iterate = true });
+ defer dir.close(io);
var it = try dir.walk(b.graph.arena);
- while (try it.next()) |entry| {
+ while (try it.next(io)) |entry| {
if (entry.kind != .file) continue;
+ if (std.mem.endsWith(u8, entry.basename, ".swp")) continue;
+
+ for (test_filters) |test_filter| {
+ if (std.mem.indexOf(u8, entry.path, test_filter)) |_| break;
+ } else if (test_filters.len > 0) continue;
const run = b.addRunArtifact(incr_check);
run.setName(b.fmt("incr-check '{s}'", .{entry.basename}));
@@ -2702,6 +2713,11 @@ pub fn addIncrementalTests(b: *std.Build, test_step: *Step) !void {
run.addFileArg(b.path("test/incremental/").path(b, entry.path));
run.addArgs(&.{ "--zig-lib-dir", b.fmt("{f}", .{b.graph.zig_lib_directory}) });
+ if (b.enable_qemu) run.addArg("-fqemu");
+ if (b.enable_wine) run.addArg("-fwine");
+ if (b.enable_wasmtime) run.addArg("-fwasmtime");
+ if (b.enable_darling) run.addArg("-fdarling");
+
run.addCheck(.{ .expect_term = .{ .Exited = 0 } });
test_step.dependOn(&run.step);
diff --git a/tools/docgen.zig b/tools/docgen.zig
index d23892e06c..ac0d26b995 100644
--- a/tools/docgen.zig
+++ b/tools/docgen.zig
@@ -1,6 +1,8 @@
-const std = @import("std");
const builtin = @import("builtin");
-const fs = std.fs;
+
+const std = @import("std");
+const Io = std.Io;
+const Dir = std.Io.Dir;
const process = std.process;
const Progress = std.Progress;
const print = std.debug.print;
@@ -8,7 +10,6 @@ const mem = std.mem;
const testing = std.testing;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
-const getExternalExecutor = std.zig.system.getExternalExecutor;
const fatal = std.process.fatal;
const Writer = std.Io.Writer;
@@ -38,7 +39,7 @@ pub fn main() !void {
const gpa = arena;
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -49,7 +50,7 @@ pub fn main() !void {
while (args_it.next()) |arg| {
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage);
+ try Io.File.stdout().writeStreamingAll(io, usage);
process.exit(0);
} else if (mem.eql(u8, arg, "--code-dir")) {
if (args_it.next()) |param| {
@@ -72,16 +73,16 @@ pub fn main() !void {
const output_path = opt_output orelse fatal("missing output file", .{});
const code_dir_path = opt_code_dir orelse fatal("missing --code-dir argument", .{});
- var in_file = try fs.cwd().openFile(input_path, .{});
- defer in_file.close();
+ var in_file = try Dir.cwd().openFile(io, input_path, .{});
+ defer in_file.close(io);
- var out_file = try fs.cwd().createFile(output_path, .{});
- defer out_file.close();
+ var out_file = try Dir.cwd().createFile(io, output_path, .{});
+ defer out_file.close(io);
var out_file_buffer: [4096]u8 = undefined;
- var out_file_writer = out_file.writer(&out_file_buffer);
+ var out_file_writer = out_file.writer(io, &out_file_buffer);
- var code_dir = try fs.cwd().openDir(code_dir_path, .{});
- defer code_dir.close();
+ var code_dir = try Dir.cwd().openDir(io, code_dir_path, .{});
+ defer code_dir.close(io);
var in_file_reader = in_file.reader(io, &.{});
const input_file_bytes = try in_file_reader.interface.allocRemaining(arena, .limited(max_doc_file_size));
@@ -89,7 +90,7 @@ pub fn main() !void {
var tokenizer = Tokenizer.init(input_path, input_file_bytes);
var toc = try genToc(arena, &tokenizer);
- try genHtml(arena, &tokenizer, &toc, code_dir, &out_file_writer.interface);
+ try genHtml(arena, io, &tokenizer, &toc, code_dir, &out_file_writer.interface);
try out_file_writer.end();
}
@@ -988,9 +989,10 @@ fn printShell(out: *Writer, shell_content: []const u8, escape: bool) !void {
fn genHtml(
allocator: Allocator,
+ io: Io,
tokenizer: *Tokenizer,
toc: *Toc,
- code_dir: std.fs.Dir,
+ code_dir: Dir,
out: *Writer,
) !void {
for (toc.nodes) |node| {
@@ -1042,11 +1044,11 @@ fn genHtml(
},
.Code => |code| {
const out_basename = try std.fmt.allocPrint(allocator, "{s}.out", .{
- fs.path.stem(code.name),
+ Dir.path.stem(code.name),
});
defer allocator.free(out_basename);
- const contents = code_dir.readFileAlloc(out_basename, allocator, .limited(std.math.maxInt(u32))) catch |err| {
+ const contents = code_dir.readFileAlloc(io, out_basename, allocator, .limited(std.math.maxInt(u32))) catch |err| {
return parseError(tokenizer, code.token, "unable to open '{s}': {t}", .{ out_basename, err });
};
defer allocator.free(contents);
diff --git a/tools/doctest.zig b/tools/doctest.zig
index 63b7e50778..3a67210a59 100644
--- a/tools/doctest.zig
+++ b/tools/doctest.zig
@@ -2,10 +2,10 @@ const builtin = @import("builtin");
const std = @import("std");
const Io = std.Io;
+const Dir = std.Io.Dir;
const Writer = std.Io.Writer;
const fatal = std.process.fatal;
const mem = std.mem;
-const fs = std.fs;
const process = std.process;
const Allocator = std.mem.Allocator;
const testing = std.testing;
@@ -40,7 +40,7 @@ pub fn main() !void {
const gpa = arena;
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -53,7 +53,7 @@ pub fn main() !void {
while (args_it.next()) |arg| {
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try std.fs.File.stdout().writeAll(usage);
+ try Io.File.stdout().writeStreamingAll(io, usage);
process.exit(0);
} else if (mem.eql(u8, arg, "-i")) {
opt_input = args_it.next() orelse fatal("expected parameter after -i", .{});
@@ -78,37 +78,37 @@ pub fn main() !void {
const zig_path = opt_zig orelse fatal("missing zig compiler path (--zig)", .{});
const cache_root = opt_cache_root orelse fatal("missing cache root path (--cache-root)", .{});
- const source_bytes = try fs.cwd().readFileAlloc(input_path, arena, .limited(std.math.maxInt(u32)));
+ const source_bytes = try Dir.cwd().readFileAlloc(io, input_path, arena, .limited(std.math.maxInt(u32)));
const code = try parseManifest(arena, source_bytes);
const source = stripManifest(source_bytes);
const tmp_dir_path = try std.fmt.allocPrint(arena, "{s}/tmp/{x}", .{
cache_root, std.crypto.random.int(u64),
});
- fs.cwd().makePath(tmp_dir_path) catch |err|
- fatal("unable to create tmp dir '{s}': {s}", .{ tmp_dir_path, @errorName(err) });
- defer fs.cwd().deleteTree(tmp_dir_path) catch |err| std.log.err("unable to delete '{s}': {s}", .{
- tmp_dir_path, @errorName(err),
+ Dir.cwd().createDirPath(io, tmp_dir_path) catch |err|
+ fatal("unable to create tmp dir '{s}': {t}", .{ tmp_dir_path, err });
+ defer Dir.cwd().deleteTree(io, tmp_dir_path) catch |err| std.log.err("unable to delete '{s}': {t}", .{
+ tmp_dir_path, err,
});
- var out_file = try fs.cwd().createFile(output_path, .{});
- defer out_file.close();
+ var out_file = try Dir.cwd().createFile(io, output_path, .{});
+ defer out_file.close(io);
var out_file_buffer: [4096]u8 = undefined;
- var out_file_writer = out_file.writer(&out_file_buffer);
+ var out_file_writer = out_file.writer(io, &out_file_buffer);
const out = &out_file_writer.interface;
- try printSourceBlock(arena, out, source, fs.path.basename(input_path));
+ try printSourceBlock(arena, out, source, Dir.path.basename(input_path));
try printOutput(
arena,
io,
out,
code,
tmp_dir_path,
- try std.fs.path.relative(arena, tmp_dir_path, zig_path),
- try std.fs.path.relative(arena, tmp_dir_path, input_path),
+ try Dir.path.relative(arena, tmp_dir_path, zig_path),
+ try Dir.path.relative(arena, tmp_dir_path, input_path),
if (opt_zig_lib_dir) |zig_lib_dir|
- try std.fs.path.relative(arena, tmp_dir_path, zig_lib_dir)
+ try Dir.path.relative(arena, tmp_dir_path, zig_lib_dir)
else
null,
);
@@ -141,7 +141,7 @@ fn printOutput(
defer shell_buffer.deinit();
const shell_out = &shell_buffer.writer;
- const code_name = std.fs.path.stem(input_path);
+ const code_name = Dir.path.stem(input_path);
switch (code.id) {
.exe => |expected_outcome| code_block: {
@@ -201,8 +201,7 @@ fn printOutput(
try shell_out.print("\n", .{});
if (expected_outcome == .build_fail) {
- const result = try process.Child.run(.{
- .allocator = arena,
+ const result = try process.Child.run(arena, io, .{
.argv = build_args.items,
.cwd = tmp_dir_path,
.env_map = &env_map,
@@ -227,7 +226,7 @@ fn printOutput(
try shell_out.writeAll(colored_stderr);
break :code_block;
}
- const exec_result = run(arena, &env_map, tmp_dir_path, build_args.items) catch
+ const exec_result = run(arena, io, &env_map, tmp_dir_path, build_args.items) catch
fatal("example failed to compile", .{});
if (code.verbose_cimport) {
@@ -258,8 +257,7 @@ fn printOutput(
var exited_with_signal = false;
const result = if (expected_outcome == .fail) blk: {
- const result = try process.Child.run(.{
- .allocator = arena,
+ const result = try process.Child.run(arena, io, .{
.argv = run_args,
.env_map = &env_map,
.cwd = tmp_dir_path,
@@ -278,7 +276,7 @@ fn printOutput(
}
break :blk result;
} else blk: {
- break :blk run(arena, &env_map, tmp_dir_path, run_args) catch
+ break :blk run(arena, io, &env_map, tmp_dir_path, run_args) catch
fatal("example crashed", .{});
};
@@ -327,7 +325,7 @@ fn printOutput(
.arch_os_abi = triple,
});
const target = try std.zig.system.resolveTargetQuery(io, target_query);
- switch (getExternalExecutor(&host, &target, .{
+ switch (getExternalExecutor(io, &host, &target, .{
.link_libc = code.link_libc,
})) {
.native => {},
@@ -347,7 +345,7 @@ fn printOutput(
}
}
- const result = run(arena, &env_map, tmp_dir_path, test_args.items) catch
+ const result = run(arena, io, &env_map, tmp_dir_path, test_args.items) catch
fatal("test failed", .{});
const escaped_stderr = try escapeHtml(arena, result.stderr);
const escaped_stdout = try escapeHtml(arena, result.stdout);
@@ -378,8 +376,7 @@ fn printOutput(
try test_args.append("-lc");
try shell_out.print("-lc ", .{});
}
- const result = try process.Child.run(.{
- .allocator = arena,
+ const result = try process.Child.run(arena, io, .{
.argv = test_args.items,
.env_map = &env_map,
.cwd = tmp_dir_path,
@@ -435,8 +432,7 @@ fn printOutput(
},
}
- const result = try process.Child.run(.{
- .allocator = arena,
+ const result = try process.Child.run(arena, io, .{
.argv = test_args.items,
.env_map = &env_map,
.cwd = tmp_dir_path,
@@ -512,8 +508,7 @@ fn printOutput(
}
if (maybe_error_match) |error_match| {
- const result = try process.Child.run(.{
- .allocator = arena,
+ const result = try process.Child.run(arena, io, .{
.argv = build_args.items,
.env_map = &env_map,
.cwd = tmp_dir_path,
@@ -541,7 +536,7 @@ fn printOutput(
const colored_stderr = try termColor(arena, escaped_stderr);
try shell_out.print("\n{s} ", .{colored_stderr});
} else {
- _ = run(arena, &env_map, tmp_dir_path, build_args.items) catch fatal("example failed to compile", .{});
+ _ = run(arena, io, &env_map, tmp_dir_path, build_args.items) catch fatal("example failed to compile", .{});
}
try shell_out.writeAll("\n");
},
@@ -600,7 +595,7 @@ fn printOutput(
try test_args.append(option);
try shell_out.print("{s} ", .{option});
}
- const result = run(arena, &env_map, tmp_dir_path, test_args.items) catch fatal("test failed", .{});
+ const result = run(arena, io, &env_map, tmp_dir_path, test_args.items) catch fatal("test failed", .{});
const escaped_stderr = try escapeHtml(arena, result.stderr);
const escaped_stdout = try escapeHtml(arena, result.stdout);
try shell_out.print("\n{s}{s}\n", .{ escaped_stderr, escaped_stdout });
@@ -1132,12 +1127,12 @@ fn in(slice: []const u8, number: u8) bool {
fn run(
allocator: Allocator,
+ io: Io,
env_map: *process.EnvMap,
cwd: []const u8,
args: []const []const u8,
) !process.Child.RunResult {
- const result = try process.Child.run(.{
- .allocator = allocator,
+ const result = try process.Child.run(allocator, io, .{
.argv = args,
.env_map = env_map,
.cwd = cwd,
diff --git a/tools/dump-cov.zig b/tools/dump-cov.zig
index 3dd91de612..1a8ebb324e 100644
--- a/tools/dump-cov.zig
+++ b/tools/dump-cov.zig
@@ -2,6 +2,7 @@
//! including file:line:column information for each PC.
const std = @import("std");
+const Io = std.Io;
const fatal = std.process.fatal;
const Path = std.Build.Cache.Path;
const assert = std.debug.assert;
@@ -16,7 +17,7 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -51,12 +52,13 @@ pub fn main() !void {
var coverage: std.debug.Coverage = .init;
defer coverage.deinit(gpa);
- var debug_info = std.debug.Info.load(gpa, exe_path, &coverage, target.ofmt, target.cpu.arch) catch |err| {
- fatal("failed to load debug info for {f}: {s}", .{ exe_path, @errorName(err) });
+ var debug_info = std.debug.Info.load(gpa, io, exe_path, &coverage, target.ofmt, target.cpu.arch) catch |err| {
+ fatal("failed to load debug info for {f}: {t}", .{ exe_path, err });
};
defer debug_info.deinit(gpa);
const cov_bytes = cov_path.root_dir.handle.readFileAllocOptions(
+ io,
cov_path.sub_path,
arena,
.limited(1 << 30),
@@ -67,7 +69,7 @@ pub fn main() !void {
};
var stdout_buffer: [4000]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout = &stdout_writer.interface;
const header: *SeenPcsHeader = @ptrCast(cov_bytes);
@@ -83,7 +85,7 @@ pub fn main() !void {
std.mem.sortUnstable(usize, sorted_pcs, {}, std.sort.asc(usize));
const source_locations = try arena.alloc(std.debug.Coverage.SourceLocation, sorted_pcs.len);
- try debug_info.resolveAddresses(gpa, sorted_pcs, source_locations);
+ try debug_info.resolveAddresses(gpa, io, sorted_pcs, source_locations);
const seen_pcs = header.seenBits();
diff --git a/tools/fetch_them_macos_headers.zig b/tools/fetch_them_macos_headers.zig
index 2a2a2452e7..c55a569e9f 100644
--- a/tools/fetch_them_macos_headers.zig
+++ b/tools/fetch_them_macos_headers.zig
@@ -1,10 +1,9 @@
const std = @import("std");
const Io = std.Io;
-const fs = std.fs;
+const Dir = std.Io.Dir;
const mem = std.mem;
const process = std.process;
const assert = std.debug.assert;
-const tmpDir = std.testing.tmpDir;
const fatal = std.process.fatal;
const info = std.log.info;
@@ -86,19 +85,19 @@ pub fn main() anyerror!void {
} else try argv.append(arg);
}
- var threaded: Io.Threaded = .init(gpa);
+ var threaded: Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
const sysroot_path = sysroot orelse blk: {
const target = try std.zig.system.resolveTargetQuery(io, .{});
- break :blk std.zig.system.darwin.getSdk(allocator, &target) orelse
+ break :blk std.zig.system.darwin.getSdk(allocator, io, &target) orelse
fatal("no SDK found; you can provide one explicitly with '--sysroot' flag", .{});
};
- var sdk_dir = try std.fs.cwd().openDir(sysroot_path, .{});
- defer sdk_dir.close();
- const sdk_info = try sdk_dir.readFileAlloc("SDKSettings.json", allocator, .limited(std.math.maxInt(u32)));
+ var sdk_dir = try Dir.cwd().openDir(io, sysroot_path, .{});
+ defer sdk_dir.close(io);
+ const sdk_info = try sdk_dir.readFileAlloc(io, "SDKSettings.json", allocator, .limited(std.math.maxInt(u32)));
const parsed_json = try std.json.parseFromSlice(struct {
DefaultProperties: struct { MACOSX_DEPLOYMENT_TARGET: []const u8 },
@@ -111,15 +110,14 @@ pub fn main() anyerror!void {
const os_ver: OsVer = @enumFromInt(version.major);
info("found SDK deployment target macOS {f} aka '{t}'", .{ version, os_ver });
- var tmp = tmpDir(.{});
- defer tmp.cleanup();
+ const tmp_dir: Io.Dir = .cwd();
for (&[_]Arch{ .aarch64, .x86_64 }) |arch| {
const target: Target = .{
.arch = arch,
.os_ver = os_ver,
};
- try fetchTarget(allocator, io, argv.items, sysroot_path, target, version, tmp);
+ try fetchTarget(allocator, io, argv.items, sysroot_path, target, version, tmp_dir);
}
}
@@ -130,13 +128,13 @@ fn fetchTarget(
sysroot: []const u8,
target: Target,
ver: Version,
- tmp: std.testing.TmpDir,
+ tmp_dir: Io.Dir,
) !void {
const tmp_filename = "macos-headers";
const headers_list_filename = "macos-headers.o.d";
- const tmp_path = try tmp.dir.realpathAlloc(arena, ".");
- const tmp_file_path = try fs.path.join(arena, &[_][]const u8{ tmp_path, tmp_filename });
- const headers_list_path = try fs.path.join(arena, &[_][]const u8{ tmp_path, headers_list_filename });
+ const tmp_path = try tmp_dir.realPathFileAlloc(io, ".", arena);
+ const tmp_file_path = try Dir.path.join(arena, &[_][]const u8{ tmp_path, tmp_filename });
+ const headers_list_path = try Dir.path.join(arena, &[_][]const u8{ tmp_path, headers_list_filename });
const macos_version = try std.fmt.allocPrint(arena, "-mmacosx-version-min={d}.{d}", .{
ver.major,
@@ -166,20 +164,17 @@ fn fetchTarget(
});
try cc_argv.appendSlice(args);
- const res = try std.process.Child.run(.{
- .allocator = arena,
- .argv = cc_argv.items,
- });
+ const res = try std.process.Child.run(arena, io, .{ .argv = cc_argv.items });
if (res.stderr.len != 0) {
std.log.err("{s}", .{res.stderr});
}
// Read in the contents of `macos-headers.o.d`
- const headers_list_file = try tmp.dir.openFile(headers_list_filename, .{});
- defer headers_list_file.close();
+ const headers_list_file = try tmp_dir.openFile(io, headers_list_filename, .{});
+ defer headers_list_file.close(io);
- var headers_dir = fs.cwd().openDir(headers_source_prefix, .{}) catch |err| switch (err) {
+ var headers_dir = Dir.cwd().openDir(io, headers_source_prefix, .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
=> fatal("path '{s}' not found or not a directory. Did you accidentally delete it?", .{
@@ -187,13 +182,13 @@ fn fetchTarget(
}),
else => return err,
};
- defer headers_dir.close();
+ defer headers_dir.close(io);
const dest_path = try target.fullName(arena);
- try headers_dir.deleteTree(dest_path);
+ try headers_dir.deleteTree(io, dest_path);
- var dest_dir = try headers_dir.makeOpenPath(dest_path, .{});
- var dirs = std.StringHashMap(fs.Dir).init(arena);
+ var dest_dir = try headers_dir.createDirPathOpen(io, dest_path, .{});
+ var dirs = std.StringHashMap(Dir).init(arena);
try dirs.putNoClobber(".", dest_dir);
var headers_list_file_reader = headers_list_file.reader(io, &.{});
@@ -206,25 +201,25 @@ fn fetchTarget(
if (mem.lastIndexOf(u8, line, prefix[0..])) |idx| {
const out_rel_path = line[idx + prefix.len + 1 ..];
const out_rel_path_stripped = mem.trim(u8, out_rel_path, " \\");
- const dirname = fs.path.dirname(out_rel_path_stripped) orelse ".";
+ const dirname = Dir.path.dirname(out_rel_path_stripped) orelse ".";
const maybe_dir = try dirs.getOrPut(dirname);
if (!maybe_dir.found_existing) {
- maybe_dir.value_ptr.* = try dest_dir.makeOpenPath(dirname, .{});
+ maybe_dir.value_ptr.* = try dest_dir.createDirPathOpen(io, dirname, .{});
}
- const basename = fs.path.basename(out_rel_path_stripped);
+ const basename = Dir.path.basename(out_rel_path_stripped);
const line_stripped = mem.trim(u8, line, " \\");
- const abs_dirname = fs.path.dirname(line_stripped).?;
- var orig_subdir = try fs.cwd().openDir(abs_dirname, .{});
- defer orig_subdir.close();
+ const abs_dirname = Dir.path.dirname(line_stripped).?;
+ var orig_subdir = try Dir.cwd().openDir(io, abs_dirname, .{});
+ defer orig_subdir.close(io);
- try orig_subdir.copyFile(basename, maybe_dir.value_ptr.*, basename, .{});
+ try orig_subdir.copyFile(basename, maybe_dir.value_ptr.*, basename, io, .{});
}
}
var dir_it = dirs.iterator();
while (dir_it.next()) |entry| {
- entry.value_ptr.close();
+ entry.value_ptr.close(io);
}
}
diff --git a/tools/gen_macos_headers_c.zig b/tools/gen_macos_headers_c.zig
index fe036cf6b7..95880fe342 100644
--- a/tools/gen_macos_headers_c.zig
+++ b/tools/gen_macos_headers_c.zig
@@ -1,8 +1,9 @@
const std = @import("std");
+const Io = std.Io;
+const Dir = std.Io.Dir;
const assert = std.debug.assert;
const info = std.log.info;
const fatal = std.process.fatal;
-
const Allocator = std.mem.Allocator;
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
@@ -20,6 +21,10 @@ pub fn main() anyerror!void {
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
+ var threaded: Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const args = try std.process.argsAlloc(arena);
if (args.len == 1) fatal("no command or option specified", .{});
@@ -33,10 +38,10 @@ pub fn main() anyerror!void {
if (positionals.items.len != 1) fatal("expected one positional argument: [dir]", .{});
- var dir = try std.fs.cwd().openDir(positionals.items[0], .{ .follow_symlinks = false });
- defer dir.close();
+ var dir = try Io.Dir.cwd().openDir(io, positionals.items[0], .{ .follow_symlinks = false });
+ defer dir.close(io);
var paths = std.array_list.Managed([]const u8).init(arena);
- try findHeaders(arena, dir, "", &paths);
+ try findHeaders(arena, io, dir, "", &paths);
const SortFn = struct {
pub fn lessThan(ctx: void, lhs: []const u8, rhs: []const u8) bool {
@@ -48,7 +53,7 @@ pub fn main() anyerror!void {
std.mem.sort([]const u8, paths.items, {}, SortFn.lessThan);
var buffer: [2000]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writerStreaming(&buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &buffer);
const w = &stdout_writer.interface;
try w.writeAll("#define _XOPEN_SOURCE\n");
for (paths.items) |path| {
@@ -64,23 +69,24 @@ pub fn main() anyerror!void {
fn findHeaders(
arena: Allocator,
- dir: std.fs.Dir,
+ io: Io,
+ dir: Dir,
prefix: []const u8,
paths: *std.array_list.Managed([]const u8),
) anyerror!void {
var it = dir.iterate();
- while (try it.next()) |entry| {
+ while (try it.next(io)) |entry| {
switch (entry.kind) {
.directory => {
- const path = try std.fs.path.join(arena, &.{ prefix, entry.name });
- var subdir = try dir.openDir(entry.name, .{ .follow_symlinks = false });
- defer subdir.close();
- try findHeaders(arena, subdir, path, paths);
+ const path = try Io.Dir.path.join(arena, &.{ prefix, entry.name });
+ var subdir = try dir.openDir(io, entry.name, .{ .follow_symlinks = false });
+ defer subdir.close(io);
+ try findHeaders(arena, io, subdir, path, paths);
},
.file, .sym_link => {
- const ext = std.fs.path.extension(entry.name);
+ const ext = Io.Dir.path.extension(entry.name);
if (!std.mem.eql(u8, ext, ".h")) continue;
- const path = try std.fs.path.join(arena, &.{ prefix, entry.name });
+ const path = try Io.Dir.path.join(arena, &.{ prefix, entry.name });
try paths.append(path);
},
else => {},
diff --git a/tools/gen_outline_atomics.zig b/tools/gen_outline_atomics.zig
index 1dade66610..4d87e531bd 100644
--- a/tools/gen_outline_atomics.zig
+++ b/tools/gen_outline_atomics.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const AtomicOp = enum {
@@ -15,10 +16,14 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
+ var threaded: std.Io.Threaded = .init(arena, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
//const args = try std.process.argsAlloc(arena);
var stdout_buffer: [2000]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const w = &stdout_writer.interface;
try w.writeAll(
diff --git a/tools/gen_spirv_spec.zig b/tools/gen_spirv_spec.zig
index c211d1022e..e96c98b6c0 100644
--- a/tools/gen_spirv_spec.zig
+++ b/tools/gen_spirv_spec.zig
@@ -1,5 +1,7 @@
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
+
const g = @import("spirv/grammar.zig");
const CoreRegistry = g.CoreRegistry;
const ExtensionRegistry = g.ExtensionRegistry;
@@ -63,24 +65,28 @@ pub fn main() !void {
usageAndExit(args[0], 1);
}
- const json_path = try std.fs.path.join(allocator, &.{ args[1], "include/spirv/unified1/" });
- const dir = try std.fs.cwd().openDir(json_path, .{ .iterate = true });
+ var threaded: std.Io.Threaded = .init(allocator, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
+ const json_path = try Io.Dir.path.join(allocator, &.{ args[1], "include/spirv/unified1/" });
+ const dir = try Io.Dir.cwd().openDir(io, json_path, .{ .iterate = true });
- const core_spec = try readRegistry(CoreRegistry, dir, "spirv.core.grammar.json");
+ const core_spec = try readRegistry(io, CoreRegistry, dir, "spirv.core.grammar.json");
std.mem.sortUnstable(Instruction, core_spec.instructions, CmpInst{}, CmpInst.lt);
var exts = std.array_list.Managed(Extension).init(allocator);
var it = dir.iterate();
- while (try it.next()) |entry| {
+ while (try it.next(io)) |entry| {
if (entry.kind != .file) {
continue;
}
- try readExtRegistry(&exts, dir, entry.name);
+ try readExtRegistry(io, &exts, dir, entry.name);
}
- try readExtRegistry(&exts, std.fs.cwd(), args[2]);
+ try readExtRegistry(io, &exts, Io.Dir.cwd(), args[2]);
var allocating: std.Io.Writer.Allocating = .init(allocator);
defer allocating.deinit();
@@ -91,7 +97,7 @@ pub fn main() !void {
var tree = try std.zig.Ast.parse(allocator, output, .zig);
if (tree.errors.len != 0) {
- try std.zig.printAstErrorsToStderr(allocator, tree, "", .auto);
+ try std.zig.printAstErrorsToStderr(allocator, io, tree, "", .auto);
return;
}
@@ -103,22 +109,22 @@ pub fn main() !void {
try wip_errors.addZirErrorMessages(zir, tree, output, "");
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(allocator);
- error_bundle.renderToStdErr(.{}, .auto);
+ try error_bundle.renderToStderr(io, .{}, .auto);
}
const formatted_output = try tree.renderAlloc(allocator);
- _ = try std.fs.File.stdout().write(formatted_output);
+ try Io.File.stdout().writeStreamingAll(io, formatted_output);
}
-fn readExtRegistry(exts: *std.array_list.Managed(Extension), dir: std.fs.Dir, sub_path: []const u8) !void {
- const filename = std.fs.path.basename(sub_path);
+fn readExtRegistry(io: Io, exts: *std.array_list.Managed(Extension), dir: Io.Dir, sub_path: []const u8) !void {
+ const filename = Io.Dir.path.basename(sub_path);
if (!std.mem.startsWith(u8, filename, "extinst.")) {
return;
}
std.debug.assert(std.mem.endsWith(u8, filename, ".grammar.json"));
const name = filename["extinst.".len .. filename.len - ".grammar.json".len];
- const spec = try readRegistry(ExtensionRegistry, dir, sub_path);
+ const spec = try readRegistry(io, ExtensionRegistry, dir, sub_path);
const set_name = set_names.get(name) orelse {
std.log.info("ignored instruction set '{s}'", .{name});
@@ -134,8 +140,8 @@ fn readExtRegistry(exts: *std.array_list.Managed(Extension), dir: std.fs.Dir, su
});
}
-fn readRegistry(comptime RegistryType: type, dir: std.fs.Dir, path: []const u8) !RegistryType {
- const spec = try dir.readFileAlloc(path, allocator, .unlimited);
+fn readRegistry(io: Io, comptime RegistryType: type, dir: Io.Dir, path: []const u8) !RegistryType {
+ const spec = try dir.readFileAlloc(io, path, allocator, .unlimited);
// Required for json parsing.
// TODO: ALI
@setEvalBranchQuota(10000);
@@ -930,8 +936,9 @@ fn parseHexInt(text: []const u8) !u31 {
}
fn usageAndExit(arg0: []const u8, code: u8) noreturn {
- const stderr, _ = std.debug.lockStderrWriter(&.{});
- stderr.print(
+ const stderr = std.debug.lockStderr(&.{});
+ const w = &stderr.file_writer.interface;
+ w.print(
\\Usage: {s} <SPIRV-Headers repository path> <path/to/zig/src/codegen/spirv/extinst.zig.grammar.json>
\\
\\Generates Zig bindings for SPIR-V specifications found in the SPIRV-Headers
diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig
index 51f66d3080..51e83e1a08 100644
--- a/tools/gen_stubs.zig
+++ b/tools/gen_stubs.zig
@@ -55,12 +55,14 @@
// - e.g. find a common previous symbol and put it after that one
// - they definitely need to go into the correct section
+const builtin = @import("builtin");
+const native_endian = builtin.cpu.arch.endian();
+
const std = @import("std");
-const builtin = std.builtin;
+const Io = std.Io;
const mem = std.mem;
const log = std.log;
const elf = std.elf;
-const native_endian = @import("builtin").cpu.arch.endian();
const Arch = enum {
aarch64,
@@ -284,10 +286,14 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
+ var threaded: std.Io.Threaded = .init(arena, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const args = try std.process.argsAlloc(arena);
const build_all_path = args[1];
- var build_all_dir = try std.fs.cwd().openDir(build_all_path, .{});
+ var build_all_dir = try Io.Dir.cwd().openDir(io, build_all_path, .{});
var sym_table = std.StringArrayHashMap(MultiSym).init(arena);
var sections = std.StringArrayHashMap(void).init(arena);
@@ -299,6 +305,7 @@ pub fn main() !void {
// Read the ELF header.
const elf_bytes = build_all_dir.readFileAllocOptions(
+ io,
libc_so_path,
arena,
.limited(100 * 1024 * 1024),
@@ -334,7 +341,7 @@ pub fn main() !void {
}
var stdout_buffer: [2000]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout = &stdout_writer.interface;
try stdout.writeAll(
\\#ifdef PTR64
@@ -539,7 +546,7 @@ pub fn main() !void {
try stdout.flush();
}
-fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian) !void {
+fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: std.builtin.Endian) !void {
const arena = parse.arena;
const elf_bytes = parse.elf_bytes;
const header = parse.header;
diff --git a/tools/generate_JSONTestSuite.zig b/tools/generate_JSONTestSuite.zig
index 2c6fee5bdd..e445a1badf 100644
--- a/tools/generate_JSONTestSuite.zig
+++ b/tools/generate_JSONTestSuite.zig
@@ -1,13 +1,18 @@
// zig run this file inside the test_parsing/ directory of this repo: https://github.com/nst/JSONTestSuite
const std = @import("std");
+const Io = std.Io;
pub fn main() !void {
var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
var allocator = gpa.allocator();
+ var threaded: std.Io.Threaded = .init(allocator, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
var stdout_buffer: [2000]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const output = &stdout_writer.interface;
try output.writeAll(
\\// This file was generated by _generate_JSONTestSuite.zig
@@ -20,9 +25,9 @@ pub fn main() !void {
);
var names = std.array_list.Managed([]const u8).init(allocator);
- var cwd = try std.fs.cwd().openDir(".", .{ .iterate = true });
+ var cwd = try Io.Dir.cwd().openDir(io, ".", .{ .iterate = true });
var it = cwd.iterate();
- while (try it.next()) |entry| {
+ while (try it.next(io)) |entry| {
try names.append(try allocator.dupe(u8, entry.name));
}
std.mem.sort([]const u8, names.items, {}, (struct {
@@ -32,7 +37,7 @@ pub fn main() !void {
}).lessThan);
for (names.items) |name| {
- const contents = try std.fs.cwd().readFileAlloc(name, allocator, .limited(250001));
+ const contents = try Io.Dir.cwd().readFileAlloc(io, name, allocator, .limited(250001));
try output.writeAll("test ");
try writeString(output, name);
try output.writeAll(" {\n try ");
diff --git a/tools/generate_c_size_and_align_checks.zig b/tools/generate_c_size_and_align_checks.zig
index 3663756533..833fa50f5c 100644
--- a/tools/generate_c_size_and_align_checks.zig
+++ b/tools/generate_c_size_and_align_checks.zig
@@ -7,6 +7,7 @@
//! target.
const std = @import("std");
+const Io = std.Io;
fn cName(ty: std.Target.CType) []const u8 {
return switch (ty) {
@@ -39,7 +40,7 @@ pub fn main() !void {
std.process.exit(1);
}
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -47,7 +48,7 @@ pub fn main() !void {
const target = try std.zig.system.resolveTargetQuery(io, query);
var buffer: [2000]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writerStreaming(&buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &buffer);
const w = &stdout_writer.interface;
inline for (@typeInfo(std.Target.CType).@"enum".fields) |field| {
const c_type: std.Target.CType = @enumFromInt(field.value);
diff --git a/tools/generate_linux_syscalls.zig b/tools/generate_linux_syscalls.zig
index e009715e2f..fd632a6c97 100644
--- a/tools/generate_linux_syscalls.zig
+++ b/tools/generate_linux_syscalls.zig
@@ -175,21 +175,25 @@ pub fn main() !void {
defer arena.deinit();
const gpa = arena.allocator();
+ var threaded: Io.Threaded = .init(gpa, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const args = try std.process.argsAlloc(gpa);
if (args.len < 2 or mem.eql(u8, args[1], "--help")) {
- const w, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
+ const stderr = std.debug.lockStderr(&.{});
+ const w = &stderr.file_writer.interface;
usage(w, args[0]) catch std.process.exit(2);
std.process.exit(1);
}
const linux_path = args[1];
var stdout_buffer: [2048]u8 = undefined;
- var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout = &stdout_writer.interface;
- var linux_dir = try std.fs.cwd().openDir(linux_path, .{});
- defer linux_dir.close();
+ var linux_dir = try Io.Dir.cwd().openDir(io, linux_path, .{});
+ defer linux_dir.close(io);
// As of 6.11, the largest table is 24195 bytes.
// 32k should be enough for now.
@@ -198,7 +202,7 @@ pub fn main() !void {
// Fetch the kernel version from the Makefile variables.
const version = blk: {
- const head = try linux_dir.readFile("Makefile", buf[0..128]);
+ const head = try linux_dir.readFile(io, "Makefile", buf[0..128]);
var lines = mem.tokenizeScalar(u8, head, '\n');
_ = lines.next(); // Skip SPDX identifier
@@ -221,7 +225,7 @@ pub fn main() !void {
, .{version});
for (architectures, 0..) |arch, i| {
- const table = try linux_dir.readFile(switch (arch.table) {
+ const table = try linux_dir.readFile(io, switch (arch.table) {
.generic => "scripts/syscall.tbl",
.specific => |f| f,
}, buf);
diff --git a/tools/incr-check.zig b/tools/incr-check.zig
index 5023fb85dd..7bc956201a 100644
--- a/tools/incr-check.zig
+++ b/tools/incr-check.zig
@@ -1,9 +1,10 @@
const std = @import("std");
const Io = std.Io;
+const Dir = std.Io.Dir;
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
-const usage = "usage: incr-check <zig binary path> <input file> [--zig-lib-dir lib] [--debug-zcu] [--debug-dwarf] [--debug-link] [--preserve-tmp] [--zig-cc-binary /path/to/zig]";
+const usage = "usage: incr-check <zig binary path> <input file> [--zig-lib-dir lib] [--debug-log foo] [--preserve-tmp] [--zig-cc-binary /path/to/zig]";
pub fn main() !void {
const fatal = std.process.fatal;
@@ -14,7 +15,7 @@ pub fn main() !void {
const gpa = arena;
- var threaded: Io.Threaded = .init(gpa);
+ var threaded: Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -22,27 +23,37 @@ pub fn main() !void {
var opt_input_file_name: ?[]const u8 = null;
var opt_lib_dir: ?[]const u8 = null;
var opt_cc_zig: ?[]const u8 = null;
- var debug_zcu = false;
- var debug_dwarf = false;
- var debug_link = false;
var preserve_tmp = false;
+ var enable_qemu: bool = false;
+ var enable_wine: bool = false;
+ var enable_wasmtime: bool = false;
+ var enable_darling: bool = false;
+
+ var debug_log_args: std.ArrayList([]const u8) = .empty;
var arg_it = try std.process.argsWithAllocator(arena);
_ = arg_it.skip();
while (arg_it.next()) |arg| {
if (arg.len > 0 and arg[0] == '-') {
if (std.mem.eql(u8, arg, "--zig-lib-dir")) {
- opt_lib_dir = arg_it.next() orelse fatal("expected arg after '--zig-lib-dir'\n{s}", .{usage});
- } else if (std.mem.eql(u8, arg, "--debug-zcu")) {
- debug_zcu = true;
- } else if (std.mem.eql(u8, arg, "--debug-dwarf")) {
- debug_dwarf = true;
- } else if (std.mem.eql(u8, arg, "--debug-link")) {
- debug_link = true;
+ opt_lib_dir = arg_it.next() orelse fatal("expected arg after --zig-lib-dir\n{s}", .{usage});
+ } else if (std.mem.eql(u8, arg, "--debug-log")) {
+ try debug_log_args.append(
+ arena,
+ arg_it.next() orelse fatal("expected arg after --debug-log\n{s}", .{usage}),
+ );
} else if (std.mem.eql(u8, arg, "--preserve-tmp")) {
preserve_tmp = true;
+ } else if (std.mem.eql(u8, arg, "-fqemu")) {
+ enable_qemu = true;
+ } else if (std.mem.eql(u8, arg, "-fwine")) {
+ enable_wine = true;
+ } else if (std.mem.eql(u8, arg, "-fwasmtime")) {
+ enable_wasmtime = true;
+ } else if (std.mem.eql(u8, arg, "-fdarling")) {
+ enable_darling = true;
} else if (std.mem.eql(u8, arg, "--zig-cc-binary")) {
- opt_cc_zig = arg_it.next() orelse fatal("expect arg after '--zig-cc-binary'\n{s}", .{usage});
+ opt_cc_zig = arg_it.next() orelse fatal("expected arg after --zig-cc-binary\n{s}", .{usage});
} else {
fatal("unknown option '{s}'\n{s}", .{ arg, usage });
}
@@ -59,7 +70,7 @@ pub fn main() !void {
const zig_exe = opt_zig_exe orelse fatal("missing path to zig\n{s}", .{usage});
const input_file_name = opt_input_file_name orelse fatal("missing input file\n{s}", .{usage});
- const input_file_bytes = try std.fs.cwd().readFileAlloc(input_file_name, arena, .limited(std.math.maxInt(u32)));
+ const input_file_bytes = try Dir.cwd().readFileAlloc(io, input_file_name, arena, .limited(std.math.maxInt(u32)));
const case = try Case.parse(arena, io, input_file_bytes);
// Check now: if there are any targets using the `cbe` backend, we need the lib dir.
@@ -71,31 +82,31 @@ pub fn main() !void {
}
}
- const prog_node = std.Progress.start(.{});
+ const prog_node = std.Progress.start(io, .{});
defer prog_node.end();
const rand_int = std.crypto.random.int(u64);
const tmp_dir_path = "tmp_" ++ std.fmt.hex(rand_int);
- var tmp_dir = try std.fs.cwd().makeOpenPath(tmp_dir_path, .{});
+ var tmp_dir = try Dir.cwd().createDirPathOpen(io, tmp_dir_path, .{});
defer {
- tmp_dir.close();
+ tmp_dir.close(io);
if (!preserve_tmp) {
- std.fs.cwd().deleteTree(tmp_dir_path) catch |err| {
- std.log.warn("failed to delete tree '{s}': {s}", .{ tmp_dir_path, @errorName(err) });
+ Dir.cwd().deleteTree(io, tmp_dir_path) catch |err| {
+ std.log.warn("failed to delete tree '{s}': {t}", .{ tmp_dir_path, err });
};
}
}
// Convert paths to be relative to the cwd of the subprocess.
- const resolved_zig_exe = try std.fs.path.relative(arena, tmp_dir_path, zig_exe);
+ const resolved_zig_exe = try Dir.path.relative(arena, tmp_dir_path, zig_exe);
const opt_resolved_lib_dir = if (opt_lib_dir) |lib_dir|
- try std.fs.path.relative(arena, tmp_dir_path, lib_dir)
+ try Dir.path.relative(arena, tmp_dir_path, lib_dir)
else
null;
const host = try std.zig.system.resolveTargetQuery(io, .{});
- const debug_log_verbose = debug_zcu or debug_dwarf or debug_link;
+ const debug_log_verbose = debug_log_args.items.len != 0;
for (case.targets) |target| {
const target_prog_node = node: {
@@ -133,14 +144,8 @@ pub fn main() !void {
.llvm => try child_args.appendSlice(arena, &.{ "-fllvm", "-flld" }),
.cbe => try child_args.appendSlice(arena, &.{ "-ofmt=c", "-lc" }),
}
- if (debug_zcu) {
- try child_args.appendSlice(arena, &.{ "--debug-log", "zcu" });
- }
- if (debug_dwarf) {
- try child_args.appendSlice(arena, &.{ "--debug-log", "dwarf" });
- }
- if (debug_link) {
- try child_args.appendSlice(arena, &.{ "--debug-log", "link", "--debug-log", "link_state", "--debug-log", "link_relocs" });
+ for (debug_log_args.items) |arg| {
+ try child_args.appendSlice(arena, &.{ "--debug-log", arg });
}
for (case.modules) |mod| {
try child_args.appendSlice(arena, &.{ "--dep", mod.name });
@@ -164,7 +169,7 @@ pub fn main() !void {
var cc_child_args: std.ArrayList([]const u8) = .empty;
if (target.backend == .cbe) {
const resolved_cc_zig_exe = if (opt_cc_zig) |cc_zig_exe|
- try std.fs.path.relative(arena, tmp_dir_path, cc_zig_exe)
+ try Dir.path.relative(arena, tmp_dir_path, cc_zig_exe)
else
resolved_zig_exe;
@@ -185,6 +190,7 @@ pub fn main() !void {
var eval: Eval = .{
.arena = arena,
+ .io = io,
.case = case,
.host = host,
.target = target,
@@ -194,11 +200,15 @@ pub fn main() !void {
.allow_stderr = debug_log_verbose,
.preserve_tmp_on_fatal = preserve_tmp,
.cc_child_args = &cc_child_args,
+ .enable_qemu = enable_qemu,
+ .enable_wine = enable_wine,
+ .enable_wasmtime = enable_wasmtime,
+ .enable_darling = enable_darling,
};
- try child.spawn();
+ try child.spawn(io);
errdefer {
- _ = child.kill() catch {};
+ _ = child.kill(io) catch {};
}
var poller = Io.poll(arena, Eval.StreamEnum, .{
@@ -228,10 +238,11 @@ pub fn main() !void {
const Eval = struct {
arena: Allocator,
+ io: Io,
host: std.Target,
case: Case,
target: Case.Target,
- tmp_dir: std.fs.Dir,
+ tmp_dir: Dir,
tmp_dir_path: []const u8,
child: *std.process.Child,
allow_stderr: bool,
@@ -240,22 +251,28 @@ const Eval = struct {
/// The arguments `out.c in.c` must be appended before spawning the subprocess.
cc_child_args: *std.ArrayList([]const u8),
+ enable_qemu: bool,
+ enable_wine: bool,
+ enable_wasmtime: bool,
+ enable_darling: bool,
+
const StreamEnum = enum { stdout, stderr };
const Poller = Io.Poller(StreamEnum);
/// Currently this function assumes the previous updates have already been written.
fn write(eval: *Eval, update: Case.Update) void {
+ const io = eval.io;
for (update.changes) |full_contents| {
- eval.tmp_dir.writeFile(.{
+ eval.tmp_dir.writeFile(io, .{
.sub_path = full_contents.name,
.data = full_contents.bytes,
}) catch |err| {
- eval.fatal("failed to update '{s}': {s}", .{ full_contents.name, @errorName(err) });
+ eval.fatal("failed to update '{s}': {t}", .{ full_contents.name, err });
};
}
for (update.deletes) |doomed_name| {
- eval.tmp_dir.deleteFile(doomed_name) catch |err| {
- eval.fatal("failed to delete '{s}': {s}", .{ doomed_name, @errorName(err) });
+ eval.tmp_dir.deleteFile(io, doomed_name) catch |err| {
+ eval.fatal("failed to delete '{s}': {t}", .{ doomed_name, err });
};
}
}
@@ -307,14 +324,14 @@ const Eval = struct {
}
const digest = r.takeArray(Cache.bin_digest_len) catch unreachable;
- const result_dir = ".local-cache" ++ std.fs.path.sep_str ++ "o" ++ std.fs.path.sep_str ++ Cache.binToHex(digest.*);
+ const result_dir = ".local-cache" ++ Dir.path.sep_str ++ "o" ++ Dir.path.sep_str ++ Cache.binToHex(digest.*);
const bin_name = try std.zig.EmitArtifact.bin.cacheName(arena, .{
.root_name = "root", // corresponds to the module name "root"
.target = &eval.target.resolved,
.output_mode = .Exe,
});
- const bin_path = try std.fs.path.join(arena, &.{ result_dir, bin_name });
+ const bin_path = try Dir.path.join(arena, &.{ result_dir, bin_name });
try eval.checkSuccessOutcome(update, bin_path, prog_node);
// This message indicates the end of the update.
@@ -338,11 +355,12 @@ const Eval = struct {
}
fn checkErrorOutcome(eval: *Eval, update: Case.Update, error_bundle: std.zig.ErrorBundle) !void {
+ const io = eval.io;
const expected = switch (update.outcome) {
.unknown => return,
.compile_errors => |ce| ce,
.stdout, .exit_code => {
- error_bundle.renderToStdErr(.{}, .auto);
+ try error_bundle.renderToStderr(io, .{}, .auto);
eval.fatal("update '{s}': unexpected compile errors", .{update.name});
},
};
@@ -351,7 +369,7 @@ const Eval = struct {
for (error_bundle.getMessages()) |err_idx| {
if (expected_idx == expected.errors.len) {
- error_bundle.renderToStdErr(.{}, .auto);
+ try error_bundle.renderToStderr(io, .{}, .auto);
eval.fatal("update '{s}': more errors than expected", .{update.name});
}
try eval.checkOneError(update, error_bundle, expected.errors[expected_idx], false, err_idx);
@@ -359,7 +377,7 @@ const Eval = struct {
for (error_bundle.getNotes(err_idx)) |note_idx| {
if (expected_idx == expected.errors.len) {
- error_bundle.renderToStdErr(.{}, .auto);
+ try error_bundle.renderToStderr(io, .{}, .auto);
eval.fatal("update '{s}': more error notes than expected", .{update.name});
}
try eval.checkOneError(update, error_bundle, expected.errors[expected_idx], true, note_idx);
@@ -368,7 +386,7 @@ const Eval = struct {
}
if (!std.mem.eql(u8, error_bundle.getCompileLogOutput(), expected.compile_log_output)) {
- error_bundle.renderToStdErr(.{}, .auto);
+ try error_bundle.renderToStderr(io, .{}, .auto);
eval.fatal("update '{s}': unexpected compile log output", .{update.name});
}
}
@@ -388,6 +406,8 @@ const Eval = struct {
const src = eb.getSourceLocation(err.src_loc);
const raw_filename = eb.nullTerminatedString(src.src_path);
+ const io = eval.io;
+
// We need to replace backslashes for consistency between platforms.
const filename = name: {
if (std.mem.indexOfScalar(u8, raw_filename, '\\') == null) break :name raw_filename;
@@ -402,7 +422,7 @@ const Eval = struct {
expected.column != src.column + 1 or
!std.mem.eql(u8, expected.msg, msg))
{
- eb.renderToStdErr(.{}, .auto);
+ eb.renderToStderr(io, .{}, .auto) catch {};
eval.fatal("update '{s}': compile error did not match expected error", .{update.name});
}
}
@@ -429,8 +449,11 @@ const Eval = struct {
},
};
+ const io = eval.io;
+
var argv_buf: [2][]const u8 = undefined;
- const argv: []const []const u8, const is_foreign: bool = switch (std.zig.system.getExternalExecutor(
+ const argv: []const []const u8, const is_foreign: bool = sw: switch (std.zig.system.getExternalExecutor(
+ io,
&eval.host,
&eval.target.resolved,
.{ .link_libc = eval.target.backend == .cbe },
@@ -449,18 +472,48 @@ const Eval = struct {
argv_buf[0] = binary_path;
break :argv .{ argv_buf[0..1], false };
},
- .qemu, .wine, .wasmtime, .darling => |executor_cmd| argv: {
- argv_buf[0] = executor_cmd;
- argv_buf[1] = binary_path;
- break :argv .{ argv_buf[0..2], true };
+ .qemu => |executor_cmd| argv: {
+ if (eval.enable_qemu) {
+ argv_buf[0] = executor_cmd;
+ argv_buf[1] = binary_path;
+ break :argv .{ argv_buf[0..2], true };
+ } else {
+ continue :sw .bad_os_or_cpu;
+ }
+ },
+ .wine => |executor_cmd| argv: {
+ if (eval.enable_wine) {
+ argv_buf[0] = executor_cmd;
+ argv_buf[1] = binary_path;
+ break :argv .{ argv_buf[0..2], true };
+ } else {
+ continue :sw .bad_os_or_cpu;
+ }
+ },
+ .wasmtime => |executor_cmd| argv: {
+ if (eval.enable_wasmtime) {
+ argv_buf[0] = executor_cmd;
+ argv_buf[1] = binary_path;
+ break :argv .{ argv_buf[0..2], true };
+ } else {
+ continue :sw .bad_os_or_cpu;
+ }
+ },
+ .darling => |executor_cmd| argv: {
+ if (eval.enable_darling) {
+ argv_buf[0] = executor_cmd;
+ argv_buf[1] = binary_path;
+ break :argv .{ argv_buf[0..2], true };
+ } else {
+ continue :sw .bad_os_or_cpu;
+ }
},
};
const run_prog_node = prog_node.start("run generated executable", 0);
defer run_prog_node.end();
- const result = std.process.Child.run(.{
- .allocator = eval.arena,
+ const result = std.process.Child.run(eval.arena, io, .{
.argv = argv,
.cwd_dir = eval.tmp_dir,
.cwd = eval.tmp_dir_path,
@@ -468,17 +521,17 @@ const Eval = struct {
if (is_foreign) {
// Chances are the foreign executor isn't available. Skip this evaluation.
if (eval.allow_stderr) {
- std.log.warn("update '{s}': skipping execution of '{s}' via executor for foreign target '{s}': {s}", .{
+ std.log.warn("update '{s}': skipping execution of '{s}' via executor for foreign target '{s}': {t}", .{
update.name,
binary_path,
try eval.target.resolved.zigTriple(eval.arena),
- @errorName(err),
+ err,
});
}
return;
}
- eval.fatal("update '{s}': failed to run the generated executable '{s}': {s}", .{
- update.name, binary_path, @errorName(err),
+ eval.fatal("update '{s}': failed to run the generated executable '{s}': {t}", .{
+ update.name, binary_path, err,
});
};
@@ -514,11 +567,12 @@ const Eval = struct {
}
fn requestUpdate(eval: *Eval) !void {
+ const io = eval.io;
const header: std.zig.Client.Message.Header = .{
.tag = .update,
.bytes_len = 0,
};
- var w = eval.child.stdin.?.writer(&.{});
+ var w = eval.child.stdin.?.writer(io, &.{});
w.interface.writeStruct(header, .little) catch |err| switch (err) {
error.WriteFailed => return w.err.?,
};
@@ -552,16 +606,13 @@ const Eval = struct {
try eval.cc_child_args.appendSlice(eval.arena, &.{ out_path, c_path });
defer eval.cc_child_args.items.len -= 2;
- const result = std.process.Child.run(.{
- .allocator = eval.arena,
+ const result = std.process.Child.run(eval.arena, eval.io, .{
.argv = eval.cc_child_args.items,
.cwd_dir = eval.tmp_dir,
.cwd = eval.tmp_dir_path,
.progress_node = child_prog_node,
}) catch |err| {
- eval.fatal("update '{s}': failed to spawn zig cc for '{s}': {s}", .{
- update.name, c_path, @errorName(err),
- });
+ eval.fatal("update '{s}': failed to spawn zig cc for '{s}': {t}", .{ update.name, c_path, err });
};
switch (result.term) {
.Exited => |code| if (code != 0) {
@@ -588,12 +639,13 @@ const Eval = struct {
}
fn fatal(eval: *Eval, comptime fmt: []const u8, args: anytype) noreturn {
- eval.tmp_dir.close();
+ const io = eval.io;
+ eval.tmp_dir.close(io);
if (!eval.preserve_tmp_on_fatal) {
// Kill the child since it holds an open handle to its CWD which is the tmp dir path
- _ = eval.child.kill() catch {};
- std.fs.cwd().deleteTree(eval.tmp_dir_path) catch |err| {
- std.log.warn("failed to delete tree '{s}': {s}", .{ eval.tmp_dir_path, @errorName(err) });
+ _ = eval.child.kill(io) catch {};
+ Dir.cwd().deleteTree(io, eval.tmp_dir_path) catch |err| {
+ std.log.warn("failed to delete tree '{s}': {t}", .{ eval.tmp_dir_path, err });
};
}
std.process.fatal(fmt, args);
@@ -759,7 +811,7 @@ const Case = struct {
if (last_update.outcome != .unknown) fatal("line {d}: conflicting expect directive", .{line_n});
last_update.outcome = .{
.stdout = std.zig.string_literal.parseAlloc(arena, val) catch |err| {
- fatal("line {d}: bad string literal: {s}", .{ line_n, @errorName(err) });
+ fatal("line {d}: bad string literal: {t}", .{ line_n, err });
},
};
} else if (std.mem.eql(u8, key, "expect_error")) {
@@ -833,27 +885,29 @@ const Case = struct {
fn requestExit(child: *std.process.Child, eval: *Eval) void {
if (child.stdin == null) return;
+ const io = eval.io;
const header: std.zig.Client.Message.Header = .{
.tag = .exit,
.bytes_len = 0,
};
- var w = eval.child.stdin.?.writer(&.{});
+ var w = eval.child.stdin.?.writer(io, &.{});
w.interface.writeStruct(header, .little) catch |err| switch (err) {
error.WriteFailed => switch (w.err.?) {
error.BrokenPipe => {},
- else => |e| eval.fatal("failed to send exit: {s}", .{@errorName(e)}),
+ else => |e| eval.fatal("failed to send exit: {t}", .{e}),
},
};
// Send EOF to stdin.
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
}
fn waitChild(child: *std.process.Child, eval: *Eval) void {
+ const io = eval.io;
requestExit(child, eval);
- const term = child.wait() catch |err| eval.fatal("child process failed: {s}", .{@errorName(err)});
+ const term = child.wait(io) catch |err| eval.fatal("child process failed: {t}", .{err});
switch (term) {
.Exited => |code| if (code != 0) eval.fatal("compiler failed with code {d}", .{code}),
.Signal, .Stopped, .Unknown => eval.fatal("compiler terminated unexpectedly", .{}),
diff --git a/tools/migrate_langref.zig b/tools/migrate_langref.zig
index 3544cee175..24ddd5941d 100644
--- a/tools/migrate_langref.zig
+++ b/tools/migrate_langref.zig
@@ -1,13 +1,16 @@
-const std = @import("std");
const builtin = @import("builtin");
-const fs = std.fs;
+
+const std = @import("std");
+const Io = std.Io;
+const Dir = std.Io.Dir;
const print = std.debug.print;
const mem = std.mem;
const testing = std.testing;
const Allocator = std.mem.Allocator;
-const max_doc_file_size = 10 * 1024 * 1024;
const fatal = std.process.fatal;
+const max_doc_file_size = 10 * 1024 * 1024;
+
pub fn main() !void {
var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_instance.deinit();
@@ -19,27 +22,27 @@ pub fn main() !void {
const input_file = args[1];
const output_file = args[2];
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
- var in_file = try fs.cwd().openFile(input_file, .{ .mode = .read_only });
- defer in_file.close();
+ var in_file = try Dir.cwd().openFile(io, input_file, .{ .mode = .read_only });
+ defer in_file.close(io);
- var out_file = try fs.cwd().createFile(output_file, .{});
- defer out_file.close();
+ var out_file = try Dir.cwd().createFile(io, output_file, .{});
+ defer out_file.close(io);
var out_file_buffer: [4096]u8 = undefined;
- var out_file_writer = out_file.writer(&out_file_buffer);
+ var out_file_writer = out_file.writer(io, &out_file_buffer);
- var out_dir = try fs.cwd().openDir(fs.path.dirname(output_file).?, .{});
- defer out_dir.close();
+ var out_dir = try Dir.cwd().openDir(io, Dir.path.dirname(output_file).?, .{});
+ defer out_dir.close(io);
var in_file_reader = in_file.reader(io, &.{});
const input_file_bytes = try in_file_reader.interface.allocRemaining(arena, .unlimited);
var tokenizer = Tokenizer.init(input_file, input_file_bytes);
- try walk(arena, &tokenizer, out_dir, &out_file_writer.interface);
+ try walk(arena, io, &tokenizer, out_dir, &out_file_writer.interface);
try out_file_writer.end();
}
@@ -266,7 +269,7 @@ const Code = struct {
};
};
-fn walk(arena: Allocator, tokenizer: *Tokenizer, out_dir: std.fs.Dir, w: anytype) !void {
+fn walk(arena: Allocator, io: Io, tokenizer: *Tokenizer, out_dir: Dir, w: anytype) !void {
while (true) {
const token = tokenizer.next();
switch (token.id) {
@@ -384,12 +387,12 @@ fn walk(arena: Allocator, tokenizer: *Tokenizer, out_dir: std.fs.Dir, w: anytype
const basename = try std.fmt.allocPrint(arena, "{s}.zig", .{name});
- var file = out_dir.createFile(basename, .{ .exclusive = true }) catch |err| {
+ var file = out_dir.createFile(io, basename, .{ .exclusive = true }) catch |err| {
fatal("unable to create file '{s}': {s}", .{ name, @errorName(err) });
};
- defer file.close();
+ defer file.close(io);
var file_buffer: [1024]u8 = undefined;
- var file_writer = file.writer(&file_buffer);
+ var file_writer = file.writer(io, &file_buffer);
const code = &file_writer.interface;
const source = tokenizer.buffer[source_token.start..source_token.end];
diff --git a/tools/process_headers.zig b/tools/process_headers.zig
index c8bfa1ac4b..cbd94c6292 100644
--- a/tools/process_headers.zig
+++ b/tools/process_headers.zig
@@ -12,6 +12,8 @@
//! You'll then have to manually update Zig source repo with these new files.
const std = @import("std");
+const Io = std.Io;
+const Dir = std.Io.Dir;
const Arch = std.Target.Cpu.Arch;
const Abi = std.Target.Abi;
const OsTag = std.Target.Os.Tag;
@@ -128,6 +130,11 @@ const LibCVendor = enum {
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = arena.allocator();
+
+ var threaded: Io.Threaded = .init(allocator, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const args = try std.process.argsAlloc(allocator);
var search_paths = std.array_list.Managed([]const u8).init(allocator);
var opt_out_dir: ?[]const u8 = null;
@@ -232,28 +239,28 @@ pub fn main() !void {
=> &[_][]const u8{ search_path, libc_dir, "usr", "include" },
.musl => &[_][]const u8{ search_path, libc_dir, "usr", "local", "musl", "include" },
};
- const target_include_dir = try std.fs.path.join(allocator, sub_path);
+ const target_include_dir = try Dir.path.join(allocator, sub_path);
var dir_stack = std.array_list.Managed([]const u8).init(allocator);
try dir_stack.append(target_include_dir);
while (dir_stack.pop()) |full_dir_name| {
- var dir = std.fs.cwd().openDir(full_dir_name, .{ .iterate = true }) catch |err| switch (err) {
+ var dir = Dir.cwd().openDir(io, full_dir_name, .{ .iterate = true }) catch |err| switch (err) {
error.FileNotFound => continue :search,
error.AccessDenied => continue :search,
else => return err,
};
- defer dir.close();
+ defer dir.close(io);
var dir_it = dir.iterate();
- while (try dir_it.next()) |entry| {
- const full_path = try std.fs.path.join(allocator, &[_][]const u8{ full_dir_name, entry.name });
+ while (try dir_it.next(io)) |entry| {
+ const full_path = try Dir.path.join(allocator, &[_][]const u8{ full_dir_name, entry.name });
switch (entry.kind) {
.directory => try dir_stack.append(full_path),
.file, .sym_link => {
- const rel_path = try std.fs.path.relative(allocator, target_include_dir, full_path);
+ const rel_path = try Dir.path.relative(allocator, target_include_dir, full_path);
const max_size = 2 * 1024 * 1024 * 1024;
- const raw_bytes = try std.fs.cwd().readFileAlloc(full_path, allocator, .limited(max_size));
+ const raw_bytes = try Dir.cwd().readFileAlloc(io, full_path, allocator, .limited(max_size));
const trimmed = std.mem.trim(u8, raw_bytes, " \r\n\t");
total_bytes += raw_bytes.len;
const hash = try allocator.alloc(u8, 32);
@@ -266,9 +273,7 @@ pub fn main() !void {
max_bytes_saved += raw_bytes.len;
gop.value_ptr.hit_count += 1;
std.debug.print("duplicate: {s} {s} ({B})\n", .{
- libc_dir,
- rel_path,
- raw_bytes.len,
+ libc_dir, rel_path, raw_bytes.len,
});
} else {
gop.value_ptr.* = Contents{
@@ -314,7 +319,7 @@ pub fn main() !void {
total_bytes,
total_bytes - max_bytes_saved,
});
- try std.fs.cwd().makePath(out_dir);
+ try Dir.cwd().createDirPath(io, out_dir);
var missed_opportunity_bytes: usize = 0;
// iterate path_table. for each path, put all the hashes into a list. sort by hit_count.
@@ -334,9 +339,9 @@ pub fn main() !void {
const best_contents = contents_list.pop().?;
if (best_contents.hit_count > 1) {
// worth it to make it generic
- const full_path = try std.fs.path.join(allocator, &[_][]const u8{ out_dir, generic_name, path_kv.key_ptr.* });
- try std.fs.cwd().makePath(std.fs.path.dirname(full_path).?);
- try std.fs.cwd().writeFile(.{ .sub_path = full_path, .data = best_contents.bytes });
+ const full_path = try Dir.path.join(allocator, &[_][]const u8{ out_dir, generic_name, path_kv.key_ptr.* });
+ try Dir.cwd().createDirPath(io, Dir.path.dirname(full_path).?);
+ try Dir.cwd().writeFile(io, .{ .sub_path = full_path, .data = best_contents.bytes });
best_contents.is_generic = true;
while (contents_list.pop()) |contender| {
if (contender.hit_count > 1) {
@@ -355,9 +360,9 @@ pub fn main() !void {
if (contents.is_generic) continue;
const dest_target = hash_kv.key_ptr.*;
- const full_path = try std.fs.path.join(allocator, &[_][]const u8{ out_dir, dest_target, path_kv.key_ptr.* });
- try std.fs.cwd().makePath(std.fs.path.dirname(full_path).?);
- try std.fs.cwd().writeFile(.{ .sub_path = full_path, .data = contents.bytes });
+ const full_path = try Dir.path.join(allocator, &[_][]const u8{ out_dir, dest_target, path_kv.key_ptr.* });
+ try Dir.cwd().createDirPath(io, Dir.path.dirname(full_path).?);
+ try Dir.cwd().writeFile(io, .{ .sub_path = full_path, .data = contents.bytes });
}
}
}
diff --git a/tools/update-linux-headers.zig b/tools/update-linux-headers.zig
index bf9edf0753..5091b18dbe 100644
--- a/tools/update-linux-headers.zig
+++ b/tools/update-linux-headers.zig
@@ -15,6 +15,8 @@
//! You'll then have to manually update Zig source repo with these new files.
const std = @import("std");
+const Io = std.Io;
+const Dir = std.Io.Dir;
const Arch = std.Target.Cpu.Arch;
const Abi = std.Target.Abi;
const assert = std.debug.assert;
@@ -142,6 +144,11 @@ const PathTable = std.StringHashMap(*TargetToHash);
pub fn main() !void {
var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const arena = arena_state.allocator();
+
+ var threaded: Io.Threaded = .init(arena, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const args = try std.process.argsAlloc(arena);
var search_paths = std.array_list.Managed([]const u8).init(arena);
var opt_out_dir: ?[]const u8 = null;
@@ -183,30 +190,30 @@ pub fn main() !void {
.arch = linux_target.arch,
};
search: for (search_paths.items) |search_path| {
- const target_include_dir = try std.fs.path.join(arena, &.{
+ const target_include_dir = try Dir.path.join(arena, &.{
search_path, linux_target.name, "include",
});
var dir_stack = std.array_list.Managed([]const u8).init(arena);
try dir_stack.append(target_include_dir);
while (dir_stack.pop()) |full_dir_name| {
- var dir = std.fs.cwd().openDir(full_dir_name, .{ .iterate = true }) catch |err| switch (err) {
+ var dir = Dir.cwd().openDir(io, full_dir_name, .{ .iterate = true }) catch |err| switch (err) {
error.FileNotFound => continue :search,
error.AccessDenied => continue :search,
else => return err,
};
- defer dir.close();
+ defer dir.close(io);
var dir_it = dir.iterate();
- while (try dir_it.next()) |entry| {
- const full_path = try std.fs.path.join(arena, &[_][]const u8{ full_dir_name, entry.name });
+ while (try dir_it.next(io)) |entry| {
+ const full_path = try Dir.path.join(arena, &[_][]const u8{ full_dir_name, entry.name });
switch (entry.kind) {
.directory => try dir_stack.append(full_path),
.file => {
- const rel_path = try std.fs.path.relative(arena, target_include_dir, full_path);
+ const rel_path = try Dir.path.relative(arena, target_include_dir, full_path);
const max_size = 2 * 1024 * 1024 * 1024;
- const raw_bytes = try std.fs.cwd().readFileAlloc(full_path, arena, .limited(max_size));
+ const raw_bytes = try Dir.cwd().readFileAlloc(io, full_path, arena, .limited(max_size));
const trimmed = std.mem.trim(u8, raw_bytes, " \r\n\t");
total_bytes += raw_bytes.len;
const hash = try arena.alloc(u8, 32);
@@ -253,7 +260,7 @@ pub fn main() !void {
total_bytes,
total_bytes - max_bytes_saved,
});
- try std.fs.cwd().makePath(out_dir);
+ try Dir.cwd().createDirPath(io, out_dir);
var missed_opportunity_bytes: usize = 0;
// iterate path_table. for each path, put all the hashes into a list. sort by hit_count.
@@ -273,9 +280,9 @@ pub fn main() !void {
const best_contents = contents_list.pop().?;
if (best_contents.hit_count > 1) {
// worth it to make it generic
- const full_path = try std.fs.path.join(arena, &[_][]const u8{ out_dir, generic_name, path_kv.key_ptr.* });
- try std.fs.cwd().makePath(std.fs.path.dirname(full_path).?);
- try std.fs.cwd().writeFile(.{ .sub_path = full_path, .data = best_contents.bytes });
+ const full_path = try Dir.path.join(arena, &[_][]const u8{ out_dir, generic_name, path_kv.key_ptr.* });
+ try Dir.cwd().createDirPath(io, Dir.path.dirname(full_path).?);
+ try Dir.cwd().writeFile(io, .{ .sub_path = full_path, .data = best_contents.bytes });
best_contents.is_generic = true;
while (contents_list.pop()) |contender| {
if (contender.hit_count > 1) {
@@ -299,9 +306,9 @@ pub fn main() !void {
else => @tagName(dest_target.arch),
};
const out_subpath = try std.fmt.allocPrint(arena, "{s}-linux-any", .{arch_name});
- const full_path = try std.fs.path.join(arena, &[_][]const u8{ out_dir, out_subpath, path_kv.key_ptr.* });
- try std.fs.cwd().makePath(std.fs.path.dirname(full_path).?);
- try std.fs.cwd().writeFile(.{ .sub_path = full_path, .data = contents.bytes });
+ const full_path = try Dir.path.join(arena, &[_][]const u8{ out_dir, out_subpath, path_kv.key_ptr.* });
+ try Dir.cwd().createDirPath(io, Dir.path.dirname(full_path).?);
+ try Dir.cwd().writeFile(io, .{ .sub_path = full_path, .data = contents.bytes });
}
}
@@ -316,8 +323,8 @@ pub fn main() !void {
"any-linux-any/linux/netfilter_ipv6/ip6t_HL.h",
};
for (bad_files) |bad_file| {
- const full_path = try std.fs.path.join(arena, &[_][]const u8{ out_dir, bad_file });
- try std.fs.cwd().deleteFile(full_path);
+ const full_path = try Dir.path.join(arena, &[_][]const u8{ out_dir, bad_file });
+ try Dir.cwd().deleteFile(io, full_path);
}
}
diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig
index c8b941cbfa..3c7762bab6 100644
--- a/tools/update_clang_options.zig
+++ b/tools/update_clang_options.zig
@@ -10,7 +10,7 @@
//! would mean that the next parameter specifies the target.
const std = @import("std");
-const fs = std.fs;
+const Io = std.Io;
const assert = std.debug.assert;
const json = std.json;
@@ -634,8 +634,12 @@ pub fn main() anyerror!void {
const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
+ var threaded: std.Io.Threaded = .init(allocator, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
var stdout_buffer: [4000]u8 = undefined;
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout = &stdout_writer.interface;
if (args.len <= 1) printUsageAndExit(args[0]);
@@ -676,8 +680,7 @@ pub fn main() anyerror!void {
try std.fmt.allocPrint(allocator, "-I={s}/clang/include/clang/Driver", .{llvm_src_root}),
};
- const child_result = try std.process.Child.run(.{
- .allocator = allocator,
+ const child_result = try std.process.Child.run(allocator, io, .{
.argv = &child_args,
.max_output_bytes = 100 * 1024 * 1024,
});
@@ -961,8 +964,8 @@ fn objectLessThan(context: void, a: *json.ObjectMap, b: *json.ObjectMap) bool {
}
fn printUsageAndExit(arg0: []const u8) noreturn {
- const w, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
+ const stderr = std.debug.lockStderr(&.{});
+ const w = &stderr.file_writer.interface;
printUsage(w, arg0) catch std.process.exit(2);
std.process.exit(1);
}
diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig
index 2c722b8531..6880c55d05 100644
--- a/tools/update_cpu_features.zig
+++ b/tools/update_cpu_features.zig
@@ -1,6 +1,8 @@
-const std = @import("std");
const builtin = @import("builtin");
-const fs = std.fs;
+
+const std = @import("std");
+const Io = std.Io;
+const Dir = std.Io.Dir;
const mem = std.mem;
const json = std.json;
const assert = std.debug.assert;
@@ -1890,7 +1892,7 @@ pub fn main() anyerror!void {
defer arena_state.deinit();
const arena = arena_state.allocator();
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -1927,26 +1929,26 @@ pub fn main() anyerror!void {
// there shouldn't be any more argument after the optional filter
if (args.skip()) usageAndExit(args0, 1);
- var zig_src_dir = try fs.cwd().openDir(zig_src_root, .{});
- defer zig_src_dir.close();
+ var zig_src_dir = try Dir.cwd().openDir(io, zig_src_root, .{});
+ defer zig_src_dir.close(io);
- const root_progress = std.Progress.start(.{ .estimated_total_items = targets.len });
+ const root_progress = std.Progress.start(io, .{ .estimated_total_items = targets.len });
defer root_progress.end();
- var group: std.Io.Group = .init;
+ var group: Io.Group = .init;
defer group.cancel(io);
for (targets) |target| {
if (filter) |zig_name| {
if (!std.mem.eql(u8, target.zig_name, zig_name)) continue;
}
- group.async(io, processOneTarget, .{.{
+ group.async(io, processOneTarget, .{ io, .{
.llvm_tblgen_exe = llvm_tblgen_exe,
.llvm_src_root = llvm_src_root,
.zig_src_dir = zig_src_dir,
.root_progress = root_progress,
.target = target,
- }});
+ } });
}
group.wait(io);
@@ -1955,12 +1957,12 @@ pub fn main() anyerror!void {
const Job = struct {
llvm_tblgen_exe: []const u8,
llvm_src_root: []const u8,
- zig_src_dir: std.fs.Dir,
+ zig_src_dir: Dir,
root_progress: std.Progress.Node,
target: ArchTarget,
};
-fn processOneTarget(job: Job) void {
+fn processOneTarget(io: Io, job: Job) void {
errdefer |err| std.debug.panic("panic: {s}", .{@errorName(err)});
const target = job.target;
@@ -1992,8 +1994,7 @@ fn processOneTarget(job: Job) void {
}),
};
- const child_result = try std.process.Child.run(.{
- .allocator = arena,
+ const child_result = try std.process.Child.run(arena, io, .{
.argv = &child_args,
.max_output_bytes = 500 * 1024 * 1024,
});
@@ -2240,15 +2241,15 @@ fn processOneTarget(job: Job) void {
const render_progress = progress_node.start("rendering Zig code", 0);
- var target_dir = try job.zig_src_dir.openDir("lib/std/Target", .{});
- defer target_dir.close();
+ var target_dir = try job.zig_src_dir.openDir(io, "lib/std/Target", .{});
+ defer target_dir.close(io);
const zig_code_basename = try std.fmt.allocPrint(arena, "{s}.zig", .{target.zig_name});
- var zig_code_file = try target_dir.createFile(zig_code_basename, .{});
- defer zig_code_file.close();
+ var zig_code_file = try target_dir.createFile(io, zig_code_basename, .{});
+ defer zig_code_file.close(io);
var zig_code_file_buffer: [4096]u8 = undefined;
- var zig_code_file_writer = zig_code_file.writer(&zig_code_file_buffer);
+ var zig_code_file_writer = zig_code_file.writer(io, &zig_code_file_buffer);
const w = &zig_code_file_writer.interface;
try w.writeAll(
@@ -2424,8 +2425,9 @@ fn processOneTarget(job: Job) void {
}
fn usageAndExit(arg0: []const u8, code: u8) noreturn {
- const stderr, _ = std.debug.lockStderrWriter(&.{});
- stderr.print(
+ const stderr = std.debug.lockStderr(&.{});
+ const w = &stderr.file_writer.interface;
+ w.print(
\\Usage: {s} /path/to/llvm-tblgen /path/git/llvm-project /path/git/zig [zig_name filter]
\\
\\Updates lib/std/target/<target>.zig from llvm/lib/Target/<Target>/<Target>.td .
diff --git a/tools/update_crc_catalog.zig b/tools/update_crc_catalog.zig
index 4c8614e02f..29856aacf8 100644
--- a/tools/update_crc_catalog.zig
+++ b/tools/update_crc_catalog.zig
@@ -1,5 +1,6 @@
const std = @import("std");
-const fs = std.fs;
+const Io = std.Io;
+const Dir = std.Io.Dir;
const mem = std.mem;
const ascii = std.ascii;
@@ -10,27 +11,31 @@ pub fn main() anyerror!void {
defer arena_state.deinit();
const arena = arena_state.allocator();
+ var threaded: Io.Threaded = .init(arena, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const args = try std.process.argsAlloc(arena);
if (args.len <= 1) printUsageAndExit(args[0]);
const zig_src_root = args[1];
if (mem.startsWith(u8, zig_src_root, "-")) printUsageAndExit(args[0]);
- var zig_src_dir = try fs.cwd().openDir(zig_src_root, .{});
- defer zig_src_dir.close();
+ var zig_src_dir = try Dir.cwd().openDir(io, zig_src_root, .{});
+ defer zig_src_dir.close(io);
- const hash_sub_path = try fs.path.join(arena, &.{ "lib", "std", "hash" });
- var hash_target_dir = try zig_src_dir.makeOpenPath(hash_sub_path, .{});
- defer hash_target_dir.close();
+ const hash_sub_path = try Dir.path.join(arena, &.{ "lib", "std", "hash" });
+ var hash_target_dir = try zig_src_dir.createDirPathOpen(io, hash_sub_path, .{});
+ defer hash_target_dir.close(io);
- const crc_sub_path = try fs.path.join(arena, &.{ "lib", "std", "hash", "crc" });
- var crc_target_dir = try zig_src_dir.makeOpenPath(crc_sub_path, .{});
- defer crc_target_dir.close();
+ const crc_sub_path = try Dir.path.join(arena, &.{ "lib", "std", "hash", "crc" });
+ var crc_target_dir = try zig_src_dir.createDirPathOpen(io, crc_sub_path, .{});
+ defer crc_target_dir.close(io);
- var zig_code_file = try hash_target_dir.createFile("crc.zig", .{});
- defer zig_code_file.close();
+ var zig_code_file = try hash_target_dir.createFile(io, "crc.zig", .{});
+ defer zig_code_file.close(io);
var zig_code_file_buffer: [4096]u8 = undefined;
- var zig_code_file_writer = zig_code_file.writer(&zig_code_file_buffer);
+ var zig_code_file_writer = zig_code_file.writer(io, &zig_code_file_buffer);
const code_writer = &zig_code_file_writer.interface;
try code_writer.writeAll(
@@ -51,10 +56,10 @@ pub fn main() anyerror!void {
\\
);
- var zig_test_file = try crc_target_dir.createFile("test.zig", .{});
- defer zig_test_file.close();
+ var zig_test_file = try crc_target_dir.createFile(io, "test.zig", .{});
+ defer zig_test_file.close(io);
var zig_test_file_buffer: [4096]u8 = undefined;
- var zig_test_file_writer = zig_test_file.writer(&zig_test_file_buffer);
+ var zig_test_file_writer = zig_test_file.writer(io, &zig_test_file_buffer);
const test_writer = &zig_test_file_writer.interface;
try test_writer.writeAll(
@@ -190,8 +195,8 @@ pub fn main() anyerror!void {
}
fn printUsageAndExit(arg0: []const u8) noreturn {
- const w, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
+ const stderr = std.debug.lockStderr(&.{});
+ const w = &stderr.file_writer.interface;
printUsage(w, arg0) catch std.process.exit(2);
std.process.exit(1);
}
diff --git a/tools/update_freebsd_libc.zig b/tools/update_freebsd_libc.zig
index 420147e0df..d50364351f 100644
--- a/tools/update_freebsd_libc.zig
+++ b/tools/update_freebsd_libc.zig
@@ -5,6 +5,7 @@
//! `zig run tools/update_freebsd_libc.zig -- ~/Downloads/freebsd-src .`
const std = @import("std");
+const Io = std.Io;
const exempt_files = [_][]const u8{
// This file is maintained by a separate project and does not come from FreeBSD.
@@ -16,29 +17,31 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
+ var threaded: Io.Threaded = .init(arena, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const args = try std.process.argsAlloc(arena);
const freebsd_src_path = args[1];
const zig_src_path = args[2];
const dest_dir_path = try std.fmt.allocPrint(arena, "{s}/lib/libc/freebsd", .{zig_src_path});
- var dest_dir = std.fs.cwd().openDir(dest_dir_path, .{ .iterate = true }) catch |err| {
- std.log.err("unable to open destination directory '{s}': {s}", .{
- dest_dir_path, @errorName(err),
- });
+ var dest_dir = Io.Dir.cwd().openDir(io, dest_dir_path, .{ .iterate = true }) catch |err| {
+ std.log.err("unable to open destination directory '{s}': {t}", .{ dest_dir_path, err });
std.process.exit(1);
};
- defer dest_dir.close();
+ defer dest_dir.close(io);
- var freebsd_src_dir = try std.fs.cwd().openDir(freebsd_src_path, .{});
- defer freebsd_src_dir.close();
+ var freebsd_src_dir = try Io.Dir.cwd().openDir(io, freebsd_src_path, .{});
+ defer freebsd_src_dir.close(io);
// Copy updated files from upstream.
{
var walker = try dest_dir.walk(arena);
defer walker.deinit();
- walk: while (try walker.next()) |entry| {
+ walk: while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
if (std.mem.startsWith(u8, entry.basename, ".")) continue;
for (exempt_files) |p| {
@@ -46,18 +49,15 @@ pub fn main() !void {
}
std.log.info("updating '{s}/{s}' from '{s}/{s}'", .{
- dest_dir_path, entry.path,
- freebsd_src_path, entry.path,
+ dest_dir_path, entry.path, freebsd_src_path, entry.path,
});
- freebsd_src_dir.copyFile(entry.path, dest_dir, entry.path, .{}) catch |err| {
- std.log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {s}", .{
- freebsd_src_path, entry.path,
- dest_dir_path, entry.path,
- @errorName(err),
+ freebsd_src_dir.copyFile(entry.path, dest_dir, entry.path, io, .{}) catch |err| {
+ std.log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {t}", .{
+ freebsd_src_path, entry.path, dest_dir_path, entry.path, err,
});
if (err == error.FileNotFound) {
- try dest_dir.deleteFile(entry.path);
+ try dest_dir.deleteFile(io, entry.path);
}
};
}
diff --git a/tools/update_glibc.zig b/tools/update_glibc.zig
index cebd593fe1..296d677d45 100644
--- a/tools/update_glibc.zig
+++ b/tools/update_glibc.zig
@@ -7,9 +7,11 @@
//! `zig run ../tools/update_glibc.zig -- ~/Downloads/glibc ..`
const std = @import("std");
+const Io = std.Io;
+const Dir = std.Io.Dir;
const mem = std.mem;
const log = std.log;
-const fs = std.fs;
+const fatal = std.process.fatal;
const exempt_files = [_][]const u8{
// This file is maintained by a separate project and does not come from glibc.
@@ -41,28 +43,30 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
+ var threaded: Io.Threaded = .init(arena, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const args = try std.process.argsAlloc(arena);
const glibc_src_path = args[1];
const zig_src_path = args[2];
const dest_dir_path = try std.fmt.allocPrint(arena, "{s}/lib/libc/glibc", .{zig_src_path});
- var dest_dir = fs.cwd().openDir(dest_dir_path, .{ .iterate = true }) catch |err| {
- fatal("unable to open destination directory '{s}': {s}", .{
- dest_dir_path, @errorName(err),
- });
+ var dest_dir = Dir.cwd().openDir(io, dest_dir_path, .{ .iterate = true }) catch |err| {
+ fatal("unable to open destination directory '{s}': {t}", .{ dest_dir_path, err });
};
- defer dest_dir.close();
+ defer dest_dir.close(io);
- var glibc_src_dir = try fs.cwd().openDir(glibc_src_path, .{});
- defer glibc_src_dir.close();
+ var glibc_src_dir = try Dir.cwd().openDir(io, glibc_src_path, .{});
+ defer glibc_src_dir.close(io);
// Copy updated files from upstream.
{
var walker = try dest_dir.walk(arena);
defer walker.deinit();
- walk: while (try walker.next()) |entry| {
+ walk: while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
if (mem.startsWith(u8, entry.basename, ".")) continue;
for (exempt_files) |p| {
@@ -72,14 +76,12 @@ pub fn main() !void {
if (mem.endsWith(u8, entry.path, ext)) continue :walk;
}
- glibc_src_dir.copyFile(entry.path, dest_dir, entry.path, .{}) catch |err| {
- log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {s}", .{
- glibc_src_path, entry.path,
- dest_dir_path, entry.path,
- @errorName(err),
+ glibc_src_dir.copyFile(entry.path, dest_dir, entry.path, io, .{}) catch |err| {
+ log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {t}", .{
+ glibc_src_path, entry.path, dest_dir_path, entry.path, err,
});
if (err == error.FileNotFound) {
- try dest_dir.deleteFile(entry.path);
+ try dest_dir.deleteFile(io, entry.path);
}
};
}
@@ -88,25 +90,23 @@ pub fn main() !void {
// Warn about duplicated files inside glibc/include/* that can be omitted
// because they are already in generic-glibc/*.
- var include_dir = dest_dir.openDir("include", .{ .iterate = true }) catch |err| {
- fatal("unable to open directory '{s}/include': {s}", .{
- dest_dir_path, @errorName(err),
- });
+ var include_dir = dest_dir.openDir(io, "include", .{ .iterate = true }) catch |err| {
+ fatal("unable to open directory '{s}/include': {t}", .{ dest_dir_path, err });
};
- defer include_dir.close();
+ defer include_dir.close(io);
const generic_glibc_path = try std.fmt.allocPrint(
arena,
"{s}/lib/libc/include/generic-glibc",
.{zig_src_path},
);
- var generic_glibc_dir = try fs.cwd().openDir(generic_glibc_path, .{});
- defer generic_glibc_dir.close();
+ var generic_glibc_dir = try Dir.cwd().openDir(io, generic_glibc_path, .{});
+ defer generic_glibc_dir.close(io);
var walker = try include_dir.walk(arena);
defer walker.deinit();
- walk: while (try walker.next()) |entry| {
+ walk: while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
if (mem.startsWith(u8, entry.basename, ".")) continue;
for (exempt_files) |p| {
@@ -116,23 +116,21 @@ pub fn main() !void {
const max_file_size = 10 * 1024 * 1024;
const generic_glibc_contents = generic_glibc_dir.readFileAlloc(
+ io,
entry.path,
arena,
.limited(max_file_size),
) catch |err| switch (err) {
error.FileNotFound => continue,
- else => |e| fatal("unable to load '{s}/include/{s}': {s}", .{
- generic_glibc_path, entry.path, @errorName(e),
- }),
+ else => |e| fatal("unable to load '{s}/include/{s}': {t}", .{ generic_glibc_path, entry.path, e }),
};
const glibc_include_contents = include_dir.readFileAlloc(
+ io,
entry.path,
arena,
.limited(max_file_size),
) catch |err| {
- fatal("unable to load '{s}/include/{s}': {s}", .{
- dest_dir_path, entry.path, @errorName(err),
- });
+ fatal("unable to load '{s}/include/{s}': {t}", .{ dest_dir_path, entry.path, err });
};
const whitespace = " \r\n\t";
@@ -140,14 +138,8 @@ pub fn main() !void {
const glibc_include_trimmed = mem.trim(u8, glibc_include_contents, whitespace);
if (mem.eql(u8, generic_glibc_trimmed, glibc_include_trimmed)) {
log.warn("same contents: '{s}/include/{s}' and '{s}/include/{s}'", .{
- generic_glibc_path, entry.path,
- dest_dir_path, entry.path,
+ generic_glibc_path, entry.path, dest_dir_path, entry.path,
});
}
}
}
-
-fn fatal(comptime format: []const u8, args: anytype) noreturn {
- log.err(format, args);
- std.process.exit(1);
-}
diff --git a/tools/update_mingw.zig b/tools/update_mingw.zig
index 6288807cf1..678c3dbdca 100644
--- a/tools/update_mingw.zig
+++ b/tools/update_mingw.zig
@@ -1,35 +1,41 @@
const std = @import("std");
+const Io = std.Io;
+const Dir = std.Io.Dir;
pub fn main() !void {
var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_instance.deinit();
const arena = arena_instance.allocator();
+ var threaded: Io.Threaded = .init(arena, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const args = try std.process.argsAlloc(arena);
const zig_src_lib_path = args[1];
const mingw_src_path = args[2];
- const dest_mingw_crt_path = try std.fs.path.join(arena, &.{
+ const dest_mingw_crt_path = try Dir.path.join(arena, &.{
zig_src_lib_path, "libc", "mingw",
});
- const src_mingw_crt_path = try std.fs.path.join(arena, &.{
+ const src_mingw_crt_path = try Dir.path.join(arena, &.{
mingw_src_path, "mingw-w64-crt",
});
// Update only the set of existing files we have already chosen to include
// in zig's installation.
- var dest_crt_dir = std.fs.cwd().openDir(dest_mingw_crt_path, .{ .iterate = true }) catch |err| {
- std.log.err("unable to open directory '{s}': {s}", .{ dest_mingw_crt_path, @errorName(err) });
+ var dest_crt_dir = Dir.cwd().openDir(io, dest_mingw_crt_path, .{ .iterate = true }) catch |err| {
+ std.log.err("unable to open directory '{s}': {t}", .{ dest_mingw_crt_path, err });
std.process.exit(1);
};
- defer dest_crt_dir.close();
+ defer dest_crt_dir.close(io);
- var src_crt_dir = std.fs.cwd().openDir(src_mingw_crt_path, .{ .iterate = true }) catch |err| {
- std.log.err("unable to open directory '{s}': {s}", .{ src_mingw_crt_path, @errorName(err) });
+ var src_crt_dir = Dir.cwd().openDir(io, src_mingw_crt_path, .{ .iterate = true }) catch |err| {
+ std.log.err("unable to open directory '{s}': {t}", .{ src_mingw_crt_path, err });
std.process.exit(1);
};
- defer src_crt_dir.close();
+ defer src_crt_dir.close(io);
{
var walker = try dest_crt_dir.walk(arena);
@@ -37,10 +43,10 @@ pub fn main() !void {
var fail = false;
- while (try walker.next()) |entry| {
+ while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
- src_crt_dir.copyFile(entry.path, dest_crt_dir, entry.path, .{}) catch |err| switch (err) {
+ src_crt_dir.copyFile(entry.path, dest_crt_dir, entry.path, io, .{}) catch |err| switch (err) {
error.FileNotFound => {
const keep = for (kept_crt_files) |item| {
if (std.mem.eql(u8, entry.path, item)) break true;
@@ -49,11 +55,11 @@ pub fn main() !void {
if (!keep) {
std.log.warn("deleting {s}", .{entry.path});
- try dest_crt_dir.deleteFile(entry.path);
+ try dest_crt_dir.deleteFile(io, entry.path);
}
},
else => {
- std.log.err("unable to copy {s}: {s}", .{ entry.path, @errorName(err) });
+ std.log.err("unable to copy {s}: {t}", .{ entry.path, err });
fail = true;
},
};
@@ -63,24 +69,24 @@ pub fn main() !void {
}
{
- const dest_mingw_winpthreads_path = try std.fs.path.join(arena, &.{
+ const dest_mingw_winpthreads_path = try Dir.path.join(arena, &.{
zig_src_lib_path, "libc", "mingw", "winpthreads",
});
- const src_mingw_libraries_winpthreads_src_path = try std.fs.path.join(arena, &.{
+ const src_mingw_libraries_winpthreads_src_path = try Dir.path.join(arena, &.{
mingw_src_path, "mingw-w64-libraries", "winpthreads", "src",
});
- var dest_winpthreads_dir = std.fs.cwd().openDir(dest_mingw_winpthreads_path, .{ .iterate = true }) catch |err| {
+ var dest_winpthreads_dir = Dir.cwd().openDir(io, dest_mingw_winpthreads_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open directory '{s}': {s}", .{ dest_mingw_winpthreads_path, @errorName(err) });
std.process.exit(1);
};
- defer dest_winpthreads_dir.close();
+ defer dest_winpthreads_dir.close(io);
- var src_winpthreads_dir = std.fs.cwd().openDir(src_mingw_libraries_winpthreads_src_path, .{ .iterate = true }) catch |err| {
+ var src_winpthreads_dir = Dir.cwd().openDir(io, src_mingw_libraries_winpthreads_src_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open directory '{s}': {s}", .{ src_mingw_libraries_winpthreads_src_path, @errorName(err) });
std.process.exit(1);
};
- defer src_winpthreads_dir.close();
+ defer src_winpthreads_dir.close(io);
{
var walker = try dest_winpthreads_dir.walk(arena);
@@ -88,16 +94,16 @@ pub fn main() !void {
var fail = false;
- while (try walker.next()) |entry| {
+ while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
- src_winpthreads_dir.copyFile(entry.path, dest_winpthreads_dir, entry.path, .{}) catch |err| switch (err) {
+ src_winpthreads_dir.copyFile(entry.path, dest_winpthreads_dir, entry.path, io, .{}) catch |err| switch (err) {
error.FileNotFound => {
std.log.warn("deleting {s}", .{entry.path});
- try dest_winpthreads_dir.deleteFile(entry.path);
+ try dest_winpthreads_dir.deleteFile(io, entry.path);
},
else => {
- std.log.err("unable to copy {s}: {s}", .{ entry.path, @errorName(err) });
+ std.log.err("unable to copy {s}: {t}", .{ entry.path, err });
fail = true;
},
};
@@ -114,17 +120,17 @@ pub fn main() !void {
var fail = false;
- while (try walker.next()) |entry| {
+ while (try walker.next(io)) |entry| {
switch (entry.kind) {
.directory => {
switch (entry.depth()) {
1 => if (def_dirs.has(entry.basename)) {
- try walker.enter(entry);
+ try walker.enter(io, entry);
continue;
},
else => {
// The top-level directory was already validated
- try walker.enter(entry);
+ try walker.enter(io, entry);
continue;
},
}
@@ -151,20 +157,20 @@ pub fn main() !void {
if (std.mem.endsWith(u8, entry.basename, "_onecore.def"))
continue;
- src_crt_dir.copyFile(entry.path, dest_crt_dir, entry.path, .{}) catch |err| {
- std.log.err("unable to copy {s}: {s}", .{ entry.path, @errorName(err) });
+ src_crt_dir.copyFile(entry.path, dest_crt_dir, entry.path, io, .{}) catch |err| {
+ std.log.err("unable to copy {s}: {t}", .{ entry.path, err });
fail = true;
};
}
if (fail) std.process.exit(1);
}
- return std.process.cleanExit();
+ return std.process.cleanExit(io);
}
const kept_crt_files = [_][]const u8{
"COPYING",
- "include" ++ std.fs.path.sep_str ++ "config.h",
+ "include" ++ Dir.path.sep_str ++ "config.h",
};
const def_exts = [_][]const u8{
diff --git a/tools/update_netbsd_libc.zig b/tools/update_netbsd_libc.zig
index 7bfe99b094..a5eeca35c7 100644
--- a/tools/update_netbsd_libc.zig
+++ b/tools/update_netbsd_libc.zig
@@ -5,6 +5,7 @@
//! `zig run tools/update_netbsd_libc.zig -- ~/Downloads/netbsd-src .`
const std = @import("std");
+const Io = std.Io;
const exempt_files = [_][]const u8{
// This file is maintained by a separate project and does not come from NetBSD.
@@ -16,29 +17,31 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
+ var threaded: Io.Threaded = .init(arena, .{});
+ defer threaded.deinit();
+ const io = threaded.io();
+
const args = try std.process.argsAlloc(arena);
const netbsd_src_path = args[1];
const zig_src_path = args[2];
const dest_dir_path = try std.fmt.allocPrint(arena, "{s}/lib/libc/netbsd", .{zig_src_path});
- var dest_dir = std.fs.cwd().openDir(dest_dir_path, .{ .iterate = true }) catch |err| {
- std.log.err("unable to open destination directory '{s}': {s}", .{
- dest_dir_path, @errorName(err),
- });
+ var dest_dir = Io.Dir.cwd().openDir(io, dest_dir_path, .{ .iterate = true }) catch |err| {
+ std.log.err("unable to open destination directory '{s}': {t}", .{ dest_dir_path, err });
std.process.exit(1);
};
- defer dest_dir.close();
+ defer dest_dir.close(io);
- var netbsd_src_dir = try std.fs.cwd().openDir(netbsd_src_path, .{});
- defer netbsd_src_dir.close();
+ var netbsd_src_dir = try Io.Dir.cwd().openDir(io, netbsd_src_path, .{});
+ defer netbsd_src_dir.close(io);
// Copy updated files from upstream.
{
var walker = try dest_dir.walk(arena);
defer walker.deinit();
- walk: while (try walker.next()) |entry| {
+ walk: while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
if (std.mem.startsWith(u8, entry.basename, ".")) continue;
for (exempt_files) |p| {
@@ -50,14 +53,12 @@ pub fn main() !void {
netbsd_src_path, entry.path,
});
- netbsd_src_dir.copyFile(entry.path, dest_dir, entry.path, .{}) catch |err| {
- std.log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {s}", .{
- netbsd_src_path, entry.path,
- dest_dir_path, entry.path,
- @errorName(err),
+ netbsd_src_dir.copyFile(entry.path, dest_dir, entry.path, io, .{}) catch |err| {
+ std.log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {t}", .{
+ netbsd_src_path, entry.path, dest_dir_path, entry.path, err,
});
if (err == error.FileNotFound) {
- try dest_dir.deleteFile(entry.path);
+ try dest_dir.deleteFile(io, entry.path);
}
};
}