From aafddc2ea13e40a8262d9378aeca2e097a37ac03 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 Dec 2025 19:08:37 -0800 Subject: update all occurrences of close() to close(io) --- lib/std/Build/WebServer.zig | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'lib/std/Build/WebServer.zig') diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 2c865a8889..f91075b444 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -129,6 +129,7 @@ pub fn init(opts: Options) WebServer { } pub fn deinit(ws: *WebServer) void { const gpa = ws.gpa; + const io = ws.graph.io; gpa.free(ws.step_names_trailing); gpa.free(ws.step_status_bits); @@ -139,7 +140,7 @@ pub fn deinit(ws: *WebServer) void { gpa.free(ws.time_report_update_times); if (ws.serve_thread) |t| { - if (ws.tcp_server) |*s| s.stream.close(); + if (ws.tcp_server) |*s| s.stream.close(io); t.join(); } if (ws.tcp_server) |*s| s.deinit(); @@ -507,7 +508,7 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons log.err("failed to open '{f}': {s}", .{ path, @errorName(err) }); continue; }; - defer file.close(); + defer file.close(io); const stat = try file.stat(); var read_buffer: [1024]u8 = undefined; var file_reader: Io.File.Reader = .initSize(file.adaptToNewApi(), io, &read_buffer, stat.size); @@ -634,7 +635,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim } // Send EOF to stdin. - child.stdin.?.close(); + child.stdin.?.close(io); child.stdin = null; switch (try child.wait()) { -- cgit v1.2.3 From 8328de24f13e21e325207b19288a143854df50df Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 6 Dec 2025 17:52:57 -0800 Subject: update all occurrences of openFile to receive an io instance --- lib/compiler/aro/aro/Compilation.zig | 4 +-- lib/compiler/aro/aro/Driver/Filesystem.zig | 2 +- lib/compiler/aro/aro/Toolchain.zig | 6 ++-- lib/compiler/objcopy.zig | 2 +- lib/compiler/std-docs.zig | 2 +- lib/std/Build/Cache.zig | 8 ++++-- lib/std/Build/Cache/Path.zig | 8 ++---- lib/std/Build/Fuzz.zig | 4 +-- lib/std/Build/Step/Run.zig | 6 ++-- lib/std/Build/Step/UpdateSourceFiles.zig | 2 +- lib/std/Build/Step/WriteFile.zig | 6 ++-- lib/std/Build/WebServer.zig | 4 +-- lib/std/Io/Dir.zig | 2 +- lib/std/Io/test.zig | 2 +- lib/std/Thread.zig | 4 +-- lib/std/crypto/Certificate/Bundle.zig | 2 +- lib/std/debug/ElfFile.zig | 2 +- lib/std/debug/Info.zig | 2 +- lib/std/debug/MachOFile.zig | 2 +- lib/std/debug/SelfInfo/Elf.zig | 4 +-- lib/std/debug/SelfInfo/MachO.zig | 2 +- lib/std/debug/SelfInfo/Windows.zig | 6 ++-- lib/std/fs.zig | 17 ----------- lib/std/fs/test.zig | 46 +++++++++++++++--------------- lib/std/os/linux/IoUring.zig | 6 ++-- lib/std/posix/test.zig | 8 +++--- lib/std/zig/system.zig | 6 ++-- src/Compilation.zig | 8 +++--- src/Package/Fetch.zig | 6 ++-- src/Package/Fetch/git.zig | 2 +- src/Zcu.zig | 2 +- src/Zcu/PerThread.zig | 4 +-- src/fmt.zig | 2 +- src/introspect.zig | 4 +-- src/link.zig | 24 ++++++++-------- src/link/Coff.zig | 4 +-- src/link/Elf2.zig | 14 +++++---- src/link/MachO.zig | 8 ++++-- src/link/MachO/relocatable.zig | 7 +++-- src/link/MappedFile.zig | 2 +- src/main.zig | 10 +++---- 41 files changed, 124 insertions(+), 138 deletions(-) (limited to 'lib/std/Build/WebServer.zig') diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index c31caefb0f..09e4861d13 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -1641,7 +1641,7 @@ fn 
addSourceFromPathExtra(comp: *Compilation, path: []const u8, kind: Source.Kin const io = comp.io; - const file = try comp.cwd.openFile(path, .{}); + const file = try comp.cwd.openFile(io, path, .{}); defer file.close(io); return comp.addSourceFromFile(file, path, kind); } @@ -1975,7 +1975,7 @@ fn getPathContents(comp: *Compilation, path: []const u8, limit: Io.Limit) ![]u8 const io = comp.io; - const file = try comp.cwd.openFile(path, .{}); + const file = try comp.cwd.openFile(io, path, .{}); defer file.close(io); return comp.getFileContents(file, limit); } diff --git a/lib/compiler/aro/aro/Driver/Filesystem.zig b/lib/compiler/aro/aro/Driver/Filesystem.zig index 19ac9bfe41..b0bdbb7e21 100644 --- a/lib/compiler/aro/aro/Driver/Filesystem.zig +++ b/lib/compiler/aro/aro/Driver/Filesystem.zig @@ -213,7 +213,7 @@ pub const Filesystem = union(enum) { pub fn readFile(fs: Filesystem, io: Io, path: []const u8, buf: []u8) ?[]const u8 { return switch (fs) { .real => |cwd| { - const file = cwd.openFile(path, .{}) catch return null; + const file = cwd.openFile(io, path, .{}) catch return null; defer file.close(io); const bytes_read = file.readAll(buf) catch return null; diff --git a/lib/compiler/aro/aro/Toolchain.zig b/lib/compiler/aro/aro/Toolchain.zig index ae84369205..95a8baba77 100644 --- a/lib/compiler/aro/aro/Toolchain.zig +++ b/lib/compiler/aro/aro/Toolchain.zig @@ -524,12 +524,12 @@ pub fn addBuiltinIncludeDir(tc: *const Toolchain) !void { /// Otherwise returns a slice of `buf`. If the file is larger than `buf` partial contents are returned pub fn readFile(tc: *const Toolchain, path: []const u8, buf: []u8) ?[]const u8 { const comp = tc.driver.comp; - return comp.cwd.adaptToNewApi().readFile(comp.io, path, buf) catch null; + return comp.cwd.readFile(comp.io, path, buf) catch null; } pub fn exists(tc: *const Toolchain, path: []const u8) bool { const comp = tc.driver.comp; - comp.cwd.adaptToNewApi().access(comp.io, path, .{}) catch return false; + comp.cwd.access(comp.io, path, .{}) catch return false; return true; } @@ -547,7 +547,7 @@ pub fn canExecute(tc: *const Toolchain, path: []const u8) bool { } const comp = tc.driver.comp; - comp.cwd.adaptToNewApi().access(comp.io, path, .{ .execute = true }) catch return false; + comp.cwd.access(comp.io, path, .{ .execute = true }) catch return false; // Todo: ensure path is not a directory return true; } diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index c360ea8df0..485e644daa 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -157,7 +157,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void const stat = input_file.stat() catch |err| fatal("failed to stat {s}: {t}", .{ input, err }); - var in: File.Reader = .initSize(input_file.adaptToNewApi(), io, &input_buffer, stat.size); + var in: File.Reader = .initSize(input_file, io, &input_buffer, stat.size); const elf_hdr = std.elf.Header.read(&in.interface) catch |err| switch (err) { error.ReadFailed => fatal("unable to read {s}: {t}", .{ input, in.err.? 
}), diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index f1ca7fb5bb..e4efac28cd 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -225,7 +225,7 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void { }, else => continue, } - var file = try entry.dir.openFile(entry.basename, .{}); + var file = try entry.dir.openFile(io, entry.basename, .{}); defer file.close(io); const stat = try file.stat(); var file_reader: std.Io.File.Reader = .{ diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index b06547dc53..42459c033d 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -502,6 +502,8 @@ pub const Manifest = struct { @memcpy(manifest_file_path[0..self.hex_digest.len], &self.hex_digest); manifest_file_path[hex_digest_len..][0..ext.len].* = ext.*; + const io = self.cache.io; + // We'll try to open the cache with an exclusive lock, but if that would block // and `want_shared_lock` is set, a shared lock might be sufficient, so we'll // open with a shared lock instead. @@ -517,7 +519,7 @@ pub const Manifest = struct { break; } else |err| switch (err) { error.WouldBlock => { - self.manifest_file = self.cache.manifest_dir.openFile(&manifest_file_path, .{ + self.manifest_file = self.cache.manifest_dir.openFile(io, &manifest_file_path, .{ .mode = .read_write, .lock = .shared, }) catch |e| { @@ -757,7 +759,7 @@ pub const Manifest = struct { const pp = cache_hash_file.prefixed_path; const dir = self.cache.prefixes()[pp.prefix].handle; - const this_file = dir.openFile(pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) { + const this_file = dir.openFile(io, pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) { error.FileNotFound => { // Every digest before this one has been populated successfully. 
return .{ .miss = .{ .file_digests_populated = idx } }; @@ -900,7 +902,7 @@ pub const Manifest = struct { } else { const pp = ch_file.prefixed_path; const dir = self.cache.prefixes()[pp.prefix].handle; - const handle = try dir.openFile(pp.sub_path, .{}); + const handle = try dir.openFile(io, pp.sub_path, .{}); defer handle.close(io); return populateFileHashHandle(self, ch_file, handle); } diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index f6f76c1e8f..60211670de 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -59,18 +59,14 @@ pub fn joinStringZ(p: Path, gpa: Allocator, sub_path: []const u8) Allocator.Erro return p.root_dir.joinZ(gpa, parts); } -pub fn openFile( - p: Path, - sub_path: []const u8, - flags: Io.File.OpenFlags, -) !Io.File { +pub fn openFile(p: Path, io: Io, sub_path: []const u8, flags: Io.File.OpenFlags) !Io.File { var buf: [fs.max_path_bytes]u8 = undefined; const joined_path = if (p.sub_path.len == 0) sub_path else p: { break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{ p.sub_path, sub_path, }) catch return error.NameTooLong; }; - return p.root_dir.handle.openFile(joined_path, flags); + return p.root_dir.handle.openFile(io, joined_path, flags); } pub fn openDir( diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig index db83f393fd..c95c9bd354 100644 --- a/lib/std/Build/Fuzz.zig +++ b/lib/std/Build/Fuzz.zig @@ -405,7 +405,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO .root_dir = run_step.step.owner.cache_root, .sub_path = "v/" ++ std.fmt.hex(coverage_id), }; - var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| { + var coverage_file = coverage_file_path.root_dir.handle.openFile(io, coverage_file_path.sub_path, .{}) catch |err| { log.err("step '{s}': failed to load coverage file '{f}': {t}", .{ run_step.step.name, coverage_file_path, err, }); @@ -528,7 +528,7 @@ pub fn waitAndPrintReport(fuzz: *Fuzz) void { .root_dir = cov.run.step.owner.cache_root, .sub_path = "v/" ++ std.fmt.hex(cov.id), }; - var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| { + var coverage_file = coverage_file_path.root_dir.handle.openFile(io, coverage_file_path.sub_path, .{}) catch |err| { fatal("step '{s}': failed to load coverage file '{f}': {t}", .{ cov.run.step.name, coverage_file_path, err, }); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 1df6f42a35..7c54c8048e 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -846,7 +846,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { errdefer result.deinit(); result.writer.writeAll(file_plp.prefix) catch return error.OutOfMemory; - const file = file_path.root_dir.handle.openFile(file_path.subPathOrDot(), .{}) catch |err| { + const file = file_path.root_dir.handle.openFile(io, file_path.subPathOrDot(), .{}) catch |err| { return step.fail( "unable to open input file '{f}': {t}", .{ file_path, err }, @@ -1111,7 +1111,7 @@ pub fn rerunInFuzzMode( errdefer result.deinit(); result.writer.writeAll(file_plp.prefix) catch return error.OutOfMemory; - const file = try file_path.root_dir.handle.openFile(file_path.subPathOrDot(), .{}); + const file = try file_path.root_dir.handle.openFile(io, file_path.subPathOrDot(), .{}); defer file.close(io); var buf: [1024]u8 = undefined; @@ -2185,7 +2185,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult { 
}, .lazy_path => |lazy_path| { const path = lazy_path.getPath3(b, &run.step); - const file = path.root_dir.handle.openFile(path.subPathOrDot(), .{}) catch |err| { + const file = path.root_dir.handle.openFile(io, path.subPathOrDot(), .{}) catch |err| { return run.step.fail("unable to open stdin file: {s}", .{@errorName(err)}); }; defer file.close(io); diff --git a/lib/std/Build/Step/UpdateSourceFiles.zig b/lib/std/Build/Step/UpdateSourceFiles.zig index 7cdb521d21..f5d95182e9 100644 --- a/lib/std/Build/Step/UpdateSourceFiles.zig +++ b/lib/std/Build/Step/UpdateSourceFiles.zig @@ -99,7 +99,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .cwd(), io, source_path, - b.build_root.handle.adaptToNewApi(), + b.build_root.handle, output_source_file.sub_path, .{}, ) catch |err| { diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 201b132271..2834f18564 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -284,7 +284,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { }, .copy => |file_source| { const source_path = file_source.getPath2(b, step); - const prev_status = Io.Dir.updateFile(.cwd(), io, source_path, cache_dir.adaptToNewApi(), file.sub_path, .{}) catch |err| { + const prev_status = Io.Dir.updateFile(.cwd(), io, source_path, cache_dir, file.sub_path, .{}) catch |err| { return step.fail("unable to update file from '{s}' to '{f}{s}{c}{s}': {t}", .{ source_path, b.cache_root, cache_path, fs.path.sep, file.sub_path, err, }); @@ -321,10 +321,10 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .directory => try cache_dir.makePath(dest_path), .file => { const prev_status = Io.Dir.updateFile( - src_entry_path.root_dir.handle.adaptToNewApi(), + src_entry_path.root_dir.handle, io, src_entry_path.sub_path, - cache_dir.adaptToNewApi(), + cache_dir, dest_path, .{}, ) catch |err| { diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index f91075b444..9938d5e1b0 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -504,14 +504,14 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons var archiver: std.tar.Writer = .{ .underlying_writer = &response.writer }; for (paths) |path| { - var file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| { + var file = path.root_dir.handle.openFile(io, path.sub_path, .{}) catch |err| { log.err("failed to open '{f}': {s}", .{ path, @errorName(err) }); continue; }; defer file.close(io); const stat = try file.stat(); var read_buffer: [1024]u8 = undefined; - var file_reader: Io.File.Reader = .initSize(file.adaptToNewApi(), io, &read_buffer, stat.size); + var file_reader: Io.File.Reader = .initSize(file, io, &read_buffer, stat.size); // TODO: this logic is completely bogus -- obviously so, because `path.root_dir.path` can // be cwd-relative. This is also related to why linkification doesn't work in the fuzzer UI: diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig index 1c28c2f9b3..9ae636d4a6 100644 --- a/lib/std/Io/Dir.zig +++ b/lib/std/Io/Dir.zig @@ -481,7 +481,7 @@ pub fn updateFile( } var buffer: [1000]u8 = undefined; // Used only when direct fd-to-fd is not available. 
- var atomic_file = try Dir.atomicFile(.adaptFromNewApi(dest_dir), dest_path, .{ + var atomic_file = try Dir.atomicFile(dest_dir, dest_path, .{ .permissions = actual_permissions, .write_buffer = &buffer, }); diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index b922acc333..9f21fe50e7 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -44,7 +44,7 @@ test "write a file, read it, then delete it" { } { - var file = try tmp.dir.openFile(tmp_file_name, .{}); + var file = try tmp.dir.openFile(io, tmp_file_name, .{}); defer file.close(io); const file_size = try file.getEndPos(); diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 8d8e5979df..102bb59415 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -208,7 +208,7 @@ pub fn setName(self: Thread, io: Io, name: []const u8) SetNameError!void { var buf: [32]u8 = undefined; const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()}); - const file = try std.fs.cwd().openFile(path, .{ .mode = .write_only }); + const file = try std.fs.cwd().openFile(io, path, .{ .mode = .write_only }); defer file.close(io); try file.writeAll(name); @@ -325,7 +325,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co var threaded: std.Io.Threaded = .init_single_threaded; const io = threaded.ioBasic(); - const file = try std.fs.cwd().openFile(path, .{}); + const file = try std.fs.cwd().openFile(io, path, .{}); defer file.close(io); var file_reader = file.readerStreaming(io, &.{}); diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig index df671e6f63..eb60ad37a8 100644 --- a/lib/std/crypto/Certificate/Bundle.zig +++ b/lib/std/crypto/Certificate/Bundle.zig @@ -208,7 +208,7 @@ pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp, i else => continue, } - try addCertsFromFilePath(cb, gpa, io, now, iterable_dir.adaptToNewApi(), entry.name); + try addCertsFromFilePath(cb, gpa, io, now, iterable_dir, entry.name); } } diff --git a/lib/std/debug/ElfFile.zig b/lib/std/debug/ElfFile.zig index 5dbae18130..a0f1188ade 100644 --- a/lib/std/debug/ElfFile.zig +++ b/lib/std/debug/ElfFile.zig @@ -375,7 +375,7 @@ fn loadSeparateDebugFile( args: anytype, ) Allocator.Error!?[]align(std.heap.page_size_min) const u8 { const path = try std.fmt.allocPrint(arena, fmt, args); - const elf_file = std.fs.cwd().openFile(path, .{}) catch return null; + const elf_file = std.fs.cwd().openFile(io, path, .{}) catch return null; defer elf_file.close(io); const result = loadInner(arena, elf_file, opt_crc) catch |err| switch (err) { diff --git a/lib/std/debug/Info.zig b/lib/std/debug/Info.zig index da7656e626..6b31f03f72 100644 --- a/lib/std/debug/Info.zig +++ b/lib/std/debug/Info.zig @@ -39,7 +39,7 @@ pub fn load( ) LoadError!Info { switch (format) { .elf => { - var file = try path.root_dir.handle.openFile(path.sub_path, .{}); + var file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); defer file.close(io); var elf_file: ElfFile = try .load(gpa, file, null, &.none); diff --git a/lib/std/debug/MachOFile.zig b/lib/std/debug/MachOFile.zig index 3f0f620a90..ae904c0aec 100644 --- a/lib/std/debug/MachOFile.zig +++ b/lib/std/debug/MachOFile.zig @@ -512,7 +512,7 @@ fn loadOFile(gpa: Allocator, io: Io, o_file_name: []const u8) !OFile { /// Uses `mmap` to map the file at `path` into memory. 
fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 { - const file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) { + const file = std.fs.cwd().openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return error.ReadFailed, }; diff --git a/lib/std/debug/SelfInfo/Elf.zig b/lib/std/debug/SelfInfo/Elf.zig index 155dac6fb8..124768687c 100644 --- a/lib/std/debug/SelfInfo/Elf.zig +++ b/lib/std/debug/SelfInfo/Elf.zig @@ -325,7 +325,7 @@ const Module = struct { } fn loadElf(mod: *Module, gpa: Allocator, io: Io) Error!LoadedElf { const load_result = if (mod.name.len > 0) res: { - var file = std.fs.cwd().openFile(mod.name, .{}) catch return error.MissingDebugInfo; + var file = std.fs.cwd().openFile(io, mod.name, .{}) catch return error.MissingDebugInfo; defer file.close(io); break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(mod.name)); } else res: { @@ -334,7 +334,7 @@ const Module = struct { else => return error.ReadFailed, }; defer gpa.free(path); - var file = std.fs.cwd().openFile(path, .{}) catch return error.MissingDebugInfo; + var file = std.fs.cwd().openFile(io, path, .{}) catch return error.MissingDebugInfo; defer file.close(io); break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(path)); }; diff --git a/lib/std/debug/SelfInfo/MachO.zig b/lib/std/debug/SelfInfo/MachO.zig index 2491cf416c..15da616f3b 100644 --- a/lib/std/debug/SelfInfo/MachO.zig +++ b/lib/std/debug/SelfInfo/MachO.zig @@ -616,7 +616,7 @@ test { /// Uses `mmap` to map the file at `path` into memory. fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 { - const file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) { + const file = std.fs.cwd().openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return error.ReadFailed, }; diff --git a/lib/std/debug/SelfInfo/Windows.zig b/lib/std/debug/SelfInfo/Windows.zig index 3af7223293..c7f9d8c352 100644 --- a/lib/std/debug/SelfInfo/Windows.zig +++ b/lib/std/debug/SelfInfo/Windows.zig @@ -387,7 +387,7 @@ const Module = struct { const section_view = section_view_ptr.?[0..coff_len]; coff_obj = coff.Coff.init(section_view, false) catch return error.InvalidDebugInfo; break :mapped .{ - .file = .adaptFromNewApi(coff_file), + .file = coff_file, .section_handle = section_handle, .section_view = section_view, }; @@ -432,7 +432,7 @@ const Module = struct { break :pdb null; }; const pdb_file_open_result = if (fs.path.isAbsolute(path)) res: { - break :res std.fs.cwd().openFile(path, .{}); + break :res std.fs.cwd().openFile(io, path, .{}); } else res: { const self_dir = fs.selfExeDirPathAlloc(gpa) catch |err| switch (err) { error.OutOfMemory, error.Unexpected => |e| return e, @@ -441,7 +441,7 @@ const Module = struct { defer gpa.free(self_dir); const abs_path = try fs.path.join(gpa, &.{ self_dir, path }); defer gpa.free(abs_path); - break :res std.fs.cwd().openFile(abs_path, .{}); + break :res std.fs.cwd().openFile(io, abs_path, .{}); }; const pdb_file = pdb_file_open_result catch |err| switch (err) { error.FileNotFound, error.IsDir => break :pdb null, diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 9472e5d2a5..cb4daf7c50 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -287,23 +287,6 @@ pub fn symLinkAbsoluteW( return windows.CreateSymbolicLink(null, mem.span(sym_link_path_w), mem.span(target_path_w), flags.is_directory); } -pub const OpenSelfExeError = 
Io.File.OpenSelfExeError; - -/// Deprecated in favor of `Io.File.openSelfExe`. -pub fn openSelfExe(flags: File.OpenFlags) OpenSelfExeError!File { - if (native_os == .linux or native_os == .serenity or native_os == .windows) { - var threaded: Io.Threaded = .init_single_threaded; - const io = threaded.ioBasic(); - return .adaptFromNewApi(try Io.File.openSelfExe(io, flags)); - } - // Use of max_path_bytes here is valid as the resulting path is immediately - // opened with no modification. - var buf: [max_path_bytes]u8 = undefined; - const self_exe_path = try selfExePath(&buf); - buf[self_exe_path.len] = 0; - return openFileAbsolute(buf[0..self_exe_path.len :0], flags); -} - // This is `posix.ReadLinkError || posix.RealPathError` with impossible errors excluded pub const SelfExePathError = error{ FileNotFound, diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 36ccd3a6be..7d566da0e9 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -855,7 +855,7 @@ test "directory operations on files" { } // ensure the file still exists and is a file as a sanity check - file = try ctx.dir.openFile(test_file_name, .{}); + file = try ctx.dir.openFile(io, test_file_name, .{}); const stat = try file.stat(); try testing.expectEqual(File.Kind.file, stat.kind); file.close(io); @@ -895,12 +895,12 @@ test "file operations on directories" { if (native_os == .wasi and builtin.link_libc) { // wasmtime unexpectedly succeeds here, see https://github.com/ziglang/zig/issues/20747 - const handle = try ctx.dir.openFile(test_dir_name, .{ .mode = .read_write }); + const handle = try ctx.dir.openFile(io, test_dir_name, .{ .mode = .read_write }); handle.close(io); } else { // Note: The `.mode = .read_write` is necessary to ensure the error occurs on all platforms. // TODO: Add a read-only test as well, see https://github.com/ziglang/zig/issues/5732 - try testing.expectError(error.IsDir, ctx.dir.openFile(test_dir_name, .{ .mode = .read_write })); + try testing.expectError(error.IsDir, ctx.dir.openFile(io, test_dir_name, .{ .mode = .read_write })); } if (ctx.path_type == .absolute and comptime PathType.absolute.isSupported(builtin.os)) { @@ -973,8 +973,8 @@ test "Dir.rename files" { try ctx.dir.rename(test_file_name, renamed_test_file_name); // Ensure the file was renamed - try testing.expectError(error.FileNotFound, ctx.dir.openFile(test_file_name, .{})); - file = try ctx.dir.openFile(renamed_test_file_name, .{}); + try testing.expectError(error.FileNotFound, ctx.dir.openFile(io, test_file_name, .{})); + file = try ctx.dir.openFile(io, renamed_test_file_name, .{}); file.close(io); // Rename to self succeeds @@ -986,8 +986,8 @@ test "Dir.rename files" { existing_file.close(io); try ctx.dir.rename(renamed_test_file_name, existing_file_path); - try testing.expectError(error.FileNotFound, ctx.dir.openFile(renamed_test_file_name, .{})); - file = try ctx.dir.openFile(existing_file_path, .{}); + try testing.expectError(error.FileNotFound, ctx.dir.openFile(io, renamed_test_file_name, .{})); + file = try ctx.dir.openFile(io, existing_file_path, .{}); file.close(io); } }.impl); @@ -1026,7 +1026,7 @@ test "Dir.rename directories" { // Ensure the directory was renamed and the file still exists in it try testing.expectError(error.FileNotFound, ctx.dir.openDir(test_dir_renamed_path, .{})); dir = try ctx.dir.openDir(test_dir_renamed_again_path, .{}); - file = try dir.openFile("test_file", .{}); + file = try dir.openFile(io, "test_file", .{}); file.close(io); dir.close(io); } @@ -1119,8 +1119,8 @@ test "rename" { try 
fs.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name); // ensure the file was renamed - try testing.expectError(error.FileNotFound, tmp_dir1.dir.openFile(test_file_name, .{})); - file = try tmp_dir2.dir.openFile(renamed_test_file_name, .{}); + try testing.expectError(error.FileNotFound, tmp_dir1.dir.openFile(io, test_file_name, .{})); + file = try tmp_dir2.dir.openFile(io, renamed_test_file_name, .{}); file.close(io); } @@ -1156,8 +1156,8 @@ test "renameAbsolute" { ); // ensure the file was renamed - try testing.expectError(error.FileNotFound, tmp_dir.dir.openFile(test_file_name, .{})); - file = try tmp_dir.dir.openFile(renamed_test_file_name, .{}); + try testing.expectError(error.FileNotFound, tmp_dir.dir.openFile(io, test_file_name, .{})); + file = try tmp_dir.dir.openFile(io, renamed_test_file_name, .{}); const stat = try file.stat(); try testing.expectEqual(File.Kind.file, stat.kind); file.close(io); @@ -1512,7 +1512,7 @@ test "setEndPos" { const file_name = "afile.txt"; try tmp.dir.writeFile(.{ .sub_path = file_name, .data = "ninebytes" }); - const f = try tmp.dir.openFile(file_name, .{ .mode = .read_write }); + const f = try tmp.dir.openFile(io, file_name, .{ .mode = .read_write }); defer f.close(io); const initial_size = try f.getEndPos(); @@ -1856,7 +1856,7 @@ test "read from locked file" { .lock = .exclusive, }); defer f.close(io); - const f2 = try ctx.dir.openFile(filename, .{}); + const f2 = try ctx.dir.openFile(io, filename, .{}); defer f2.close(io); var buffer: [1]u8 = undefined; if (builtin.os.tag == .windows) { @@ -2041,12 +2041,12 @@ test "'.' and '..' in Io.Dir functions" { try ctx.dir.copyFile(file_path, ctx.dir, copy_path, .{}); try ctx.dir.rename(copy_path, rename_path); - const renamed_file = try ctx.dir.openFile(rename_path, .{}); + const renamed_file = try ctx.dir.openFile(io, rename_path, .{}); renamed_file.close(io); try ctx.dir.deleteFile(rename_path); try ctx.dir.writeFile(.{ .sub_path = update_path, .data = "something" }); - var dir = ctx.dir.adaptToNewApi(); + var dir = ctx.dir; const prev_status = try dir.updateFile(io, file_path, dir, update_path, .{}); try testing.expectEqual(Io.Dir.PrevStatus.stale, prev_status); @@ -2186,7 +2186,7 @@ test "invalid UTF-8/WTF-8 paths" { try testing.expectError(expected_err, ctx.dir.access(invalid_path, .{})); - var dir = ctx.dir.adaptToNewApi(); + var dir = ctx.dir; try testing.expectError(expected_err, dir.updateFile(io, invalid_path, dir, invalid_path, .{})); try testing.expectError(expected_err, ctx.dir.copyFile(invalid_path, ctx.dir, invalid_path, .{})); @@ -2235,7 +2235,7 @@ test "read file non vectored" { try file_writer.interface.flush(); } - var file_reader: std.Io.File.Reader = .initAdapted(file, io, &.{}); + var file_reader: std.Io.File.Reader = .init(file, io, &.{}); var write_buffer: [100]u8 = undefined; var w: std.Io.Writer = .fixed(&write_buffer); @@ -2268,7 +2268,7 @@ test "seek keeping partial buffer" { } var read_buffer: [3]u8 = undefined; - var file_reader: Io.File.Reader = .initAdapted(file, io, &read_buffer); + var file_reader: Io.File.Reader = .init(file, io, &read_buffer); try testing.expectEqual(0, file_reader.logicalPos()); @@ -2301,7 +2301,7 @@ test "seekBy" { defer tmp_dir.cleanup(); try tmp_dir.dir.writeFile(.{ .sub_path = "blah.txt", .data = "let's test seekBy" }); - const f = try tmp_dir.dir.openFile("blah.txt", .{ .mode = .read_only }); + const f = try tmp_dir.dir.openFile(io, "blah.txt", .{ .mode = .read_only }); defer f.close(io); var reader = f.readerStreaming(io, 
&.{}); try reader.seekBy(2); @@ -2332,7 +2332,7 @@ test "seekTo flushes buffered data" { } var read_buffer: [16]u8 = undefined; - var file_reader: std.Io.File.Reader = .initAdapted(file, io, &read_buffer); + var file_reader: std.Io.File.Reader = .init(file, io, &read_buffer); var buf: [4]u8 = undefined; try file_reader.interface.readSliceAll(&buf); @@ -2347,7 +2347,7 @@ test "File.Writer sendfile with buffered contents" { { try tmp_dir.dir.writeFile(.{ .sub_path = "a", .data = "bcd" }); - const in = try tmp_dir.dir.openFile("a", .{}); + const in = try tmp_dir.dir.openFile(io, "a", .{}); defer in.close(io); const out = try tmp_dir.dir.createFile("b", .{}); defer out.close(io); @@ -2364,7 +2364,7 @@ test "File.Writer sendfile with buffered contents" { try out_w.interface.flush(); } - var check = try tmp_dir.dir.openFile("b", .{}); + var check = try tmp_dir.dir.openFile(io, "b", .{}); defer check.close(io); var check_buf: [4]u8 = undefined; var check_r = check.reader(io, &check_buf); diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig index e4a5bd3738..c7d3f35d40 100644 --- a/lib/std/os/linux/IoUring.zig +++ b/lib/std/os/linux/IoUring.zig @@ -3002,7 +3002,7 @@ test "renameat" { }, cqe); // Validate that the old file doesn't exist anymore - try testing.expectError(error.FileNotFound, tmp.dir.openFile(old_path, .{})); + try testing.expectError(error.FileNotFound, tmp.dir.openFile(io, old_path, .{})); // Validate that the new file exists with the proper content var new_file_data: [16]u8 = undefined; @@ -3057,7 +3057,7 @@ test "unlinkat" { }, cqe); // Validate that the file doesn't exist anymore - _ = tmp.dir.openFile(path, .{}) catch |err| switch (err) { + _ = tmp.dir.openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => {}, else => std.debug.panic("unexpected error: {}", .{err}), }; @@ -3154,7 +3154,7 @@ test "symlinkat" { }, cqe); // Validate that the symlink exist - _ = try tmp.dir.openFile(link_path, .{}); + _ = try tmp.dir.openFile(io, link_path, .{}); } test "linkat" { diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 82fa2c41d1..19313e3ff7 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -164,10 +164,10 @@ test "linkat with different directories" { // Test 1: link from file in subdir back up to target in parent directory try posix.linkat(tmp.dir.fd, target_name, subdir.fd, link_name, 0); - const efd = try tmp.dir.openFile(target_name, .{}); + const efd = try tmp.dir.openFile(io, target_name, .{}); defer efd.close(io); - const nfd = try subdir.openFile(link_name, .{}); + const nfd = try subdir.openFile(io, link_name, .{}); defer nfd.close(io); { @@ -429,7 +429,7 @@ test "mmap" { // Map the whole file { - const file = try tmp.dir.openFile(test_out_file, .{}); + const file = try tmp.dir.openFile(io, test_out_file, .{}); defer file.close(io); const data = try posix.mmap( @@ -454,7 +454,7 @@ test "mmap" { // Map the upper half of the file { - const file = try tmp.dir.openFile(test_out_file, .{}); + const file = try tmp.dir.openFile(io, test_out_file, .{}); defer file.close(io); const data = try posix.mmap( diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index 4f0c11797d..cc74da956a 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -817,7 +817,7 @@ fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion { // .dynstr section, and finding the max version number of symbols // that start with "GLIBC_2.". 
const glibc_so_basename = "libc.so.6"; - var file = dir.openFile(glibc_so_basename, .{}) catch |err| switch (err) { + var file = dir.openFile(io, glibc_so_basename, .{}) catch |err| switch (err) { error.NameTooLong => return error.Unexpected, error.BadPathName => return error.Unexpected, error.PipeBusy => return error.Unexpected, // Windows-only @@ -851,7 +851,7 @@ fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion { // Empirically, glibc 2.34 libc.so .dynstr section is 32441 bytes on my system. var buffer: [8000]u8 = undefined; - var file_reader: Io.File.Reader = .initAdapted(file, io, &buffer); + var file_reader: Io.File.Reader = .init(file, io, &buffer); return glibcVerFromSoFile(&file_reader) catch |err| switch (err) { error.InvalidElfMagic, @@ -1053,7 +1053,7 @@ fn detectAbiAndDynamicLinker(io: Io, cpu: Target.Cpu, os: Target.Os, query: Targ var is_elf_file = false; defer if (!is_elf_file) file.close(io); - file_reader = .initAdapted(file, io, &file_reader_buffer); + file_reader = .init(file, io, &file_reader_buffer); file_name = undefined; // it aliases file_reader_buffer const header = elf.Header.read(&file_reader.interface) catch |hdr_err| switch (hdr_err) { diff --git a/src/Compilation.zig b/src/Compilation.zig index d75cba5a11..24b994f608 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1104,7 +1104,7 @@ pub const CObject = struct { const source_line = source_line: { if (diag.src_loc.offset == 0 or diag.src_loc.column == 0) break :source_line 0; - const file = fs.cwd().openFile(file_name, .{}) catch break :source_line 0; + const file = fs.cwd().openFile(io, file_name, .{}) catch break :source_line 0; defer file.close(io); var buffer: [1024]u8 = undefined; var file_reader = file.reader(io, &buffer); @@ -1179,7 +1179,7 @@ pub const CObject = struct { }; var buffer: [1024]u8 = undefined; - const file = try fs.cwd().openFile(path, .{}); + const file = try fs.cwd().openFile(io, path, .{}); defer file.close(io); var file_reader = file.reader(io, &buffer); var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = &file_reader.interface }); @@ -5354,14 +5354,14 @@ fn docsCopyModule( }, else => continue, } - var file = mod_dir.openFile(entry.path, .{}) catch |err| { + var file = mod_dir.openFile(io, entry.path, .{}) catch |err| { return comp.lockAndSetMiscFailure(.docs_copy, "unable to open {f}{s}: {t}", .{ root.fmt(comp), entry.path, err, }); }; defer file.close(io); const stat = try file.stat(); - var file_reader: Io.File.Reader = .initSize(file.adaptToNewApi(), io, &buffer, stat.size); + var file_reader: Io.File.Reader = .initSize(file, io, &buffer, stat.size); archiver.writeFileTimestamp(entry.path, &file_reader, stat.mtime) catch |err| { return comp.lockAndSetMiscFailure(.docs_copy, "unable to archive {f}{s}: {t}", .{ diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 988282097b..3bd05120ff 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -390,7 +390,7 @@ pub fn run(f: *Fetch) RunError!void { var server_header_buffer: [init_resource_buffer_size]u8 = undefined; const file_err = if (dir_err == error.NotDir) e: { - if (fs.cwd().openFile(path_or_url, .{})) |file| { + if (fs.cwd().openFile(io, path_or_url, .{})) |file| { var resource: Resource = .{ .file = file.reader(io, &server_header_buffer) }; return f.runResource(path_or_url, &resource, null); } else |err| break :e err; @@ -995,7 +995,7 @@ fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u if (ascii.eqlIgnoreCase(uri.scheme, "file")) { const 
path = try uri.path.toRawMaybeAlloc(arena); - const file = f.parent_package_root.openFile(path, .{}) catch |err| { + const file = f.parent_package_root.openFile(io, path, .{}) catch |err| { return f.fail(f.location_tok, try eb.printString("unable to open '{f}{s}': {t}", .{ f.parent_package_root, path, err, })); @@ -1677,7 +1677,7 @@ fn hashFileFallible(io: Io, dir: Io.Dir, hashed_file: *HashedFile) HashedFile.Er switch (hashed_file.kind) { .file => { - var file = try dir.openFile(hashed_file.fs_path, .{}); + var file = try dir.openFile(io, hashed_file.fs_path, .{}); defer file.close(io); // Hard-coded false executable bit: https://github.com/ziglang/zig/issues/17463 hasher.update(&.{ 0, 0 }); diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index abaa8fef73..ccae9440e2 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -1714,7 +1714,7 @@ pub fn main() !void { const format = std.meta.stringToEnum(Oid.Format, args[1]) orelse return error.InvalidFormat; - var pack_file = try std.fs.cwd().openFile(args[2], .{}); + var pack_file = try std.fs.cwd().openFile(io, args[2], .{}); defer pack_file.close(io); var pack_file_buffer: [4096]u8 = undefined; var pack_file_reader = pack_file.reader(io, &pack_file_buffer); diff --git a/src/Zcu.zig b/src/Zcu.zig index cd4a8c7783..d2634a8962 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -1076,7 +1076,7 @@ pub const File = struct { var f = f: { const dir, const sub_path = file.path.openInfo(zcu.comp.dirs); - break :f try dir.openFile(sub_path, .{}); + break :f try dir.openFile(io, sub_path, .{}); }; defer f.close(io); diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 55d6a3861f..45b1302138 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -94,7 +94,7 @@ pub fn updateFile( // In any case we need to examine the stat of the file to determine the course of action. 
var source_file = f: { const dir, const sub_path = file.path.openInfo(comp.dirs); - break :f try dir.openFile(sub_path, .{}); + break :f try dir.openFile(io, sub_path, .{}); }; defer source_file.close(io); @@ -2466,7 +2466,7 @@ fn updateEmbedFileInner( var file = f: { const dir, const sub_path = ef.path.openInfo(zcu.comp.dirs); - break :f try dir.openFile(sub_path, .{}); + break :f try dir.openFile(io, sub_path, .{}); }; defer file.close(io); diff --git a/src/fmt.zig b/src/fmt.zig index 663d09e9cb..ce8a31fa4c 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -262,7 +262,7 @@ fn fmtPathFile( ) !void { const io = fmt.io; - const source_file = try dir.openFile(sub_path, .{}); + const source_file = try dir.openFile(io, sub_path, .{}); var file_closed = false; errdefer if (!file_closed) source_file.close(io); diff --git a/src/introspect.zig b/src/introspect.zig index 9b6797e7d8..d2faa9a55c 100644 --- a/src/introspect.zig +++ b/src/introspect.zig @@ -22,7 +22,7 @@ fn testZigInstallPrefix(io: Io, base_dir: Io.Dir) ?Cache.Directory { // Try lib/zig/std/std.zig const lib_zig = "lib" ++ fs.path.sep_str ++ "zig"; var test_zig_dir = base_dir.openDir(lib_zig, .{}) catch break :zig_dir; - const file = test_zig_dir.openFile(test_index_file, .{}) catch { + const file = test_zig_dir.openFile(io, test_index_file, .{}) catch { test_zig_dir.close(io); break :zig_dir; }; @@ -32,7 +32,7 @@ fn testZigInstallPrefix(io: Io, base_dir: Io.Dir) ?Cache.Directory { // Try lib/std/std.zig var test_zig_dir = base_dir.openDir("lib", .{}) catch return null; - const file = test_zig_dir.openFile(test_index_file, .{}) catch { + const file = test_zig_dir.openFile(io, test_index_file, .{}) catch { test_zig_dir.close(io); return null; }; diff --git a/src/link.zig b/src/link.zig index d5daf6fca7..073ec632c6 100644 --- a/src/link.zig +++ b/src/link.zig @@ -637,7 +637,7 @@ pub const File = struct { } } } - base.file = try emit.root_dir.handle.openFile(emit.sub_path, .{ .mode = .read_write }); + base.file = try emit.root_dir.handle.openFile(io, emit.sub_path, .{ .mode = .read_write }); }, .elf2, .coff2 => if (base.file == null) { const mf = if (base.cast(.elf2)) |elf| @@ -646,10 +646,10 @@ pub const File = struct { &coff.mf else unreachable; - mf.file = try base.emit.root_dir.handle.adaptToNewApi().openFile(io, base.emit.sub_path, .{ + mf.file = try base.emit.root_dir.handle.openFile(io, base.emit.sub_path, .{ .mode = .read_write, }); - base.file = .adaptFromNewApi(mf.file); + base.file = mf.file; try mf.ensureTotalCapacity(@intCast(mf.nodes.items[0].location().resolve(mf)[1])); }, .c, .spirv => dev.checkAny(&.{ .c_linker, .spirv_linker }), @@ -2007,7 +2007,7 @@ fn resolveLibInput( .sub_path = try std.fmt.allocPrint(arena, "lib{s}.tbd", .{lib_name}), }; try checked_paths.print(gpa, "\n {f}", .{test_path}); - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => break :tbd, else => |e| fatal("unable to search for tbd library '{f}': {s}", .{ test_path, @errorName(e) }), }; @@ -2043,7 +2043,7 @@ fn resolveLibInput( .sub_path = try std.fmt.allocPrint(arena, "lib{s}.so", .{lib_name}), }; try checked_paths.print(gpa, "\n {f}", .{test_path}); - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => break :so, 
else => |e| fatal("unable to search for so library '{f}': {s}", .{ test_path, @errorName(e), @@ -2061,7 +2061,7 @@ fn resolveLibInput( .sub_path = try std.fmt.allocPrint(arena, "lib{s}.a", .{lib_name}), }; try checked_paths.print(gpa, "\n {f}", .{test_path}); - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => break :mingw, else => |e| fatal("unable to search for static library '{f}': {s}", .{ test_path, @errorName(e) }), }; @@ -2115,7 +2115,7 @@ fn resolvePathInput( .static_library => return try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .static, color), .shared_library => return try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .dynamic, color), .object => { - var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err| + var file = pq.path.root_dir.handle.openFile(io, pq.path.sub_path, .{}) catch |err| fatal("failed to open object {f}: {s}", .{ pq.path, @errorName(err) }); errdefer file.close(io); try resolved_inputs.append(gpa, .{ .object = .{ @@ -2127,7 +2127,7 @@ fn resolvePathInput( return null; }, .res => { - var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err| + var file = pq.path.root_dir.handle.openFile(io, pq.path.sub_path, .{}) catch |err| fatal("failed to open windows resource {f}: {s}", .{ pq.path, @errorName(err) }); errdefer file.close(io); try resolved_inputs.append(gpa, .{ .res = .{ @@ -2164,7 +2164,7 @@ fn resolvePathInputLib( .static_library, .shared_library => true, else => false, }) { - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => return .no_match, else => |e| fatal("unable to search for {s} library '{f}': {s}", .{ @tagName(link_mode), std.fmt.alt(test_path, .formatEscapeChar), @errorName(e), @@ -2242,7 +2242,7 @@ fn resolvePathInputLib( return .ok; } - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => return .no_match, else => |e| fatal("unable to search for {s} library {f}: {s}", .{ @tagName(link_mode), test_path, @errorName(e), @@ -2253,7 +2253,7 @@ fn resolvePathInputLib( } pub fn openObject(io: Io, path: Path, must_link: bool, hidden: bool) !Input.Object { - var file = try path.root_dir.handle.openFile(path.sub_path, .{}); + var file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); errdefer file.close(io); return .{ .path = path, @@ -2264,7 +2264,7 @@ pub fn openObject(io: Io, path: Path, must_link: bool, hidden: bool) !Input.Obje } pub fn openDso(io: Io, path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso { - var file = try path.root_dir.handle.openFile(path.sub_path, .{}); + var file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); errdefer file.close(io); return .{ .path = path, diff --git a/src/link/Coff.zig b/src/link/Coff.zig index f33e0ccdea..e1d52fb7c4 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -632,7 +632,7 @@ fn create( }; const coff = try arena.create(Coff); - const file = try 
path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{ + const file = try path.root_dir.handle.createFile(comp.io, path.sub_path, .{ .read = true, .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode), }); @@ -644,7 +644,7 @@ fn create( .comp = comp, .emit = path, - .file = .adaptFromNewApi(file), + .file = file, .gc_sections = false, .print_gc_sections = false, .build_id = .none, diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig index e35444bc02..72fdb244a4 100644 --- a/src/link/Elf2.zig +++ b/src/link/Elf2.zig @@ -928,6 +928,7 @@ fn create( path: std.Build.Cache.Path, options: link.File.OpenOptions, ) !*Elf { + const io = comp.io; const target = &comp.root_mod.resolved_target.result; assert(target.ofmt == .elf); const class: std.elf.CLASS = switch (target.ptrBitWidth()) { @@ -973,11 +974,11 @@ fn create( }; const elf = try arena.create(Elf); - const file = try path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{ + const file = try path.root_dir.handle.createFile(io, path.sub_path, .{ .read = true, .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode), }); - errdefer file.close(comp.io); + errdefer file.close(io); elf.* = .{ .base = .{ .tag = .elf2, @@ -985,7 +986,7 @@ fn create( .comp = comp, .emit = path, - .file = .adaptFromNewApi(file), + .file = file, .gc_sections = false, .print_gc_sections = false, .build_id = .none, @@ -3325,12 +3326,13 @@ fn flushInputSection(elf: *Elf, isi: Node.InputSectionIndex) !void { const file_loc = isi.fileLocation(elf); if (file_loc.size == 0) return; const comp = elf.base.comp; + const io = comp.io; const gpa = comp.gpa; const ii = isi.input(elf); const path = ii.path(elf); - const file = try path.root_dir.handle.adaptToNewApi().openFile(comp.io, path.sub_path, .{}); - defer file.close(comp.io); - var fr = file.reader(comp.io, &.{}); + const file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); + defer file.close(io); + var fr = file.reader(io, &.{}); try fr.seekTo(file_loc.offset); var nw: MappedFile.Node.Writer = undefined; const si = isi.symbol(elf); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 93adc633ce..e837cc853a 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1126,7 +1126,9 @@ fn parseDependentDylibs(self: *MachO) !void { if (self.dylibs.items.len == 0) return; - const gpa = self.base.comp.gpa; + const comp = self.base.comp; + const gpa = comp.gpa; + const io = comp.io; const framework_dirs = self.framework_dirs; // TODO delete this, directories must instead be resolved by the frontend @@ -1183,7 +1185,7 @@ fn parseDependentDylibs(self: *MachO) !void { const path = if (existing_ext.len > 0) id.name[0 .. 
id.name.len - existing_ext.len] else id.name; for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| { test_path.clearRetainingCapacity(); - if (self.base.comp.sysroot) |root| { + if (comp.sysroot) |root| { try test_path.print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext }); } else { try test_path.print("{s}{s}", .{ path, ext }); @@ -1235,7 +1237,7 @@ fn parseDependentDylibs(self: *MachO) !void { .path = Path.initCwd(full_path), .weak = is_weak, }; - const file = try lib.path.root_dir.handle.openFile(lib.path.sub_path, .{}); + const file = try lib.path.root_dir.handle.openFile(io, lib.path.sub_path, .{}); const fh = try self.addFileHandle(file); const fat_arch = try self.parseFatFile(file, lib.path); const offset = if (fat_arch) |fa| fa.offset else 0; diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig index d2a6c2a3ab..0f42442640 100644 --- a/src/link/MachO/relocatable.zig +++ b/src/link/MachO/relocatable.zig @@ -1,6 +1,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void { - const gpa = macho_file.base.comp.gpa; - const diags = &macho_file.base.comp.link_diags; + const gpa = comp.gpa; + const io = comp.io; + const diags = &comp.link_diags; // TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list. var positionals = std.array_list.Managed(link.Input).init(gpa); @@ -19,7 +20,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat // debug info segments/sections (this is apparently by design by Apple), we copy // the *only* input file over. const path = positionals.items[0].path().?; - const in_file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| + const in_file = path.root_dir.handle.openFile(io, path.sub_path, .{}) catch |err| return diags.fail("failed to open {f}: {s}", .{ path, @errorName(err) }); const stat = in_file.stat() catch |err| return diags.fail("failed to stat {f}: {s}", .{ path, @errorName(err) }); diff --git a/src/link/MappedFile.zig b/src/link/MappedFile.zig index 7d4134ccaf..a61c6e764c 100644 --- a/src/link/MappedFile.zig +++ b/src/link/MappedFile.zig @@ -630,7 +630,7 @@ fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested // Resize the entire file if (ni == Node.Index.root) { try mf.ensureCapacityForSetLocation(gpa); - try Io.File.adaptFromNewApi(mf.file).setEndPos(new_size); + try mf.file.setEndPos(new_size); try mf.ensureTotalCapacity(@intCast(new_size)); ni.setLocationAssumeCapacity(mf, old_offset, new_size); return; diff --git a/src/main.zig b/src/main.zig index 3ca64881f8..b040b6c8ef 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4681,7 +4681,7 @@ fn cmdTranslateC( } else { const hex_digest = Cache.binToHex(result.digest); const out_zig_path = try fs.path.join(arena, &.{ "o", &hex_digest, translated_basename }); - const zig_file = comp.dirs.local_cache.handle.openFile(out_zig_path, .{}) catch |err| { + const zig_file = comp.dirs.local_cache.handle.openFile(io, out_zig_path, .{}) catch |err| { const path = comp.dirs.local_cache.path orelse "."; fatal("unable to open cached translated zig file '{s}{s}{s}': {s}", .{ path, @@ -6187,7 +6187,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void { const display_path = zig_source_path orelse ""; const source: [:0]const u8 = s: { var f = if (zig_source_path) |p| file: { - break :file fs.cwd().openFile(p, .{}) catch |err| { + break :file fs.cwd().openFile(io, p, .{}) catch |err| { 
fatal("unable to open file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) }); }; } else Io.File.stdin(); @@ -6494,7 +6494,7 @@ fn cmdDumpZir(arena: Allocator, io: Io, args: []const []const u8) !void { const cache_file = args[0]; - var f = fs.cwd().openFile(cache_file, .{}) catch |err| { + var f = fs.cwd().openFile(io, cache_file, .{}) catch |err| { fatal("unable to open zir cache file for dumping '{s}': {s}", .{ cache_file, @errorName(err) }); }; defer f.close(io); @@ -6541,7 +6541,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { const new_source_path = args[1]; const old_source = source: { - var f = fs.cwd().openFile(old_source_path, .{}) catch |err| + var f = fs.cwd().openFile(io, old_source_path, .{}) catch |err| fatal("unable to open old source file '{s}': {s}", .{ old_source_path, @errorName(err) }); defer f.close(io); var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer); @@ -6549,7 +6549,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { fatal("unable to read old source file '{s}': {s}", .{ old_source_path, @errorName(err) }); }; const new_source = source: { - var f = fs.cwd().openFile(new_source_path, .{}) catch |err| + var f = fs.cwd().openFile(io, new_source_path, .{}) catch |err| fatal("unable to open new source file '{s}': {s}", .{ new_source_path, @errorName(err) }); defer f.close(io); var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer); -- cgit v1.2.3 From 9f4d40b1f9bffc4137055b8a07f042ecfa398124 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 14:17:52 -0800 Subject: update all stat() to stat(io) --- lib/compiler/objcopy.zig | 2 +- lib/compiler/resinator/utils.zig | 2 +- lib/compiler/std-docs.zig | 2 +- lib/std/Build/Cache.zig | 9 ++++---- lib/std/Build/WebServer.zig | 2 +- lib/std/Io/test.zig | 4 ++-- lib/std/dynamic_library.zig | 2 +- lib/std/fs/test.zig | 22 ++++++++++---------- lib/std/os/linux/IoUring.zig | 6 +++--- lib/std/os/linux/test.zig | 4 ++-- src/Compilation.zig | 2 +- src/Zcu.zig | 2 +- src/Zcu/PerThread.zig | 4 ++-- src/fmt.zig | 6 +++--- src/link.zig | 6 +++--- src/link/Elf.zig | 5 +++-- src/link/Elf/Archive.zig | 45 +++++++++++++++++++++------------------- src/link/Elf/Object.zig | 7 +++++-- src/link/MachO.zig | 3 ++- src/link/MachO/Archive.zig | 3 ++- src/link/MachO/Object.zig | 4 +++- src/link/MachO/relocatable.zig | 2 +- src/link/tapi.zig | 4 ++-- 23 files changed, 80 insertions(+), 68 deletions(-) (limited to 'lib/std/Build/WebServer.zig') diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index e48f76a6a6..6473a10dd0 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -155,7 +155,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void const input_file = Io.Dir.cwd().openFile(input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err }); defer input_file.close(io); - const stat = input_file.stat() catch |err| fatal("failed to stat {s}: {t}", .{ input, err }); + const stat = input_file.stat(io) catch |err| fatal("failed to stat {s}: {t}", .{ input, err }); var in: File.Reader = .initSize(input_file, io, &input_buffer, stat.size); diff --git a/lib/compiler/resinator/utils.zig b/lib/compiler/resinator/utils.zig index 3f03b73c54..285342ad74 100644 --- a/lib/compiler/resinator/utils.zig +++ b/lib/compiler/resinator/utils.zig @@ -38,7 +38,7 @@ pub fn openFileNotDir( errdefer file.close(io); // https://github.com/ziglang/zig/issues/5732 if (builtin.os.tag != .windows) { - 
const stat = try file.stat(); + const stat = try file.stat(io); if (stat.kind == .directory) return error.IsDir; diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index 37e72d49e5..d5beab5f17 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -227,7 +227,7 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void { } var file = try entry.dir.openFile(io, entry.basename, .{}); defer file.close(io); - const stat = try file.stat(); + const stat = try file.stat(io); var file_reader: std.Io.File.Reader = .{ .file = file, .interface = std.Io.File.Reader.initInterface(&.{}), diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index fdcb2ab714..0176d71a1c 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -775,7 +775,7 @@ pub const Manifest = struct { }; defer this_file.close(io); - const actual_stat = this_file.stat() catch |err| { + const actual_stat = this_file.stat(io) catch |err| { self.diagnostic = .{ .file_stat = .{ .file_index = idx, .err = err, @@ -883,7 +883,7 @@ pub const Manifest = struct { defer file.close(io); // Save locally and also save globally (we still hold the global lock). - const stat = file.stat() catch |err| switch (err) { + const stat = file.stat(io) catch |err| switch (err) { error.Canceled => return error.Canceled, else => return true, }; @@ -909,7 +909,8 @@ pub const Manifest = struct { } fn populateFileHashHandle(self: *Manifest, ch_file: *File, handle: Io.File) !void { - const actual_stat = try handle.stat(); + const io = self.cache.io; + const actual_stat = try handle.stat(io); ch_file.stat = .{ .size = actual_stat.size, .mtime = actual_stat.mtime, @@ -1333,7 +1334,7 @@ fn testGetCurrentFileTimestamp(io: Io, dir: Io.Dir) !Io.Timestamp { dir.deleteFile(test_out_file) catch {}; } - return (try file.stat()).mtime; + return (try file.stat(io)).mtime; } test "cache file and then recall it" { diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 9938d5e1b0..472e87b05a 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -509,7 +509,7 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons continue; }; defer file.close(io); - const stat = try file.stat(); + const stat = try file.stat(io); var read_buffer: [1024]u8 = undefined; var file_reader: Io.File.Reader = .initSize(file, io, &read_buffer, stat.size); diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index e731dc18d7..8b9d714ee8 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -124,13 +124,13 @@ test "updateTimes" { var file = try tmp.dir.createFile(io, tmp_file_name, .{ .read = true }); defer file.close(io); - const stat_old = try file.stat(); + const stat_old = try file.stat(io); // Set atime and mtime to 5s before try file.updateTimes( stat_old.atime.subDuration(.fromSeconds(5)), stat_old.mtime.subDuration(.fromSeconds(5)), ); - const stat_new = try file.stat(); + const stat_new = try file.stat(io); try expect(stat_new.atime.nanoseconds < stat_old.atime.nanoseconds); try expect(stat_new.mtime.nanoseconds < stat_old.mtime.nanoseconds); } diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index b1965b60ee..f02caa1f5b 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -226,7 +226,7 @@ pub const ElfDynLib = struct { defer posix.close(fd); const file: Io.File = .{ .handle = fd }; - const stat = try file.stat(); + const stat = try file.stat(io); const size = std.math.cast(usize, stat.size) 
orelse return error.FileTooBig; const page_size = std.heap.pageSize(); diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 9924cafaf8..7b12ba4271 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -350,7 +350,7 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { }; defer symlink.close(io); - const stat = try symlink.stat(); + const stat = try symlink.stat(io); try testing.expectEqual(File.Kind.sym_link, stat.kind); } }.impl); @@ -396,7 +396,7 @@ test "openDirAbsolute" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - const tmp_ino = (try tmp.dir.stat()).inode; + const tmp_ino = (try tmp.dir.stat(io)).inode; try tmp.dir.makeDir("subdir"); const sub_path = try tmp.dir.realpathAlloc(testing.allocator, "subdir"); @@ -406,7 +406,7 @@ test "openDirAbsolute" { var tmp_sub = try fs.openDirAbsolute(sub_path, .{}); defer tmp_sub.close(io); - const sub_ino = (try tmp_sub.stat()).inode; + const sub_ino = (try tmp_sub.stat(io)).inode; { // Can open sub_path + ".." @@ -416,7 +416,7 @@ test "openDirAbsolute" { var dir = try fs.openDirAbsolute(dir_path, .{}); defer dir.close(io); - const ino = (try dir.stat()).inode; + const ino = (try dir.stat(io)).inode; try testing.expectEqual(tmp_ino, ino); } @@ -428,7 +428,7 @@ test "openDirAbsolute" { var dir = try fs.openDirAbsolute(dir_path, .{}); defer dir.close(io); - const ino = (try dir.stat()).inode; + const ino = (try dir.stat(io)).inode; try testing.expectEqual(sub_ino, ino); } @@ -440,7 +440,7 @@ test "openDirAbsolute" { var dir = try fs.openDirAbsolute(dir_path, .{}); defer dir.close(io); - const ino = (try dir.stat()).inode; + const ino = (try dir.stat(io)).inode; try testing.expectEqual(tmp_ino, ino); } } @@ -849,7 +849,7 @@ test "directory operations on files" { // ensure the file still exists and is a file as a sanity check file = try ctx.dir.openFile(io, test_file_name, .{}); - const stat = try file.stat(); + const stat = try file.stat(io); try testing.expectEqual(File.Kind.file, stat.kind); file.close(io); } @@ -1151,7 +1151,7 @@ test "renameAbsolute" { // ensure the file was renamed try testing.expectError(error.FileNotFound, tmp_dir.dir.openFile(io, test_file_name, .{})); file = try tmp_dir.dir.openFile(io, renamed_test_file_name, .{}); - const stat = try file.stat(); + const stat = try file.stat(io); try testing.expectEqual(File.Kind.file, stat.kind); file.close(io); @@ -2099,17 +2099,17 @@ test "chmod" { const file = try tmp.dir.createFile(io, "test_file", .{ .mode = 0o600 }); defer file.close(io); - try testing.expectEqual(@as(File.Mode, 0o600), (try file.stat()).mode & 0o7777); + try testing.expectEqual(@as(File.Mode, 0o600), (try file.stat(io)).mode & 0o7777); try file.chmod(0o644); - try testing.expectEqual(@as(File.Mode, 0o644), (try file.stat()).mode & 0o7777); + try testing.expectEqual(@as(File.Mode, 0o644), (try file.stat(io)).mode & 0o7777); try tmp.dir.makeDir("test_dir"); var dir = try tmp.dir.openDir(io, "test_dir", .{ .iterate = true }); defer dir.close(io); try dir.chmod(0o700); - try testing.expectEqual(@as(File.Mode, 0o700), (try dir.stat()).mode & 0o7777); + try testing.expectEqual(@as(File.Mode, 0o700), (try dir.stat(io)).mode & 0o7777); } test "chown" { diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig index 167c6ef398..3c9a313a4a 100644 --- a/lib/std/os/linux/IoUring.zig +++ b/lib/std/os/linux/IoUring.zig @@ -2655,7 +2655,7 @@ test "fallocate" { const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); - try 
testing.expectEqual(@as(u64, 0), (try file.stat()).size); + try testing.expectEqual(@as(u64, 0), (try file.stat(io)).size); const len: u64 = 65536; const sqe = try ring.fallocate(0xaaaaaaaa, file.handle, 0, 0, len); @@ -2681,7 +2681,7 @@ test "fallocate" { .flags = 0, }, cqe); - try testing.expectEqual(len, (try file.stat()).size); + try testing.expectEqual(len, (try file.stat(io)).size); } test "statx" { @@ -2702,7 +2702,7 @@ test "statx" { const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); - try testing.expectEqual(@as(u64, 0), (try file.stat()).size); + try testing.expectEqual(@as(u64, 0), (try file.stat(io)).size); try file.writeAll("foobar"); diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig index d7cfb4e138..cfafebe826 100644 --- a/lib/std/os/linux/test.zig +++ b/lib/std/os/linux/test.zig @@ -21,7 +21,7 @@ test "fallocate" { const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); - try expect((try file.stat()).size == 0); + try expect((try file.stat(io)).size == 0); const len: i64 = 65536; switch (linux.errno(linux.fallocate(file.handle, 0, 0, len))) { @@ -31,7 +31,7 @@ test "fallocate" { else => |errno| std.debug.panic("unhandled errno: {}", .{errno}), } - try expect((try file.stat()).size == len); + try expect((try file.stat(io)).size == len); } test "getpid" { diff --git a/src/Compilation.zig b/src/Compilation.zig index ef01fbb07c..3a48705880 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5367,7 +5367,7 @@ fn docsCopyModule( }); }; defer file.close(io); - const stat = try file.stat(); + const stat = try file.stat(io); var file_reader: Io.File.Reader = .initSize(file, io, &buffer, stat.size); archiver.writeFileTimestamp(entry.path, &file_reader, stat.mtime) catch |err| { diff --git a/src/Zcu.zig b/src/Zcu.zig index d2634a8962..1f2fe89236 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -1080,7 +1080,7 @@ pub const File = struct { }; defer f.close(io); - const stat = f.stat() catch |err| switch (err) { + const stat = f.stat(io) catch |err| switch (err) { error.Streaming => { // Since `file.stat` is populated, this was previously a file stream; since it is // now not a file stream, it must have changed. diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 9a75b2096e..74196705c3 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -98,7 +98,7 @@ pub fn updateFile( }; defer source_file.close(io); - const stat = try source_file.stat(); + const stat = try source_file.stat(io); const want_local_cache = switch (file.path.root) { .none, .local_cache => true, @@ -2470,7 +2470,7 @@ fn updateEmbedFileInner( }; defer file.close(io); - const stat: Cache.File.Stat = .fromFs(try file.stat()); + const stat: Cache.File.Stat = .fromFs(try file.stat(io)); if (ef.val != .none) { const old_stat = ef.stat; diff --git a/src/fmt.zig b/src/fmt.zig index dc67619d54..1a1e5298e2 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -188,7 +188,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! 
error.IsDir => dir: { var dir = try Io.Dir.cwd().openDir(io, file_path, .{}); defer dir.close(io); - break :dir try dir.stat(); + break :dir try dir.stat(io); }, else => |e| return e, }; @@ -227,7 +227,7 @@ fn fmtPathDir( var dir = try parent_dir.openDir(io, parent_sub_path, .{ .iterate = true }); defer dir.close(io); - const stat = try dir.stat(); + const stat = try dir.stat(io); if (try fmt.seen.fetchPut(stat.inode, {})) |_| return; var dir_it = dir.iterate(); @@ -266,7 +266,7 @@ fn fmtPathFile( var file_closed = false; errdefer if (!file_closed) source_file.close(io); - const stat = try source_file.stat(); + const stat = try source_file.stat(io); if (stat.kind == .directory) return error.IsDir; diff --git a/src/link.zig b/src/link.zig index 073ec632c6..06d18ec2d5 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1112,10 +1112,10 @@ pub const File = struct { fn loadGnuLdScript(base: *File, path: Path, parent_query: UnresolvedInput.Query, file: Io.File) anyerror!void { const comp = base.comp; + const io = comp.io; const diags = &comp.link_diags; const gpa = comp.gpa; - const io = comp.io; - const stat = try file.stat(); + const stat = try file.stat(io); const size = std.math.cast(u32, stat.size) orelse return error.FileTooBig; const buf = try gpa.alloc(u8, size); defer gpa.free(buf); @@ -2180,7 +2180,7 @@ fn resolvePathInputLib( // Appears to be an ELF or archive file. return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, pq.query); } - const stat = file.stat() catch |err| + const stat = file.stat(io) catch |err| fatal("failed to stat {f}: {s}", .{ test_path, @errorName(err) }); const size = std.math.cast(u32, stat.size) orelse fatal("{f}: linker script too big", .{test_path}); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 53812a37ec..13a624a295 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -742,7 +742,7 @@ pub fn loadInput(self: *Elf, input: link.Input) !void { .dso_exact => @panic("TODO"), .object => |obj| try parseObject(self, obj), .archive => |obj| try parseArchive(gpa, diags, &self.file_handles, &self.files, target, debug_fmt_strip, default_sym_version, &self.objects, obj, is_static_lib), - .dso => |dso| try parseDso(gpa, diags, dso, &self.shared_objects, &self.files, target), + .dso => |dso| try parseDso(gpa, io, diags, dso, &self.shared_objects, &self.files, target), } } @@ -1136,6 +1136,7 @@ fn parseArchive( fn parseDso( gpa: Allocator, + io: Io, diags: *Diags, dso: link.Input.Dso, shared_objects: *std.StringArrayHashMapUnmanaged(File.Index), @@ -1147,7 +1148,7 @@ fn parseDso( const handle = dso.file; - const stat = Stat.fromFs(try handle.stat()); + const stat = Stat.fromFs(try handle.stat(io)); var header = try SharedObject.parseHeader(gpa, diags, dso.path, handle, stat, target); defer header.deinit(gpa); diff --git a/src/link/Elf/Archive.zig b/src/link/Elf/Archive.zig index a9961bf8f9..0f786c1a47 100644 --- a/src/link/Elf/Archive.zig +++ b/src/link/Elf/Archive.zig @@ -1,3 +1,21 @@ +const Archive = @This(); + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const elf = std.elf; +const fs = std.fs; +const log = std.log.scoped(.link); +const mem = std.mem; +const Path = std.Build.Cache.Path; +const Allocator = std.mem.Allocator; + +const Diags = @import("../../link.zig").Diags; +const Elf = @import("../Elf.zig"); +const File = @import("file.zig").File; +const Object = @import("Object.zig"); +const StringTable = @import("../StringTable.zig"); + objects: []const Object, /// '\n'-delimited strtab: []const u8, @@ 
-10,6 +28,7 @@ pub fn deinit(a: *Archive, gpa: Allocator) void { pub fn parse( gpa: Allocator, + io: Io, diags: *Diags, file_handles: *const std.ArrayList(File.Handle), path: Path, @@ -25,7 +44,7 @@ pub fn parse( pos += magic_buffer.len; } - const size = (try handle.stat()).size; + const size = (try handle.stat(io)).size; var objects: std.ArrayList(Object) = .empty; defer objects.deinit(gpa); @@ -120,7 +139,7 @@ pub fn setArHdr(opts: struct { @memset(mem.asBytes(&hdr), 0x20); { - var writer: std.Io.Writer = .fixed(&hdr.ar_name); + var writer: Io.Writer = .fixed(&hdr.ar_name); switch (opts.name) { .symtab => writer.print("{s}", .{elf.SYM64NAME}) catch unreachable, .strtab => writer.print("//", .{}) catch unreachable, @@ -133,7 +152,7 @@ pub fn setArHdr(opts: struct { hdr.ar_gid[0] = '0'; hdr.ar_mode[0] = '0'; { - var writer: std.Io.Writer = .fixed(&hdr.ar_size); + var writer: Io.Writer = .fixed(&hdr.ar_size); writer.print("{d}", .{opts.size}) catch unreachable; } hdr.ar_fmag = elf.ARFMAG.*; @@ -206,7 +225,7 @@ pub const ArSymtab = struct { ar: ArSymtab, elf_file: *Elf, - fn default(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void { + fn default(f: Format, writer: *Io.Writer) Io.Writer.Error!void { const ar = f.ar; const elf_file = f.elf_file; for (ar.symtab.items, 0..) |entry, i| { @@ -261,7 +280,7 @@ pub const ArStrtab = struct { try writer.writeAll(ar.buffer.items); } - pub fn format(ar: ArStrtab, writer: *std.Io.Writer) std.Io.Writer.Error!void { + pub fn format(ar: ArStrtab, writer: *Io.Writer) Io.Writer.Error!void { try writer.print("{f}", .{std.ascii.hexEscape(ar.buffer.items, .lower)}); } }; @@ -277,19 +296,3 @@ pub const ArState = struct { /// Total size of the contributing object (excludes ar_hdr). size: u64 = 0, }; - -const std = @import("std"); -const assert = std.debug.assert; -const elf = std.elf; -const fs = std.fs; -const log = std.log.scoped(.link); -const mem = std.mem; -const Path = std.Build.Cache.Path; -const Allocator = std.mem.Allocator; - -const Diags = @import("../../link.zig").Diags; -const Archive = @This(); -const Elf = @import("../Elf.zig"); -const File = @import("file.zig").File; -const Object = @import("Object.zig"); -const StringTable = @import("../StringTable.zig"); diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index c0dde4176a..7dacfb3a62 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -122,13 +122,14 @@ pub fn parse( pub fn parseCommon( self: *Object, gpa: Allocator, + io: Io, diags: *Diags, path: Path, handle: Io.File, target: *const std.Target, ) !void { const offset = if (self.archive) |ar| ar.offset else 0; - const file_size = (try handle.stat()).size; + const file_size = (try handle.stat(io)).size; const header_buffer = try Elf.preadAllAlloc(gpa, handle, offset, @sizeOf(elf.Elf64_Ehdr)); defer gpa.free(header_buffer); @@ -1122,9 +1123,11 @@ pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, elf_file: *Elf } pub fn updateArSize(self: *Object, elf_file: *Elf) !void { + const comp = elf_file.base.comp; + const io = comp.io; self.output_ar_state.size = if (self.archive) |ar| ar.size else size: { const handle = elf_file.fileHandle(self.file_handle); - break :size (try handle.stat()).size; + break :size (try handle.stat(io)).size; }; } diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 0f6127e10e..78e035e2ad 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -925,6 +925,7 @@ fn addObject(self: *MachO, path: Path, handle_index: File.HandleIndex, offset: u const comp = 
self.base.comp; const gpa = comp.gpa; + const io = comp.io; const abs_path = try std.fs.path.resolvePosix(gpa, &.{ comp.dirs.cwd, @@ -934,7 +935,7 @@ fn addObject(self: *MachO, path: Path, handle_index: File.HandleIndex, offset: u errdefer gpa.free(abs_path); const file = self.getFileHandle(handle_index); - const stat = try file.stat(); + const stat = try file.stat(io); const mtime = stat.mtime.toSeconds(); const index: File.Index = @intCast(try self.files.addOne(gpa)); self.files.set(index, .{ .object = .{ diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig index d1962412c4..122a408533 100644 --- a/src/link/MachO/Archive.zig +++ b/src/link/MachO/Archive.zig @@ -6,6 +6,7 @@ pub fn deinit(self: *Archive, allocator: Allocator) void { pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File.HandleIndex, fat_arch: ?fat.Arch) !void { const comp = macho_file.base.comp; + const io = comp.io; const gpa = comp.gpa; const diags = &comp.link_diags; @@ -14,7 +15,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File const handle = macho_file.getFileHandle(handle_index); const offset = if (fat_arch) |ar| ar.offset else 0; - const end_pos = if (fat_arch) |ar| offset + ar.size else (try handle.stat()).size; + const end_pos = if (fat_arch) |ar| offset + ar.size else (try handle.stat(io)).size; var pos: usize = offset + SARMAG; while (true) { diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig index 5fc77fe763..1a1799f551 100644 --- a/src/link/MachO/Object.zig +++ b/src/link/MachO/Object.zig @@ -1689,9 +1689,11 @@ pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, macho_file: *M } pub fn updateArSize(self: *Object, macho_file: *MachO) !void { + const comp = macho_file.base.comp; + const io = comp.io; self.output_ar_state.size = if (self.in_archive) |ar| ar.size else size: { const file = macho_file.getFileHandle(self.file_handle); - break :size (try file.stat()).size; + break :size (try file.stat(io)).size; }; } diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig index 0f42442640..e9f78a8ef2 100644 --- a/src/link/MachO/relocatable.zig +++ b/src/link/MachO/relocatable.zig @@ -22,7 +22,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat const path = positionals.items[0].path().?; const in_file = path.root_dir.handle.openFile(io, path.sub_path, .{}) catch |err| return diags.fail("failed to open {f}: {s}", .{ path, @errorName(err) }); - const stat = in_file.stat() catch |err| + const stat = in_file.stat(io) catch |err| return diags.fail("failed to stat {f}: {s}", .{ path, @errorName(err) }); const amt = in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size) catch |err| return diags.fail("failed to copy range of file {f}: {s}", .{ path, @errorName(err) }); diff --git a/src/link/tapi.zig b/src/link/tapi.zig index fff25b7544..046f43eae2 100644 --- a/src/link/tapi.zig +++ b/src/link/tapi.zig @@ -139,9 +139,9 @@ pub const LibStub = struct { /// Typed contents of the tbd file. 
inner: []Tbd, - pub fn loadFromFile(allocator: Allocator, file: Io.File) TapiError!LibStub { + pub fn loadFromFile(allocator: Allocator, io: Io, file: Io.File) TapiError!LibStub { const filesize = blk: { - const stat = file.stat() catch break :blk std.math.maxInt(u32); + const stat = file.stat(io) catch break :blk std.math.maxInt(u32); break :blk @min(stat.size, std.math.maxInt(u32)); }; const source = try allocator.alloc(u8, filesize); -- cgit v1.2.3 From 6f46570958af8ae27308eb4a9470e05f33aaa522 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 15:23:18 -0800 Subject: link.MachO: update parallel hasher to std.Io --- lib/std/Build/Cache.zig | 66 ++++++++++++++++++---------------------- lib/std/Build/WebServer.zig | 6 ++-- lib/std/Io.zig | 4 +-- lib/std/Io/File.zig | 34 +++++++++++++++++++-- lib/std/fs/test.zig | 14 ++++----- src/link/MachO/CodeSignature.zig | 17 ++++++----- src/link/MachO/hasher.zig | 24 ++++++--------- src/link/MachO/uuid.zig | 16 +++++----- 8 files changed, 100 insertions(+), 81 deletions(-) (limited to 'lib/std/Build/WebServer.zig') diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 0176d71a1c..dab1926f53 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -800,7 +800,7 @@ pub const Manifest = struct { } var actual_digest: BinDigest = undefined; - hashFile(this_file, &actual_digest) catch |err| { + hashFile(io, this_file, &actual_digest) catch |err| { self.diagnostic = .{ .file_read = .{ .file_index = idx, .err = err, @@ -908,9 +908,11 @@ pub const Manifest = struct { } } - fn populateFileHashHandle(self: *Manifest, ch_file: *File, handle: Io.File) !void { + fn populateFileHashHandle(self: *Manifest, ch_file: *File, io_file: Io.File) !void { const io = self.cache.io; - const actual_stat = try handle.stat(io); + const gpa = self.cache.gpa; + + const actual_stat = try io_file.stat(io); ch_file.stat = .{ .size = actual_stat.size, .mtime = actual_stat.mtime, @@ -924,19 +926,17 @@ pub const Manifest = struct { } if (ch_file.max_file_size) |max_file_size| { - if (ch_file.stat.size > max_file_size) { - return error.FileTooBig; - } + if (ch_file.stat.size > max_file_size) return error.FileTooBig; - const contents = try self.cache.gpa.alloc(u8, @as(usize, @intCast(ch_file.stat.size))); - errdefer self.cache.gpa.free(contents); + // Hash while reading from disk, to keep the contents in the cpu + // cache while doing hashing. + const contents = try gpa.alloc(u8, @intCast(ch_file.stat.size)); + errdefer gpa.free(contents); - // Hash while reading from disk, to keep the contents in the cpu cache while - // doing hashing. var hasher = hasher_init; var off: usize = 0; while (true) { - const bytes_read = try handle.pread(contents[off..], off); + const bytes_read = try io_file.readPositional(io, &.{contents[off..]}, off); if (bytes_read == 0) break; hasher.update(contents[off..][0..bytes_read]); off += bytes_read; @@ -945,7 +945,7 @@ pub const Manifest = struct { ch_file.contents = contents; } else { - try hashFile(handle, &ch_file.bin_digest); + try hashFile(io, io_file, &ch_file.bin_digest); } self.hash.hasher.update(&ch_file.bin_digest); @@ -1169,13 +1169,11 @@ pub const Manifest = struct { fn downgradeToSharedLock(self: *Manifest) !void { if (!self.have_exclusive_lock) return; + const io = self.cache.io; - // WASI does not currently support flock, so we bypass it here. - // TODO: If/when flock is supported on WASI, this check should be removed. 
- // See https://github.com/WebAssembly/wasi-filesystem/issues/2 - if (builtin.os.tag != .wasi or std.process.can_spawn or !builtin.single_threaded) { + if (std.process.can_spawn or !builtin.single_threaded) { const manifest_file = self.manifest_file.?; - try manifest_file.downgradeLock(); + try manifest_file.downgradeLock(io); } self.have_exclusive_lock = false; @@ -1184,16 +1182,14 @@ pub const Manifest = struct { fn upgradeToExclusiveLock(self: *Manifest) error{CacheCheckFailed}!bool { if (self.have_exclusive_lock) return false; assert(self.manifest_file != null); + const io = self.cache.io; - // WASI does not currently support flock, so we bypass it here. - // TODO: If/when flock is supported on WASI, this check should be removed. - // See https://github.com/WebAssembly/wasi-filesystem/issues/2 - if (builtin.os.tag != .wasi or std.process.can_spawn or !builtin.single_threaded) { + if (std.process.can_spawn or !builtin.single_threaded) { const manifest_file = self.manifest_file.?; // Here we intentionally have a period where the lock is released, in case there are // other processes holding a shared lock. - manifest_file.unlock(); - manifest_file.lock(.exclusive) catch |err| { + manifest_file.unlock(io); + manifest_file.lock(io, .exclusive) catch |err| { self.diagnostic = .{ .manifest_lock = err }; return error.CacheCheckFailed; }; @@ -1206,12 +1202,8 @@ pub const Manifest = struct { /// The `Manifest` remains safe to deinit. /// Don't forget to call `writeManifest` before this! pub fn toOwnedLock(self: *Manifest) Lock { - const lock: Lock = .{ - .manifest_file = self.manifest_file.?, - }; - - self.manifest_file = null; - return lock; + defer self.manifest_file = null; + return .{ .manifest_file = self.manifest_file.? }; } /// Releases the manifest file and frees any memory the Manifest was using. @@ -1223,7 +1215,7 @@ pub const Manifest = struct { if (self.manifest_file) |file| { if (builtin.os.tag == .windows) { // See Lock.release for why this is required on Windows - file.unlock(); + file.unlock(io); } file.close(io); @@ -1308,15 +1300,15 @@ pub fn writeSmallFile(dir: Io.Dir, sub_path: []const u8, data: []const u8) !void } } -fn hashFile(file: Io.File, bin_digest: *[Hasher.mac_length]u8) Io.File.PReadError!void { - var buf: [1024]u8 = undefined; +fn hashFile(io: Io, file: Io.File, bin_digest: *[Hasher.mac_length]u8) Io.File.ReadPositionalError!void { + var buffer: [2048]u8 = undefined; var hasher = hasher_init; - var off: u64 = 0; + var offset: u64 = 0; while (true) { - const bytes_read = try file.pread(&buf, off); - if (bytes_read == 0) break; - hasher.update(buf[0..bytes_read]); - off += bytes_read; + const n = try file.readPositional(io, &.{&buffer}, offset); + if (n == 0) break; + hasher.update(buffer[0..n]); + offset += n; } hasher.final(bin_digest); } diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 472e87b05a..162d17f070 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -218,9 +218,9 @@ pub fn finishBuild(ws: *WebServer, opts: struct { else => {}, } if (@bitSizeOf(usize) != 64) { - // Current implementation depends on posix.mmap()'s second parameter, `length: usize`, - // being compatible with `std.fs.getEndPos() u64`'s return value. This is not the case - // on 32-bit platforms. + // Current implementation depends on posix.mmap()'s second + // parameter, `length: usize`, being compatible with file system's + // u64 return value. This is not the case on 32-bit platforms. 
// Affects or affected by issues #5185, #22523, and #22464. std.process.fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)}); } diff --git a/lib/std/Io.zig b/lib/std/Io.zig index 17fb75fe54..9d6dcef615 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -692,9 +692,9 @@ pub const VTable = struct { fileWriteFileStreaming: *const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit) File.Writer.WriteFileError!usize, fileWriteFilePositional: *const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit, offset: u64) File.WriteFilePositionalError!usize, /// Returns 0 on end of stream. - fileReadStreaming: *const fn (?*anyopaque, File, data: [][]u8) File.Reader.Error!usize, + fileReadStreaming: *const fn (?*anyopaque, File, data: []const []u8) File.Reader.Error!usize, /// Returns 0 on end of stream. - fileReadPositional: *const fn (?*anyopaque, File, data: [][]u8, offset: u64) File.ReadPositionalError!usize, + fileReadPositional: *const fn (?*anyopaque, File, data: []const []u8, offset: u64) File.ReadPositionalError!usize, fileSeekBy: *const fn (?*anyopaque, File, relative_offset: i64) File.SeekError!void, fileSeekTo: *const fn (?*anyopaque, File, absolute_offset: u64) File.SeekError!void, fileSync: *const fn (?*anyopaque, File) File.SyncError!void, diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index a9b4775772..5e89025478 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -466,13 +466,21 @@ pub fn setTimestampsNow(file: File, io: Io) SetTimestampsError!void { pub const ReadPositionalError = Reader.Error || error{Unseekable}; -pub fn readPositional(file: File, io: Io, buffer: [][]u8, offset: u64) ReadPositionalError!usize { +/// Returns 0 on end of stream. +/// +/// See also: +/// * `reader` +pub fn readPositional(file: File, io: Io, buffer: []const []u8, offset: u64) ReadPositionalError!usize { return io.vtable.fileReadPositional(io.userdata, file, buffer, offset); } pub const WritePositionalError = Writer.Error || error{Unseekable}; -pub fn writePositional(file: File, io: Io, buffer: [][]const u8, offset: u64) WritePositionalError!usize { +/// Returns 0 on end of stream. +/// +/// See also: +/// * `writer` +pub fn writePositional(file: File, io: Io, buffer: []const []const u8, offset: u64) WritePositionalError!usize { return io.vtable.fileWritePositional(io.userdata, file, buffer, offset); } @@ -501,13 +509,35 @@ pub const WriteFilePositionalError = Writer.WriteFileError || error{Unseekable}; /// /// Positional is more threadsafe, since the global seek position is not /// affected. +/// +/// See also: +/// * `readerStreaming` pub fn reader(file: File, io: Io, buffer: []u8) Reader { return .init(file, io, buffer); } +/// Equivalent to creating a positional reader and reading multiple times to fill `buffer`. +/// +/// Returns number of bytes read into `buffer`. If less than `buffer.len`, end of file occurred. +/// +/// See also: +/// * `reader` +pub fn readPositionalAll(file: File, io: Io, buffer: []u8, offset: u64) ReadPositionalError!usize { + var index: usize = 0; + while (index != buffer.len) { + const amt = try file.readPositional(io, &.{buffer[index..]}, offset + index); + if (amt == 0) break; + index += amt; + } + return index; +} + /// Positional is more threadsafe, since the global seek position is not /// affected, but when such syscalls are not available, preemptively /// initializing in streaming mode skips a failed syscall. 
+/// +/// See also: +/// * `reader` pub fn readerStreaming(file: File, io: Io, buffer: []u8) Reader { return .initStreaming(file, io, buffer); } diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 7b12ba4271..e044a97620 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -1455,7 +1455,7 @@ test "writev, readv" { try writer.interface.writeVecAll(&write_vecs); try writer.interface.flush(); - try testing.expectEqual(@as(u64, line1.len + line2.len), try src_file.getEndPos()); + try testing.expectEqual(@as(u64, line1.len + line2.len), try src_file.length(io)); var reader = writer.moveToReader(io); try reader.seekTo(0); @@ -1486,7 +1486,7 @@ test "pwritev, preadv" { try writer.seekTo(16); try writer.interface.writeVecAll(&lines); try writer.interface.flush(); - try testing.expectEqual(@as(u64, 16 + line1.len + line2.len), try src_file.getEndPos()); + try testing.expectEqual(@as(u64, 16 + line1.len + line2.len), try src_file.length(io)); var reader = writer.moveToReader(io); try reader.seekTo(16); @@ -1511,13 +1511,13 @@ test "setEndPos" { const f = try tmp.dir.openFile(io, file_name, .{ .mode = .read_write }); defer f.close(io); - const initial_size = try f.getEndPos(); + const initial_size = try f.length(io); var buffer: [32]u8 = undefined; var reader = f.reader(io, &.{}); { try f.setEndPos(initial_size); - try testing.expectEqual(initial_size, try f.getEndPos()); + try testing.expectEqual(initial_size, try f.length(io)); try reader.seekTo(0); try testing.expectEqual(initial_size, try reader.interface.readSliceShort(&buffer)); try testing.expectEqualStrings("ninebytes", buffer[0..@intCast(initial_size)]); @@ -1526,7 +1526,7 @@ test "setEndPos" { { const larger = initial_size + 4; try f.setEndPos(larger); - try testing.expectEqual(larger, try f.getEndPos()); + try testing.expectEqual(larger, try f.length(io)); try reader.seekTo(0); try testing.expectEqual(larger, try reader.interface.readSliceShort(&buffer)); try testing.expectEqualStrings("ninebytes\x00\x00\x00\x00", buffer[0..@intCast(larger)]); @@ -1535,14 +1535,14 @@ test "setEndPos" { { const smaller = initial_size - 5; try f.setEndPos(smaller); - try testing.expectEqual(smaller, try f.getEndPos()); + try testing.expectEqual(smaller, try f.length(io)); try reader.seekTo(0); try testing.expectEqual(smaller, try reader.interface.readSliceShort(&buffer)); try testing.expectEqualStrings("nine", buffer[0..@intCast(smaller)]); } try f.setEndPos(0); - try testing.expectEqual(0, try f.getEndPos()); + try testing.expectEqual(0, try f.length(io)); try reader.seekTo(0); try testing.expectEqual(0, try reader.interface.readSliceShort(&buffer)); } diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 814faf234a..ec516d4af0 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -12,7 +12,7 @@ const Sha256 = std.crypto.hash.sha2.Sha256; const Allocator = std.mem.Allocator; const trace = @import("../../tracy.zig").trace; -const Hasher = @import("hasher.zig").ParallelHasher; +const ParallelHasher = @import("hasher.zig").ParallelHasher; const MachO = @import("../MachO.zig"); const hash_size = Sha256.digest_length; @@ -268,7 +268,9 @@ pub fn writeAdhocSignature( const tracy = trace(@src()); defer tracy.end(); - const allocator = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const gpa = comp.gpa; + const io = comp.io; var header: macho.SuperBlob = .{ .magic = macho.CSMAGIC_EMBEDDED_SIGNATURE, @@ -276,7 +278,7 @@ pub fn writeAdhocSignature( .count = 0, }; - 
var blobs = std.array_list.Managed(Blob).init(allocator); + var blobs = std.array_list.Managed(Blob).init(gpa); defer blobs.deinit(); self.code_directory.inner.execSegBase = opts.exec_seg_base; @@ -286,13 +288,12 @@ pub fn writeAdhocSignature( const total_pages = @as(u32, @intCast(mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size)); - try self.code_directory.code_slots.ensureTotalCapacityPrecise(allocator, total_pages); + try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages); self.code_directory.code_slots.items.len = total_pages; self.code_directory.inner.nCodeSlots = total_pages; // Calculate hash for each page (in file) and write it to the buffer - var hasher = Hasher(Sha256){ .allocator = allocator, .io = macho_file.base.comp.io }; - try hasher.hash(opts.file, self.code_directory.code_slots.items, .{ + try ParallelHasher(Sha256).hash(gpa, io, opts.file, self.code_directory.code_slots.items, .{ .chunk_size = self.page_size, .max_file_size = opts.file_size, }); @@ -304,7 +305,7 @@ pub fn writeAdhocSignature( var hash: [hash_size]u8 = undefined; if (self.requirements) |*req| { - var a: std.Io.Writer.Allocating = .init(allocator); + var a: std.Io.Writer.Allocating = .init(gpa); defer a.deinit(); try req.write(&a.writer); Sha256.hash(a.written(), &hash, .{}); @@ -316,7 +317,7 @@ pub fn writeAdhocSignature( } if (self.entitlements) |*ents| { - var a: std.Io.Writer.Allocating = .init(allocator); + var a: std.Io.Writer.Allocating = .init(gpa); defer a.deinit(); try ents.write(&a.writer); Sha256.hash(a.written(), &hash, .{}); diff --git a/src/link/MachO/hasher.zig b/src/link/MachO/hasher.zig index 8cf53071c8..2e0129d240 100644 --- a/src/link/MachO/hasher.zig +++ b/src/link/MachO/hasher.zig @@ -1,5 +1,6 @@ const std = @import("std"); const Io = std.Io; +const assert = std.debug.assert; const Allocator = std.mem.Allocator; const trace = @import("../../tracy.zig").trace; @@ -8,20 +9,15 @@ pub fn ParallelHasher(comptime Hasher: type) type { const hash_size = Hasher.digest_length; return struct { - allocator: Allocator, - io: std.Io, - - pub fn hash(self: Self, file: Io.File, out: [][hash_size]u8, opts: struct { + pub fn hash(self: Self, io: Io, file: Io.File, out: [][hash_size]u8, opts: struct { chunk_size: u64 = 0x4000, max_file_size: ?u64 = null, }) !void { const tracy = trace(@src()); defer tracy.end(); - const io = self.io; - const file_size = blk: { - const file_size = opts.max_file_size orelse try file.getEndPos(); + const file_size = opts.max_file_size orelse try file.length(io); break :blk std.math.cast(usize, file_size) orelse return error.Overflow; }; const chunk_size = std.math.cast(usize, opts.chunk_size) orelse return error.Overflow; @@ -29,12 +25,12 @@ pub fn ParallelHasher(comptime Hasher: type) type { const buffer = try self.allocator.alloc(u8, chunk_size * out.len); defer self.allocator.free(buffer); - const results = try self.allocator.alloc(Io.File.PReadError!usize, out.len); + const results = try self.allocator.alloc(Io.File.ReadPositionalError!usize, out.len); defer self.allocator.free(results); { - var group: std.Io.Group = .init; - errdefer group.cancel(io); + var group: Io.Group = .init; + defer group.cancel(io); for (out, results, 0..) 
|*out_buf, *result, i| { const fstart = i * chunk_size; @@ -42,7 +38,7 @@ pub fn ParallelHasher(comptime Hasher: type) type { file_size - fstart else chunk_size; - group.async(io, worker, .{ + group.async(worker, .{ file, fstart, buffer[fstart..][0..fsize], @@ -61,11 +57,9 @@ pub fn ParallelHasher(comptime Hasher: type) type { fstart: usize, buffer: []u8, out: *[hash_size]u8, - err: *Io.File.PReadError!usize, + err: *Io.File.ReadPositionalError!usize, ) void { - const tracy = trace(@src()); - defer tracy.end(); - err.* = file.preadAll(buffer, fstart); + err.* = file.readPositionalAll(buffer, fstart); Hasher.hash(buffer, out, .{}); } diff --git a/src/link/MachO/uuid.zig b/src/link/MachO/uuid.zig index 4d8eac7523..a75799d01e 100644 --- a/src/link/MachO/uuid.zig +++ b/src/link/MachO/uuid.zig @@ -4,7 +4,7 @@ const Md5 = std.crypto.hash.Md5; const trace = @import("../../tracy.zig").trace; const Compilation = @import("../../Compilation.zig"); -const Hasher = @import("hasher.zig").ParallelHasher; +const ParallelHasher = @import("hasher.zig").ParallelHasher; /// Calculates Md5 hash of each chunk in parallel and then hashes all Md5 hashes to produce /// the final digest. @@ -16,21 +16,23 @@ pub fn calcUuid(comp: *const Compilation, file: Io.File, file_size: u64, out: *[ const tracy = trace(@src()); defer tracy.end(); + const gpa = comp.gpa; + const io = comp.io; + const chunk_size: usize = 1024 * 1024; const num_chunks: usize = std.math.cast(usize, @divTrunc(file_size, chunk_size)) orelse return error.Overflow; const actual_num_chunks = if (@rem(file_size, chunk_size) > 0) num_chunks + 1 else num_chunks; - const hashes = try comp.gpa.alloc([Md5.digest_length]u8, actual_num_chunks); - defer comp.gpa.free(hashes); + const hashes = try gpa.alloc([Md5.digest_length]u8, actual_num_chunks); + defer gpa.free(hashes); - var hasher = Hasher(Md5){ .allocator = comp.gpa, .io = comp.io }; - try hasher.hash(file, hashes, .{ + try ParallelHasher(Md5).hash(gpa, io, file, hashes, .{ .chunk_size = chunk_size, .max_file_size = file_size, }); - const final_buffer = try comp.gpa.alloc(u8, actual_num_chunks * Md5.digest_length); - defer comp.gpa.free(final_buffer); + const final_buffer = try gpa.alloc(u8, actual_num_chunks * Md5.digest_length); + defer gpa.free(final_buffer); for (hashes, 0..) |hash, i| { @memcpy(final_buffer[i * Md5.digest_length ..][0..Md5.digest_length], &hash); -- cgit v1.2.3 From 4218344dd3178f2fd3d9d00e9ff6895ee344df6d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 17:21:52 -0800 Subject: std.Build.Cache: remove readSmallFile and writeSmallFile These were to support optimizations involving detecting when to avoid calling into LLD, which are no longer implemented. 
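For out-of-tree code that still wants the symlink-backed "small file" trick, the behavior is easy to inline at the call site. Below is a minimal sketch, not part of this commit: it mirrors the removed helper's logic, assumes `Io.Dir.symLink` keeps the signature the removed code used, and assumes `Io.Dir.writeFile` takes an `io` argument as elsewhere in this series; the function name is hypothetical.

    const std = @import("std");
    const builtin = @import("builtin");
    const Io = std.Io;

    /// Hypothetical stand-in for the removed Cache.writeSmallFile: on POSIX the
    /// payload is stored as a symlink target, on Windows as plain file contents
    /// (symlinks there require elevated privileges, so they are not used).
    /// `data` must be a valid UTF-8 encoded file path and 255 bytes or fewer.
    fn writeSmallFile(io: Io, dir: Io.Dir, sub_path: []const u8, data: []const u8) !void {
        std.debug.assert(data.len <= 255);
        if (builtin.os.tag == .windows) {
            return dir.writeFile(io, .{ .sub_path = sub_path, .data = data });
        } else {
            return dir.symLink(data, sub_path, .{});
        }
    }

The matching read side would do a readLink on POSIX and a plain readFile on Windows, exactly as the deleted readSmallFile did.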
--- lib/compiler/build_runner.zig | 2 +- lib/compiler/reduce.zig | 12 +++++--- lib/compiler/resinator/main.zig | 2 +- lib/compiler/std-docs.zig | 2 +- lib/std/Build/Cache.zig | 36 ++++-------------------- lib/std/Build/Step/ConfigHeader.zig | 2 +- lib/std/Build/Step/Run.zig | 2 +- lib/std/Build/Step/UpdateSourceFiles.zig | 2 +- lib/std/Build/Step/WriteFile.zig | 2 +- lib/std/Build/WebServer.zig | 2 +- lib/std/debug.zig | 4 +-- lib/std/fs/test.zig | 48 +++++++++++++++++++------------- lib/std/posix/test.zig | 2 +- src/Compilation.zig | 4 +-- src/libs/freebsd.zig | 4 +-- src/libs/glibc.zig | 4 +-- src/libs/netbsd.zig | 2 +- src/main.zig | 6 ++-- 18 files changed, 63 insertions(+), 75 deletions(-) (limited to 'lib/std/Build/WebServer.zig') diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 36c73e96eb..9150d84470 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -459,7 +459,7 @@ pub fn main() !void { } const s = std.fs.path.sep_str; const tmp_sub_path = "tmp" ++ s ++ (output_tmp_nonce orelse fatal("missing -Z arg", .{})); - local_cache_directory.handle.writeFile(.{ + local_cache_directory.handle.writeFile(io, .{ .sub_path = tmp_sub_path, .data = buffer.items, .flags = .{ .exclusive = true }, diff --git a/lib/compiler/reduce.zig b/lib/compiler/reduce.zig index d3f33ad81a..0bfa1902ab 100644 --- a/lib/compiler/reduce.zig +++ b/lib/compiler/reduce.zig @@ -55,6 +55,10 @@ pub fn main() !void { var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init; const gpa = general_purpose_allocator.allocator(); + var threaded: std.Io.Threaded = .init(gpa); + defer threaded.deinit(); + const io = threaded.io(); + const args = try std.process.argsAlloc(arena); var opt_checker_path: ?[]const u8 = null; @@ -233,12 +237,12 @@ pub fn main() !void { } } - try Io.Dir.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); + try Io.Dir.cwd().writeFile(io, .{ .sub_path = root_source_file_path, .data = rendered.written() }); // std.debug.print("trying this code:\n{s}\n", .{rendered.items}); const interestingness = try runCheck(arena, interestingness_argv.items); - std.debug.print("{d} random transformations: {s}. {d}/{d}\n", .{ - subset_size, @tagName(interestingness), start_index, transformations.items.len, + std.debug.print("{d} random transformations: {t}. 
{d}/{d}\n", .{ + subset_size, interestingness, start_index, transformations.items.len, }); switch (interestingness) { .interesting => { @@ -274,7 +278,7 @@ pub fn main() !void { fixups.clearRetainingCapacity(); rendered.clearRetainingCapacity(); try tree.render(gpa, &rendered.writer, fixups); - try Io.Dir.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); + try Io.Dir.cwd().writeFile(io, .{ .sub_path = root_source_file_path, .data = rendered.written() }); return std.process.cleanExit(); } diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index 1a6ef5eea3..afe1dcbe91 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -212,7 +212,7 @@ pub fn main() !void { try output_file.writeAll(full_input); }, .filename => |output_filename| { - try Io.Dir.cwd().writeFile(.{ .sub_path = output_filename, .data = full_input }); + try Io.Dir.cwd().writeFile(io, .{ .sub_path = output_filename, .data = full_input }); }, } return; diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index f1382e6eae..8f02f05958 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -233,7 +233,7 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void { .interface = std.Io.File.Reader.initInterface(&.{}), .size = stat.size, }; - try archiver.writeFile(entry.path, &file_reader, stat.mtime); + try archiver.writeFile(io, entry.path, &file_reader, stat.mtime); } { diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index dab1926f53..f7c4d729bc 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1276,30 +1276,6 @@ pub const Manifest = struct { } }; -/// On operating systems that support symlinks, does a readlink. On other operating systems, -/// uses the file contents. Windows supports symlinks but only with elevated privileges, so -/// it is treated as not supporting symlinks. -pub fn readSmallFile(dir: Io.Dir, sub_path: []const u8, buffer: []u8) ![]u8 { - if (builtin.os.tag == .windows) { - return dir.readFile(sub_path, buffer); - } else { - return dir.readLink(sub_path, buffer); - } -} - -/// On operating systems that support symlinks, does a symlink. On other operating systems, -/// uses the file contents. Windows supports symlinks but only with elevated privileges, so -/// it is treated as not supporting symlinks. -/// `data` must be a valid UTF-8 encoded file path and 255 bytes or fewer. 
-pub fn writeSmallFile(dir: Io.Dir, sub_path: []const u8, data: []const u8) !void { - assert(data.len <= 255); - if (builtin.os.tag == .windows) { - return dir.writeFile(.{ .sub_path = sub_path, .data = data }); - } else { - return dir.symLink(data, sub_path, .{}); - } -} - fn hashFile(io: Io, file: Io.File, bin_digest: *[Hasher.mac_length]u8) Io.File.ReadPositionalError!void { var buffer: [2048]u8 = undefined; var hasher = hasher_init; @@ -1338,7 +1314,7 @@ test "cache file and then recall it" { const temp_file = "test.txt"; const temp_manifest_dir = "temp_manifest_dir"; - try tmp.dir.writeFile(.{ .sub_path = temp_file, .data = "Hello, world!\n" }); + try tmp.dir.writeFile(io, .{ .sub_path = temp_file, .data = "Hello, world!\n" }); // Wait for file timestamps to tick const initial_time = try testGetCurrentFileTimestamp(io, tmp.dir); @@ -1404,7 +1380,7 @@ test "check that changing a file makes cache fail" { const original_temp_file_contents = "Hello, world!\n"; const updated_temp_file_contents = "Hello, world; but updated!\n"; - try tmp.dir.writeFile(.{ .sub_path = temp_file, .data = original_temp_file_contents }); + try tmp.dir.writeFile(io, .{ .sub_path = temp_file, .data = original_temp_file_contents }); // Wait for file timestamps to tick const initial_time = try testGetCurrentFileTimestamp(tmp.dir); @@ -1441,7 +1417,7 @@ test "check that changing a file makes cache fail" { try ch.writeManifest(); } - try tmp.dir.writeFile(.{ .sub_path = temp_file, .data = updated_temp_file_contents }); + try tmp.dir.writeFile(io, .{ .sub_path = temp_file, .data = updated_temp_file_contents }); { var ch = cache.obtain(); @@ -1521,8 +1497,8 @@ test "Manifest with files added after initial hash work" { const temp_file2 = "cache_hash_post_file_test2.txt"; const temp_manifest_dir = "cache_hash_post_file_manifest_dir"; - try tmp.dir.writeFile(.{ .sub_path = temp_file1, .data = "Hello, world!\n" }); - try tmp.dir.writeFile(.{ .sub_path = temp_file2, .data = "Hello world the second!\n" }); + try tmp.dir.writeFile(io, .{ .sub_path = temp_file1, .data = "Hello, world!\n" }); + try tmp.dir.writeFile(io, .{ .sub_path = temp_file2, .data = "Hello world the second!\n" }); // Wait for file timestamps to tick const initial_time = try testGetCurrentFileTimestamp(tmp.dir); @@ -1573,7 +1549,7 @@ test "Manifest with files added after initial hash work" { try testing.expect(mem.eql(u8, &digest1, &digest2)); // Modify the file added after initial hash - try tmp.dir.writeFile(.{ .sub_path = temp_file2, .data = "Hello world the second, updated\n" }); + try tmp.dir.writeFile(io, .{ .sub_path = temp_file2, .data = "Hello world the second, updated\n" }); // Wait for file timestamps to tick const initial_time2 = try testGetCurrentFileTimestamp(tmp.dir); diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index f377959610..589110d4c4 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -264,7 +264,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { }); }; - b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = output }) catch |err| { + b.cache_root.handle.writeFile(io, .{ .sub_path = sub_path, .data = output }) catch |err| { return step.fail("unable to write file '{f}{s}': {s}", .{ b.cache_root, sub_path, @errorName(err), }); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index f6b29635c1..a1618beb02 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1482,7 +1482,7 @@ fn runCommand( 
.leading => mem.trimStart(u8, stream.bytes.?, &std.ascii.whitespace), .trailing => mem.trimEnd(u8, stream.bytes.?, &std.ascii.whitespace), }; - b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = data }) catch |err| { + b.cache_root.handle.writeFile(io, .{ .sub_path = sub_path, .data = data }) catch |err| { return step.fail("unable to write file '{f}{s}': {s}", .{ b.cache_root, sub_path, @errorName(err), }); diff --git a/lib/std/Build/Step/UpdateSourceFiles.zig b/lib/std/Build/Step/UpdateSourceFiles.zig index 44c6ae1ed4..eb8a6a85dd 100644 --- a/lib/std/Build/Step/UpdateSourceFiles.zig +++ b/lib/std/Build/Step/UpdateSourceFiles.zig @@ -84,7 +84,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { } switch (output_source_file.contents) { .bytes => |bytes| { - b.build_root.handle.writeFile(.{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| { + b.build_root.handle.writeFile(io, .{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| { return step.fail("unable to write file '{f}{s}': {t}", .{ b.build_root, output_source_file.sub_path, err, }); diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 94b04b4212..3d712fa1d4 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -273,7 +273,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { } switch (file.contents) { .bytes => |bytes| { - cache_dir.writeFile(.{ .sub_path = file.sub_path, .data = bytes }) catch |err| { + cache_dir.writeFile(io, .{ .sub_path = file.sub_path, .data = bytes }) catch |err| { return step.fail("unable to write file '{f}{s}{c}{s}': {t}", .{ b.cache_root, cache_path, fs.path.sep, file.sub_path, err, }); diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 162d17f070..ed07d04d57 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -523,7 +523,7 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons if (cached_cwd_path == null) cached_cwd_path = try std.process.getCwdAlloc(gpa); break :cwd cached_cwd_path.?; }; - try archiver.writeFile(path.sub_path, &file_reader, @intCast(stat.mtime.toSeconds())); + try archiver.writeFile(io, path.sub_path, &file_reader, @intCast(stat.mtime.toSeconds())); } // intentionally not calling `archiver.finishPedantically` diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 39207f938d..cb79bb7855 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -1243,7 +1243,7 @@ test printLineFromFile { { const path = try join(gpa, &.{ test_dir_path, "one_line.zig" }); defer gpa.free(path); - try test_dir.dir.writeFile(.{ .sub_path = "one_line.zig", .data = "no new lines in this file, but one is printed anyway" }); + try test_dir.dir.writeFile(io, .{ .sub_path = "one_line.zig", .data = "no new lines in this file, but one is printed anyway" }); try expectError(error.EndOfFile, printLineFromFile(io, output_stream, .{ .file_name = path, .line = 2, .column = 0 })); @@ -1254,7 +1254,7 @@ test printLineFromFile { { const path = try fs.path.join(gpa, &.{ test_dir_path, "three_lines.zig" }); defer gpa.free(path); - try test_dir.dir.writeFile(.{ + try test_dir.dir.writeFile(io, .{ .sub_path = "three_lines.zig", .data = \\1 diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 59bacff2d0..59e0990eb0 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -184,7 +184,7 @@ test "Dir.readLink" { fn impl(ctx: *TestContext) !void { // Create some targets const file_target_path = try 
ctx.transformPath("file.txt"); - try ctx.dir.writeFile(.{ .sub_path = file_target_path, .data = "nonsense" }); + try ctx.dir.writeFile(io, .{ .sub_path = file_target_path, .data = "nonsense" }); const dir_target_path = try ctx.transformPath("subdir"); try ctx.dir.makeDir(dir_target_path); @@ -487,11 +487,13 @@ test "readLinkAbsolute" { if (native_os == .wasi) return error.SkipZigTest; if (native_os == .openbsd) return error.SkipZigTest; + const io = testing.io; + var tmp = tmpDir(.{}); defer tmp.cleanup(); // Create some targets - try tmp.dir.writeFile(.{ .sub_path = "file.txt", .data = "nonsense" }); + try tmp.dir.writeFile(io, .{ .sub_path = "file.txt", .data = "nonsense" }); try tmp.dir.makeDir("subdir"); // Get base abs path @@ -708,6 +710,7 @@ test "Dir.realpath smoke test" { try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { + const io = ctx.io; const allocator = ctx.arena.allocator(); const test_file_path = try ctx.transformPath("test_file"); const test_dir_path = try ctx.transformPath("test_dir"); @@ -720,7 +723,7 @@ test "Dir.realpath smoke test" { try testing.expectError(error.FileNotFound, ctx.dir.realpath(test_dir_path, &buf)); // Now create the file and dir - try ctx.dir.writeFile(.{ .sub_path = test_file_path, .data = "" }); + try ctx.dir.writeFile(io, .{ .sub_path = test_file_path, .data = "" }); try ctx.dir.makeDir(test_dir_path); const base_path = try ctx.transformPath("."); @@ -803,11 +806,12 @@ test "readFileAlloc" { test "Dir.statFile" { try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { + const io = ctx.io; const test_file_name = try ctx.transformPath("test_file"); try testing.expectError(error.FileNotFound, ctx.dir.statFile(test_file_name)); - try ctx.dir.writeFile(.{ .sub_path = test_file_name, .data = "" }); + try ctx.dir.writeFile(io, .{ .sub_path = test_file_name, .data = "" }); const stat = try ctx.dir.statFile(test_file_name); try testing.expectEqual(File.Kind.file, stat.kind); @@ -925,6 +929,7 @@ test "makeOpenPath parent dirs do not exist" { test "deleteDir" { try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { + const io = ctx.io; const test_dir_path = try ctx.transformPath("test_dir"); const test_file_path = try ctx.transformPath("test_dir" ++ fs.path.sep_str ++ "test_file"); @@ -933,7 +938,7 @@ test "deleteDir" { // deleting a non-empty directory try ctx.dir.makeDir(test_dir_path); - try ctx.dir.writeFile(.{ .sub_path = test_file_path, .data = "" }); + try ctx.dir.writeFile(io, .{ .sub_path = test_file_path, .data = "" }); try testing.expectError(error.DirNotEmpty, ctx.dir.deleteDir(test_dir_path)); // deleting an empty directory @@ -1217,7 +1222,7 @@ test "deleteTree on a symlink" { defer tmp.cleanup(); // Symlink to a file - try tmp.dir.writeFile(.{ .sub_path = "file", .data = "" }); + try tmp.dir.writeFile(io, .{ .sub_path = "file", .data = "" }); try setupSymlink(tmp.dir, "file", "filelink", .{}); try tmp.dir.deleteTree("filelink"); @@ -1241,11 +1246,11 @@ test "makePath, put some files in it, deleteTree" { const dir_path = try ctx.transformPath("os_test_tmp"); try ctx.dir.makePath(io, try fs.path.join(allocator, &.{ "os_test_tmp", "b", "c" })); - try ctx.dir.writeFile(.{ + try ctx.dir.writeFile(io, .{ .sub_path = try fs.path.join(allocator, &.{ "os_test_tmp", "b", "c", "file.txt" }), .data = "nonsense", }); - try ctx.dir.writeFile(.{ + try ctx.dir.writeFile(io, .{ .sub_path = try fs.path.join(allocator, &.{ "os_test_tmp", "b", "file2.txt" }), .data = "blah", }); @@ -1264,11 
+1269,11 @@ test "makePath, put some files in it, deleteTreeMinStackSize" { const dir_path = try ctx.transformPath("os_test_tmp"); try ctx.dir.makePath(io, try fs.path.join(allocator, &.{ "os_test_tmp", "b", "c" })); - try ctx.dir.writeFile(.{ + try ctx.dir.writeFile(io, .{ .sub_path = try fs.path.join(allocator, &.{ "os_test_tmp", "b", "c", "file.txt" }), .data = "nonsense", }); - try ctx.dir.writeFile(.{ + try ctx.dir.writeFile(io, .{ .sub_path = try fs.path.join(allocator, &.{ "os_test_tmp", "b", "file2.txt" }), .data = "blah", }); @@ -1298,7 +1303,7 @@ test "makePath but sub_path contains pre-existing file" { defer tmp.cleanup(); try tmp.dir.makeDir("foo"); - try tmp.dir.writeFile(.{ .sub_path = "foo/bar", .data = "" }); + try tmp.dir.writeFile(io, .{ .sub_path = "foo/bar", .data = "" }); try testing.expectError(error.NotDir, tmp.dir.makePath(io, "foo/bar/baz")); } @@ -1400,7 +1405,7 @@ fn testFilenameLimits(io: Io, iterable_dir: Dir, maxed_filename: []const u8) !vo var maxed_dir = try iterable_dir.makeOpenPath(maxed_filename, .{}); defer maxed_dir.close(io); - try maxed_dir.writeFile(.{ .sub_path = maxed_filename, .data = "" }); + try maxed_dir.writeFile(io, .{ .sub_path = maxed_filename, .data = "" }); var walker = try iterable_dir.walk(testing.allocator); defer walker.deinit(); @@ -1513,7 +1518,7 @@ test "setEndPos" { defer tmp.cleanup(); const file_name = "afile.txt"; - try tmp.dir.writeFile(.{ .sub_path = file_name, .data = "ninebytes" }); + try tmp.dir.writeFile(io, .{ .sub_path = file_name, .data = "ninebytes" }); const f = try tmp.dir.openFile(io, file_name, .{ .mode = .read_write }); defer f.close(io); @@ -1563,7 +1568,7 @@ test "access file" { try ctx.dir.makePath(io, dir_path); try testing.expectError(error.FileNotFound, ctx.dir.access(io, file_path, .{})); - try ctx.dir.writeFile(.{ .sub_path = file_path, .data = "" }); + try ctx.dir.writeFile(io, .{ .sub_path = file_path, .data = "" }); try ctx.dir.access(io, file_path, .{}); try ctx.dir.deleteTree(dir_path); } @@ -1659,12 +1664,13 @@ test "sendfile with buffered data" { test "copyFile" { try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { + const io = ctx.io; const data = "u6wj+JmdF3qHsFPE BUlH2g4gJCmEz0PP"; const src_file = try ctx.transformPath("tmp_test_copy_file.txt"); const dest_file = try ctx.transformPath("tmp_test_copy_file2.txt"); const dest_file2 = try ctx.transformPath("tmp_test_copy_file3.txt"); - try ctx.dir.writeFile(.{ .sub_path = src_file, .data = data }); + try ctx.dir.writeFile(io, .{ .sub_path = src_file, .data = data }); defer ctx.dir.deleteFile(src_file) catch {}; try ctx.dir.copyFile(src_file, ctx.dir, dest_file, .{}); @@ -2050,7 +2056,7 @@ test "'.' and '..' 
in Io.Dir functions" { renamed_file.close(io); try ctx.dir.deleteFile(rename_path); - try ctx.dir.writeFile(.{ .sub_path = update_path, .data = "something" }); + try ctx.dir.writeFile(io, .{ .sub_path = update_path, .data = "something" }); var dir = ctx.dir; const prev_status = try dir.updateFile(io, file_path, dir, update_path, .{}); try testing.expectEqual(Io.Dir.PrevStatus.stale, prev_status); @@ -2187,7 +2193,7 @@ test "invalid UTF-8/WTF-8 paths" { try testing.expectError(expected_err, ctx.dir.deleteTree(invalid_path)); try testing.expectError(expected_err, ctx.dir.deleteTreeMinStackSize(invalid_path)); - try testing.expectError(expected_err, ctx.dir.writeFile(.{ .sub_path = invalid_path, .data = "" })); + try testing.expectError(expected_err, ctx.dir.writeFile(io, .{ .sub_path = invalid_path, .data = "" })); try testing.expectError(expected_err, ctx.dir.access(invalid_path, .{})); @@ -2304,7 +2310,7 @@ test "seekBy" { var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); - try tmp_dir.dir.writeFile(.{ .sub_path = "blah.txt", .data = "let's test seekBy" }); + try tmp_dir.dir.writeFile(io, .{ .sub_path = "blah.txt", .data = "let's test seekBy" }); const f = try tmp_dir.dir.openFile(io, "blah.txt", .{ .mode = .read_only }); defer f.close(io); var reader = f.readerStreaming(io, &.{}); @@ -2350,7 +2356,7 @@ test "File.Writer sendfile with buffered contents" { defer tmp_dir.cleanup(); { - try tmp_dir.dir.writeFile(.{ .sub_path = "a", .data = "bcd" }); + try tmp_dir.dir.writeFile(io, .{ .sub_path = "a", .data = "bcd" }); const in = try tmp_dir.dir.openFile(io, "a", .{}); defer in.close(io); const out = try tmp_dir.dir.createFile(io, "b", .{}); @@ -2391,11 +2397,13 @@ fn testReadlink(target_path: []const u8, symlink_path: []const u8) !void { } test "readlinkat" { + const io = testing.io; + var tmp = tmpDir(.{}); defer tmp.cleanup(); // create file - try tmp.dir.writeFile(.{ .sub_path = "file.txt", .data = "nonsense" }); + try tmp.dir.writeFile(io, .{ .sub_path = "file.txt", .data = "nonsense" }); // create a symbolic link if (native_os == .windows) { diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index af17f1f9ff..bacfdacf83 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -145,7 +145,7 @@ test "linkat with different directories" { const subdir = try tmp.dir.makeOpenPath("subdir", .{}); defer tmp.dir.deleteFile(target_name) catch {}; - try tmp.dir.writeFile(.{ .sub_path = target_name, .data = "example" }); + try tmp.dir.writeFile(io, .{ .sub_path = target_name, .data = "example" }); // Test 1: link from file in subdir back up to target in parent directory try posix.linkat(tmp.dir.handle, target_name, subdir.handle, link_name, 0); diff --git a/src/Compilation.zig b/src/Compilation.zig index 36429a42f8..7fe217b385 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5715,7 +5715,7 @@ pub fn translateC( const out_h_sub_path = tmp_sub_path ++ fs.path.sep_str ++ cimport_basename; const out_h_path = try comp.dirs.local_cache.join(arena, &.{out_h_sub_path}); if (comp.verbose_cimport) log.info("writing C import source to {s}", .{out_h_path}); - try cache_dir.writeFile(.{ .sub_path = out_h_sub_path, .data = c_src }); + try cache_dir.writeFile(io, .{ .sub_path = out_h_sub_path, .data = c_src }); break :path out_h_path; }, .path => |p| p, @@ -6572,7 +6572,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 resource_id, resource_type, fmtRcEscape(src_path), }); - try o_dir.writeFile(.{ .sub_path = rc_basename, .data = 
input }); + try o_dir.writeFile(io, .{ .sub_path = rc_basename, .data = input }); var argv = std.array_list.Managed([]const u8).init(comp.gpa); defer argv.deinit(); diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig index cfd8d5554c..77bd4372d0 100644 --- a/src/libs/freebsd.zig +++ b/src/libs/freebsd.zig @@ -520,7 +520,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye for (metadata.all_versions[0 .. target_ver_index + 1]) |ver| { try map_contents.print("FBSD_{d}.{d} {{ }};\n", .{ ver.major, ver.minor }); } - try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items }); + try o_directory.handle.writeFile(io, .{ .sub_path = all_map_basename, .data = map_contents.items }); map_contents.deinit(); } @@ -974,7 +974,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "stdthreads", etc. const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable; - try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items }); + try o_directory.handle.writeFile(io, .{ .sub_path = asm_file_basename, .data = stubs_asm.items }); try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node); } diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig index e3d8ce1f7f..a60dc921be 100644 --- a/src/libs/glibc.zig +++ b/src/libs/glibc.zig @@ -759,7 +759,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye try map_contents.print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch }); } } - try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items }); + try o_directory.handle.writeFile(io, .{ .sub_path = all_map_basename, .data = map_contents.items }); map_contents.deinit(); // The most recent allocation of an arena can be freed :) } @@ -1118,7 +1118,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc. const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable; - try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items }); + try o_directory.handle.writeFile(io, .{ .sub_path = asm_file_basename, .data = stubs_asm.items }); try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node); } diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig index cb6a80d69d..fd80616e9d 100644 --- a/src/libs/netbsd.zig +++ b/src/libs/netbsd.zig @@ -628,7 +628,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc. 
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable; - try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items }); + try o_directory.handle.writeFile(io, .{ .sub_path = asm_file_basename, .data = stubs_asm.items }); try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node); } diff --git a/src/main.zig b/src/main.zig index 99850f5ffe..bef3a3efb5 100644 --- a/src/main.zig +++ b/src/main.zig @@ -7164,7 +7164,7 @@ fn cmdFetch( try ast.render(gpa, &aw.writer, fixups); const rendered = aw.written(); - build_root.directory.handle.writeFile(.{ .sub_path = Package.Manifest.basename, .data = rendered }) catch |err| { + build_root.directory.handle.writeFile(io, .{ .sub_path = Package.Manifest.basename, .data = rendered }) catch |err| { fatal("unable to write {s} file: {t}", .{ Package.Manifest.basename, err }); }; @@ -7207,7 +7207,7 @@ fn createDependenciesModule( { var tmp_dir = try dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{}); defer tmp_dir.close(io); - try tmp_dir.writeFile(.{ .sub_path = basename, .data = source }); + try tmp_dir.writeFile(io, .{ .sub_path = basename, .data = source }); } var hh: Cache.HashHelper = .{}; @@ -7438,7 +7438,7 @@ const Templates = struct { i += 1; } - return out_dir.writeFile(.{ + return out_dir.writeFile(io, .{ .sub_path = template_path, .data = templates.buffer.items, .flags = .{ .exclusive = true }, -- cgit v1.2.3 From 4be8be1d2bd6959efae7df95e3f5713adf953a42 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 17:45:17 -0800 Subject: update all rename() to rename(io) --- lib/compiler/build_runner.zig | 8 ++++---- lib/compiler/objcopy.zig | 2 +- lib/std/Build/Cache.zig | 4 ++-- lib/std/Build/Fuzz.zig | 1 + lib/std/Build/Step/Compile.zig | 6 +++--- lib/std/Build/Step/Options.zig | 2 +- lib/std/Build/Step/Run.zig | 22 ++++++++-------------- lib/std/Build/WebServer.zig | 6 +++--- lib/std/Io.zig | 2 +- lib/std/Io/Dir.zig | 11 +++++++++-- lib/std/fs/test.zig | 38 +++++++++++++++++++------------------- lib/std/posix/test.zig | 4 ++-- lib/std/tar.zig | 2 +- lib/std/zip.zig | 2 +- src/Compilation.zig | 6 +++--- src/Package/Fetch.zig | 12 ++++++------ src/Package/Fetch/git.zig | 4 ++-- src/Zcu.zig | 14 ++++++++++---- src/Zcu/PerThread.zig | 12 ++++++------ src/libs/mingw.zig | 2 +- src/link.zig | 2 +- src/link/Lld.zig | 2 +- src/main.zig | 8 ++------ 23 files changed, 88 insertions(+), 84 deletions(-) (limited to 'lib/std/Build/WebServer.zig') diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 9150d84470..677158645d 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -478,14 +478,14 @@ pub fn main() !void { validateSystemLibraryOptions(builder); if (help_menu) { - var w = initStdoutWriter(); + var w = initStdoutWriter(io); printUsage(builder, w) catch return stdout_writer_allocation.err.?; w.flush() catch return stdout_writer_allocation.err.?; return; } if (steps_menu) { - var w = initStdoutWriter(); + var w = initStdoutWriter(io); printSteps(builder, w) catch return stdout_writer_allocation.err.?; w.flush() catch return stdout_writer_allocation.err.?; return; @@ -1847,7 +1847,7 @@ fn createModuleDependenciesForStep(step: *Step) Allocator.Error!void { var stdio_buffer_allocation: [256]u8 = undefined; var stdout_writer_allocation: Io.File.Writer = undefined; -fn initStdoutWriter() *Writer { - stdout_writer_allocation = Io.File.stdout().writerStreaming(&stdio_buffer_allocation); +fn 
initStdoutWriter(io: Io) *Writer { + stdout_writer_allocation = Io.File.stdout().writerStreaming(io, &stdio_buffer_allocation); return &stdout_writer_allocation.interface; } diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index 6473a10dd0..d9b9b33186 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -183,7 +183,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void var output_file = try Io.Dir.cwd().createFile(io, output, .{ .mode = mode }); defer output_file.close(io); - var out = output_file.writer(&output_buffer); + var out = output_file.writer(io, &output_buffer); switch (out_fmt) { .hex, .raw => { diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index f7c4d729bc..2d6dbc02fa 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1134,13 +1134,13 @@ pub const Manifest = struct { /// lock from exclusive to shared. pub fn writeManifest(self: *Manifest) !void { assert(self.have_exclusive_lock); - + const io = self.cache.io; const manifest_file = self.manifest_file.?; if (self.manifest_dirty) { self.manifest_dirty = false; var buffer: [4000]u8 = undefined; - var fw = manifest_file.writer(&buffer); + var fw = manifest_file.writer(io, &buffer); writeDirtyManifestToStream(self, &fw) catch |err| switch (err) { error.WriteFailed => return fw.err.?, else => |e| return e, diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig index 8d5bc27f3a..8fedb7e067 100644 --- a/lib/std/Build/Fuzz.zig +++ b/lib/std/Build/Fuzz.zig @@ -389,6 +389,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO const target = run_step.producer.?.rootModuleTarget(); var debug_info = std.debug.Info.load( fuzz.gpa, + io, rebuilt_exe_path, &gop.value_ptr.coverage, target.ofmt, diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 4f1e83a573..dfa5460981 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -1709,7 +1709,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 { defer b.cache_root.handle.deleteFile(io, tmp_path) catch { // It's fine if the temporary file can't be cleaned up. }; - b.cache_root.handle.rename(io, tmp_path, args_file) catch |rename_err| switch (rename_err) { + b.cache_root.handle.rename(tmp_path, b.cache_root.handle, args_file, io) catch |rename_err| switch (rename_err) { error.PathAlreadyExists => { // The args file was created by another concurrent build process. 
}, @@ -1827,14 +1827,14 @@ pub fn doAtomicSymLinks( // sym link for libfoo.so.1 to libfoo.so.1.2.3 const major_only_path = b.pathJoin(&.{ out_dir, filename_major_only }); const cwd: Io.Dir = .cwd(); - cwd.atomicSymLink(io, out_basename, major_only_path, .{}) catch |err| { + cwd.symLinkAtomic(io, out_basename, major_only_path, .{}) catch |err| { return step.fail("unable to symlink {s} -> {s}: {s}", .{ major_only_path, out_basename, @errorName(err), }); }; // sym link for libfoo.so to libfoo.so.1 const name_only_path = b.pathJoin(&.{ out_dir, filename_name_only }); - cwd.atomicSymLink(io, filename_major_only, name_only_path, .{}) catch |err| { + cwd.symLinkAtomic(io, filename_major_only, name_only_path, .{}) catch |err| { return step.fail("Unable to symlink {s} -> {s}: {s}", .{ name_only_path, filename_major_only, @errorName(err), }); diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index 1416e0e916..676ea4d851 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -498,7 +498,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { }); }; - b.cache_root.handle.rename(io, tmp_sub_path, sub_path) catch |err| switch (err) { + b.cache_root.handle.rename(tmp_sub_path, b.cache_root.handle, sub_path, io) catch |err| switch (err) { error.PathAlreadyExists => { // Other process beat us to it. Clean up the temp file. b.cache_root.handle.deleteFile(io, tmp_sub_path) catch |e| { diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index a1618beb02..e52dd21a96 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1042,27 +1042,21 @@ fn make(step: *Step, options: Step.MakeOptions) !void { if (any_output) { const o_sub_path = "o" ++ fs.path.sep_str ++ &digest; - b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |err| { + b.cache_root.handle.rename(tmp_dir_path, b.cache_root.handle, o_sub_path, io) catch |err| { if (err == error.PathAlreadyExists) { b.cache_root.handle.deleteTree(o_sub_path) catch |del_err| { - return step.fail("unable to remove dir '{f}'{s}: {s}", .{ - b.cache_root, - tmp_dir_path, - @errorName(del_err), + return step.fail("unable to remove dir '{f}'{s}: {t}", .{ + b.cache_root, tmp_dir_path, del_err, }); }; - b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |retry_err| { - return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {s}", .{ - b.cache_root, tmp_dir_path, - b.cache_root, o_sub_path, - @errorName(retry_err), + b.cache_root.handle.rename(tmp_dir_path, b.cache_root.handle, o_sub_path, io) catch |retry_err| { + return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {t}", .{ + b.cache_root, tmp_dir_path, b.cache_root, o_sub_path, retry_err, }); }; } else { - return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {s}", .{ - b.cache_root, tmp_dir_path, - b.cache_root, o_sub_path, - @errorName(err), + return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {t}", .{ + b.cache_root, tmp_dir_path, b.cache_root, o_sub_path, err, }); } }; diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index ed07d04d57..38a7a73588 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -523,7 +523,7 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons if (cached_cwd_path == null) cached_cwd_path = try std.process.getCwdAlloc(gpa); break :cwd cached_cwd_path.?; }; - try archiver.writeFile(io, path.sub_path, &file_reader, @intCast(stat.mtime.toSeconds())); + try 
archiver.writeFile(path.sub_path, &file_reader, @intCast(stat.mtime.toSeconds())); } // intentionally not calling `archiver.finishPedantically` @@ -587,7 +587,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim }); defer poller.deinit(); - try child.stdin.?.writeAll(@ptrCast(@as([]const std.zig.Client.Message.Header, &.{ + try child.stdin.?.writeStreamingAll(io, @ptrCast(@as([]const std.zig.Client.Message.Header, &.{ .{ .tag = .update, .bytes_len = 0 }, .{ .tag = .exit, .bytes_len = 0 }, }))); @@ -638,7 +638,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim child.stdin.?.close(io); child.stdin = null; - switch (try child.wait()) { + switch (try child.wait(io)) { .Exited => |code| { if (code != 0) { log.err( diff --git a/lib/std/Io.zig b/lib/std/Io.zig index 7f5f049d34..193998f037 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -677,7 +677,7 @@ pub const VTable = struct { dirDeleteFile: *const fn (?*anyopaque, Dir, []const u8) Dir.DeleteFileError!void, dirDeleteDir: *const fn (?*anyopaque, Dir, []const u8) Dir.DeleteDirError!void, dirRename: *const fn (?*anyopaque, old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8) Dir.RenameError!void, - dirSymLink: *const fn (?*anyopaque, Dir, target_path: []const u8, sym_link_path: []const u8, Dir.SymLinkFlags) Dir.RenameError!void, + dirSymLink: *const fn (?*anyopaque, Dir, target_path: []const u8, sym_link_path: []const u8, Dir.SymLinkFlags) Dir.SymLinkError!void, dirReadLink: *const fn (?*anyopaque, Dir, sub_path: []const u8, buffer: []u8) Dir.ReadLinkError!usize, dirSetOwner: *const fn (?*anyopaque, Dir, ?File.Uid, ?File.Gid) Dir.SetOwnerError!void, dirSetPermissions: *const fn (?*anyopaque, Dir, Dir.Permissions) Dir.SetPermissionsError!void, diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig index 755ce924ad..7d1f6212dc 100644 --- a/lib/std/Io/Dir.zig +++ b/lib/std/Io/Dir.zig @@ -501,7 +501,7 @@ pub const WriteFileError = File.Writer.Error || File.OpenError; pub fn writeFile(dir: Dir, io: Io, options: WriteFileOptions) WriteFileError!void { var file = try dir.createFile(io, options.sub_path, options.flags); defer file.close(io); - try file.writeAll(io, options.data); + try file.writeStreamingAll(io, options.data); } pub const PrevStatus = enum { @@ -955,6 +955,13 @@ pub fn rename( return io.vtable.dirRename(io.userdata, old_dir, old_sub_path, new_dir, new_sub_path); } +pub fn renameAbsolute(io: Io, old_path: []const u8, new_path: []const u8) RenameError!void { + assert(path.isAbsolute(old_path)); + assert(path.isAbsolute(new_path)); + const my_cwd = cwd(); + return io.vtable.dirRename(io.userdata, my_cwd, old_path, my_cwd, new_path); +} + /// Use with `Dir.symLink`, `Dir.symLinkAtomic`, and `symLinkAbsolute` to /// specify whether the symlink will point to a file or a directory. 
This value /// is ignored on all hosts except Windows where creating symlinks to different @@ -1053,7 +1060,7 @@ pub fn symLinkAtomic( temp_path[dirname.len + 1 ..][0..rand_len].* = std.fmt.hex(random_integer); if (dir.symLink(io, target_path, temp_path, flags)) { - return dir.rename(temp_path, dir, io, sym_link_path); + return dir.rename(temp_path, dir, sym_link_path, io); } else |err| switch (err) { error.PathAlreadyExists => continue, else => |e| return e, diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 59e0990eb0..92992f91b4 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -961,14 +961,14 @@ test "Dir.rename files" { const missing_file_path = try ctx.transformPath("missing_file_name"); const something_else_path = try ctx.transformPath("something_else"); - try testing.expectError(error.FileNotFound, ctx.dir.rename(missing_file_path, something_else_path)); + try testing.expectError(error.FileNotFound, ctx.dir.rename(missing_file_path, ctx.dir, something_else_path, io)); // Renaming files const test_file_name = try ctx.transformPath("test_file"); const renamed_test_file_name = try ctx.transformPath("test_file_renamed"); var file = try ctx.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); - try ctx.dir.rename(test_file_name, renamed_test_file_name); + try ctx.dir.rename(test_file_name, ctx.dir, renamed_test_file_name, io); // Ensure the file was renamed try testing.expectError(error.FileNotFound, ctx.dir.openFile(io, test_file_name, .{})); @@ -976,13 +976,13 @@ test "Dir.rename files" { file.close(io); // Rename to self succeeds - try ctx.dir.rename(renamed_test_file_name, renamed_test_file_name); + try ctx.dir.rename(renamed_test_file_name, ctx.dir, renamed_test_file_name, io); // Rename to existing file succeeds const existing_file_path = try ctx.transformPath("existing_file"); var existing_file = try ctx.dir.createFile(io, existing_file_path, .{ .read = true }); existing_file.close(io); - try ctx.dir.rename(renamed_test_file_name, existing_file_path); + try ctx.dir.rename(renamed_test_file_name, ctx.dir, existing_file_path, io); try testing.expectError(error.FileNotFound, ctx.dir.openFile(io, renamed_test_file_name, .{})); file = try ctx.dir.openFile(io, existing_file_path, .{}); @@ -1007,7 +1007,7 @@ test "Dir.rename directories" { // Renaming directories try ctx.dir.makeDir(test_dir_path); - try ctx.dir.rename(test_dir_path, test_dir_renamed_path); + try ctx.dir.rename(test_dir_path, ctx.dir, test_dir_renamed_path, io); // Ensure the directory was renamed try testing.expectError(error.FileNotFound, ctx.dir.openDir(io, test_dir_path, .{})); @@ -1019,7 +1019,7 @@ test "Dir.rename directories" { dir.close(io); const test_dir_renamed_again_path = try ctx.transformPath("test_dir_renamed_again"); - try ctx.dir.rename(test_dir_renamed_path, test_dir_renamed_again_path); + try ctx.dir.rename(test_dir_renamed_path, ctx.dir, test_dir_renamed_again_path, io); // Ensure the directory was renamed and the file still exists in it try testing.expectError(error.FileNotFound, ctx.dir.openDir(io, test_dir_renamed_path, .{})); @@ -1044,7 +1044,7 @@ test "Dir.rename directory onto empty dir" { try ctx.dir.makeDir(test_dir_path); try ctx.dir.makeDir(target_dir_path); - try ctx.dir.rename(test_dir_path, target_dir_path); + try ctx.dir.rename(test_dir_path, ctx.dir, target_dir_path, io); // Ensure the directory was renamed try testing.expectError(error.FileNotFound, ctx.dir.openDir(io, test_dir_path, .{})); @@ -1072,7 +1072,7 @@ test "Dir.rename directory onto 
non-empty dir" { target_dir.close(io); // Rename should fail with PathAlreadyExists if target_dir is non-empty - try testing.expectError(error.PathAlreadyExists, ctx.dir.rename(test_dir_path, target_dir_path)); + try testing.expectError(error.PathAlreadyExists, ctx.dir.rename(test_dir_path, ctx.dir, target_dir_path, io)); // Ensure the directory was not renamed var dir = try ctx.dir.openDir(io, test_dir_path, .{}); @@ -1094,8 +1094,8 @@ test "Dir.rename file <-> dir" { var file = try ctx.dir.createFile(io, test_file_path, .{ .read = true }); file.close(io); try ctx.dir.makeDir(test_dir_path); - try testing.expectError(error.IsDir, ctx.dir.rename(test_file_path, test_dir_path)); - try testing.expectError(error.NotDir, ctx.dir.rename(test_dir_path, test_file_path)); + try testing.expectError(error.IsDir, ctx.dir.rename(test_file_path, ctx.dir, test_dir_path, io)); + try testing.expectError(error.NotDir, ctx.dir.rename(test_dir_path, ctx.dir, test_file_path, io)); } }.impl); } @@ -1114,7 +1114,7 @@ test "rename" { const renamed_test_file_name = "test_file_renamed"; var file = try tmp_dir1.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); - try fs.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name); + try Dir.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name, io); // ensure the file was renamed try testing.expectError(error.FileNotFound, tmp_dir1.dir.openFile(io, test_file_name, .{})); @@ -1492,7 +1492,7 @@ test "pwritev, preadv" { var src_file = try tmp.dir.createFile(io, "test.txt", .{ .read = true }); defer src_file.close(io); - var writer = src_file.writer(&.{}); + var writer = src_file.writer(io, &.{}); try writer.seekTo(16); try writer.interface.writeVecAll(&lines); @@ -1593,7 +1593,7 @@ test "sendfile" { var src_file = try dir.createFile(io, "sendfile1.txt", .{ .read = true }); defer src_file.close(io); { - var fw = src_file.writer(&.{}); + var fw = src_file.writer(io, &.{}); try fw.interface.writeVecAll(&vecs); } @@ -1610,7 +1610,7 @@ test "sendfile" { var written_buf: [100]u8 = undefined; var file_reader = src_file.reader(io, &.{}); var fallback_buffer: [50]u8 = undefined; - var file_writer = dest_file.writer(&fallback_buffer); + var file_writer = dest_file.writer(io, &fallback_buffer); try file_writer.interface.writeVecAll(&headers); try file_reader.seekTo(1); try testing.expectEqual(10, try file_writer.interface.sendFileAll(&file_reader, .limited(10))); @@ -1648,7 +1648,7 @@ test "sendfile with buffered data" { try file_reader.interface.fill(8); var fallback_buffer: [32]u8 = undefined; - var file_writer = dest_file.writer(&fallback_buffer); + var file_writer = dest_file.writer(io, &fallback_buffer); try std.testing.expectEqual(4, try file_writer.interface.sendFileAll(&file_reader, .limited(4))); @@ -2051,7 +2051,7 @@ test "'.' and '..' 
in Io.Dir functions" { try ctx.dir.access(io, file_path, .{}); try ctx.dir.copyFile(file_path, ctx.dir, copy_path, .{}); - try ctx.dir.rename(copy_path, rename_path); + try ctx.dir.rename(copy_path, ctx.dir, rename_path, io); const renamed_file = try ctx.dir.openFile(io, rename_path, .{}); renamed_file.close(io); try ctx.dir.deleteFile(rename_path); @@ -2175,7 +2175,7 @@ test "invalid UTF-8/WTF-8 paths" { try testing.expectError(expected_err, ctx.dir.deleteDir(invalid_path)); - try testing.expectError(expected_err, ctx.dir.rename(invalid_path, invalid_path)); + try testing.expectError(expected_err, ctx.dir.rename(invalid_path, ctx.dir, invalid_path, io)); try testing.expectError(expected_err, ctx.dir.symLink(invalid_path, invalid_path, .{})); if (native_os == .wasi) { @@ -2208,7 +2208,7 @@ test "invalid UTF-8/WTF-8 paths" { try testing.expectError(expected_err, ctx.dir.realpathAlloc(testing.allocator, invalid_path)); } - try testing.expectError(expected_err, fs.rename(ctx.dir, invalid_path, ctx.dir, invalid_path)); + try testing.expectError(expected_err, Dir.rename(ctx.dir, invalid_path, ctx.dir, invalid_path, io)); if (native_os != .wasi and ctx.path_type != .relative) { try testing.expectError(expected_err, Dir.copyFileAbsolute(invalid_path, invalid_path, .{})); @@ -2334,7 +2334,7 @@ test "seekTo flushes buffered data" { defer file.close(io); { var buf: [16]u8 = undefined; - var file_writer = File.writer(file, &buf); + var file_writer = file.writer(io, file, &buf); try file_writer.interface.writeAll(contents); try file_writer.seekTo(8); diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index bacfdacf83..756ecfd63f 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -690,7 +690,7 @@ test "rename smoke test" { // Rename the file const new_file_path = try fs.path.join(a, &.{ base_path, "some_other_file" }); defer a.free(new_file_path); - try posix.rename(file_path, new_file_path); + try Io.Dir.renameAbsolute(file_path, new_file_path); } { @@ -717,7 +717,7 @@ test "rename smoke test" { // Rename the directory const new_file_path = try fs.path.join(a, &.{ base_path, "some_other_dir" }); defer a.free(new_file_path); - try posix.rename(file_path, new_file_path); + try Io.Dir.renameAbsolute(file_path, new_file_path); } { diff --git a/lib/std/tar.zig b/lib/std/tar.zig index b2a5306458..5e7215f1d7 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -612,7 +612,7 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp .file => { if (createDirAndFile(io, dir, file_name, fileMode(file.mode, options))) |fs_file| { defer fs_file.close(io); - var file_writer = fs_file.writer(&file_contents_buffer); + var file_writer = fs_file.writer(io, &file_contents_buffer); try it.streamRemaining(file, &file_writer.interface); try file_writer.interface.flush(); } else |err| { diff --git a/lib/std/zip.zig b/lib/std/zip.zig index acb3fc65ab..5a91b73d6a 100644 --- a/lib/std/zip.zig +++ b/lib/std/zip.zig @@ -570,7 +570,7 @@ pub const Iterator = struct { }; defer out_file.close(io); var out_file_buffer: [1024]u8 = undefined; - var file_writer = out_file.writer(&out_file_buffer); + var file_writer = out_file.writer(io, &out_file_buffer); const local_data_file_offset: u64 = @as(u64, self.file_offset) + @as(u64, @sizeOf(LocalFileHeader)) + diff --git a/src/Compilation.zig b/src/Compilation.zig index 7fe217b385..3a6ae263a5 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5303,7 +5303,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void { defer 
tar_file.close(io); var buffer: [1024]u8 = undefined; - var tar_file_writer = tar_file.writer(&buffer); + var tar_file_writer = tar_file.writer(io, &buffer); var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, []const u8) = .empty; defer seen_table.deinit(comp.gpa); @@ -6448,7 +6448,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{}); defer o_dir.close(io); const tmp_basename = fs.path.basename(out_obj_path); - try fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename); + try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename, io); break :blk digest; }; @@ -6696,7 +6696,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{}); defer o_dir.close(io); const tmp_basename = fs.path.basename(out_res_path); - try fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename); + try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename, io); break :blk digest; }; diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 860fb8974e..aaba0c28cf 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -567,7 +567,7 @@ fn runResource( .root_dir = cache_root, .sub_path = try std.fmt.allocPrint(arena, "p" ++ s ++ "{s}", .{computed_package_hash.toSlice()}), }; - renameTmpIntoCache(cache_root.handle, package_sub_path, f.package_root.sub_path) catch |err| { + renameTmpIntoCache(io, cache_root.handle, package_sub_path, f.package_root.sub_path) catch |err| { const src = try cache_root.join(arena, &.{tmp_dir_sub_path}); const dest = try cache_root.join(arena, &.{f.package_root.sub_path}); try eb.addRootErrorMessage(.{ .msg = try eb.printString( @@ -1319,7 +1319,7 @@ fn unzip( defer zip_file.close(io); var zip_file_buffer: [4096]u8 = undefined; var zip_file_reader = b: { - var zip_file_writer = zip_file.writer(&zip_file_buffer); + var zip_file_writer = zip_file.writer(io, &zip_file_buffer); _ = reader.streamRemaining(&zip_file_writer.interface) catch |err| switch (err) { error.ReadFailed => return error.ReadFailed, @@ -1370,7 +1370,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U defer pack_file.close(io); var pack_file_buffer: [4096]u8 = undefined; var pack_file_reader = b: { - var pack_file_writer = pack_file.writer(&pack_file_buffer); + var pack_file_writer = pack_file.writer(io, &pack_file_buffer); const fetch_reader = &resource.fetch_stream.reader; _ = try fetch_reader.streamRemaining(&pack_file_writer.interface); try pack_file_writer.interface.flush(); @@ -1380,7 +1380,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U var index_file = try pack_dir.createFile(io, "pkg.idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [2000]u8 = undefined; - var index_file_writer = index_file.writer(&index_file_buffer); + var index_file_writer = index_file.writer(io, &index_file_buffer); { const index_prog_node = f.prog_node.start("Index pack", 0); defer index_prog_node.end(); @@ -1454,11 +1454,11 @@ fn recursiveDirectoryCopy(f: *Fetch, dir: Io.Dir, tmp_dir: Io.Dir) anyerror!void } } -pub fn renameTmpIntoCache(cache_dir: Io.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void { +pub fn renameTmpIntoCache(io: Io, cache_dir: Io.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void { assert(dest_dir_sub_path[1] == 
fs.path.sep); var handled_missing_dir = false; while (true) { - cache_dir.rename(tmp_dir_sub_path, dest_dir_sub_path) catch |err| switch (err) { + cache_dir.rename(tmp_dir_sub_path, cache_dir, dest_dir_sub_path, io) catch |err| switch (err) { error.FileNotFound => { if (handled_missing_dir) return err; cache_dir.makeDir(dest_dir_sub_path[0..1]) catch |mkd_err| switch (mkd_err) { diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index 3f0ffb04cd..4d5bcfc84b 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -1594,7 +1594,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u var index_file = try git_dir.dir.createFile(io, "testrepo.idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [2000]u8 = undefined; - var index_file_writer = index_file.writer(&index_file_buffer); + var index_file_writer = index_file.writer(io, &index_file_buffer); try indexPack(testing.allocator, format, &pack_file_reader, &index_file_writer); // Arbitrary size limit on files read while checking the repository contents @@ -1730,7 +1730,7 @@ pub fn main() !void { var index_file = try git_dir.createFile(io, "idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [4096]u8 = undefined; - var index_file_writer = index_file.writer(&index_file_buffer); + var index_file_writer = index_file.writer(io, &index_file_buffer); try indexPack(allocator, format, &pack_file_reader, &index_file_writer); std.debug.print("Starting checkout...\n", .{}); diff --git a/src/Zcu.zig b/src/Zcu.zig index 1f2fe89236..2891da2407 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -2986,7 +2986,13 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *Io.Reader return zir; } -pub fn saveZirCache(gpa: Allocator, cache_file: Io.File, stat: Io.File.Stat, zir: Zir) (Io.File.WriteError || Allocator.Error)!void { +pub fn saveZirCache( + gpa: Allocator, + io: Io, + cache_file: Io.File, + stat: Io.File.Stat, + zir: Zir, +) (Io.File.WriteError || Allocator.Error)!void { const safety_buffer = if (data_has_safety_tag) try gpa.alloc([8]u8, zir.instructions.len) else @@ -3020,13 +3026,13 @@ pub fn saveZirCache(gpa: Allocator, cache_file: Io.File, stat: Io.File.Stat, zir zir.string_bytes, @ptrCast(zir.extra), }; - var cache_fw = cache_file.writer(&.{}); + var cache_fw = cache_file.writer(io, &.{}); cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) { error.WriteFailed => return cache_fw.err.?, }; } -pub fn saveZoirCache(cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir) Io.File.WriteError!void { +pub fn saveZoirCache(io: Io, cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir) Io.File.WriteError!void { const header: Zoir.Header = .{ .nodes_len = @intCast(zoir.nodes.len), .extra_len = @intCast(zoir.extra.len), @@ -3050,7 +3056,7 @@ pub fn saveZoirCache(cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir) Io.Fil @ptrCast(zoir.compile_errors), @ptrCast(zoir.error_notes), }; - var cache_fw = cache_file.writer(&.{}); + var cache_fw = cache_file.writer(io, &.{}); cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) { error.WriteFailed => return cache_fw.err.?, }; diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 74196705c3..9d6a45ad26 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -278,18 +278,18 @@ pub fn updateFile( switch (file.getMode()) { .zig => { file.zir = try AstGen.generate(gpa, file.tree.?); - Zcu.saveZirCache(gpa, cache_file, stat, file.zir.?) 
catch |err| switch (err) { + Zcu.saveZirCache(gpa, io, cache_file, stat, file.zir.?) catch |err| switch (err) { error.OutOfMemory => |e| return e, - else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {s}", .{ - file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err), + else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {t}", .{ + file.path.fmt(comp), cache_directory, &hex_digest, err, }), }; }, .zon => { file.zoir = try ZonGen.generate(gpa, file.tree.?, .{}); - Zcu.saveZoirCache(cache_file, stat, file.zoir.?) catch |err| { - log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {s}", .{ - file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err), + Zcu.saveZoirCache(io, cache_file, stat, file.zoir.?) catch |err| { + log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {t}", .{ + file.path.fmt(comp), cache_directory, &hex_digest, err, }); }; }, diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig index 93f88b9689..9631ec22f9 100644 --- a/src/libs/mingw.zig +++ b/src/libs/mingw.zig @@ -380,7 +380,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { const lib_final_file = try o_dir.createFile(io, final_lib_basename, .{ .truncate = true }); defer lib_final_file.close(io); var buffer: [1024]u8 = undefined; - var file_writer = lib_final_file.writer(&buffer); + var file_writer = lib_final_file.writer(io, &buffer); try implib.writeCoffArchive(gpa, &file_writer.interface, members); try file_writer.interface.flush(); } diff --git a/src/link.zig b/src/link.zig index 06d18ec2d5..5f3031ca0e 100644 --- a/src/link.zig +++ b/src/link.zig @@ -621,7 +621,7 @@ pub const File = struct { }); defer gpa.free(tmp_sub_path); try emit.root_dir.handle.copyFile(emit.sub_path, emit.root_dir.handle, tmp_sub_path, .{}); - try emit.root_dir.handle.rename(tmp_sub_path, emit.sub_path); + try emit.root_dir.handle.rename(tmp_sub_path, emit.root_dir.handle, emit.sub_path, io); switch (builtin.os.tag) { .linux => std.posix.ptrace(std.os.linux.PTRACE.ATTACH, pid, 0, 0) catch |err| { log.warn("ptrace failure: {s}", .{@errorName(err)}); diff --git a/src/link/Lld.zig b/src/link/Lld.zig index ca15a38bb0..6920c12762 100644 --- a/src/link/Lld.zig +++ b/src/link/Lld.zig @@ -1631,7 +1631,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi { defer rsp_file.close(io); var rsp_file_buffer: [1024]u8 = undefined; - var rsp_file_writer = rsp_file.writer(&rsp_file_buffer); + var rsp_file_writer = rsp_file.writer(io, &rsp_file_buffer); const rsp_writer = &rsp_file_writer.interface; for (argv[2..]) |arg| { try rsp_writer.writeByte('"'); diff --git a/src/main.zig b/src/main.zig index bef3a3efb5..88e236adef 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3412,7 +3412,7 @@ fn buildOutputType( const sub_path = try std.fmt.allocPrint(arena, "tmp" ++ sep ++ "{x}-stdin{s}", .{ &bin_digest, ext.canonicalName(target), }); - try dirs.local_cache.handle.rename(dump_path, sub_path); + try dirs.local_cache.handle.rename(dump_path, dirs.local_cache.handle, sub_path, io); // Convert `sub_path` to be relative to current working directory. 
src.src_path = try dirs.local_cache.join(arena, &.{sub_path}); @@ -7216,11 +7216,7 @@ fn createDependenciesModule( const hex_digest = hh.final(); const o_dir_sub_path = try arena.dupe(u8, "o" ++ fs.path.sep_str ++ hex_digest); - try Package.Fetch.renameTmpIntoCache( - dirs.local_cache.handle, - tmp_dir_sub_path, - o_dir_sub_path, - ); + try Package.Fetch.renameTmpIntoCache(io, dirs.local_cache.handle, tmp_dir_sub_path, o_dir_sub_path); const deps_mod = try Package.Module.create(arena, .{ .paths = .{ -- cgit v1.2.3 From 1dcfc8787e86ed94d216976e621a49fc488e8214 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 18:00:55 -0800 Subject: update all readFileAlloc() to accept Io instance --- lib/compiler/std-docs.zig | 6 ++++-- lib/std/Build/Cache.zig | 3 ++- lib/std/Build/Step/CheckFile.zig | 3 ++- lib/std/Build/Step/ConfigHeader.zig | 4 ++-- lib/std/Build/WebServer.zig | 5 +++-- lib/std/Io/Dir.zig | 6 +++--- lib/std/crypto/Certificate/Bundle/macos.zig | 3 +-- lib/std/fs/test.zig | 27 ++++++++++++++------------- lib/std/zig/LibCInstallation.zig | 8 ++------ lib/std/zig/WindowsSdk.zig | 2 +- src/Compilation.zig | 4 ++-- src/Package/Fetch/git.zig | 4 ++-- src/link.zig | 4 ++-- src/link/MachO.zig | 8 +++++--- src/link/MachO/CodeSignature.zig | 16 ++++++++-------- src/main.zig | 21 +++++++++------------ 16 files changed, 62 insertions(+), 62 deletions(-) (limited to 'lib/std/Build/WebServer.zig') diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index 8f02f05958..6452496305 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -179,10 +179,11 @@ fn serveDocsFile( content_type: []const u8, ) !void { const gpa = context.gpa; + const io = context.io; // The desired API is actually sendfile, which will require enhancing std.http.Server. // We load the file with every request so that the user can make changes to the file // and refresh the HTML page without restarting this server. - const file_contents = try context.lib_dir.readFileAlloc(name, gpa, .limited(10 * 1024 * 1024)); + const file_contents = try context.lib_dir.readFileAlloc(io, name, gpa, .limited(10 * 1024 * 1024)); defer gpa.free(file_contents); try request.respond(file_contents, .{ .extra_headers = &.{ @@ -255,6 +256,7 @@ fn serveWasm( optimize_mode: std.builtin.OptimizeMode, ) !void { const gpa = context.gpa; + const io = context.io; var arena_instance = std.heap.ArenaAllocator.init(gpa); defer arena_instance.deinit(); @@ -273,7 +275,7 @@ fn serveWasm( }); // std.http.Server does not have a sendfile API yet. 
const bin_path = try wasm_base_path.join(arena, bin_name); - const file_contents = try bin_path.root_dir.handle.readFileAlloc(bin_path.sub_path, gpa, .limited(10 * 1024 * 1024)); + const file_contents = try bin_path.root_dir.handle.readFileAlloc(io, bin_path.sub_path, gpa, .limited(10 * 1024 * 1024)); defer gpa.free(file_contents); try request.respond(file_contents, .{ .extra_headers = &.{ diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 2d6dbc02fa..d2ba33c74d 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1075,7 +1075,8 @@ pub const Manifest = struct { fn addDepFileMaybePost(self: *Manifest, dir: Io.Dir, dep_file_sub_path: []const u8) !void { const gpa = self.cache.gpa; - const dep_file_contents = try dir.readFileAlloc(dep_file_sub_path, gpa, .limited(manifest_file_size_max)); + const io = self.cache.io; + const dep_file_contents = try dir.readFileAlloc(io, dep_file_sub_path, gpa, .limited(manifest_file_size_max)); defer gpa.free(dep_file_contents); var error_buf: std.ArrayList(u8) = .empty; diff --git a/lib/std/Build/Step/CheckFile.zig b/lib/std/Build/Step/CheckFile.zig index 560b6ad050..1c3813ca82 100644 --- a/lib/std/Build/Step/CheckFile.zig +++ b/lib/std/Build/Step/CheckFile.zig @@ -51,11 +51,12 @@ pub fn setName(check_file: *CheckFile, name: []const u8) void { fn make(step: *Step, options: Step.MakeOptions) !void { _ = options; const b = step.owner; + const io = b.graph.io; const check_file: *CheckFile = @fieldParentPtr("step", step); try step.singleUnchangingWatchInput(check_file.source); const src_path = check_file.source.getPath2(b, step); - const contents = Io.Dir.cwd().readFileAlloc(src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(io, src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| { return step.fail("unable to read '{s}': {s}", .{ src_path, @errorName(err), }); diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index 589110d4c4..250bae5009 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -208,7 +208,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .autoconf_undef, .autoconf_at => |file_source| { try bw.writeAll(c_generated_line); const src_path = file_source.getPath2(b, step); - const contents = Io.Dir.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(io, src_path, arena, .limited(config_header.max_bytes)) catch |err| { return step.fail("unable to read autoconf input file '{s}': {s}", .{ src_path, @errorName(err), }); @@ -222,7 +222,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .cmake => |file_source| { try bw.writeAll(c_generated_line); const src_path = file_source.getPath2(b, step); - const contents = Io.Dir.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(io, src_path, arena, .limited(config_header.max_bytes)) catch |err| { return step.fail("unable to read cmake input file '{s}': {s}", .{ src_path, @errorName(err), }); diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 38a7a73588..72306cbab9 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -469,11 +469,12 @@ pub fn serveFile( content_type: []const u8, ) !void { const gpa = ws.gpa; + const io = ws.graph.io; // The desired API is actually sendfile, which will require enhancing 
http.Server. // We load the file with every request so that the user can make changes to the file // and refresh the HTML page without restarting this server. - const file_contents = path.root_dir.handle.readFileAlloc(path.sub_path, gpa, .limited(10 * 1024 * 1024)) catch |err| { - log.err("failed to read '{f}': {s}", .{ path, @errorName(err) }); + const file_contents = path.root_dir.handle.readFileAlloc(io, path.sub_path, gpa, .limited(10 * 1024 * 1024)) catch |err| { + log.err("failed to read '{f}': {t}", .{ path, err }); return error.AlreadyReported; }; defer gpa.free(file_contents); diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig index 7d1f6212dc..cf5e2b4c72 100644 --- a/lib/std/Io/Dir.zig +++ b/lib/std/Io/Dir.zig @@ -1117,10 +1117,10 @@ pub fn readLink(dir: Dir, io: Io, sub_path: []const u8, buffer: []u8) ReadLinkEr /// On other platforms, `path` is an opaque sequence of bytes with no particular encoding. pub fn readLinkAbsolute(io: Io, absolute_path: []const u8, buffer: []u8) ReadLinkError!usize { assert(path.isAbsolute(absolute_path)); - return io.vtable.dirReadLink(io.userdata, .cwd(), path, buffer); + return io.vtable.dirReadLink(io.userdata, .cwd(), absolute_path, buffer); } -pub const ReadFileAllocError = File.OpenError || File.ReadError || Allocator.Error || error{ +pub const ReadFileAllocError = File.OpenError || File.Reader.Error || Allocator.Error || error{ /// File size reached or exceeded the provided limit. StreamTooLong, }; @@ -1603,7 +1603,7 @@ pub const CopyFileOptions = struct { pub const CopyFileError = File.OpenError || File.StatError || File.Atomic.InitError || File.Atomic.FinishError || - File.ReadError || File.WriteError || error{InvalidFileName}; + File.Reader.Error || File.WriteError || error{InvalidFileName}; /// Atomically creates a new file at `dest_path` within `dest_dir` with the /// same contents as `source_path` within `source_dir`, overwriting any already diff --git a/lib/std/crypto/Certificate/Bundle/macos.zig b/lib/std/crypto/Certificate/Bundle/macos.zig index 444d8da675..086c8feb3f 100644 --- a/lib/std/crypto/Certificate/Bundle/macos.zig +++ b/lib/std/crypto/Certificate/Bundle/macos.zig @@ -17,9 +17,8 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanM "/Library/Keychains/System.keychain", }; - _ = io; // TODO migrate file system to use std.Io for (keychain_paths) |keychain_path| { - const bytes = Io.Dir.cwd().readFileAlloc(keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) { + const bytes = Io.Dir.cwd().readFileAlloc(io, keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) { error.StreamTooLong => return error.FileTooBig, else => |e| return e, }; diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 92992f91b4..bb3a9da0f5 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -767,7 +767,7 @@ test "readFileAlloc" { var file = try tmp_dir.dir.createFile(io, "test_file", .{ .read = true }); defer file.close(io); - const buf1 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024)); + const buf1 = try tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(1024)); defer testing.allocator.free(buf1); try testing.expectEqualStrings("", buf1); @@ -776,7 +776,7 @@ test "readFileAlloc" { { // max_bytes > file_size - const buf2 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024)); + const buf2 = try tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(1024)); defer 
testing.allocator.free(buf2); try testing.expectEqualStrings(write_buf, buf2); } @@ -785,13 +785,13 @@ test "readFileAlloc" { // max_bytes == file_size try testing.expectError( error.StreamTooLong, - tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len)), + tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(write_buf.len)), ); } { // max_bytes == file_size + 1 - const buf2 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len + 1)); + const buf2 = try tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(write_buf.len + 1)); defer testing.allocator.free(buf2); try testing.expectEqualStrings(write_buf, buf2); } @@ -799,7 +799,7 @@ test "readFileAlloc" { // max_bytes < file_size try testing.expectError( error.StreamTooLong, - tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len - 1)), + tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(write_buf.len - 1)), ); } @@ -877,16 +877,16 @@ test "file operations on directories" { switch (native_os) { .dragonfly, .netbsd => { // no error when reading a directory. See https://github.com/ziglang/zig/issues/5732 - const buf = try ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited); + const buf = try ctx.dir.readFileAlloc(io, test_dir_name, testing.allocator, .unlimited); testing.allocator.free(buf); }, .wasi => { // WASI return EBADF, which gets mapped to NotOpenForReading. // See https://github.com/bytecodealliance/wasmtime/issues/1935 - try testing.expectError(error.NotOpenForReading, ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited)); + try testing.expectError(error.NotOpenForReading, ctx.dir.readFileAlloc(io, test_dir_name, testing.allocator, .unlimited)); }, else => { - try testing.expectError(error.IsDir, ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited)); + try testing.expectError(error.IsDir, ctx.dir.readFileAlloc(io, test_dir_name, testing.allocator, .unlimited)); }, } @@ -1679,14 +1679,14 @@ test "copyFile" { try ctx.dir.copyFile(src_file, ctx.dir, dest_file2, .{ .override_mode = File.default_mode }); defer ctx.dir.deleteFile(dest_file2) catch {}; - try expectFileContents(ctx.dir, dest_file, data); - try expectFileContents(ctx.dir, dest_file2, data); + try expectFileContents(io, ctx.dir, dest_file, data); + try expectFileContents(io, ctx.dir, dest_file2, data); } }.impl); } -fn expectFileContents(dir: Dir, file_path: []const u8, data: []const u8) !void { - const contents = try dir.readFileAlloc(file_path, testing.allocator, .limited(1000)); +fn expectFileContents(io: Io, dir: Dir, file_path: []const u8, data: []const u8) !void { + const contents = try dir.readFileAlloc(io, file_path, testing.allocator, .limited(1000)); defer testing.allocator.free(contents); try testing.expectEqualSlices(u8, data, contents); @@ -1695,6 +1695,7 @@ fn expectFileContents(dir: Dir, file_path: []const u8, data: []const u8) !void { test "AtomicFile" { try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { + const io = ctx.io; const allocator = ctx.arena.allocator(); const test_out_file = try ctx.transformPath("tmp_atomic_file_test_dest.txt"); const test_content = @@ -1709,7 +1710,7 @@ test "AtomicFile" { try af.file_writer.interface.writeAll(test_content); try af.finish(); } - const content = try ctx.dir.readFileAlloc(test_out_file, allocator, .limited(9999)); + const content = try ctx.dir.readFileAlloc(io, test_out_file, allocator, 
.limited(9999)); try testing.expectEqualStrings(test_content, content); try ctx.dir.deleteFile(test_out_file); diff --git a/lib/std/zig/LibCInstallation.zig b/lib/std/zig/LibCInstallation.zig index f3dc73838f..1463ff4a40 100644 --- a/lib/std/zig/LibCInstallation.zig +++ b/lib/std/zig/LibCInstallation.zig @@ -37,11 +37,7 @@ pub const FindError = error{ ZigIsTheCCompiler, }; -pub fn parse( - allocator: Allocator, - libc_file: []const u8, - target: *const std.Target, -) !LibCInstallation { +pub fn parse(allocator: Allocator, io: Io, libc_file: []const u8, target: *const std.Target) !LibCInstallation { var self: LibCInstallation = .{}; const fields = std.meta.fields(LibCInstallation); @@ -57,7 +53,7 @@ pub fn parse( } } - const contents = try Io.Dir.cwd().readFileAlloc(libc_file, allocator, .limited(std.math.maxInt(usize))); + const contents = try Io.Dir.cwd().readFileAlloc(io, libc_file, allocator, .limited(std.math.maxInt(usize))); defer allocator.free(contents); var it = std.mem.tokenizeScalar(u8, contents, '\n'); diff --git a/lib/std/zig/WindowsSdk.zig b/lib/std/zig/WindowsSdk.zig index dca474020a..3f6d58b6ba 100644 --- a/lib/std/zig/WindowsSdk.zig +++ b/lib/std/zig/WindowsSdk.zig @@ -775,7 +775,7 @@ const MsvcLibDir = struct { writer.writeByte(std.fs.path.sep) catch unreachable; writer.writeAll("state.json") catch unreachable; - const json_contents = instances_dir.readFileAlloc(writer.buffered(), allocator, .limited(std.math.maxInt(usize))) catch continue; + const json_contents = instances_dir.readFileAlloc(io, writer.buffered(), allocator, .limited(std.math.maxInt(usize))) catch continue; defer allocator.free(json_contents); var parsed = std.json.parseFromSlice(std.json.Value, allocator, json_contents, .{}) catch continue; diff --git a/src/Compilation.zig b/src/Compilation.zig index 3a6ae263a5..79b2d51635 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -6400,7 +6400,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr if (comp.file_system_inputs != null) { // Use the same file size limit as the cache code does for dependency files. 
- const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(dep_basename, gpa, .limited(Cache.manifest_file_size_max)); + const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(io, dep_basename, gpa, .limited(Cache.manifest_file_size_max)); defer gpa.free(dep_file_contents); var str_buf: std.ArrayList(u8) = .empty; @@ -6665,7 +6665,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 // Read depfile and update cache manifest { const dep_basename = fs.path.basename(out_dep_path); - const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(dep_basename, arena, .limited(50 * 1024 * 1024)); + const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(io, dep_basename, arena, .limited(50 * 1024 * 1024)); defer arena.free(dep_file_contents); const value = try std.json.parseFromSliceLeaky(std.json.Value, arena, dep_file_contents, .{}); diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index 4d5bcfc84b..1d6aa1a849 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -1602,7 +1602,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u const max_file_size = 8192; if (!skip_checksums) { - const index_file_data = try git_dir.dir.readFileAlloc("testrepo.idx", testing.allocator, .limited(max_file_size)); + const index_file_data = try git_dir.dir.readFileAlloc(io, "testrepo.idx", testing.allocator, .limited(max_file_size)); defer testing.allocator.free(index_file_data); // testrepo.idx is generated by Git. The index created by this file should // match it exactly. Running `git verify-pack -v testrepo.pack` can verify @@ -1678,7 +1678,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u \\revision 19 \\ ; - const actual_file_contents = try worktree.dir.readFileAlloc("file", testing.allocator, .limited(max_file_size)); + const actual_file_contents = try worktree.dir.readFileAlloc(io, "file", testing.allocator, .limited(max_file_size)); defer testing.allocator.free(actual_file_contents); try testing.expectEqualStrings(expected_file_contents, actual_file_contents); } diff --git a/src/link.zig b/src/link.zig index 5f3031ca0e..ff6d938e6f 100644 --- a/src/link.zig +++ b/src/link.zig @@ -624,12 +624,12 @@ pub const File = struct { try emit.root_dir.handle.rename(tmp_sub_path, emit.root_dir.handle, emit.sub_path, io); switch (builtin.os.tag) { .linux => std.posix.ptrace(std.os.linux.PTRACE.ATTACH, pid, 0, 0) catch |err| { - log.warn("ptrace failure: {s}", .{@errorName(err)}); + log.warn("ptrace failure: {t}", .{err}); }, .maccatalyst, .macos => { const macho_file = base.cast(.macho).?; macho_file.ptraceAttach(pid) catch |err| { - log.warn("attaching failed with error: {s}", .{@errorName(err)}); + log.warn("attaching failed with error: {t}", .{err}); }; }, .windows => unreachable, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 6cd906340a..f996442f24 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -4347,11 +4347,13 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); + const io = comp.io; + const sdk_dir = switch (sdk_layout) { .sdk => comp.sysroot.?, .vendored => fs.path.join(arena, &.{ comp.dirs.zig_lib.path.?, "libc", "darwin" }) catch return null, }; - if (readSdkVersionFromSettings(arena, sdk_dir)) |ver| { + if (readSdkVersionFromSettings(arena, io, sdk_dir)) |ver| { return parseSdkVersion(ver); } else |_| { // Read from settings 
should always succeed when vendored. @@ -4374,9 +4376,9 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi // Official Apple SDKs ship with a `SDKSettings.json` located at the top of SDK fs layout. // Use property `MinimalDisplayName` to determine version. // The file/property is also available with vendored libc. -fn readSdkVersionFromSettings(arena: Allocator, dir: []const u8) ![]const u8 { +fn readSdkVersionFromSettings(arena: Allocator, io: Io, dir: []const u8) ![]const u8 { const sdk_path = try fs.path.join(arena, &.{ dir, "SDKSettings.json" }); - const contents = try Io.Dir.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16))); + const contents = try Io.Dir.cwd().readFileAlloc(io, sdk_path, arena, .limited(std.math.maxInt(u16))); const parsed = try std.json.parseFromSlice(std.json.Value, arena, contents, .{}); if (parsed.value.object.get("MinimalDisplayName")) |ver| return ver.string; return error.SdkVersionFailure; diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index ec516d4af0..0955c823b8 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -17,6 +17,12 @@ const MachO = @import("../MachO.zig"); const hash_size = Sha256.digest_length; +page_size: u16, +code_directory: CodeDirectory, +requirements: ?Requirements = null, +entitlements: ?Entitlements = null, +signature: ?Signature = null, + const Blob = union(enum) { code_directory: *CodeDirectory, requirements: *Requirements, @@ -220,12 +226,6 @@ const Signature = struct { } }; -page_size: u16, -code_directory: CodeDirectory, -requirements: ?Requirements = null, -entitlements: ?Entitlements = null, -signature: ?Signature = null, - pub fn init(page_size: u16) CodeSignature { return .{ .page_size = page_size, @@ -246,8 +246,8 @@ pub fn deinit(self: *CodeSignature, allocator: Allocator) void { } } -pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, path: []const u8) !void { - const inner = try Io.Dir.cwd().readFileAlloc(path, allocator, .limited(std.math.maxInt(u32))); +pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, io: Io, path: []const u8) !void { + const inner = try Io.Dir.cwd().readFileAlloc(io, path, allocator, .limited(std.math.maxInt(u32))); self.entitlements = .{ .inner = inner }; } diff --git a/src/main.zig b/src/main.zig index 88e236adef..9bb88d373b 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1029,9 +1029,8 @@ fn buildOutputType( if (mem.cutPrefix(u8, arg, "@")) |resp_file_path| { // This is a "compiler response file". We must parse the file and treat its // contents as command line parameters. - args_iter.resp_file = initArgIteratorResponseFile(arena, resp_file_path) catch |err| { - fatal("unable to read response file '{s}': {s}", .{ resp_file_path, @errorName(err) }); - }; + args_iter.resp_file = initArgIteratorResponseFile(arena, io, resp_file_path) catch |err| + fatal("unable to read response file '{s}': {t}", .{ resp_file_path, err }); } else if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { try Io.File.stdout().writeAll(usage_build_generic); @@ -5441,7 +5440,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) // that are missing. 
const s = fs.path.sep_str; const tmp_sub_path = "tmp" ++ s ++ results_tmp_file_nonce; - const stdout = dirs.local_cache.handle.readFileAlloc(tmp_sub_path, arena, .limited(50 * 1024 * 1024)) catch |err| { + const stdout = dirs.local_cache.handle.readFileAlloc(io, tmp_sub_path, arena, .limited(50 * 1024 * 1024)) catch |err| { fatal("unable to read results of configure phase from '{f}{s}': {s}", .{ dirs.local_cache, tmp_sub_path, @errorName(err), }); @@ -5822,9 +5821,9 @@ pub fn lldMain( const ArgIteratorResponseFile = process.ArgIteratorGeneral(.{ .comments = true, .single_quotes = true }); /// Initialize the arguments from a Response File. "*.rsp" -fn initArgIteratorResponseFile(allocator: Allocator, resp_file_path: []const u8) !ArgIteratorResponseFile { +fn initArgIteratorResponseFile(allocator: Allocator, io: Io, resp_file_path: []const u8) !ArgIteratorResponseFile { const max_bytes = 10 * 1024 * 1024; // 10 MiB of command line arguments is a reasonable limit - const cmd_line = try Io.Dir.cwd().readFileAlloc(resp_file_path, allocator, .limited(max_bytes)); + const cmd_line = try Io.Dir.cwd().readFileAlloc(io, resp_file_path, allocator, .limited(max_bytes)); errdefer allocator.free(cmd_line); return ArgIteratorResponseFile.initTakeOwnership(allocator, cmd_line); @@ -5952,7 +5951,7 @@ pub const ClangArgIterator = struct { }; } - fn next(self: *ClangArgIterator) !void { + fn next(self: *ClangArgIterator, io: Io) !void { assert(self.has_next); assert(self.next_index < self.argv.len); // In this state we know that the parameter we are looking at is a root parameter @@ -5970,10 +5969,8 @@ pub const ClangArgIterator = struct { const arena = self.arena; const resp_file_path = arg[1..]; - self.arg_iterator_response_file = - initArgIteratorResponseFile(arena, resp_file_path) catch |err| { - fatal("unable to read response file '{s}': {s}", .{ resp_file_path, @errorName(err) }); - }; + self.arg_iterator_response_file = initArgIteratorResponseFile(arena, io, resp_file_path) catch |err| + fatal("unable to read response file '{s}': {t}", .{ resp_file_path, err }); // NOTE: The ArgIteratorResponseFile returns tokens from next() that are slices of an // internal buffer. This internal buffer is arena allocated, so it is not cleaned up here. 
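The remaining src/main.zig hunks follow the same shape as the ones above: file reads now go through an explicit Io instance instead of an ambient one. A rough usage sketch assembled from the signatures visible in these hunks; the helper name loadRespFile, the literal path parameter, and the 10 MiB limit are illustrative only and not part of the patch:

    const std = @import("std");
    const Io = std.Io;

    /// Minimal sketch: load a response file through an explicit Io instance.
    /// Caller owns the returned memory (allocated from `arena`).
    fn loadRespFile(arena: std.mem.Allocator, io: Io, path: []const u8) ![]u8 {
        // readFileAlloc now takes the Io instance first, then the path,
        // the allocator, and an explicit size limit.
        return Io.Dir.cwd().readFileAlloc(io, path, arena, .limited(10 * 1024 * 1024));
    }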
@@ -7405,7 +7402,7 @@ const Templates = struct { } const max_bytes = 10 * 1024 * 1024; - const contents = templates.dir.readFileAlloc(template_path, arena, .limited(max_bytes)) catch |err| { + const contents = templates.dir.readFileAlloc(io, template_path, arena, .limited(max_bytes)) catch |err| { fatal("unable to read template file '{s}': {t}", .{ template_path, err }); }; templates.buffer.clearRetainingCapacity(); -- cgit v1.2.3 From 4a53e5b0b4131c6b8e18bb551e8215e425f8ac71 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 20:03:50 -0800 Subject: fix a handful of compilation errors related to std.fs migration --- lib/compiler/resinator/cli.zig | 2 +- lib/compiler/resinator/main.zig | 2 +- lib/compiler/std-docs.zig | 18 +++---- lib/std/Build.zig | 2 +- lib/std/Build/Cache.zig | 2 +- lib/std/Build/Step.zig | 2 +- lib/std/Build/Step/Run.zig | 4 +- lib/std/Build/WebServer.zig | 2 +- lib/std/Io/Dir.zig | 7 +-- lib/std/Io/File.zig | 4 ++ lib/std/Io/File/Atomic.zig | 7 ++- lib/std/Io/net/test.zig | 2 +- lib/std/Io/test.zig | 2 +- lib/std/fs/test.zig | 26 +++++----- lib/std/posix/test.zig | 2 +- lib/std/process/Child.zig | 94 +++++++++++++++++----------------- lib/std/tar.zig | 36 ++++++------- src/Compilation.zig | 15 +++--- src/Package/Fetch.zig | 12 ++--- src/link.zig | 14 +++-- src/link/Lld.zig | 8 +-- src/main.zig | 13 ++--- test/src/Cases.zig | 3 +- test/standalone/child_process/main.zig | 2 +- 24 files changed, 148 insertions(+), 133 deletions(-) (limited to 'lib/std/Build/WebServer.zig') diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig index 5588390197..d2dd71b1f6 100644 --- a/lib/compiler/resinator/cli.zig +++ b/lib/compiler/resinator/cli.zig @@ -2010,7 +2010,7 @@ test "maybeAppendRC" { // Now delete the file and try again. But this time change the input format // to non-rc. - try tmp.dir.deleteFile("foo"); + try tmp.dir.deleteFile(io, "foo"); options.input_format = .res; try options.maybeAppendRC(io, tmp.dir); try std.testing.expectEqualStrings("foo", options.input_source.filename); diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index afe1dcbe91..6c12903f06 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -440,7 +440,7 @@ const IoStream = struct { // Delete the output file on error file.close(io); // Failing to delete is not really a big deal, so swallow any errors - Io.Dir.cwd().deleteFile(self.name) catch {}; + Io.Dir.cwd().deleteFile(io, self.name) catch {}; }, .stdio, .memory, .closed => return, } diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index 6452496305..538558fe48 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -72,8 +72,8 @@ pub fn main() !void { const url_with_newline = try std.fmt.allocPrint(arena, "http://127.0.0.1:{d}/\n", .{port}); std.Io.File.stdout().writeAll(url_with_newline) catch {}; if (should_open_browser) { - openBrowserTab(gpa, url_with_newline[0 .. url_with_newline.len - 1 :'\n']) catch |err| { - std.log.err("unable to open browser: {s}", .{@errorName(err)}); + openBrowserTab(gpa, io, url_with_newline[0 .. 
url_with_newline.len - 1 :'\n']) catch |err| { + std.log.err("unable to open browser: {t}", .{err}); }; } @@ -89,7 +89,7 @@ pub fn main() !void { while (true) { const connection = try http_server.accept(); _ = std.Thread.spawn(.{}, accept, .{ &context, connection }) catch |err| { - std.log.err("unable to accept connection: {s}", .{@errorName(err)}); + std.log.err("unable to accept connection: {t}", .{err}); connection.stream.close(io); continue; }; @@ -328,7 +328,7 @@ fn buildWasmBinary( child.stdin_behavior = .Pipe; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; - try child.spawn(); + try child.spawn(io); var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{ .stdout = child.stdout.?, @@ -434,13 +434,13 @@ fn sendMessage(io: Io, file: std.Io.File, tag: std.zig.Client.Message.Tag) !void }; } -fn openBrowserTab(gpa: Allocator, url: []const u8) !void { +fn openBrowserTab(gpa: Allocator, io: Io, url: []const u8) !void { // Until https://github.com/ziglang/zig/issues/19205 is implemented, we // spawn a thread for this child process. - _ = try std.Thread.spawn(.{}, openBrowserTabThread, .{ gpa, url }); + _ = try std.Thread.spawn(.{}, openBrowserTabThread, .{ gpa, io, url }); } -fn openBrowserTabThread(gpa: Allocator, url: []const u8) !void { +fn openBrowserTabThread(gpa: Allocator, io: Io, url: []const u8) !void { const main_exe = switch (builtin.os.tag) { .windows => "explorer", .macos => "open", @@ -450,6 +450,6 @@ fn openBrowserTabThread(gpa: Allocator, url: []const u8) !void { child.stdin_behavior = .Ignore; child.stdout_behavior = .Ignore; child.stderr_behavior = .Ignore; - try child.spawn(); - _ = try child.wait(); + try child.spawn(io); + _ = try child.wait(io); } diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 746b41860b..ae2ab1c4d0 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1838,7 +1838,7 @@ pub fn runAllowFail( child.env_map = &b.graph.env_map; try Step.handleVerbose2(b, null, child.env_map, argv); - try child.spawn(); + try child.spawn(io); var stdout_reader = child.stdout.?.readerStreaming(io, &.{}); const stdout = stdout_reader.interface.allocRemaining(b.allocator, .limited(max_output_size)) catch { diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index d2ba33c74d..b97efc7677 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1300,7 +1300,7 @@ fn testGetCurrentFileTimestamp(io: Io, dir: Io.Dir) !Io.Timestamp { }); defer { file.close(io); - dir.deleteFile(test_out_file) catch {}; + dir.deleteFile(io, test_out_file) catch {}; } return (try file.stat(io)).mtime; diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 8115aaa1a1..9c7fcc757f 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -455,7 +455,7 @@ pub fn evalZigProcess( child.request_resource_usage_statistics = true; child.progress_node = prog_node; - child.spawn() catch |err| return s.fail("failed to spawn zig compiler {s}: {t}", .{ argv[0], err }); + child.spawn(io) catch |err| return s.fail("failed to spawn zig compiler {s}: {t}", .{ argv[0], err }); const zp = try gpa.create(ZigProcess); zp.* = .{ diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 19d2e7e61c..95024061d8 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1689,7 +1689,7 @@ fn evalZigTest( }; while (true) { - try child.spawn(); + try child.spawn(io); var poller = std.Io.poll(gpa, StdioPollEnum, .{ .stdout = child.stdout.?, .stderr = child.stderr.?, @@ -2168,7 +2168,7 @@ fn evalGeneric(run: *Run, child: 
*std.process.Child) !EvalGenericResult { const io = b.graph.io; const arena = b.allocator; - try child.spawn(); + try child.spawn(io); errdefer _ = child.kill(io) catch {}; try child.waitForSpawn(); diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 72306cbab9..5f633c5948 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -580,7 +580,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim child.stdin_behavior = .Pipe; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; - try child.spawn(); + try child.spawn(io); var poller = Io.poll(gpa, enum { stdout, stderr }, .{ .stdout = child.stdout.?, diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig index cf5e2b4c72..d6d6aa1be2 100644 --- a/lib/std/Io/Dir.zig +++ b/lib/std/Io/Dir.zig @@ -1366,7 +1366,7 @@ pub fn deleteTree(dir: Dir, io: Io, sub_path: []const u8) DeleteTreeError!void { => |e| return e, }; } else { - if (parent_dir.deleteFile(name)) { + if (parent_dir.deleteFile(io, name)) { continue :process_stack; } else |err| switch (err) { error.FileNotFound => continue :process_stack, @@ -1477,7 +1477,7 @@ fn deleteTreeMinStackSizeWithKindHint(parent: Dir, io: Io, sub_path: []const u8, dir_name = result; continue :scan_dir; } else { - if (dir.deleteFile(entry.name)) { + if (dir.deleteFile(io, entry.name)) { continue :dir_it; } else |err| switch (err) { error.FileNotFound => continue :dir_it, @@ -1567,7 +1567,7 @@ fn deleteTreeOpenInitialSubpath(dir: Dir, io: Io, sub_path: []const u8, kind_hin => |e| return e, }; } else { - if (dir.deleteFile(sub_path)) { + if (dir.deleteFile(io, sub_path)) { return null; } else |err| switch (err) { error.FileNotFound => return null, @@ -1588,6 +1588,7 @@ fn deleteTreeOpenInitialSubpath(dir: Dir, io: Io, sub_path: []const u8, kind_hin error.FileBusy, error.BadPathName, error.NetworkNotFound, + error.Canceled, error.Unexpected, => |e| return e, } diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index 302d2a8ca5..6d4dd4f323 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -407,6 +407,10 @@ pub const Permissions = std.options.FilePermissions orelse if (is_windows) enum( return @intFromEnum(self); } + pub fn fromMode(mode: std.posix.mode_t) @This() { + return @enumFromInt(mode); + } + /// Returns `true` if and only if no class has write permissions. 
pub fn readOnly(self: @This()) bool { const mode = toMode(self); diff --git a/lib/std/Io/File/Atomic.zig b/lib/std/Io/File/Atomic.zig index ccb81815ed..7d412703ed 100644 --- a/lib/std/Io/File/Atomic.zig +++ b/lib/std/Io/File/Atomic.zig @@ -20,7 +20,7 @@ pub const InitError = File.OpenError; pub fn init( io: Io, dest_basename: []const u8, - mode: File.Mode, + permissions: File.Permissions, dir: Dir, close_dir_on_deinit: bool, write_buffer: []u8, @@ -28,7 +28,10 @@ pub fn init( while (true) { const random_integer = std.crypto.random.int(u64); const tmp_sub_path = std.fmt.hex(random_integer); - const file = dir.createFile(io, &tmp_sub_path, .{ .mode = mode, .exclusive = true }) catch |err| switch (err) { + const file = dir.createFile(io, &tmp_sub_path, .{ + .permissions = permissions, + .exclusive = true, + }) catch |err| switch (err) { error.PathAlreadyExists => continue, else => |e| return e, }; diff --git a/lib/std/Io/net/test.zig b/lib/std/Io/net/test.zig index c9ed0d3284..23edab5a5d 100644 --- a/lib/std/Io/net/test.zig +++ b/lib/std/Io/net/test.zig @@ -278,7 +278,7 @@ test "listen on a unix socket, send bytes, receive bytes" { defer testing.allocator.free(socket_path); const socket_addr = try net.UnixAddress.init(socket_path); - defer Io.Dir.cwd().deleteFile(socket_path) catch {}; + defer Io.Dir.cwd().deleteFile(io, socket_path) catch {}; var server = try socket_addr.listen(io, .{}); defer server.socket.close(io); diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index 9763fb2397..02bdb591d0 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -60,7 +60,7 @@ test "write a file, read it, then delete it" { try expect(mem.eql(u8, contents["begin".len .. contents.len - "end".len], &data)); try expect(mem.eql(u8, contents[contents.len - "end".len ..], "end")); } - try tmp.dir.deleteFile(tmp_file_name); + try tmp.dir.deleteFile(io, tmp_file_name); } test "File seek ops" { diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index edc15526b2..da0a0cff79 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -873,7 +873,7 @@ test "file operations on directories" { try ctx.dir.makeDir(io, test_dir_name, .default_dir); try testing.expectError(error.IsDir, ctx.dir.createFile(io, test_dir_name, .{})); - try testing.expectError(error.IsDir, ctx.dir.deleteFile(test_dir_name)); + try testing.expectError(error.IsDir, ctx.dir.deleteFile(io, test_dir_name)); switch (native_os) { .dragonfly, .netbsd => { // no error when reading a directory. 
See https://github.com/ziglang/zig/issues/5732 @@ -942,7 +942,7 @@ test "deleteDir" { try testing.expectError(error.DirNotEmpty, ctx.dir.deleteDir(test_dir_path)); // deleting an empty directory - try ctx.dir.deleteFile(test_file_path); + try ctx.dir.deleteFile(io, test_file_path); try ctx.dir.deleteDir(test_dir_path); } }.impl); @@ -1671,13 +1671,13 @@ test "copyFile" { const dest_file2 = try ctx.transformPath("tmp_test_copy_file3.txt"); try ctx.dir.writeFile(io, .{ .sub_path = src_file, .data = data }); - defer ctx.dir.deleteFile(src_file) catch {}; + defer ctx.dir.deleteFile(io, src_file) catch {}; try ctx.dir.copyFile(src_file, ctx.dir, dest_file, .{}); - defer ctx.dir.deleteFile(dest_file) catch {}; + defer ctx.dir.deleteFile(io, dest_file) catch {}; try ctx.dir.copyFile(src_file, ctx.dir, dest_file2, .{ .override_mode = File.default_mode }); - defer ctx.dir.deleteFile(dest_file2) catch {}; + defer ctx.dir.deleteFile(io, dest_file2) catch {}; try expectFileContents(io, ctx.dir, dest_file, data); try expectFileContents(io, ctx.dir, dest_file2, data); @@ -1713,7 +1713,7 @@ test "AtomicFile" { const content = try ctx.dir.readFileAlloc(io, test_out_file, allocator, .limited(9999)); try testing.expectEqualStrings(test_content, content); - try ctx.dir.deleteFile(test_out_file); + try ctx.dir.deleteFile(io, test_out_file); } }.impl); } @@ -2055,7 +2055,7 @@ test "'.' and '..' in Io.Dir functions" { try ctx.dir.rename(copy_path, ctx.dir, rename_path, io); const renamed_file = try ctx.dir.openFile(io, rename_path, .{}); renamed_file.close(io); - try ctx.dir.deleteFile(rename_path); + try ctx.dir.deleteFile(io, rename_path); try ctx.dir.writeFile(io, .{ .sub_path = update_path, .data = "something" }); var dir = ctx.dir; @@ -2113,19 +2113,19 @@ test "chmod" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - const file = try tmp.dir.createFile(io, "test_file", .{ .mode = 0o600 }); + const file = try tmp.dir.createFile(io, "test_file", .{ .permissions = .fromMode(0o600) }); defer file.close(io); - try testing.expectEqual(@as(File.Mode, 0o600), (try file.stat(io)).mode & 0o7777); + try testing.expectEqual(@as(posix.mode_t, 0o600), (try file.stat(io)).permissions.toMode() & 0o7777); - try file.chmod(0o644); - try testing.expectEqual(@as(File.Mode, 0o644), (try file.stat(io)).mode & 0o7777); + try file.setPermissions(io, .fromMode(0o644)); + try testing.expectEqual(@as(posix.mode_t, 0o644), (try file.stat(io)).permissions.toMode() & 0o7777); try tmp.dir.makeDir(io, "test_dir", .default_dir); var dir = try tmp.dir.openDir(io, "test_dir", .{ .iterate = true }); defer dir.close(io); - try dir.chmod(0o700); - try testing.expectEqual(@as(File.Mode, 0o700), (try dir.stat(io)).mode & 0o7777); + try dir.setPermissions(io, .fromMode(0o700)); + try testing.expectEqual(@as(posix.mode_t, 0o700), (try dir.stat(io)).permissions.toMode() & 0o7777); } test "chown" { diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 756ecfd63f..b5255ad2a1 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -144,7 +144,7 @@ test "linkat with different directories" { const subdir = try tmp.dir.makeOpenPath("subdir", .{}); - defer tmp.dir.deleteFile(target_name) catch {}; + defer tmp.dir.deleteFile(io, target_name) catch {}; try tmp.dir.writeFile(io, .{ .sub_path = target_name, .data = "example" }); // Test 1: link from file in subdir back up to target in parent directory diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index dbbacc496c..17139e66b8 100644 --- a/lib/std/process/Child.zig 
+++ b/lib/std/process/Child.zig @@ -1,4 +1,4 @@ -const ChildProcess = @This(); +const Child = @This(); const builtin = @import("builtin"); const native_os = builtin.os.tag; @@ -31,7 +31,7 @@ pub const Id = switch (native_os) { id: Id, thread_handle: if (native_os == .windows) windows.HANDLE else void, -allocator: mem.Allocator, +allocator: Allocator, /// The writing end of the child process's standard input pipe. /// Usage requires `stdin_behavior == StdIo.Pipe`. @@ -229,7 +229,7 @@ pub const StdIo = enum { }; /// First argument in argv is the executable. -pub fn init(argv: []const []const u8, allocator: mem.Allocator) ChildProcess { +pub fn init(argv: []const []const u8, allocator: Allocator) Child { return .{ .allocator = allocator, .argv = argv, @@ -252,7 +252,7 @@ pub fn init(argv: []const []const u8, allocator: mem.Allocator) ChildProcess { }; } -pub fn setUserName(self: *ChildProcess, name: []const u8) !void { +pub fn setUserName(self: *Child, name: []const u8) !void { const user_info = try process.getUserInfo(name); self.uid = user_info.uid; self.gid = user_info.gid; @@ -260,7 +260,7 @@ pub fn setUserName(self: *ChildProcess, name: []const u8) !void { /// On success must call `kill` or `wait`. /// After spawning the `id` is available. -pub fn spawn(self: *ChildProcess) SpawnError!void { +pub fn spawn(self: *Child, io: Io) SpawnError!void { if (!process.can_spawn) { @compileError("the target operating system cannot spawn processes"); } @@ -268,17 +268,17 @@ pub fn spawn(self: *ChildProcess) SpawnError!void { if (native_os == .windows) { return self.spawnWindows(); } else { - return self.spawnPosix(); + return self.spawnPosix(io); } } -pub fn spawnAndWait(self: *ChildProcess) SpawnError!Term { - try self.spawn(); - return self.wait(); +pub fn spawnAndWait(child: *Child, io: Io) SpawnError!Term { + try child.spawn(io); + return child.wait(io); } /// Forcibly terminates child process and then cleans up all resources. -pub fn kill(self: *ChildProcess, io: Io) !Term { +pub fn kill(self: *Child, io: Io) !Term { if (native_os == .windows) { return self.killWindows(io, 1); } else { @@ -286,7 +286,7 @@ pub fn kill(self: *ChildProcess, io: Io) !Term { } } -pub fn killWindows(self: *ChildProcess, io: Io, exit_code: windows.UINT) !Term { +pub fn killWindows(self: *Child, io: Io, exit_code: windows.UINT) !Term { if (self.term) |term| { self.cleanupStreams(io); return term; @@ -308,7 +308,7 @@ pub fn killWindows(self: *ChildProcess, io: Io, exit_code: windows.UINT) !Term { return self.term.?; } -pub fn killPosix(self: *ChildProcess, io: Io) !Term { +pub fn killPosix(self: *Child, io: Io) !Term { if (self.term) |term| { self.cleanupStreams(io); return term; @@ -325,7 +325,7 @@ pub const WaitError = SpawnError || std.os.windows.GetProcessMemoryInfoError; /// On some targets, `spawn` may not report all spawn errors, such as `error.InvalidExe`. /// This function will block until any spawn errors can be reported, and return them. -pub fn waitForSpawn(self: *ChildProcess) SpawnError!void { +pub fn waitForSpawn(self: *Child) SpawnError!void { if (native_os == .windows) return; // `spawn` reports everything if (self.term) |term| { _ = term catch |spawn_err| return spawn_err; @@ -355,7 +355,7 @@ pub fn waitForSpawn(self: *ChildProcess) SpawnError!void { } /// Blocks until child process terminates and then cleans up all resources. 
-pub fn wait(self: *ChildProcess, io: Io) WaitError!Term { +pub fn wait(self: *Child, io: Io) WaitError!Term { try self.waitForSpawn(); // report spawn errors if (self.term) |term| { self.cleanupStreams(io); @@ -381,7 +381,7 @@ pub const RunResult = struct { /// /// The process must be started with stdout_behavior and stderr_behavior == .Pipe pub fn collectOutput( - child: ChildProcess, + child: Child, /// Used for `stdout` and `stderr`. allocator: Allocator, stdout: *ArrayList(u8), @@ -446,7 +446,7 @@ pub fn run(allocator: Allocator, io: Io, args: struct { expand_arg0: Arg0Expand = .no_expand, progress_node: std.Progress.Node = std.Progress.Node.none, }) RunError!RunResult { - var child = ChildProcess.init(args.argv, allocator); + var child = Child.init(args.argv, allocator); child.stdin_behavior = .Ignore; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; @@ -461,7 +461,7 @@ pub fn run(allocator: Allocator, io: Io, args: struct { var stderr: ArrayList(u8) = .empty; defer stderr.deinit(allocator); - try child.spawn(); + try child.spawn(io); errdefer { _ = child.kill(io) catch {}; } @@ -474,7 +474,7 @@ pub fn run(allocator: Allocator, io: Io, args: struct { }; } -fn waitUnwrappedWindows(self: *ChildProcess, io: Io) WaitError!void { +fn waitUnwrappedWindows(self: *Child, io: Io) WaitError!void { const result = windows.WaitForSingleObjectEx(self.id, windows.INFINITE, false); self.term = @as(SpawnError!Term, x: { @@ -496,7 +496,7 @@ fn waitUnwrappedWindows(self: *ChildProcess, io: Io) WaitError!void { return result; } -fn waitUnwrappedPosix(self: *ChildProcess, io: Io) void { +fn waitUnwrappedPosix(self: *Child, io: Io) void { const res: posix.WaitPidResult = res: { if (self.request_resource_usage_statistics) { switch (native_os) { @@ -531,11 +531,11 @@ fn waitUnwrappedPosix(self: *ChildProcess, io: Io) void { self.handleWaitResult(status); } -fn handleWaitResult(self: *ChildProcess, status: u32) void { +fn handleWaitResult(self: *Child, status: u32) void { self.term = statusToTerm(status); } -fn cleanupStreams(self: *ChildProcess, io: Io) void { +fn cleanupStreams(self: *Child, io: Io) void { if (self.stdin) |*stdin| { stdin.close(io); self.stdin = null; @@ -561,7 +561,7 @@ fn statusToTerm(status: u32) Term { Term{ .Unknown = status }; } -fn spawnPosix(self: *ChildProcess) SpawnError!void { +fn spawnPosix(self: *Child, io: Io) SpawnError!void { // The child process does need to access (one end of) these pipes. However, // we must initially set CLOEXEC to avoid a race condition. If another thread // is racing to spawn a different child process, we don't want it to inherit @@ -659,7 +659,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { })).ptr; } else { // TODO come up with a solution for this. 
- @panic("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process"); + @panic("missing std lib enhancement: std.process.Child implementation has no way to collect the environment variables to forward to the child process"); } }; @@ -671,41 +671,41 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { const pid_result = try posix.fork(); if (pid_result == 0) { // we are the child - setUpChildIo(self.stdin_behavior, stdin_pipe[0], posix.STDIN_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); - setUpChildIo(self.stdout_behavior, stdout_pipe[1], posix.STDOUT_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); - setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); + setUpChildIo(self.stdin_behavior, stdin_pipe[0], posix.STDIN_FILENO, dev_null_fd) catch |err| forkChildErrReport(io, err_pipe[1], err); + setUpChildIo(self.stdout_behavior, stdout_pipe[1], posix.STDOUT_FILENO, dev_null_fd) catch |err| forkChildErrReport(io, err_pipe[1], err); + setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(io, err_pipe[1], err); if (self.cwd_dir) |cwd| { - posix.fchdir(cwd.handle) catch |err| forkChildErrReport(err_pipe[1], err); + posix.fchdir(cwd.handle) catch |err| forkChildErrReport(io, err_pipe[1], err); } else if (self.cwd) |cwd| { - posix.chdir(cwd) catch |err| forkChildErrReport(err_pipe[1], err); + posix.chdir(cwd) catch |err| forkChildErrReport(io, err_pipe[1], err); } // Must happen after fchdir above, the cwd file descriptor might be // equal to prog_fileno and be clobbered by this dup2 call. - if (prog_pipe[1] != -1) posix.dup2(prog_pipe[1], prog_fileno) catch |err| forkChildErrReport(err_pipe[1], err); + if (prog_pipe[1] != -1) posix.dup2(prog_pipe[1], prog_fileno) catch |err| forkChildErrReport(io, err_pipe[1], err); if (self.gid) |gid| { - posix.setregid(gid, gid) catch |err| forkChildErrReport(err_pipe[1], err); + posix.setregid(gid, gid) catch |err| forkChildErrReport(io, err_pipe[1], err); } if (self.uid) |uid| { - posix.setreuid(uid, uid) catch |err| forkChildErrReport(err_pipe[1], err); + posix.setreuid(uid, uid) catch |err| forkChildErrReport(io, err_pipe[1], err); } if (self.pgid) |pid| { - posix.setpgid(0, pid) catch |err| forkChildErrReport(err_pipe[1], err); + posix.setpgid(0, pid) catch |err| forkChildErrReport(io, err_pipe[1], err); } if (self.start_suspended) { - posix.kill(posix.getpid(), .STOP) catch |err| forkChildErrReport(err_pipe[1], err); + posix.kill(posix.getpid(), .STOP) catch |err| forkChildErrReport(io, err_pipe[1], err); } const err = switch (self.expand_arg0) { .expand => posix.execvpeZ_expandArg0(.expand, argv_buf.ptr[0].?, argv_buf.ptr, envp), .no_expand => posix.execvpeZ_expandArg0(.no_expand, argv_buf.ptr[0].?, argv_buf.ptr, envp), }; - forkChildErrReport(err_pipe[1], err); + forkChildErrReport(io, err_pipe[1], err); } // we are the parent @@ -750,7 +750,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { self.progress_node.setIpcFd(prog_pipe[0]); } -fn spawnWindows(self: *ChildProcess) SpawnError!void { +fn spawnWindows(self: *Child) SpawnError!void { var saAttr = windows.SECURITY_ATTRIBUTES{ .nLength = @sizeOf(windows.SECURITY_ATTRIBUTES), .bInheritHandle = windows.TRUE, @@ -880,7 +880,7 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void { const app_name_wtf8 = self.argv[0]; const 
app_name_is_absolute = fs.path.isAbsolute(app_name_wtf8); - // the cwd set in ChildProcess is in effect when choosing the executable path + // the cwd set in Child is in effect when choosing the executable path // to match posix semantics var cwd_path_w_needs_free = false; const cwd_path_w = x: { @@ -965,7 +965,7 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void { // If the app name had path separators, that disallows PATH searching, // and there's no need to search the PATH if the app name is absolute. // We still search the path if the cwd is absolute because of the - // "cwd set in ChildProcess is in effect when choosing the executable path + // "cwd set in Child is in effect when choosing the executable path // to match posix semantics" behavior--we don't want to skip searching // the PATH just because we were trying to set the cwd of the child process. if (app_dirname_w != null or app_name_is_absolute) { @@ -1039,8 +1039,8 @@ fn destroyPipe(pipe: [2]posix.fd_t) void { // Child of fork calls this to report an error to the fork parent. // Then the child exits. -fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn { - writeIntFd(fd, @as(ErrInt, @intFromError(err))) catch {}; +fn forkChildErrReport(io: Io, fd: i32, err: Child.SpawnError) noreturn { + writeIntFd(io, fd, @as(ErrInt, @intFromError(err))) catch {}; // If we're linking libc, some naughty applications may have registered atexit handlers // which we really do not want to run in the fork child. I caught LLVM doing this and // it caused a deadlock instead of doing an exit syscall. In the words of Avril Lavigne, @@ -1052,9 +1052,9 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn { posix.system.exit(1); } -fn writeIntFd(fd: i32, value: ErrInt) !void { +fn writeIntFd(io: Io, fd: i32, value: ErrInt) !void { var buffer: [8]u8 = undefined; - var fw: File.Writer = .initStreaming(.{ .handle = fd }, &buffer); + var fw: File.Writer = .initStreaming(.{ .handle = fd }, io, &buffer); fw.interface.writeInt(u64, value, .little) catch unreachable; fw.interface.flush() catch return error.SystemResources; } @@ -1078,7 +1078,7 @@ const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8); /// Note: `app_buf` should not contain any leading path separators. /// Note: If the dir is the cwd, dir_buf should be empty (len = 0). fn windowsCreateProcessPathExt( - allocator: mem.Allocator, + allocator: Allocator, dir_buf: *ArrayList(u16), app_buf: *ArrayList(u16), pathext: [:0]const u16, @@ -1525,9 +1525,9 @@ const WindowsCommandLineCache = struct { script_cmd_line: ?[:0]u16 = null, cmd_exe_path: ?[:0]u16 = null, argv: []const []const u8, - allocator: mem.Allocator, + allocator: Allocator, - fn init(allocator: mem.Allocator, argv: []const []const u8) WindowsCommandLineCache { + fn init(allocator: Allocator, argv: []const []const u8) WindowsCommandLineCache { return .{ .allocator = allocator, .argv = argv, @@ -1571,7 +1571,7 @@ const WindowsCommandLineCache = struct { /// Returns the absolute path of `cmd.exe` within the Windows system directory. /// The caller owns the returned slice. 
-fn windowsCmdExePath(allocator: mem.Allocator) error{ OutOfMemory, Unexpected }![:0]u16 { +fn windowsCmdExePath(allocator: Allocator) error{ OutOfMemory, Unexpected }![:0]u16 { var buf = try ArrayList(u16).initCapacity(allocator, 128); errdefer buf.deinit(allocator); while (true) { @@ -1608,7 +1608,7 @@ const ArgvToCommandLineError = error{ OutOfMemory, InvalidWtf8, InvalidArg0 }; /// /// When executing `.bat`/`.cmd` scripts, use `argvToScriptCommandLineWindows` instead. fn argvToCommandLineWindows( - allocator: mem.Allocator, + allocator: Allocator, argv: []const []const u8, ) ArgvToCommandLineError![:0]u16 { var buf = std.array_list.Managed(u8).init(allocator); @@ -1784,7 +1784,7 @@ const ArgvToScriptCommandLineError = error{ /// Should only be used when spawning `.bat`/`.cmd` scripts, see `argvToCommandLineWindows` otherwise. /// The `.bat`/`.cmd` file must be known to both have the `.bat`/`.cmd` extension and exist on the filesystem. fn argvToScriptCommandLineWindows( - allocator: mem.Allocator, + allocator: Allocator, /// Path to the `.bat`/`.cmd` script. If this path is relative, it is assumed to be relative to the CWD. /// The script must have been verified to exist at this path before calling this function. script_path: []const u16, diff --git a/lib/std/tar.zig b/lib/std/tar.zig index 5e7215f1d7..7bb20a9959 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -610,7 +610,7 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp } }, .file => { - if (createDirAndFile(io, dir, file_name, fileMode(file.mode, options))) |fs_file| { + if (createDirAndFile(io, dir, file_name, filePermissions(file.mode, options))) |fs_file| { defer fs_file.close(io); var file_writer = fs_file.writer(io, &file_contents_buffer); try it.streamRemaining(file, &file_writer.interface); @@ -638,12 +638,12 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp } } -fn createDirAndFile(io: Io, dir: Io.Dir, file_name: []const u8, mode: Io.File.Mode) !Io.File { - const fs_file = dir.createFile(io, file_name, .{ .exclusive = true, .mode = mode }) catch |err| { +fn createDirAndFile(io: Io, dir: Io.Dir, file_name: []const u8, permissions: Io.File.Permissions) !Io.File { + const fs_file = dir.createFile(io, file_name, .{ .exclusive = true, .permissions = permissions }) catch |err| { if (err == error.FileNotFound) { if (std.fs.path.dirname(file_name)) |dir_name| { try dir.makePath(io, dir_name); - return try dir.createFile(io, file_name, .{ .exclusive = true, .mode = mode }); + return try dir.createFile(io, file_name, .{ .exclusive = true, .permissions = permissions }); } } return err; @@ -880,9 +880,9 @@ test "create file and symlink" { var root = testing.tmpDir(.{}); defer root.cleanup(); - var file = try createDirAndFile(io, root.dir, "file1", default_mode); + var file = try createDirAndFile(io, root.dir, "file1", .default_file); file.close(io); - file = try createDirAndFile(io, root.dir, "a/b/c/file2", default_mode); + file = try createDirAndFile(io, root.dir, "a/b/c/file2", .default_file); file.close(io); createDirAndSymlink(io, root.dir, "a/b/c/file2", "symlink1") catch |err| { @@ -894,7 +894,7 @@ test "create file and symlink" { // Danglink symlnik, file created later try createDirAndSymlink(io, root.dir, "../../../g/h/i/file4", "j/k/l/symlink3"); - file = try createDirAndFile(io, root.dir, "g/h/i/file4", default_mode); + file = try createDirAndFile(io, root.dir, "g/h/i/file4", .default_file); file.close(io); } @@ -1118,30 +1118,30 @@ fn 
normalizePath(bytes: []u8) []u8 { return bytes; } -const default_mode = Io.File.default_mode; - // File system mode based on tar header mode and mode_mode options. -fn fileMode(mode: u32, options: PipeOptions) Io.File.Mode { +fn filePermissions(mode: u32, options: PipeOptions) Io.File.Permissions { + const default_mode = 0o666; + if (!std.fs.has_executable_bit or options.mode_mode == .ignore) - return default_mode; + return .fromMode(default_mode); const S = std.posix.S; // The mode from the tar file is inspected for the owner executable bit. if (mode & S.IXUSR == 0) - return default_mode; + return .fromMode(default_mode); // This bit is copied to the group and other executable bits. // Other bits of the mode are left as the default when creating files. - return default_mode | S.IXUSR | S.IXGRP | S.IXOTH; + return .fromMode(default_mode | S.IXUSR | S.IXGRP | S.IXOTH); } -test fileMode { +test filePermissions { if (!std.fs.has_executable_bit) return error.SkipZigTest; - try testing.expectEqual(default_mode, fileMode(0o744, PipeOptions{ .mode_mode = .ignore })); - try testing.expectEqual(0o777, fileMode(0o744, PipeOptions{})); - try testing.expectEqual(0o666, fileMode(0o644, PipeOptions{})); - try testing.expectEqual(0o666, fileMode(0o655, PipeOptions{})); + try testing.expectEqual(0o666, filePermissions(0o744, PipeOptions{ .mode_mode = .ignore })); + try testing.expectEqual(0o777, filePermissions(0o744, PipeOptions{})); + try testing.expectEqual(0o666, filePermissions(0o644, PipeOptions{})); + try testing.expectEqual(0o666, filePermissions(0o655, PipeOptions{})); } test "executable bit" { diff --git a/src/Compilation.zig b/src/Compilation.zig index 617421e279..c63fa9a3c1 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5782,7 +5782,7 @@ pub fn translateC( } // Just to save disk space, we delete the file because it is never needed again. - cache_tmp_dir.deleteFile(dep_basename) catch |err| { + cache_tmp_dir.deleteFile(io, dep_basename) catch |err| { log.warn("failed to delete '{s}': {t}", .{ dep_file_path, err }); }; } @@ -6314,11 +6314,11 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr } // Just to save disk space, we delete the files that are never needed again. 
- defer if (out_diag_path) |diag_file_path| zig_cache_tmp_dir.deleteFile(fs.path.basename(diag_file_path)) catch |err| switch (err) { + defer if (out_diag_path) |diag_file_path| zig_cache_tmp_dir.deleteFile(io, fs.path.basename(diag_file_path)) catch |err| switch (err) { error.FileNotFound => {}, // the file wasn't created due to an error we reported else => log.warn("failed to delete '{s}': {s}", .{ diag_file_path, @errorName(err) }), }; - defer if (out_dep_path) |dep_file_path| zig_cache_tmp_dir.deleteFile(fs.path.basename(dep_file_path)) catch |err| switch (err) { + defer if (out_dep_path) |dep_file_path| zig_cache_tmp_dir.deleteFile(io, fs.path.basename(dep_file_path)) catch |err| switch (err) { error.FileNotFound => {}, // the file wasn't created due to an error we reported else => log.warn("failed to delete '{s}': {s}", .{ dep_file_path, @errorName(err) }), }; @@ -6329,7 +6329,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr child.stdout_behavior = .Inherit; child.stderr_behavior = .Inherit; - const term = child.spawnAndWait() catch |err| { + const term = child.spawnAndWait(io) catch |err| { return comp.failCObj(c_object, "failed to spawn zig clang (passthrough mode) {s}: {s}", .{ argv.items[0], @errorName(err) }); }; switch (term) { @@ -6347,7 +6347,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr child.stdout_behavior = .Ignore; child.stderr_behavior = .Pipe; - try child.spawn(); + try child.spawn(io); var stderr_reader = child.stderr.?.readerStreaming(io, &.{}); const stderr = try stderr_reader.interface.allocRemaining(arena, .limited(std.math.maxInt(u32))); @@ -6723,6 +6723,7 @@ fn spawnZigRc( argv: []const []const u8, child_progress_node: std.Progress.Node, ) !void { + const io = comp.io; var node_name: std.ArrayList(u8) = .empty; defer node_name.deinit(arena); @@ -6732,8 +6733,8 @@ fn spawnZigRc( child.stderr_behavior = .Pipe; child.progress_node = child_progress_node; - child.spawn() catch |err| { - return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {s}", .{ argv[0], @errorName(err) }); + child.spawn(io) catch |err| { + return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {t}", .{ argv[0], err }); }; var poller = std.Io.poll(comp.gpa, enum { stdout, stderr }, .{ diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 3552a7fc06..e9753734e9 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -1347,7 +1347,7 @@ fn unzip( .diagnostics = &diagnostics, }) catch |err| return f.fail(f.location_tok, try eb.printString("zip extract failed: {t}", .{err})); - cache_root.handle.deleteFile(&zip_path) catch |err| + cache_root.handle.deleteFile(io, &zip_path) catch |err| return f.fail(f.location_tok, try eb.printString("delete temporary zip failed: {t}", .{err})); return .{ .root_dir = diagnostics.root_dir }; @@ -1547,7 +1547,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute .fs_path = fs_path, .failure = undefined, // to be populated by the worker }; - group.async(io, workerDeleteFile, .{ root_dir, deleted_file }); + group.async(io, workerDeleteFile, .{ io, root_dir, deleted_file }); try deleted_files.append(deleted_file); continue; } @@ -1669,8 +1669,8 @@ fn workerHashFile(dir: Io.Dir, hashed_file: *HashedFile) void { hashed_file.failure = hashFileFallible(dir, hashed_file); } -fn workerDeleteFile(dir: Io.Dir, deleted_file: *DeletedFile) void { - deleted_file.failure = deleteFileFallible(dir, deleted_file); +fn 
workerDeleteFile(io: Io, dir: Io.Dir, deleted_file: *DeletedFile) void { + deleted_file.failure = deleteFileFallible(io, dir, deleted_file); } fn hashFileFallible(io: Io, dir: Io.Dir, hashed_file: *HashedFile) HashedFile.Error!void { @@ -1712,8 +1712,8 @@ fn hashFileFallible(io: Io, dir: Io.Dir, hashed_file: *HashedFile) HashedFile.Er hashed_file.size = file_size; } -fn deleteFileFallible(dir: Io.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void { - try dir.deleteFile(deleted_file.fs_path); +fn deleteFileFallible(io: Io, dir: Io.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void { + try dir.deleteFile(io, deleted_file.fs_path); } fn setExecutable(file: Io.File) !void { diff --git a/src/link.zig b/src/link.zig index ff6d938e6f..7ebe1c6ba2 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1235,22 +1235,26 @@ pub const File = struct { ty: InternPool.Index, }; - pub fn determineMode( + pub fn determinePermissions( output_mode: std.builtin.OutputMode, link_mode: std.builtin.LinkMode, - ) Io.File.Mode { + ) Io.File.Permissions { // On common systems with a 0o022 umask, 0o777 will still result in a file created // with 0o755 permissions, but it works appropriately if the system is configured // more leniently. As another data point, C's fopen seems to open files with the // 666 mode. - const executable_mode = if (builtin.target.os.tag == .windows) 0 else 0o777; + const executable_mode: Io.FilePermissions = if (builtin.target.os.tag == .windows) + .default_file + else + .fromMode(0o777); + switch (output_mode) { .Lib => return switch (link_mode) { .dynamic => executable_mode, - .static => Io.File.default_mode, + .static => .default_file, }, .Exe => return executable_mode, - .Obj => return Io.File.default_mode, + .Obj => return .default_file, } } diff --git a/src/link/Lld.zig b/src/link/Lld.zig index 6920c12762..e3127b24bf 100644 --- a/src/link/Lld.zig +++ b/src/link/Lld.zig @@ -1608,13 +1608,13 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi child.stdout_behavior = .Inherit; child.stderr_behavior = .Inherit; - break :term child.spawnAndWait(); + break :term child.spawnAndWait(io); } else term: { child.stdin_behavior = .Ignore; child.stdout_behavior = .Ignore; child.stderr_behavior = .Pipe; - child.spawn() catch |err| break :term err; + child.spawn(io) catch |err| break :term err; var stderr_reader = child.stderr.?.readerStreaming(io, &.{}); stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited); break :term child.wait(); @@ -1658,13 +1658,13 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi rsp_child.stdout_behavior = .Inherit; rsp_child.stderr_behavior = .Inherit; - break :term rsp_child.spawnAndWait() catch |err| break :err err; + break :term rsp_child.spawnAndWait(io) catch |err| break :err err; } else { rsp_child.stdin_behavior = .Ignore; rsp_child.stdout_behavior = .Ignore; rsp_child.stderr_behavior = .Pipe; - rsp_child.spawn() catch |err| break :err err; + rsp_child.spawn(io) catch |err| break :err err; var stderr_reader = rsp_child.stderr.?.readerStreaming(io, &.{}); stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited); break :term rsp_child.wait() catch |err| break :err err; diff --git a/src/main.zig b/src/main.zig index a832a79dbe..3835e75949 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4457,7 +4457,7 @@ fn runOrTest( const term_result = t: { std.debug.lockStdErr(); defer std.debug.unlockStdErr(); - break :t child.spawnAndWait(); + break :t child.spawnAndWait(io); }; 
const term = term_result catch |err| { try warnAboutForeignBinaries(io, arena, arg_mode, target, link_libc); @@ -4512,6 +4512,7 @@ fn runOrTestHotSwap( all_args: []const []const u8, runtime_args_start: ?usize, ) !std.process.Child.Id { + const io = comp.io; const lf = comp.bin_file.?; const exe_path = switch (builtin.target.os.tag) { @@ -4593,7 +4594,7 @@ fn runOrTestHotSwap( child.stdout_behavior = .Inherit; child.stderr_behavior = .Inherit; - try child.spawn(); + try child.spawn(io); return child.id; }, @@ -5419,8 +5420,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) const term = t: { std.debug.lockStdErr(); defer std.debug.unlockStdErr(); - break :t child.spawnAndWait() catch |err| { - fatal("failed to spawn build runner {s}: {s}", .{ child_argv.items[0], @errorName(err) }); + break :t child.spawnAndWait(io) catch |err| { + fatal("failed to spawn build runner {s}: {t}", .{ child_argv.items[0], err }); }; }; @@ -5444,7 +5445,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) dirs.local_cache, tmp_sub_path, @errorName(err), }); }; - dirs.local_cache.handle.deleteFile(tmp_sub_path) catch {}; + dirs.local_cache.handle.deleteFile(io, tmp_sub_path) catch {}; var it = mem.splitScalar(u8, stdout, '\n'); var any_errors = false; @@ -5685,7 +5686,7 @@ fn jitCmd( child.stdout_behavior = if (options.capture == null) .Inherit else .Pipe; child.stderr_behavior = .Inherit; - try child.spawn(); + try child.spawn(io); if (options.capture) |ptr| { var stdout_reader = child.stdout.?.readerStreaming(io, &.{}); diff --git a/test/src/Cases.zig b/test/src/Cases.zig index bf3eade8f4..82b59f722d 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -461,6 +461,7 @@ pub fn lowerToBuildSteps( parent_step: *std.Build.Step, options: CaseTestOptions, ) void { + const io = self.io; const host = b.resolveTargetQuery(.{}); const cases_dir_path = b.build_root.join(b.allocator, &.{ "test", "cases" }) catch @panic("OOM"); @@ -595,7 +596,7 @@ pub fn lowerToBuildSteps( }, .Execution => |expected_stdout| no_exec: { const run = if (case.target.result.ofmt == .c) run_step: { - if (getExternalExecutor(&host.result, &case.target.result, .{ .link_libc = true }) != .native) { + if (getExternalExecutor(io, &host.result, &case.target.result, .{ .link_libc = true }) != .native) { // We wouldn't be able to run the compiled C code. 
break :no_exec; } diff --git a/test/standalone/child_process/main.zig b/test/standalone/child_process/main.zig index 2a28845a56..3aaec0f2b2 100644 --- a/test/standalone/child_process/main.zig +++ b/test/standalone/child_process/main.zig @@ -29,7 +29,7 @@ pub fn main() !void { child.stdin_behavior = .Pipe; child.stdout_behavior = .Pipe; child.stderr_behavior = .Inherit; - try child.spawn(); + try child.spawn(io); const child_stdin = child.stdin.?; try child_stdin.writeAll("hello from stdin"); // verified in child child_stdin.close(io); -- cgit v1.2.3 From b042e935228db5d46271d4d3d17afeb9ba5d7ce3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 Dec 2025 20:55:11 -0800 Subject: std: update tty config references in the build system --- lib/compiler/build_runner.zig | 208 ++++++++++++++++++++--------------------- lib/std/Build.zig | 60 ++++++------ lib/std/Build/Fuzz.zig | 20 ++-- lib/std/Build/Step.zig | 9 +- lib/std/Build/Step/Compile.zig | 10 +- lib/std/Build/Step/Run.zig | 68 +++++++------- lib/std/Build/WebServer.zig | 4 - lib/std/Io/File/Writer.zig | 41 +++++++- lib/std/debug.zig | 2 +- lib/std/log.zig | 30 +----- lib/std/testing.zig | 42 ++++----- lib/std/zig.zig | 12 +-- lib/std/zig/ErrorBundle.zig | 59 ++++++------ 13 files changed, 271 insertions(+), 294 deletions(-) (limited to 'lib/std/Build/WebServer.zig') diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 05cd21ecdb..acba1a4621 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -14,7 +14,6 @@ const WebServer = std.Build.WebServer; const Allocator = std.mem.Allocator; const fatal = std.process.fatal; const Writer = std.Io.Writer; -const tty = std.Io.tty; pub const root = @import("@build"); pub const dependencies = @import("@dependencies"); @@ -435,9 +434,7 @@ pub fn main() !void { if (builtin.single_threaded) fatal("'--webui' is not yet supported on single-threaded hosts", .{}); } - const ttyconf = color.detectTtyConf(io); - - const main_progress_node = std.Progress.start(.{ + const main_progress_node = std.Progress.start(io, .{ .disable_printing = (color == .off), }); defer main_progress_node.end(); @@ -509,8 +506,6 @@ pub fn main() !void { .error_style = error_style, .multiline_errors = multiline_errors, .summary = summary orelse if (watch or webui_listen != null) .line else .failures, - - .ttyconf = ttyconf, }; defer { run.memory_blocked_steps.deinit(gpa); @@ -524,10 +519,10 @@ pub fn main() !void { prepare(arena, builder, targets.items, &run, graph.random_seed) catch |err| switch (err) { error.DependencyLoopDetected => { - // Perhaps in the future there could be an Advanced Options flag such as - // --debug-build-runner-leaks which would make this code return instead of - // calling exit. - std.debug.lockStdErr(); + // Perhaps in the future there could be an Advanced Options flag + // such as --debug-build-runner-leaks which would make this code + // return instead of calling exit. 
+ _ = std.debug.lockStderrWriter(&.{}); process.exit(1); }, else => |e| return e, @@ -545,7 +540,6 @@ pub fn main() !void { if (builtin.single_threaded) unreachable; // `fatal` above break :ws .init(.{ .gpa = gpa, - .ttyconf = ttyconf, .graph = &graph, .all_steps = run.step_stack.keys(), .root_prog_node = main_progress_node, @@ -560,9 +554,9 @@ pub fn main() !void { } rebuild: while (true) : (if (run.error_style.clearOnUpdate()) { - const bw, _ = std.debug.lockStderrWriter(&stdio_buffer_allocation); + const stderr = std.debug.lockStderrWriter(&stdio_buffer_allocation); defer std.debug.unlockStderrWriter(); - try bw.writeAll("\x1B[2J\x1B[3J\x1B[H"); + try stderr.writeAllUnescaped("\x1B[2J\x1B[3J\x1B[H"); }) { if (run.web_server) |*ws| ws.startBuild(); @@ -663,9 +657,6 @@ const Run = struct { memory_blocked_steps: std.ArrayList(*Step), /// Allocated into `gpa`. step_stack: std.AutoArrayHashMapUnmanaged(*Step, void), - /// Similar to the `tty.Config` returned by `std.debug.lockStderrWriter`, - /// but also respects the '--color' flag. - ttyconf: tty.Config, claimed_rss: usize, error_style: ErrorStyle, @@ -839,7 +830,6 @@ fn runStepNames( var f = std.Build.Fuzz.init( gpa, io, - run.ttyconf, step_stack.keys(), parent_prog_node, mode, @@ -866,18 +856,20 @@ fn runStepNames( .none => break :summary, } - const w, _ = std.debug.lockStderrWriter(&stdio_buffer_allocation); + const stderr = std.debug.lockStderrWriter(&stdio_buffer_allocation); defer std.debug.unlockStderrWriter(); - const ttyconf = run.ttyconf; + + const w = &stderr.interface; + const fwm = stderr.mode; const total_count = success_count + failure_count + pending_count + skipped_count; - ttyconf.setColor(w, .cyan) catch {}; - ttyconf.setColor(w, .bold) catch {}; + fwm.setColor(w, .cyan) catch {}; + fwm.setColor(w, .bold) catch {}; w.writeAll("Build Summary: ") catch {}; - ttyconf.setColor(w, .reset) catch {}; + fwm.setColor(w, .reset) catch {}; w.print("{d}/{d} steps succeeded", .{ success_count, total_count }) catch {}; { - ttyconf.setColor(w, .dim) catch {}; + fwm.setColor(w, .dim) catch {}; var first = true; if (skipped_count > 0) { w.print("{s}{d} skipped", .{ if (first) " (" else ", ", skipped_count }) catch {}; @@ -888,12 +880,12 @@ fn runStepNames( first = false; } if (!first) w.writeByte(')') catch {}; - ttyconf.setColor(w, .reset) catch {}; + fwm.setColor(w, .reset) catch {}; } if (test_count > 0) { w.print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {}; - ttyconf.setColor(w, .dim) catch {}; + fwm.setColor(w, .dim) catch {}; var first = true; if (test_skip_count > 0) { w.print("{s}{d} skipped", .{ if (first) " (" else ", ", test_skip_count }) catch {}; @@ -912,7 +904,7 @@ fn runStepNames( first = false; } if (!first) w.writeByte(')') catch {}; - ttyconf.setColor(w, .reset) catch {}; + fwm.setColor(w, .reset) catch {}; } w.writeAll("\n") catch {}; @@ -926,7 +918,7 @@ fn runStepNames( var print_node: PrintNode = .{ .parent = null }; if (step_names.len == 0) { print_node.last = true; - printTreeStep(b, b.default_step, run, w, ttyconf, &print_node, &step_stack_copy) catch {}; + printTreeStep(b, b.default_step, run, w, fwm, &print_node, &step_stack_copy) catch {}; } else { const last_index = if (run.summary == .all) b.top_level_steps.count() else blk: { var i: usize = step_names.len; @@ -945,7 +937,7 @@ fn runStepNames( for (step_names, 0..) 
|step_name, i| { const tls = b.top_level_steps.get(step_name).?; print_node.last = i + 1 == last_index; - printTreeStep(b, &tls.step, run, w, ttyconf, &print_node, &step_stack_copy) catch {}; + printTreeStep(b, &tls.step, run, w, fwm, &print_node, &step_stack_copy) catch {}; } } w.writeByte('\n') catch {}; @@ -962,7 +954,7 @@ fn runStepNames( if (run.error_style.verboseContext()) break :code 1; // failure; print build command break :code 2; // failure; do not print build command }; - std.debug.lockStdErr(); + _ = std.debug.lockStderrWriter(&.{}); process.exit(code); } @@ -971,31 +963,31 @@ const PrintNode = struct { last: bool = false, }; -fn printPrefix(node: *PrintNode, stderr: *Writer, ttyconf: tty.Config) !void { +fn printPrefix(node: *PrintNode, w: *Writer, fwm: File.Writer.Mode) !void { const parent = node.parent orelse return; if (parent.parent == null) return; - try printPrefix(parent, stderr, ttyconf); + try printPrefix(parent, w, fwm); if (parent.last) { - try stderr.writeAll(" "); + try w.writeAll(" "); } else { - try stderr.writeAll(switch (ttyconf) { - .no_color, .windows_api => "| ", - .escape_codes => "\x1B\x28\x30\x78\x1B\x28\x42 ", // │ + try w.writeAll(switch (fwm) { + .terminal_escaped => "\x1B\x28\x30\x78\x1B\x28\x42 ", // │ + else => "| ", }); } } -fn printChildNodePrefix(stderr: *Writer, ttyconf: tty.Config) !void { - try stderr.writeAll(switch (ttyconf) { - .no_color, .windows_api => "+- ", - .escape_codes => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ", // └─ +fn printChildNodePrefix(w: *Writer, fwm: File.Writer.Mode) !void { + try w.writeAll(switch (fwm) { + .terminal_escaped => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ", // └─ + else => "+- ", }); } fn printStepStatus( s: *Step, stderr: *Writer, - ttyconf: tty.Config, + fwm: File.Writer.Mode, run: *const Run, ) !void { switch (s.state) { @@ -1005,13 +997,13 @@ fn printStepStatus( .running => unreachable, .dependency_failure => { - try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .dim); try stderr.writeAll(" transitive failure\n"); - try ttyconf.setColor(stderr, .reset); + try fwm.setColor(stderr, .reset); }, .success => { - try ttyconf.setColor(stderr, .green); + try fwm.setColor(stderr, .green); if (s.result_cached) { try stderr.writeAll(" cached"); } else if (s.test_results.test_count > 0) { @@ -1019,19 +1011,19 @@ fn printStepStatus( assert(s.test_results.test_count == pass_count + s.test_results.skip_count); try stderr.print(" {d} pass", .{pass_count}); if (s.test_results.skip_count > 0) { - try ttyconf.setColor(stderr, .reset); + try fwm.setColor(stderr, .reset); try stderr.writeAll(", "); - try ttyconf.setColor(stderr, .yellow); + try fwm.setColor(stderr, .yellow); try stderr.print("{d} skip", .{s.test_results.skip_count}); } - try ttyconf.setColor(stderr, .reset); + try fwm.setColor(stderr, .reset); try stderr.print(" ({d} total)", .{s.test_results.test_count}); } else { try stderr.writeAll(" success"); } - try ttyconf.setColor(stderr, .reset); + try fwm.setColor(stderr, .reset); if (s.result_duration_ns) |ns| { - try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .dim); if (ns >= std.time.ns_per_min) { try stderr.print(" {d}m", .{ns / std.time.ns_per_min}); } else if (ns >= std.time.ns_per_s) { @@ -1043,11 +1035,11 @@ fn printStepStatus( } else { try stderr.print(" {d}ns", .{ns}); } - try ttyconf.setColor(stderr, .reset); + try fwm.setColor(stderr, .reset); } if (s.result_peak_rss != 0) { const rss = s.result_peak_rss; - try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .dim); if 
(rss >= 1000_000_000) { try stderr.print(" MaxRSS:{d}G", .{rss / 1000_000_000}); } else if (rss >= 1000_000) { @@ -1057,25 +1049,25 @@ fn printStepStatus( } else { try stderr.print(" MaxRSS:{d}B", .{rss}); } - try ttyconf.setColor(stderr, .reset); + try fwm.setColor(stderr, .reset); } try stderr.writeAll("\n"); }, .skipped, .skipped_oom => |skip| { - try ttyconf.setColor(stderr, .yellow); + try fwm.setColor(stderr, .yellow); try stderr.writeAll(" skipped"); if (skip == .skipped_oom) { try stderr.writeAll(" (not enough memory)"); - try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .dim); try stderr.print(" upper bound of {d} exceeded runner limit ({d})", .{ s.max_rss, run.max_rss }); - try ttyconf.setColor(stderr, .yellow); + try fwm.setColor(stderr, .yellow); } try stderr.writeAll("\n"); - try ttyconf.setColor(stderr, .reset); + try fwm.setColor(stderr, .reset); }, .failure => { - try printStepFailure(s, stderr, ttyconf, false); - try ttyconf.setColor(stderr, .reset); + try printStepFailure(s, stderr, fwm, false); + try fwm.setColor(stderr, .reset); }, } } @@ -1083,48 +1075,48 @@ fn printStepStatus( fn printStepFailure( s: *Step, stderr: *Writer, - ttyconf: tty.Config, + fwm: File.Writer.Mode, dim: bool, ) !void { if (s.result_error_bundle.errorMessageCount() > 0) { - try ttyconf.setColor(stderr, .red); + try fwm.setColor(stderr, .red); try stderr.print(" {d} errors\n", .{ s.result_error_bundle.errorMessageCount(), }); } else if (!s.test_results.isSuccess()) { // These first values include all of the test "statuses". Every test is either passsed, // skipped, failed, crashed, or timed out. - try ttyconf.setColor(stderr, .green); + try fwm.setColor(stderr, .green); try stderr.print(" {d} pass", .{s.test_results.passCount()}); - try ttyconf.setColor(stderr, .reset); - if (dim) try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .reset); + if (dim) try fwm.setColor(stderr, .dim); if (s.test_results.skip_count > 0) { try stderr.writeAll(", "); - try ttyconf.setColor(stderr, .yellow); + try fwm.setColor(stderr, .yellow); try stderr.print("{d} skip", .{s.test_results.skip_count}); - try ttyconf.setColor(stderr, .reset); - if (dim) try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .reset); + if (dim) try fwm.setColor(stderr, .dim); } if (s.test_results.fail_count > 0) { try stderr.writeAll(", "); - try ttyconf.setColor(stderr, .red); + try fwm.setColor(stderr, .red); try stderr.print("{d} fail", .{s.test_results.fail_count}); - try ttyconf.setColor(stderr, .reset); - if (dim) try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .reset); + if (dim) try fwm.setColor(stderr, .dim); } if (s.test_results.crash_count > 0) { try stderr.writeAll(", "); - try ttyconf.setColor(stderr, .red); + try fwm.setColor(stderr, .red); try stderr.print("{d} crash", .{s.test_results.crash_count}); - try ttyconf.setColor(stderr, .reset); - if (dim) try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .reset); + if (dim) try fwm.setColor(stderr, .dim); } if (s.test_results.timeout_count > 0) { try stderr.writeAll(", "); - try ttyconf.setColor(stderr, .red); + try fwm.setColor(stderr, .red); try stderr.print("{d} timeout", .{s.test_results.timeout_count}); - try ttyconf.setColor(stderr, .reset); - if (dim) try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .reset); + if (dim) try fwm.setColor(stderr, .dim); } try stderr.print(" ({d} total)", .{s.test_results.test_count}); @@ -1134,10 +1126,10 @@ fn printStepFailure( // 2 pass, 1 skip, 2 fail (5 total); 2 
leaks if (s.test_results.leak_count > 0) { try stderr.writeAll("; "); - try ttyconf.setColor(stderr, .red); + try fwm.setColor(stderr, .red); try stderr.print("{d} leaks", .{s.test_results.leak_count}); - try ttyconf.setColor(stderr, .reset); - if (dim) try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .reset); + if (dim) try fwm.setColor(stderr, .dim); } // It's usually not helpful to know how many error logs there were because they tend to @@ -1151,19 +1143,19 @@ fn printStepFailure( }; if (show_err_logs) { try stderr.writeAll("; "); - try ttyconf.setColor(stderr, .red); + try fwm.setColor(stderr, .red); try stderr.print("{d} error logs", .{s.test_results.log_err_count}); - try ttyconf.setColor(stderr, .reset); - if (dim) try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .reset); + if (dim) try fwm.setColor(stderr, .dim); } try stderr.writeAll("\n"); } else if (s.result_error_msgs.items.len > 0) { - try ttyconf.setColor(stderr, .red); + try fwm.setColor(stderr, .red); try stderr.writeAll(" failure\n"); } else { assert(s.result_stderr.len > 0); - try ttyconf.setColor(stderr, .red); + try fwm.setColor(stderr, .red); try stderr.writeAll(" stderr\n"); } } @@ -1173,7 +1165,7 @@ fn printTreeStep( s: *Step, run: *const Run, stderr: *Writer, - ttyconf: tty.Config, + fwm: File.Writer.Mode, parent_node: *PrintNode, step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void), ) !void { @@ -1186,26 +1178,26 @@ fn printTreeStep( .failures => s.state == .success, }; if (skip) return; - try printPrefix(parent_node, stderr, ttyconf); + try printPrefix(parent_node, stderr, fwm); if (parent_node.parent != null) { if (parent_node.last) { - try printChildNodePrefix(stderr, ttyconf); + try printChildNodePrefix(stderr, fwm); } else { - try stderr.writeAll(switch (ttyconf) { - .no_color, .windows_api => "+- ", - .escape_codes => "\x1B\x28\x30\x74\x71\x1B\x28\x42 ", // ├─ + try stderr.writeAll(switch (fwm) { + .terminal_escaped => "\x1B\x28\x30\x74\x71\x1B\x28\x42 ", // ├─ + else => "+- ", }); } } - if (!first) try ttyconf.setColor(stderr, .dim); + if (!first) try fwm.setColor(stderr, .dim); // dep_prefix omitted here because it is redundant with the tree. 
try stderr.writeAll(s.name); if (first) { - try printStepStatus(s, stderr, ttyconf, run); + try printStepStatus(s, stderr, fwm, run); const last_index = if (summary == .all) s.dependencies.items.len -| 1 else blk: { var i: usize = s.dependencies.items.len; @@ -1227,7 +1219,7 @@ fn printTreeStep( .parent = parent_node, .last = i == last_index, }; - try printTreeStep(b, dep, run, stderr, ttyconf, &print_node, step_stack); + try printTreeStep(b, dep, run, stderr, fwm, &print_node, step_stack); } } else { if (s.dependencies.items.len == 0) { @@ -1237,7 +1229,7 @@ fn printTreeStep( s.dependencies.items.len, }); } - try ttyconf.setColor(stderr, .reset); + try fwm.setColor(stderr, .reset); } } @@ -1368,7 +1360,6 @@ fn workerMakeOneStep( .progress_node = sub_prog_node, .watch = run.watch, .web_server = if (run.web_server) |*ws| ws else null, - .ttyconf = run.ttyconf, .unit_test_timeout_ns = run.unit_test_timeout_ns, .gpa = gpa, }); @@ -1378,10 +1369,9 @@ fn workerMakeOneStep( const show_error_msgs = s.result_error_msgs.items.len > 0; const show_stderr = s.result_stderr.len > 0; if (show_error_msgs or show_compile_errors or show_stderr) { - const bw, _ = std.debug.lockStderrWriter(&stdio_buffer_allocation); + const stderr = std.debug.lockStderrWriter(&stdio_buffer_allocation); defer std.debug.unlockStderrWriter(); - const ttyconf = run.ttyconf; - printErrorMessages(gpa, s, .{}, bw, ttyconf, run.error_style, run.multiline_errors) catch {}; + printErrorMessages(gpa, s, .{}, &stderr.interface, stderr.mode, run.error_style, run.multiline_errors) catch {}; } handle_result: { @@ -1449,7 +1439,7 @@ pub fn printErrorMessages( failing_step: *Step, options: std.zig.ErrorBundle.RenderOptions, stderr: *Writer, - ttyconf: tty.Config, + fwm: File.Writer.Mode, error_style: ErrorStyle, multiline_errors: MultilineErrors, ) !void { @@ -1464,29 +1454,29 @@ pub fn printErrorMessages( } // Now, `step_stack` has the subtree that we want to print, in reverse order. - try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .dim); var indent: usize = 0; while (step_stack.pop()) |s| : (indent += 1) { if (indent > 0) { try stderr.splatByteAll(' ', (indent - 1) * 3); - try printChildNodePrefix(stderr, ttyconf); + try printChildNodePrefix(stderr, fwm); } try stderr.writeAll(s.name); if (s == failing_step) { - try printStepFailure(s, stderr, ttyconf, true); + try printStepFailure(s, stderr, fwm, true); } else { try stderr.writeAll("\n"); } } - try ttyconf.setColor(stderr, .reset); + try fwm.setColor(stderr, .reset); } else { // Just print the failing step itself. 
- try ttyconf.setColor(stderr, .dim); + try fwm.setColor(stderr, .dim); try stderr.writeAll(failing_step.name); - try printStepFailure(failing_step, stderr, ttyconf, true); - try ttyconf.setColor(stderr, .reset); + try printStepFailure(failing_step, stderr, fwm, true); + try fwm.setColor(stderr, .reset); } if (failing_step.result_stderr.len > 0) { @@ -1496,12 +1486,12 @@ pub fn printErrorMessages( } } - try failing_step.result_error_bundle.renderToWriter(options, stderr, ttyconf); + try failing_step.result_error_bundle.renderToWriter(options, stderr, fwm); for (failing_step.result_error_msgs.items) |msg| { - try ttyconf.setColor(stderr, .red); + try fwm.setColor(stderr, .red); try stderr.writeAll("error:"); - try ttyconf.setColor(stderr, .reset); + try fwm.setColor(stderr, .reset); if (std.mem.indexOfScalar(u8, msg, '\n') == null) { try stderr.print(" {s}\n", .{msg}); } else switch (multiline_errors) { @@ -1519,9 +1509,9 @@ pub fn printErrorMessages( if (error_style.verboseContext()) { if (failing_step.result_failed_command) |cmd_str| { - try ttyconf.setColor(stderr, .red); + try fwm.setColor(stderr, .red); try stderr.writeAll("failed command: "); - try ttyconf.setColor(stderr, .reset); + try fwm.setColor(stderr, .reset); try stderr.writeAll(cmd_str); try stderr.writeByte('\n'); } diff --git a/lib/std/Build.zig b/lib/std/Build.zig index ae2ab1c4d0..085254fdde 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -5,9 +5,8 @@ const std = @import("std.zig"); const Io = std.Io; const fs = std.fs; const mem = std.mem; -const debug = std.debug; const panic = std.debug.panic; -const assert = debug.assert; +const assert = std.debug.assert; const log = std.log; const StringHashMap = std.StringHashMap; const Allocator = std.mem.Allocator; @@ -2090,7 +2089,7 @@ pub fn dependencyFromBuildZig( } const full_path = b.pathFromRoot("build.zig.zon"); - debug.panic("'{}' is not a build.zig struct of a dependency in '{s}'", .{ build_zig, full_path }); + std.debug.panic("'{}' is not a build.zig struct of a dependency in '{s}'", .{ build_zig, full_path }); } fn userValuesAreSame(lhs: UserValue, rhs: UserValue) bool { @@ -2249,9 +2248,9 @@ pub const GeneratedFile = struct { pub fn getPath2(gen: GeneratedFile, src_builder: *Build, asking_step: ?*Step) []const u8 { return gen.path orelse { - const w, const ttyconf = debug.lockStderrWriter(&.{}); - dumpBadGetPathHelp(gen.step, w, ttyconf, src_builder, asking_step) catch {}; - debug.unlockStderrWriter(); + const stderr = std.debug.lockStderrWriter(&.{}); + dumpBadGetPathHelp(gen.step, &stderr.interface, stderr.mode, src_builder, asking_step) catch {}; + std.debug.unlockStderrWriter(); @panic("misconfigured build script"); }; } @@ -2458,9 +2457,9 @@ pub const LazyPath = union(enum) { var file_path: Cache.Path = .{ .root_dir = Cache.Directory.cwd(), .sub_path = gen.file.path orelse { - const w, const ttyconf = debug.lockStderrWriter(&.{}); - dumpBadGetPathHelp(gen.file.step, w, ttyconf, src_builder, asking_step) catch {}; - debug.unlockStderrWriter(); + const stderr = std.debug.lockStderrWriter(&.{}); + dumpBadGetPathHelp(gen.file.step, &stderr.interface, stderr.mode, src_builder, asking_step) catch {}; + std.debug.unlockStderrWriter(); @panic("misconfigured build script"); }, }; @@ -2550,37 +2549,40 @@ fn dumpBadDirnameHelp( comptime msg: []const u8, args: anytype, ) anyerror!void { - const w, const tty_config = debug.lockStderrWriter(&.{}); - defer debug.unlockStderrWriter(); + const stderr = std.debug.lockStderrWriter(&.{}); + defer 
std.debug.unlockStderrWriter(); + + const w = &stderr.interface; + const fwm = stderr.mode; try w.print(msg, args); if (fail_step) |s| { - tty_config.setColor(w, .red) catch {}; + fwm.setColor(w, .red) catch {}; try w.writeAll(" The step was created by this stack trace:\n"); - tty_config.setColor(w, .reset) catch {}; + fwm.setColor(w, .reset) catch {}; - s.dump(w, tty_config); + s.dump(w, fwm); } if (asking_step) |as| { - tty_config.setColor(w, .red) catch {}; + fwm.setColor(w, .red) catch {}; try w.print(" The step '{s}' that is missing a dependency on the above step was created by this stack trace:\n", .{as.name}); - tty_config.setColor(w, .reset) catch {}; + fwm.setColor(w, .reset) catch {}; - as.dump(w, tty_config); + as.dump(w, fwm); } - tty_config.setColor(w, .red) catch {}; + fwm.setColor(w, .red) catch {}; try w.writeAll(" Hope that helps. Proceeding to panic.\n"); - tty_config.setColor(w, .reset) catch {}; + fwm.setColor(w, .reset) catch {}; } /// In this function the stderr mutex has already been locked. pub fn dumpBadGetPathHelp( s: *Step, - w: *std.Io.Writer, - tty_config: std.Io.tty.Config, + w: *Io.Writer, + fwm: File.Writer.Mode, src_builder: *Build, asking_step: ?*Step, ) anyerror!void { @@ -2594,21 +2596,21 @@ pub fn dumpBadGetPathHelp( s.name, }); - tty_config.setColor(w, .red) catch {}; + fwm.setColor(w, .red) catch {}; try w.writeAll(" The step was created by this stack trace:\n"); - tty_config.setColor(w, .reset) catch {}; + fwm.setColor(w, .reset) catch {}; - s.dump(w, tty_config); + s.dump(w, fwm); if (asking_step) |as| { - tty_config.setColor(w, .red) catch {}; + fwm.setColor(w, .red) catch {}; try w.print(" The step '{s}' that is missing a dependency on the above step was created by this stack trace:\n", .{as.name}); - tty_config.setColor(w, .reset) catch {}; + fwm.setColor(w, .reset) catch {}; - as.dump(w, tty_config); + as.dump(w, fwm); } - tty_config.setColor(w, .red) catch {}; + fwm.setColor(w, .red) catch {}; try w.writeAll(" Hope that helps. Proceeding to panic.\n"); - tty_config.setColor(w, .reset) catch {}; + fwm.setColor(w, .reset) catch {}; } pub const InstallDir = union(enum) { diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig index 8837d7d527..c2b7d1e9e9 100644 --- a/lib/std/Build/Fuzz.zig +++ b/lib/std/Build/Fuzz.zig @@ -9,14 +9,12 @@ const Allocator = std.mem.Allocator; const log = std.log; const Coverage = std.debug.Coverage; const abi = Build.abi.fuzz; -const tty = std.Io.tty; const Fuzz = @This(); const build_runner = @import("root"); gpa: Allocator, io: Io, -ttyconf: tty.Config, mode: Mode, /// Allocated into `gpa`. 
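
A minimal sketch of the call-site pattern the hunks above converge on, assuming only what this series itself shows: `std.debug.lockStderrWriter` now returns a single file writer whose `.interface` and `.mode` fields replace the old `(writer, ttyconf)` tuple, and color handling goes through `Io.File.Writer.Mode.setColor`. The wrapper function and its argument are illustrative, not part of the patch:

    const std = @import("std");

    // Illustrative helper; the name and parameter are not from the patch.
    fn reportFailure(step_name: []const u8) void {
        // One returned value replaces the old (writer, ttyconf) tuple.
        const stderr = std.debug.lockStderrWriter(&.{});
        defer std.debug.unlockStderrWriter();

        const w = &stderr.interface; // *Io.Writer used for all printing
        const fwm = stderr.mode; // Io.File.Writer.Mode, replacing Io.tty.Config

        // setColor is now a method on the writer mode rather than on tty.Config.
        fwm.setColor(w, .red) catch {};
        w.print("step '{s}' failed\n", .{step_name}) catch {};
        fwm.setColor(w, .reset) catch {};
    }
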
@@ -77,7 +75,6 @@ const CoverageMap = struct { pub fn init( gpa: Allocator, io: Io, - ttyconf: tty.Config, all_steps: []const *Build.Step, root_prog_node: std.Progress.Node, mode: Mode, @@ -95,7 +92,7 @@ pub fn init( if (run.producer == null) continue; if (run.fuzz_tests.items.len == 0) continue; try steps.append(gpa, run); - rebuild_group.async(io, rebuildTestsWorkerRun, .{ run, gpa, ttyconf, rebuild_node }); + rebuild_group.async(io, rebuildTestsWorkerRun, .{ run, gpa, rebuild_node }); } if (steps.items.len == 0) fatal("no fuzz tests found", .{}); @@ -115,7 +112,6 @@ pub fn init( return .{ .gpa = gpa, .io = io, - .ttyconf = ttyconf, .mode = mode, .run_steps = run_steps, .group = .init, @@ -154,14 +150,14 @@ pub fn deinit(fuzz: *Fuzz) void { fuzz.gpa.free(fuzz.run_steps); } -fn rebuildTestsWorkerRun(run: *Step.Run, gpa: Allocator, ttyconf: tty.Config, parent_prog_node: std.Progress.Node) void { - rebuildTestsWorkerRunFallible(run, gpa, ttyconf, parent_prog_node) catch |err| { +fn rebuildTestsWorkerRun(run: *Step.Run, gpa: Allocator, parent_prog_node: std.Progress.Node) void { + rebuildTestsWorkerRunFallible(run, gpa, parent_prog_node) catch |err| { const compile = run.producer.?; log.err("step '{s}': failed to rebuild in fuzz mode: {t}", .{ compile.step.name, err }); }; } -fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, ttyconf: tty.Config, parent_prog_node: std.Progress.Node) !void { +fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, parent_prog_node: std.Progress.Node) !void { const compile = run.producer.?; const prog_node = parent_prog_node.start(compile.step.name, 0); defer prog_node.end(); @@ -174,9 +170,9 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, ttyconf: tty.Co if (show_error_msgs or show_compile_errors or show_stderr) { var buf: [256]u8 = undefined; - const w, _ = std.debug.lockStderrWriter(&buf); + const stderr = std.debug.lockStderrWriter(&buf); defer std.debug.unlockStderrWriter(); - build_runner.printErrorMessages(gpa, &compile.step, .{}, w, ttyconf, .verbose, .indent) catch {}; + build_runner.printErrorMessages(gpa, &compile.step, .{}, &stderr.interface, stderr.mode, .verbose, .indent) catch {}; } const rebuilt_bin_path = result catch |err| switch (err) { @@ -200,9 +196,9 @@ fn fuzzWorkerRun( run.rerunInFuzzMode(fuzz, unit_test_index, prog_node) catch |err| switch (err) { error.MakeFailed => { var buf: [256]u8 = undefined; - const w, _ = std.debug.lockStderrWriter(&buf); + const stderr = std.debug.lockStderrWriter(&buf); defer std.debug.unlockStderrWriter(); - build_runner.printErrorMessages(gpa, &run.step, .{}, w, fuzz.ttyconf, .verbose, .indent) catch {}; + build_runner.printErrorMessages(gpa, &run.step, .{}, &stderr.interface, stderr.mode, .verbose, .indent) catch {}; return; }, else => { diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 9c7fcc757f..0df58b24b7 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -117,7 +117,6 @@ pub const MakeOptions = struct { // it currently breaks because `std.net.Address` doesn't work there. Work around for now. .wasm32 => void, }, - ttyconf: std.Io.tty.Config, /// If set, this is a timeout to enforce on all individual unit tests, in nanoseconds. unit_test_timeout_ns: ?u64, /// Not to be confused with `Build.allocator`, which is an alias of `Build.graph.arena`. @@ -329,16 +328,16 @@ pub fn cast(step: *Step, comptime T: type) ?*T { } /// For debugging purposes, prints identifying information about this Step. 
-pub fn dump(step: *Step, w: *Io.Writer, tty_config: Io.tty.Config) void { +pub fn dump(step: *Step, w: *Io.Writer, fwm: Io.File.Writer.Mode) void { if (step.debug_stack_trace.instruction_addresses.len > 0) { w.print("name: '{s}'. creation stack trace:\n", .{step.name}) catch {}; - std.debug.writeStackTrace(&step.debug_stack_trace, w, tty_config) catch {}; + std.debug.writeStackTrace(&step.debug_stack_trace, w, fwm) catch {}; } else { const field = "debug_stack_frames_count"; comptime assert(@hasField(Build, field)); - tty_config.setColor(w, .yellow) catch {}; + fwm.setColor(w, .yellow) catch {}; w.print("name: '{s}'. no stack trace collected for this step, see std.Build." ++ field ++ "\n", .{step.name}) catch {}; - tty_config.setColor(w, .reset) catch {}; + fwm.setColor(w, .reset) catch {}; } } diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index dfa5460981..d703e55b87 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -926,15 +926,15 @@ fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking const maybe_path: ?*GeneratedFile = @field(compile, tag_name); const generated_file = maybe_path orelse { - const w, const ttyconf = std.debug.lockStderrWriter(&.{}); - std.Build.dumpBadGetPathHelp(&compile.step, w, ttyconf, compile.step.owner, asking_step) catch {}; + const stderr = std.debug.lockStderrWriter(&.{}); + std.Build.dumpBadGetPathHelp(&compile.step, &stderr.interface, stderr.mode, compile.step.owner, asking_step) catch {}; std.debug.unlockStderrWriter(); @panic("missing emit option for " ++ tag_name); }; const path = generated_file.path orelse { - const w, const ttyconf = std.debug.lockStderrWriter(&.{}); - std.Build.dumpBadGetPathHelp(&compile.step, w, ttyconf, compile.step.owner, asking_step) catch {}; + const stderr = std.debug.lockStderrWriter(&.{}); + std.Build.dumpBadGetPathHelp(&compile.step, &stderr.interface, stderr.mode, compile.step.owner, asking_step) catch {}; std.debug.unlockStderrWriter(); @panic(tag_name ++ " is null. Is there a missing step dependency?"); }; @@ -1904,7 +1904,7 @@ fn checkCompileErrors(compile: *Compile) !void { try actual_eb.renderToWriter(.{ .include_reference_trace = false, .include_source_line = false, - }, &aw.writer, .no_color); + }, &aw.writer, .streaming); break :ae try aw.toOwnedSlice(); }; diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 95024061d8..8aafc77820 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -5,7 +5,7 @@ const std = @import("std"); const Io = std.Io; const Build = std.Build; const Step = std.Build.Step; -const fs = std.fs; +const Dir = std.Io.Dir; const mem = std.mem; const process = std.process; const EnvMap = std.process.EnvMap; @@ -26,19 +26,7 @@ cwd: ?Build.LazyPath, env_map: ?*EnvMap, /// Controls the `NO_COLOR` and `CLICOLOR_FORCE` environment variables. -color: enum { - /// `CLICOLOR_FORCE` is set, and `NO_COLOR` is unset. - enable, - /// `NO_COLOR` is set, and `CLICOLOR_FORCE` is unset. - disable, - /// If the build runner is using color, equivalent to `.enable`. Otherwise, equivalent to `.disable`. - inherit, - /// If stderr is captured or checked, equivalent to `.disable`. Otherwise, equivalent to `.inherit`. - auto, - /// The build runner does not modify the `CLICOLOR_FORCE` or `NO_COLOR` environment variables. - /// They are treated like normal variables, so can be controlled through `setEnvironmentVariable`. 
- manual, -} = .auto, +color: Color = .auto, /// When `true` prevents `ZIG_PROGRESS` environment variable from being passed /// to the child process, which otherwise would be used for the child to send @@ -112,6 +100,20 @@ rebuilt_executable: ?Path, /// If this Run step was produced by a Compile step, it is tracked here. producer: ?*Step.Compile, +pub const Color = enum { + /// `CLICOLOR_FORCE` is set, and `NO_COLOR` is unset. + enable, + /// `NO_COLOR` is set, and `CLICOLOR_FORCE` is unset. + disable, + /// If the build runner is using color, equivalent to `.enable`. Otherwise, equivalent to `.disable`. + inherit, + /// If stderr is captured or checked, equivalent to `.disable`. Otherwise, equivalent to `.inherit`. + auto, + /// The build runner does not modify the `CLICOLOR_FORCE` or `NO_COLOR` environment variables. + /// They are treated like normal variables, so can be controlled through `setEnvironmentVariable`. + manual, +}; + pub const StdIn = union(enum) { none, bytes: []const u8, @@ -565,7 +567,7 @@ pub fn addPathDir(run: *Run, search_path: []const u8) void { if (prev_path) |pp| { const new_path = b.fmt("{s}{c}{s}", .{ pp, - if (use_wine) fs.path.delimiter_windows else fs.path.delimiter, + if (use_wine) Dir.path.delimiter_windows else Dir.path.delimiter, search_path, }); env_map.put(key, new_path) catch @panic("OOM"); @@ -748,7 +750,7 @@ fn checksContainStderr(checks: []const StdIo.Check) bool { fn convertPathArg(run: *Run, path: Build.Cache.Path) []const u8 { const b = run.step.owner; const path_str = path.toString(b.graph.arena) catch @panic("OOM"); - if (std.fs.path.isAbsolute(path_str)) { + if (Dir.path.isAbsolute(path_str)) { // Absolute paths don't need changing. return path_str; } @@ -756,19 +758,19 @@ fn convertPathArg(run: *Run, path: Build.Cache.Path) []const u8 { const child_lazy_cwd = run.cwd orelse break :rel path_str; const child_cwd = child_lazy_cwd.getPath3(b, &run.step).toString(b.graph.arena) catch @panic("OOM"); // Convert it from relative to *our* cwd, to relative to the *child's* cwd. - break :rel std.fs.path.relative(b.graph.arena, child_cwd, path_str) catch @panic("OOM"); + break :rel Dir.path.relative(b.graph.arena, child_cwd, path_str) catch @panic("OOM"); }; // Not every path can be made relative, e.g. if the path and the child cwd are on different // disk designators on Windows. In that case, `relative` will return an absolute path which we can // just return. - if (std.fs.path.isAbsolute(child_cwd_rel)) { + if (Dir.path.isAbsolute(child_cwd_rel)) { return child_cwd_rel; } // We're not done yet. In some cases this path must be prefixed with './': // * On POSIX, the executable name cannot be a single component like 'foo' // * Some executables might treat a leading '-' like a flag, which we must avoid // There's no harm in it, so just *always* apply this prefix. 
- return std.fs.path.join(b.graph.arena, &.{ ".", child_cwd_rel }) catch @panic("OOM"); + return Dir.path.join(b.graph.arena, &.{ ".", child_cwd_rel }) catch @panic("OOM"); } const IndexedOutput = struct { @@ -965,11 +967,11 @@ fn make(step: *Step, options: Step.MakeOptions) !void { &digest, ); - const output_dir_path = "o" ++ fs.path.sep_str ++ &digest; + const output_dir_path = "o" ++ Dir.path.sep_str ++ &digest; for (output_placeholders.items) |placeholder| { const output_sub_path = b.pathJoin(&.{ output_dir_path, placeholder.output.basename }); const output_sub_dir_path = switch (placeholder.tag) { - .output_file => fs.path.dirname(output_sub_path).?, + .output_file => Dir.path.dirname(output_sub_path).?, .output_directory => output_sub_path, else => unreachable, }; @@ -995,13 +997,13 @@ fn make(step: *Step, options: Step.MakeOptions) !void { // We do not know the final output paths yet, use temp paths to run the command. const rand_int = std.crypto.random.int(u64); - const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int); + const tmp_dir_path = "tmp" ++ Dir.path.sep_str ++ std.fmt.hex(rand_int); for (output_placeholders.items) |placeholder| { const output_components = .{ tmp_dir_path, placeholder.output.basename }; const output_sub_path = b.pathJoin(&output_components); const output_sub_dir_path = switch (placeholder.tag) { - .output_file => fs.path.dirname(output_sub_path).?, + .output_file => Dir.path.dirname(output_sub_path).?, .output_directory => output_sub_path, else => unreachable, }; @@ -1023,7 +1025,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, options, null); - const dep_file_dir = Io.Dir.cwd(); + const dep_file_dir = Dir.cwd(); const dep_file_basename = dep_output_file.generated_file.getPath2(b, step); if (has_side_effects) try man.addDepFile(dep_file_dir, dep_file_basename) @@ -1040,7 +1042,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { // Rename into place if (any_output) { - const o_sub_path = "o" ++ fs.path.sep_str ++ &digest; + const o_sub_path = "o" ++ Dir.path.sep_str ++ &digest; b.cache_root.handle.rename(tmp_dir_path, b.cache_root.handle, o_sub_path, io) catch |err| { if (err == error.PathAlreadyExists) { @@ -1139,12 +1141,11 @@ pub fn rerunInFuzzMode( const has_side_effects = false; const rand_int = std.crypto.random.int(u64); - const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int); + const tmp_dir_path = "tmp" ++ Dir.path.sep_str ++ std.fmt.hex(rand_int); try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, .{ .progress_node = prog_node, .watch = undefined, // not used by `runCommand` .web_server = null, // only needed for time reports - .ttyconf = fuzz.ttyconf, .unit_test_timeout_ns = null, // don't time out fuzz tests for now .gpa = fuzz.gpa, }, .{ @@ -1266,10 +1267,7 @@ fn runCommand( try env_map.put("NO_COLOR", "1"); env_map.remove("CLICOLOR_FORCE"); }, - .inherit => switch (options.ttyconf) { - .no_color, .windows_api => continue :color .disable, - .escape_codes => continue :color .enable, - }, + .inherit => {}, .auto => { const capture_stderr = run.captured_stderr != null or switch (run.stdio) { .check => |checks| checksContainStderr(checks.items), @@ -1464,7 +1462,7 @@ fn runCommand( captured.output.generated_file.path = output_path; const sub_path = b.pathJoin(&output_components); - const sub_path_dirname = fs.path.dirname(sub_path).?; + const sub_path_dirname = Dir.path.dirname(sub_path).?; 
b.cache_root.handle.makePath(io, sub_path_dirname) catch |err| { return step.fail("unable to make path '{f}{s}': {s}", .{ b.cache_root, sub_path_dirname, @errorName(err), @@ -1650,8 +1648,8 @@ fn spawnChildAndCollect( if (!run.disable_zig_progress and !inherit) { child.progress_node = options.progress_node; } - if (inherit) std.debug.lockStdErr(); - defer if (inherit) std.debug.unlockStdErr(); + if (inherit) _ = std.debug.lockStderrWriter(&.{}); + defer if (inherit) std.debug.unlockStderrWriter(); var timer = try std.time.Timer.start(); const res = try evalGeneric(run, &child); run.step.result_duration_ns = timer.read(); @@ -2277,7 +2275,7 @@ fn addPathForDynLibs(run: *Run, artifact: *Step.Compile) void { if (compile.root_module.resolved_target.?.result.os.tag == .windows and compile.isDynamicLibrary()) { - addPathDir(run, fs.path.dirname(compile.getEmittedBin().getPath2(b, &run.step)).?); + addPathDir(run, Dir.path.dirname(compile.getEmittedBin().getPath2(b, &run.step)).?); } } } diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 5f633c5948..ccb7159192 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -2,7 +2,6 @@ gpa: Allocator, graph: *const Build.Graph, all_steps: []const *Build.Step, listen_address: net.IpAddress, -ttyconf: Io.tty.Config, root_prog_node: std.Progress.Node, watch: bool, @@ -52,7 +51,6 @@ pub fn notifyUpdate(ws: *WebServer) void { pub const Options = struct { gpa: Allocator, - ttyconf: Io.tty.Config, graph: *const std.Build.Graph, all_steps: []const *Build.Step, root_prog_node: std.Progress.Node, @@ -98,7 +96,6 @@ pub fn init(opts: Options) WebServer { return .{ .gpa = opts.gpa, - .ttyconf = opts.ttyconf, .graph = opts.graph, .all_steps = all_steps, .listen_address = opts.listen_address, @@ -233,7 +230,6 @@ pub fn finishBuild(ws: *WebServer, opts: struct { ws.fuzz = Fuzz.init( ws.gpa, ws.graph.io, - ws.ttyconf, ws.all_steps, ws.root_prog_node, .{ .forever = .{ .ws = ws } }, diff --git a/lib/std/Io/File/Writer.zig b/lib/std/Io/File/Writer.zig index 8f9f573a4b..03e4ad8a15 100644 --- a/lib/std/Io/File/Writer.zig +++ b/lib/std/Io/File/Writer.zig @@ -99,7 +99,7 @@ pub const Mode = union(enum) { pub const SetColorError = std.os.windows.SetConsoleTextAttributeError || Io.Writer.Error; - pub fn setColor(mode: Mode, io_w: *Io.Writer, color: Color) Mode.SetColorError!void { + pub fn setColor(mode: Mode, io_w: *Io.Writer, color: Color) SetColorError!void { switch (mode) { .streaming, .positional, .streaming_simple, .positional_simple, .failure => return, .terminal_escaped => { @@ -155,6 +155,34 @@ pub const Mode = union(enum) { }, } } + + fn DecorateArgs(comptime Args: type) type { + const fields = @typeInfo(Args).@"struct".fields; + var new_fields: [fields.len]type = undefined; + for (fields, &new_fields) |old, *new| { + if (old.type == std.debug.FormatStackTrace) { + new.* = std.debug.FormatStackTrace.Decorated; + } else { + new.* = old.type; + } + } + return @Tuple(&new_fields); + } + + pub fn decorateArgs(file_writer_mode: std.Io.File.Writer.Mode, args: anytype) DecorateArgs(@TypeOf(args)) { + var new_args: DecorateArgs(@TypeOf(args)) = undefined; + inline for (args, &new_args) |old, *new| { + if (@TypeOf(old) == std.debug.FormatStackTrace) { + new.* = .{ + .stack_trace = old.stack_trace, + .file_writer_mode = file_writer_mode, + }; + } else { + new.* = old; + } + } + return new_args; + } }; pub const Error = error{ @@ -426,6 +454,8 @@ pub fn end(w: *Writer) EndError!void { .streaming, .streaming_simple, + 
.terminal_escaped, + .terminal_winapi, .failure, => {}, } @@ -453,10 +483,11 @@ pub const Color = enum { reset, }; -pub const SetColorError = Mode.SetColorError; - -pub fn setColor(w: *Writer, color: Color) SetColorError!void { - return w.mode.setColor(&w.interface, color); +pub fn setColor(w: *Writer, color: Color) Io.Writer.Error!void { + return w.mode.setColor(&w.interface, color) catch |err| switch (err) { + error.WriteFailed => |e| return e, + else => |e| w.err = e, + }; } pub fn disableEscape(w: *Writer) Mode { diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 69d8c942c4..67f7d3a9fe 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -306,7 +306,7 @@ pub fn print(comptime fmt: []const u8, args: anytype) void { var buffer: [64]u8 = undefined; const stderr = lockStderrWriter(&buffer); defer unlockStderrWriter(); - stderr.interface.print(fmt, args) catch return; + stderr.interface.print(fmt, stderr.mode.decorateArgs(args)) catch return; } } diff --git a/lib/std/log.zig b/lib/std/log.zig index 029f724f44..b75bada580 100644 --- a/lib/std/log.zig +++ b/lib/std/log.zig @@ -120,35 +120,7 @@ pub fn defaultLogFileWriter( } fw.interface.writeAll(": ") catch return; fw.setColor(.reset) catch {}; - fw.interface.print(format ++ "\n", decorateArgs(args, fw.mode)) catch return; -} - -fn DecorateArgs(comptime Args: type) type { - const fields = @typeInfo(Args).@"struct".fields; - var new_fields: [fields.len]type = undefined; - for (fields, &new_fields) |old, *new| { - if (old.type == std.debug.FormatStackTrace) { - new.* = std.debug.FormatStackTrace.Decorated; - } else { - new.* = old.type; - } - } - return @Tuple(&new_fields); -} - -fn decorateArgs(args: anytype, file_writer_mode: std.Io.File.Writer.Mode) DecorateArgs(@TypeOf(args)) { - var new_args: DecorateArgs(@TypeOf(args)) = undefined; - inline for (args, &new_args) |old, *new| { - if (@TypeOf(old) == std.debug.FormatStackTrace) { - new.* = .{ - .stack_trace = old.stack_trace, - .file_writer_mode = file_writer_mode, - }; - } else { - new.* = old; - } - } - return new_args; + fw.interface.print(format ++ "\n", fw.mode.decorateArgs(args)) catch return; } /// Returns a scoped logging namespace that logs all messages using the scope diff --git a/lib/std/testing.zig b/lib/std/testing.zig index ab9f1f73c3..092b65d71a 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -354,11 +354,10 @@ test expectApproxEqRel { } } -/// This function is intended to be used only in tests. When the two slices are not -/// equal, prints diagnostics to stderr to show exactly how they are not equal (with -/// the differences highlighted in red), then returns a test failure error. -/// The colorized output is optional and controlled by the return of `Io.tty.Config.detect`. -/// If your inputs are UTF-8 encoded strings, consider calling `expectEqualStrings` instead. +/// This function is intended to be used only in tests. When the two slices are +/// not equal, prints diagnostics to stderr to show exactly how they are not +/// equal (with the differences highlighted in red), then returns a test +/// failure error. 
pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const T) !void { const diff_index: usize = diff_index: { const shortest = @min(expected.len, actual.len); @@ -369,20 +368,13 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const break :diff_index if (expected.len == actual.len) return else shortest; }; if (!backend_can_print) return error.TestExpectedEqual; - const stderr_w, const ttyconf = std.debug.lockStderrWriter(&.{}); + const stderr = std.debug.lockStderrWriter(&.{}); defer std.debug.unlockStderrWriter(); - failEqualSlices(T, expected, actual, diff_index, stderr_w, ttyconf) catch {}; + failEqualSlices(T, expected, actual, diff_index, &stderr.interface, stderr.mode) catch {}; return error.TestExpectedEqual; } -fn failEqualSlices( - comptime T: type, - expected: []const T, - actual: []const T, - diff_index: usize, - w: *Io.Writer, - ttyconf: Io.tty.Config, -) !void { +fn failEqualSlices(comptime T: type, expected: []const T, actual: []const T, diff_index: usize, w: *Io.Writer, fwm: Io.File.Writer.Mode) !void { try w.print("slices differ. first difference occurs at index {d} (0x{X})\n", .{ diff_index, diff_index }); // TODO: Should this be configurable by the caller? @@ -404,12 +396,12 @@ fn failEqualSlices( var differ = if (T == u8) BytesDiffer{ .expected = expected_window, .actual = actual_window, - .ttyconf = ttyconf, + .file_writer_mode = fwm, } else SliceDiffer(T){ .start_index = window_start, .expected = expected_window, .actual = actual_window, - .ttyconf = ttyconf, + .file_writer_mode = fwm, }; // Print indexes as hex for slices of u8 since it's more likely to be binary data where @@ -466,7 +458,7 @@ fn SliceDiffer(comptime T: type) type { start_index: usize, expected: []const T, actual: []const T, - ttyconf: Io.tty.Config, + file_writer_mode: Io.File.Writer.Mode, const Self = @This(); @@ -474,13 +466,13 @@ fn SliceDiffer(comptime T: type) type { for (self.expected, 0..) 
|value, i| { const full_index = self.start_index + i; const diff = if (i < self.actual.len) !std.meta.eql(self.actual[i], value) else true; - if (diff) try self.ttyconf.setColor(writer, .red); + if (diff) try self.file_writer_mode.setColor(writer, .red); if (@typeInfo(T) == .pointer) { try writer.print("[{}]{*}: {any}\n", .{ full_index, value, value }); } else { try writer.print("[{}]: {any}\n", .{ full_index, value }); } - if (diff) try self.ttyconf.setColor(writer, .reset); + if (diff) try self.file_writer_mode.setColor(writer, .reset); } } }; @@ -489,7 +481,7 @@ fn SliceDiffer(comptime T: type) type { const BytesDiffer = struct { expected: []const u8, actual: []const u8, - ttyconf: Io.tty.Config, + file_writer_mode: Io.File.Writer.Mode, pub fn write(self: BytesDiffer, writer: *Io.Writer) !void { var expected_iterator = std.mem.window(u8, self.expected, 16, 16); @@ -516,7 +508,7 @@ const BytesDiffer = struct { try self.writeDiff(writer, "{c}", .{byte}, diff); } else { // TODO: remove this `if` when https://github.com/ziglang/zig/issues/7600 is fixed - if (self.ttyconf == .windows_api) { + if (self.file_writer_mode == .terminal_winapi) { try self.writeDiff(writer, ".", .{}, diff); continue; } @@ -538,9 +530,9 @@ const BytesDiffer = struct { } fn writeDiff(self: BytesDiffer, writer: *Io.Writer, comptime fmt: []const u8, args: anytype, diff: bool) !void { - if (diff) try self.ttyconf.setColor(writer, .red); + if (diff) try self.file_writer_mode.setColor(writer, .red); try writer.print(fmt, args); - if (diff) try self.ttyconf.setColor(writer, .reset); + if (diff) try self.file_writer_mode.setColor(writer, .reset); } }; @@ -1162,7 +1154,6 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime } else |err| switch (err) { error.OutOfMemory => { if (failing_allocator_inst.allocated_bytes != failing_allocator_inst.freed_bytes) { - const tty_config: Io.tty.Config = .detect(.stderr()); print( "\nfail_index: {d}/{d}\nallocated bytes: {d}\nfreed bytes: {d}\nallocations: {d}\ndeallocations: {d}\nallocation that was made to fail: {f}", .{ @@ -1174,7 +1165,6 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime failing_allocator_inst.deallocations, std.debug.FormatStackTrace{ .stack_trace = failing_allocator_inst.getStackTrace(), - .tty_config = tty_config, }, }, ); diff --git a/lib/std/zig.zig b/lib/std/zig.zig index ac6a0787de..b6203326f2 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -53,18 +53,18 @@ pub const Color = enum { /// Assume stderr is a terminal. 
on, - pub fn getTtyConf(color: Color, detected: Io.tty.Config) Io.tty.Config { + pub fn getTtyConf(color: Color, detected: Io.File.Writer.Mode) Io.File.Writer.Mode { return switch (color) { .auto => detected, - .on => .escape_codes, - .off => .no_color, + .on => .terminal_escaped, + .off => .streaming, }; } - pub fn detectTtyConf(color: Color, io: Io) Io.tty.Config { + pub fn detectTtyConf(color: Color, io: Io) Io.File.Writer.Mode { return switch (color) { .auto => .detect(io, .stderr()), - .on => .escape_codes, - .off => .no_color, + .on => .terminal_escaped, + .off => .streaming, }; } }; diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig index ef6203cbe8..cc6182c8aa 100644 --- a/lib/std/zig/ErrorBundle.zig +++ b/lib/std/zig/ErrorBundle.zig @@ -164,15 +164,20 @@ pub const RenderOptions = struct { pub fn renderToStdErr(eb: ErrorBundle, options: RenderOptions, color: std.zig.Color) void { var buffer: [256]u8 = undefined; - const w, const ttyconf = std.debug.lockStderrWriter(&buffer); + const stderr = std.debug.lockStderrWriter(&buffer); defer std.debug.unlockStderrWriter(); - renderToWriter(eb, options, w, color.getTtyConf(ttyconf)) catch return; + renderToWriter(eb, options, &stderr.interface, color.getTtyConf(stderr.mode)) catch return; } -pub fn renderToWriter(eb: ErrorBundle, options: RenderOptions, w: *Writer, ttyconf: Io.tty.Config) (Writer.Error || std.posix.UnexpectedError)!void { +pub fn renderToWriter( + eb: ErrorBundle, + options: RenderOptions, + w: *Writer, + fwm: Io.File.Writer.Mode, +) Io.File.Writer.Mode.SetColorError!void { if (eb.extra.len == 0) return; for (eb.getMessages()) |err_msg| { - try renderErrorMessageToWriter(eb, options, err_msg, w, ttyconf, "error", .red, 0); + try renderErrorMessageToWriter(eb, options, err_msg, w, fwm, "error", .red, 0); } if (options.include_log_text) { @@ -189,18 +194,18 @@ fn renderErrorMessageToWriter( options: RenderOptions, err_msg_index: MessageIndex, w: *Writer, - ttyconf: Io.tty.Config, + fwm: Io.File.Writer.Mode, kind: []const u8, - color: Io.tty.Color, + color: Io.File.Writer.Color, indent: usize, -) (Writer.Error || std.posix.UnexpectedError)!void { +) Io.File.Writer.Mode.SetColorError!void { const err_msg = eb.getErrorMessage(err_msg_index); if (err_msg.src_loc != .none) { const src = eb.extraData(SourceLocation, @intFromEnum(err_msg.src_loc)); var prefix: Writer.Discarding = .init(&.{}); try w.splatByteAll(' ', indent); prefix.count += indent; - try ttyconf.setColor(w, .bold); + try fwm.setColor(w, .bold); try w.print("{s}:{d}:{d}: ", .{ eb.nullTerminatedString(src.data.src_path), src.data.line + 1, @@ -211,7 +216,7 @@ fn renderErrorMessageToWriter( src.data.line + 1, src.data.column + 1, }); - try ttyconf.setColor(w, color); + try fwm.setColor(w, color); try w.writeAll(kind); prefix.count += kind.len; try w.writeAll(": "); @@ -219,17 +224,17 @@ fn renderErrorMessageToWriter( // This is the length of the part before the error message: // e.g. 
"file.zig:4:5: error: " const prefix_len: usize = @intCast(prefix.count); - try ttyconf.setColor(w, .reset); - try ttyconf.setColor(w, .bold); + try fwm.setColor(w, .reset); + try fwm.setColor(w, .bold); if (err_msg.count == 1) { try writeMsg(eb, err_msg, w, prefix_len); try w.writeByte('\n'); } else { try writeMsg(eb, err_msg, w, prefix_len); - try ttyconf.setColor(w, .dim); + try fwm.setColor(w, .dim); try w.print(" ({d} times)\n", .{err_msg.count}); } - try ttyconf.setColor(w, .reset); + try fwm.setColor(w, .reset); if (src.data.source_line != 0 and options.include_source_line) { const line = eb.nullTerminatedString(src.data.source_line); for (line) |b| switch (b) { @@ -242,19 +247,19 @@ fn renderErrorMessageToWriter( // -1 since span.main includes the caret const after_caret = src.data.span_end -| src.data.span_main -| 1; try w.splatByteAll(' ', src.data.column - before_caret); - try ttyconf.setColor(w, .green); + try fwm.setColor(w, .green); try w.splatByteAll('~', before_caret); try w.writeByte('^'); try w.splatByteAll('~', after_caret); try w.writeByte('\n'); - try ttyconf.setColor(w, .reset); + try fwm.setColor(w, .reset); } for (eb.getNotes(err_msg_index)) |note| { - try renderErrorMessageToWriter(eb, options, note, w, ttyconf, "note", .cyan, indent); + try renderErrorMessageToWriter(eb, options, note, w, fwm, "note", .cyan, indent); } if (src.data.reference_trace_len > 0 and options.include_reference_trace) { - try ttyconf.setColor(w, .reset); - try ttyconf.setColor(w, .dim); + try fwm.setColor(w, .reset); + try fwm.setColor(w, .dim); try w.print("referenced by:\n", .{}); var ref_index = src.end; for (0..src.data.reference_trace_len) |_| { @@ -281,25 +286,25 @@ fn renderErrorMessageToWriter( ); } } - try ttyconf.setColor(w, .reset); + try fwm.setColor(w, .reset); } } else { - try ttyconf.setColor(w, color); + try fwm.setColor(w, color); try w.splatByteAll(' ', indent); try w.writeAll(kind); try w.writeAll(": "); - try ttyconf.setColor(w, .reset); + try fwm.setColor(w, .reset); const msg = eb.nullTerminatedString(err_msg.msg); if (err_msg.count == 1) { try w.print("{s}\n", .{msg}); } else { try w.print("{s}", .{msg}); - try ttyconf.setColor(w, .dim); + try fwm.setColor(w, .dim); try w.print(" ({d} times)\n", .{err_msg.count}); } - try ttyconf.setColor(w, .reset); + try fwm.setColor(w, .reset); for (eb.getNotes(err_msg_index)) |note| { - try renderErrorMessageToWriter(eb, options, note, w, ttyconf, "note", .cyan, indent + 4); + try renderErrorMessageToWriter(eb, options, note, w, fwm, "note", .cyan, indent + 4); } } } @@ -806,12 +811,10 @@ pub const Wip = struct { }; defer bundle.deinit(std.testing.allocator); - const ttyconf: Io.tty.Config = .no_color; - var bundle_buf: Writer.Allocating = .init(std.testing.allocator); const bundle_bw = &bundle_buf.interface; defer bundle_buf.deinit(); - try bundle.renderToWriter(.{ .ttyconf = ttyconf }, bundle_bw); + try bundle.renderToWriter(bundle_bw); var copy = copy: { var wip: ErrorBundle.Wip = undefined; @@ -827,7 +830,7 @@ pub const Wip = struct { var copy_buf: Writer.Allocating = .init(std.testing.allocator); const copy_bw = ©_buf.interface; defer copy_buf.deinit(); - try copy.renderToWriter(.{ .ttyconf = ttyconf }, copy_bw); + try copy.renderToWriter(copy_bw); try std.testing.expectEqualStrings(bundle_bw.written(), copy_bw.written()); } -- cgit v1.2.3 From 1925e0319f1337b4856bd5a181bf4f6d3ac7d428 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 12 Dec 2025 19:04:35 -0800 Subject: update lockStderrWriter sites use the application's 
Io implementation where possible. This correctly makes writing to stderr cancelable, fallible, and participate in the application's event loop. It also removes one more hard-coded dependency on a secondary Io implementation. --- lib/compiler/build_runner.zig | 18 +++---- lib/compiler/resinator/cli.zig | 8 ++-- lib/compiler/resinator/errors.zig | 8 ++-- lib/compiler/resinator/main.zig | 98 ++++++++++++++++++++------------------- lib/compiler/std-docs.zig | 2 +- lib/compiler/test_runner.zig | 3 -- lib/std/Build.zig | 31 +++++++++---- lib/std/Build/Fuzz.zig | 21 +++++---- lib/std/Build/Step/Compile.zig | 17 ++++--- lib/std/Build/Step/Run.zig | 5 +- lib/std/Build/WebServer.zig | 2 +- lib/std/Progress.zig | 2 +- lib/std/json/dynamic.zig | 5 +- lib/std/process.zig | 2 +- lib/std/testing.zig | 16 +++++-- lib/std/zig.zig | 4 +- lib/std/zig/ErrorBundle.zig | 10 ++-- lib/std/zig/parser_test.zig | 30 +++++++----- src/Air/print.zig | 28 +++++++---- src/Compilation.zig | 53 ++++++++++++--------- src/InternPool.zig | 63 ++++++++++++------------- src/Sema.zig | 8 ++-- src/Zcu/PerThread.zig | 5 +- src/codegen/aarch64/Select.zig | 11 +++-- src/crash_report.zig | 9 ++-- src/fmt.zig | 8 ++-- src/libs/mingw.zig | 29 ++++++++---- src/libs/mingw/def.zig | 32 +++++++++---- src/link.zig | 2 +- src/link/Coff.zig | 15 ++++-- src/link/Elf2.zig | 14 ++++-- src/main.zig | 33 ++++++------- 32 files changed, 345 insertions(+), 247 deletions(-) (limited to 'lib/std/Build/WebServer.zig') diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index acba1a4621..d284d85747 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -522,7 +522,7 @@ pub fn main() !void { // Perhaps in the future there could be an Advanced Options flag // such as --debug-build-runner-leaks which would make this code // return instead of calling exit. 
- _ = std.debug.lockStderrWriter(&.{}); + _ = io.lockStderrWriter(&.{}) catch {}; process.exit(1); }, else => |e| return e, @@ -554,8 +554,8 @@ pub fn main() !void { } rebuild: while (true) : (if (run.error_style.clearOnUpdate()) { - const stderr = std.debug.lockStderrWriter(&stdio_buffer_allocation); - defer std.debug.unlockStderrWriter(); + const stderr = try io.lockStderrWriter(&stdio_buffer_allocation); + defer io.unlockStderrWriter(); try stderr.writeAllUnescaped("\x1B[2J\x1B[3J\x1B[H"); }) { if (run.web_server) |*ws| ws.startBuild(); @@ -856,8 +856,8 @@ fn runStepNames( .none => break :summary, } - const stderr = std.debug.lockStderrWriter(&stdio_buffer_allocation); - defer std.debug.unlockStderrWriter(); + const stderr = try io.lockStderrWriter(&stdio_buffer_allocation); + defer io.unlockStderrWriter(); const w = &stderr.interface; const fwm = stderr.mode; @@ -954,7 +954,7 @@ fn runStepNames( if (run.error_style.verboseContext()) break :code 1; // failure; print build command break :code 2; // failure; do not print build command }; - _ = std.debug.lockStderrWriter(&.{}); + _ = io.lockStderrWriter(&.{}) catch {}; process.exit(code); } @@ -1369,8 +1369,10 @@ fn workerMakeOneStep( const show_error_msgs = s.result_error_msgs.items.len > 0; const show_stderr = s.result_stderr.len > 0; if (show_error_msgs or show_compile_errors or show_stderr) { - const stderr = std.debug.lockStderrWriter(&stdio_buffer_allocation); - defer std.debug.unlockStderrWriter(); + const stderr = io.lockStderrWriter(&stdio_buffer_allocation) catch |err| switch (err) { + error.Canceled => return, + }; + defer io.unlockStderrWriter(); printErrorMessages(gpa, s, .{}, &stderr.interface, stderr.mode, run.error_style, run.multiline_errors) catch {}; } diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig index d2dd71b1f6..08befbe1fa 100644 --- a/lib/compiler/resinator/cli.zig +++ b/lib/compiler/resinator/cli.zig @@ -125,10 +125,10 @@ pub const Diagnostics = struct { try self.errors.append(self.allocator, error_details); } - pub fn renderToStdErr(self: *Diagnostics, args: []const []const u8) void { - const stderr, const ttyconf = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - self.renderToWriter(args, stderr, ttyconf) catch return; + pub fn renderToStderr(self: *Diagnostics, io: Io, args: []const []const u8) void { + const stderr = io.lockStderrWriter(&.{}); + defer io.unlockStderrWriter(); + self.renderToWriter(args, &stderr.interface, stderr.mode) catch return; } pub fn renderToWriter(self: *Diagnostics, args: []const []const u8, writer: *std.Io.Writer, config: std.Io.tty.Config) !void { diff --git a/lib/compiler/resinator/errors.zig b/lib/compiler/resinator/errors.zig index 14ff28a697..031b5d3574 100644 --- a/lib/compiler/resinator/errors.zig +++ b/lib/compiler/resinator/errors.zig @@ -67,12 +67,12 @@ pub const Diagnostics = struct { return @intCast(index); } - pub fn renderToStdErr(self: *Diagnostics, cwd: Io.Dir, source: []const u8, source_mappings: ?SourceMappings) void { + pub fn renderToStderr(self: *Diagnostics, cwd: Io.Dir, source: []const u8, source_mappings: ?SourceMappings) void { const io = self.io; - const stderr, const ttyconf = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); + const stderr = io.lockStderrWriter(&.{}); + defer io.unlockStderrWriter(); for (self.errors.items) |err_details| { - renderErrorMessage(io, stderr, ttyconf, cwd, err_details, source, self.strings.items, source_mappings) catch return; + 
renderErrorMessage(io, &stderr.interface, stderr.mode, cwd, err_details, source, self.strings.items, source_mappings) catch return; } } diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index 6c12903f06..bd2fcc912e 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -24,6 +24,10 @@ pub fn main() !void { defer std.debug.assert(debug_allocator.deinit() == .ok); const gpa = debug_allocator.allocator(); + var threaded: std.Io.Threaded = .init(gpa); + defer threaded.deinit(); + const io = threaded.io(); + var arena_state = std.heap.ArenaAllocator.init(gpa); defer arena_state.deinit(); const arena = arena_state.allocator(); @@ -31,8 +35,8 @@ pub fn main() !void { const args = try std.process.argsAlloc(arena); if (args.len < 2) { - const w, const ttyconf = std.debug.lockStderrWriter(&.{}); - try renderErrorMessage(w, ttyconf, .err, "expected zig lib dir as first argument", .{}); + const stderr = io.lockStderrWriter(&.{}); + try renderErrorMessage(&stderr.interface, stderr.mode, .err, "expected zig lib dir as first argument", .{}); std.process.exit(1); } const zig_lib_dir = args[1]; @@ -45,7 +49,7 @@ pub fn main() !void { } var stdout_buffer: [1024]u8 = undefined; - var stdout_writer = Io.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer); const stdout = &stdout_writer.interface; var error_handler: ErrorHandler = switch (zig_integration) { true => .{ @@ -71,24 +75,20 @@ pub fn main() !void { if (!zig_integration) { // print any warnings/notes - cli_diagnostics.renderToStdErr(cli_args); + cli_diagnostics.renderToStderr(io, cli_args); // If there was something printed, then add an extra newline separator // so that there is a clear separation between the cli diagnostics and whatever // gets printed after if (cli_diagnostics.errors.items.len > 0) { - const stderr, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - try stderr.writeByte('\n'); + const stderr = io.lockStderrWriter(&.{}); + defer io.unlockStderrWriter(); + try stderr.interface.writeByte('\n'); } } break :options options; }; defer options.deinit(); - var threaded: std.Io.Threaded = .init(gpa); - defer threaded.deinit(); - const io = threaded.io(); - if (options.print_help_and_exit) { try cli.writeUsage(stdout, "zig rc"); try stdout.flush(); @@ -130,10 +130,10 @@ pub fn main() !void { var stderr_buf: [512]u8 = undefined; var diagnostics: aro.Diagnostics = .{ .output = output: { if (zig_integration) break :output .{ .to_list = .{ .arena = .init(gpa) } }; - const w, const ttyconf = std.debug.lockStderrWriter(&stderr_buf); + const stderr = io.lockStderrWriter(&stderr_buf); break :output .{ .to_writer = .{ - .writer = w, - .color = ttyconf, + .writer = &stderr.interface, + .color = stderr.mode, } }; } }; defer { @@ -175,11 +175,11 @@ pub fn main() !void { std.process.exit(1); }, error.FileTooBig => { - try error_handler.emitMessage(gpa, .err, "failed during preprocessing: maximum file size exceeded", .{}); + try error_handler.emitMessage(gpa, io, .err, "failed during preprocessing: maximum file size exceeded", .{}); std.process.exit(1); }, error.WriteFailed => { - try error_handler.emitMessage(gpa, .err, "failed during preprocessing: error writing the preprocessed output", .{}); + try error_handler.emitMessage(gpa, io, .err, "failed during preprocessing: error writing the preprocessed output", .{}); std.process.exit(1); }, error.OutOfMemory => |e| return e, @@ -191,13 +191,13 @@ pub fn main() !void { 
.stdio => |file| { var file_reader = file.reader(io, &.{}); break :full_input file_reader.interface.allocRemaining(gpa, .unlimited) catch |err| { - try error_handler.emitMessage(gpa, .err, "unable to read input from stdin: {s}", .{@errorName(err)}); + try error_handler.emitMessage(gpa, io, .err, "unable to read input from stdin: {s}", .{@errorName(err)}); std.process.exit(1); }; }, .filename => |input_filename| { break :full_input Io.Dir.cwd().readFileAlloc(input_filename, gpa, .unlimited) catch |err| { - try error_handler.emitMessage(gpa, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) }); + try error_handler.emitMessage(gpa, io, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) }); std.process.exit(1); }; }, @@ -228,12 +228,12 @@ pub fn main() !void { } else if (options.input_format == .res) IoStream.fromIoSource(options.input_source, .input) catch |err| { - try error_handler.emitMessage(gpa, .err, "unable to read res file path '{s}': {s}", .{ options.input_source.filename, @errorName(err) }); + try error_handler.emitMessage(gpa, io, .err, "unable to read res file path '{s}': {s}", .{ options.input_source.filename, @errorName(err) }); std.process.exit(1); } else IoStream.fromIoSource(options.output_source, .output) catch |err| { - try error_handler.emitMessage(gpa, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) }); + try error_handler.emitMessage(gpa, io, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) }); std.process.exit(1); }; defer res_stream.deinit(gpa); @@ -246,17 +246,17 @@ pub fn main() !void { var mapping_results = parseAndRemoveLineCommands(gpa, full_input, full_input, .{ .initial_filename = options.input_source.filename }) catch |err| switch (err) { error.InvalidLineCommand => { // TODO: Maybe output the invalid line command - try error_handler.emitMessage(gpa, .err, "invalid line command in the preprocessed source", .{}); + try error_handler.emitMessage(gpa, io, .err, "invalid line command in the preprocessed source", .{}); if (options.preprocess == .no) { - try error_handler.emitMessage(gpa, .note, "line commands must be of the format: #line \"\"", .{}); + try error_handler.emitMessage(gpa, io, .note, "line commands must be of the format: #line \"\"", .{}); } else { - try error_handler.emitMessage(gpa, .note, "this is likely to be a bug, please report it", .{}); + try error_handler.emitMessage(gpa, io, .note, "this is likely to be a bug, please report it", .{}); } std.process.exit(1); }, error.LineNumberOverflow => { // TODO: Better error message - try error_handler.emitMessage(gpa, .err, "line number count exceeded maximum of {}", .{std.math.maxInt(usize)}); + try error_handler.emitMessage(gpa, io, .err, "line number count exceeded maximum of {}", .{std.math.maxInt(usize)}); std.process.exit(1); }, error.OutOfMemory => |e| return e, @@ -306,13 +306,13 @@ pub fn main() !void { // print any warnings/notes if (!zig_integration) { - diagnostics.renderToStdErr(Io.Dir.cwd(), final_input, mapping_results.mappings); + diagnostics.renderToStderr(io, Io.Dir.cwd(), final_input, mapping_results.mappings); } // write the depfile if (options.depfile_path) |depfile_path| { var depfile = Io.Dir.cwd().createFile(io, depfile_path, .{}) catch |err| { - try error_handler.emitMessage(gpa, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) }); + try error_handler.emitMessage(gpa, io, .err, 
"unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) }); std.process.exit(1); }; defer depfile.close(io); @@ -340,7 +340,7 @@ pub fn main() !void { if (options.output_format != .coff) return; break :res_data res_stream.source.readAll(gpa, io) catch |err| { - try error_handler.emitMessage(gpa, .err, "unable to read res from '{s}': {s}", .{ res_stream.name, @errorName(err) }); + try error_handler.emitMessage(gpa, io, .err, "unable to read res from '{s}': {s}", .{ res_stream.name, @errorName(err) }); std.process.exit(1); }; }; @@ -353,14 +353,14 @@ pub fn main() !void { var res_reader: std.Io.Reader = .fixed(res_data.bytes); break :resources cvtres.parseRes(gpa, &res_reader, .{ .max_size = res_data.bytes.len }) catch |err| { // TODO: Better errors - try error_handler.emitMessage(gpa, .err, "unable to parse res from '{s}': {s}", .{ res_stream.name, @errorName(err) }); + try error_handler.emitMessage(gpa, io, .err, "unable to parse res from '{s}': {s}", .{ res_stream.name, @errorName(err) }); std.process.exit(1); }; }; defer resources.deinit(); var coff_stream = IoStream.fromIoSource(options.output_source, .output) catch |err| { - try error_handler.emitMessage(gpa, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) }); + try error_handler.emitMessage(gpa, io, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) }); std.process.exit(1); }; defer coff_stream.deinit(gpa); @@ -373,7 +373,7 @@ pub fn main() !void { switch (err) { error.DuplicateResource => { const duplicate_resource = resources.list.items[cvtres_diagnostics.duplicate_resource]; - try error_handler.emitMessage(gpa, .err, "duplicate resource [id: {f}, type: {f}, language: {f}]", .{ + try error_handler.emitMessage(gpa, io, .err, "duplicate resource [id: {f}, type: {f}, language: {f}]", .{ duplicate_resource.name_value, fmtResourceType(duplicate_resource.type_value), duplicate_resource.language, @@ -381,8 +381,8 @@ pub fn main() !void { }, error.ResourceDataTooLong => { const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource]; - try error_handler.emitMessage(gpa, .err, "resource has a data length that is too large to be written into a coff section", .{}); - try error_handler.emitMessage(gpa, .note, "the resource with the invalid size is [id: {f}, type: {f}, language: {f}]", .{ + try error_handler.emitMessage(gpa, io, .err, "resource has a data length that is too large to be written into a coff section", .{}); + try error_handler.emitMessage(gpa, io, .note, "the resource with the invalid size is [id: {f}, type: {f}, language: {f}]", .{ overflow_resource.name_value, fmtResourceType(overflow_resource.type_value), overflow_resource.language, @@ -390,15 +390,15 @@ pub fn main() !void { }, error.TotalResourceDataTooLong => { const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource]; - try error_handler.emitMessage(gpa, .err, "total resource data exceeds the maximum of the coff 'size of raw data' field", .{}); - try error_handler.emitMessage(gpa, .note, "size overflow occurred when attempting to write this resource: [id: {f}, type: {f}, language: {f}]", .{ + try error_handler.emitMessage(gpa, io, .err, "total resource data exceeds the maximum of the coff 'size of raw data' field", .{}); + try error_handler.emitMessage(gpa, io, .note, "size overflow occurred when attempting to write this resource: [id: {f}, type: {f}, language: {f}]", .{ overflow_resource.name_value, 
fmtResourceType(overflow_resource.type_value), overflow_resource.language, }); }, else => { - try error_handler.emitMessage(gpa, .err, "unable to write coff output file '{s}': {s}", .{ coff_stream.name, @errorName(err) }); + try error_handler.emitMessage(gpa, io, .err, "unable to write coff output file '{s}': {s}", .{ coff_stream.name, @errorName(err) }); }, } // Delete the output file on error @@ -550,16 +550,16 @@ const LazyIncludePaths = struct { else => |e| { switch (e) { error.UnsupportedAutoIncludesMachineType => { - try error_handler.emitMessage(self.arena, .err, "automatic include path detection is not supported for target '{s}'", .{@tagName(self.target_machine_type)}); + try error_handler.emitMessage(self.arena, io, .err, "automatic include path detection is not supported for target '{s}'", .{@tagName(self.target_machine_type)}); }, error.MsvcIncludesNotFound => { - try error_handler.emitMessage(self.arena, .err, "MSVC include paths could not be automatically detected", .{}); + try error_handler.emitMessage(self.arena, io, .err, "MSVC include paths could not be automatically detected", .{}); }, error.MingwIncludesNotFound => { - try error_handler.emitMessage(self.arena, .err, "MinGW include paths could not be automatically detected", .{}); + try error_handler.emitMessage(self.arena, io, .err, "MinGW include paths could not be automatically detected", .{}); }, } - try error_handler.emitMessage(self.arena, .note, "to disable auto includes, use the option /:auto-includes none", .{}); + try error_handler.emitMessage(self.arena, io, .note, "to disable auto includes, use the option /:auto-includes none", .{}); std.process.exit(1); }, }; @@ -664,6 +664,7 @@ const ErrorHandler = union(enum) { pub fn emitCliDiagnostics( self: *ErrorHandler, allocator: Allocator, + io: Io, args: []const []const u8, diagnostics: *cli.Diagnostics, ) !void { @@ -674,7 +675,7 @@ const ErrorHandler = union(enum) { try server.serveErrorBundle(error_bundle); }, - .stderr => diagnostics.renderToStdErr(args), + .stderr => diagnostics.renderToStderr(io, args), } } @@ -684,6 +685,7 @@ const ErrorHandler = union(enum) { fail_msg: []const u8, comp: *aro.Compilation, ) !void { + const io = comp.io; switch (self.*) { .server => |*server| { var error_bundle = try compiler_util.aroDiagnosticsToErrorBundle( @@ -697,9 +699,9 @@ const ErrorHandler = union(enum) { }, .stderr => { // aro errors have already been emitted - const stderr, const ttyconf = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - try renderErrorMessage(stderr, ttyconf, .err, "{s}", .{fail_msg}); + const stderr = io.lockStderrWriter(&.{}); + defer io.unlockStderrWriter(); + try renderErrorMessage(&stderr.interface, stderr.mode, .err, "{s}", .{fail_msg}); }, } } @@ -707,6 +709,7 @@ const ErrorHandler = union(enum) { pub fn emitDiagnostics( self: *ErrorHandler, allocator: Allocator, + io: Io, cwd: Io.Dir, source: []const u8, diagnostics: *Diagnostics, @@ -719,13 +722,14 @@ const ErrorHandler = union(enum) { try server.serveErrorBundle(error_bundle); }, - .stderr => diagnostics.renderToStdErr(cwd, source, mappings), + .stderr => diagnostics.renderToStderr(io, cwd, source, mappings), } } pub fn emitMessage( self: *ErrorHandler, allocator: Allocator, + io: Io, msg_type: @import("utils.zig").ErrorMessageType, comptime format: []const u8, args: anytype, @@ -741,9 +745,9 @@ const ErrorHandler = union(enum) { try server.serveErrorBundle(error_bundle); }, .stderr => { - const stderr, const ttyconf = std.debug.lockStderrWriter(&.{}); - defer 
std.debug.unlockStderrWriter(); - try renderErrorMessage(stderr, ttyconf, msg_type, format, args); + const stderr = io.lockStderrWriter(&.{}); + defer io.unlockStderrWriter(); + try renderErrorMessage(&stderr.interface, stderr.mode, msg_type, format, args); }, } } diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index 538558fe48..9ff199d07b 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -407,7 +407,7 @@ fn buildWasmBinary( } if (result_error_bundle.errorMessageCount() > 0) { - result_error_bundle.renderToStdErr(.{}, true); + result_error_bundle.renderToStderr(io, .{}, true); std.log.err("the following command failed with {d} compilation errors:\n{s}", .{ result_error_bundle.errorMessageCount(), try std.Build.Step.allocPrintCmd(arena, null, argv.items), diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index f7f59a5bf3..3a135ab8fd 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -411,16 +411,13 @@ pub fn fuzz( std.debug.writeStackTrace(trace, &stderr.interface, stderr.mode) catch break :p; } stderr.interface.print("failed with error.{t}\n", .{err}) catch break :p; - stderr.interface.flush() catch break :p; } - stderr.interface.flush() catch {}; std.process.exit(1); }, }; if (log_err_count != 0) { const stderr = std.debug.lockStderrWriter(&.{}); stderr.interface.print("error logs detected\n", .{}) catch {}; - stderr.interface.flush() catch {}; std.process.exit(1); } } diff --git a/lib/std/Build.zig b/lib/std/Build.zig index b7300cb5df..de2985d2b5 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -2238,7 +2238,7 @@ pub const GeneratedFile = struct { /// This value must be set in the `fn make()` of the `step` and must not be `null` afterwards. path: ?[]const u8 = null, - /// Deprecated, see `getPath2`. + /// Deprecated, see `getPath3`. pub fn getPath(gen: GeneratedFile) []const u8 { return gen.step.owner.pathFromCwd(gen.path orelse std.debug.panic( "getPath() was called on a GeneratedFile that wasn't built yet. Is there a missing Step dependency on step '{s}'?", @@ -2246,11 +2246,18 @@ pub const GeneratedFile = struct { )); } + /// Deprecated, see `getPath3`. pub fn getPath2(gen: GeneratedFile, src_builder: *Build, asking_step: ?*Step) []const u8 { + return getPath3(gen, src_builder, asking_step) catch |err| switch (err) { + error.Canceled => std.process.exit(1), + }; + } + + pub fn getPath3(gen: GeneratedFile, src_builder: *Build, asking_step: ?*Step) Io.Cancelable![]const u8 { return gen.path orelse { - const stderr = std.debug.lockStderrWriter(&.{}); + const io = gen.step.owner.graph.io; + const stderr = try io.lockStderrWriter(&.{}); dumpBadGetPathHelp(gen.step, &stderr.interface, stderr.mode, src_builder, asking_step) catch {}; - std.debug.unlockStderrWriter(); @panic("misconfigured build script"); }; } @@ -2425,22 +2432,29 @@ pub const LazyPath = union(enum) { } } - /// Deprecated, see `getPath3`. + /// Deprecated, see `getPath4`. pub fn getPath(lazy_path: LazyPath, src_builder: *Build) []const u8 { return getPath2(lazy_path, src_builder, null); } - /// Deprecated, see `getPath3`. + /// Deprecated, see `getPath4`. pub fn getPath2(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) []const u8 { const p = getPath3(lazy_path, src_builder, asking_step); return src_builder.pathResolve(&.{ p.root_dir.path orelse ".", p.sub_path }); } + /// Deprecated, see `getPath4`. 
+ pub fn getPath3(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) Cache.Path { + return getPath4(lazy_path, src_builder, asking_step) catch |err| switch (err) { + error.Canceled => std.process.exit(1), + }; + } + /// Intended to be used during the make phase only. /// /// `asking_step` is only used for debugging purposes; it's the step being /// run that is asking for the path. - pub fn getPath3(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) Cache.Path { + pub fn getPath4(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) Io.Cancelable!Cache.Path { switch (lazy_path) { .src_path => |sp| return .{ .root_dir = sp.owner.build_root, @@ -2457,9 +2471,10 @@ pub const LazyPath = union(enum) { var file_path: Cache.Path = .{ .root_dir = Cache.Directory.cwd(), .sub_path = gen.file.path orelse { - const stderr = std.debug.lockStderrWriter(&.{}); + const io = src_builder.graph.io; + const stderr = try io.lockStderrWriter(&.{}); dumpBadGetPathHelp(gen.file.step, &stderr.interface, stderr.mode, src_builder, asking_step) catch {}; - std.debug.unlockStderrWriter(); + io.unlockStderrWriter(); @panic("misconfigured build script"); }, }; diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig index c2b7d1e9e9..4770e03a0c 100644 --- a/lib/std/Build/Fuzz.zig +++ b/lib/std/Build/Fuzz.zig @@ -158,6 +158,7 @@ fn rebuildTestsWorkerRun(run: *Step.Run, gpa: Allocator, parent_prog_node: std.P } fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, parent_prog_node: std.Progress.Node) !void { + const io = run.step.owner.graph.io; const compile = run.producer.?; const prog_node = parent_prog_node.start(compile.step.name, 0); defer prog_node.end(); @@ -170,8 +171,8 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, parent_prog_nod if (show_error_msgs or show_compile_errors or show_stderr) { var buf: [256]u8 = undefined; - const stderr = std.debug.lockStderrWriter(&buf); - defer std.debug.unlockStderrWriter(); + const stderr = try io.lockStderrWriter(&buf); + defer io.unlockStderrWriter(); build_runner.printErrorMessages(gpa, &compile.step, .{}, &stderr.interface, stderr.mode, .verbose, .indent) catch {}; } @@ -182,12 +183,10 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, parent_prog_nod run.rebuilt_executable = try rebuilt_bin_path.join(gpa, compile.out_filename); } -fn fuzzWorkerRun( - fuzz: *Fuzz, - run: *Step.Run, - unit_test_index: u32, -) void { - const gpa = run.step.owner.allocator; +fn fuzzWorkerRun(fuzz: *Fuzz, run: *Step.Run, unit_test_index: u32) void { + const owner = run.step.owner; + const gpa = owner.allocator; + const io = owner.graph.io; const test_name = run.cached_test_metadata.?.testName(unit_test_index); const prog_node = fuzz.prog_node.start(test_name, 0); @@ -196,8 +195,10 @@ fn fuzzWorkerRun( run.rerunInFuzzMode(fuzz, unit_test_index, prog_node) catch |err| switch (err) { error.MakeFailed => { var buf: [256]u8 = undefined; - const stderr = std.debug.lockStderrWriter(&buf); - defer std.debug.unlockStderrWriter(); + const stderr = io.lockStderrWriter(&buf) catch |e| switch (e) { + error.Canceled => return, + }; + defer io.unlockStderrWriter(); build_runner.printErrorMessages(gpa, &run.step, .{}, &stderr.interface, stderr.mode, .verbose, .indent) catch {}; return; }, diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index d703e55b87..888775884d 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -922,20 +922,23 @@ const CliNamedModules = struct { 
} }; -fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking_step: ?*Step) []const u8 { +fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking_step: ?*Step) ![]const u8 { + const step = &compile.step; + const b = step.owner; + const io = b.graph.io; const maybe_path: ?*GeneratedFile = @field(compile, tag_name); const generated_file = maybe_path orelse { - const stderr = std.debug.lockStderrWriter(&.{}); + const stderr = try io.lockStderrWriter(&.{}); std.Build.dumpBadGetPathHelp(&compile.step, &stderr.interface, stderr.mode, compile.step.owner, asking_step) catch {}; - std.debug.unlockStderrWriter(); + io.unlockStderrWriter(); @panic("missing emit option for " ++ tag_name); }; const path = generated_file.path orelse { - const stderr = std.debug.lockStderrWriter(&.{}); + const stderr = try io.lockStderrWriter(&.{}); std.Build.dumpBadGetPathHelp(&compile.step, &stderr.interface, stderr.mode, compile.step.owner, asking_step) catch {}; - std.debug.unlockStderrWriter(); + io.unlockStderrWriter(); @panic(tag_name ++ " is null. Is there a missing step dependency?"); }; @@ -1149,9 +1152,9 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 { // For everything else, we directly link // against the library file. const full_path_lib = if (other_produces_implib) - other.getGeneratedFilePath("generated_implib", &compile.step) + try other.getGeneratedFilePath("generated_implib", &compile.step) else - other.getGeneratedFilePath("generated_bin", &compile.step); + try other.getGeneratedFilePath("generated_bin", &compile.step); try zig_args.append(full_path_lib); total_linker_objects += 1; diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index f1e5313905..67cf201374 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1559,6 +1559,7 @@ fn spawnChildAndCollect( ) !?EvalGenericResult { const b = run.step.owner; const arena = b.allocator; + const io = b.graph.io; if (fuzz_context != null) { assert(!has_side_effects); @@ -1625,10 +1626,10 @@ fn spawnChildAndCollect( child.progress_node = options.progress_node; } if (inherit) { - const stderr = std.debug.lockStderrWriter(&.{}); + const stderr = try io.lockStderrWriter(&.{}); try setColorEnvironmentVariables(run, env_map, stderr.mode); } - defer if (inherit) std.debug.unlockStderrWriter(); + defer if (inherit) io.unlockStderrWriter(); var timer = try std.time.Timer.start(); const res = try evalGeneric(run, &child); run.step.result_duration_ns = timer.read(); diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index ccb7159192..a2b35e3522 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -655,7 +655,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim } if (result_error_bundle.errorMessageCount() > 0) { - result_error_bundle.renderToStdErr(.{}, .auto); + try result_error_bundle.renderToStderr(io, .{}, .auto); log.err("the following command failed with {d} compilation errors:\n{s}", .{ result_error_bundle.errorMessageCount(), try Build.Step.allocPrintCmd(arena, null, argv.items), diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 603f53091e..6edbf9e471 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -764,7 +764,7 @@ fn appendTreeSymbol(symbol: TreeSymbol, buf: []u8, start_i: usize) usize { } } -pub fn clearWrittenWithEscapeCodes(file_writer: *Io.File.Writer) anyerror!void { +pub fn clearWrittenWithEscapeCodes(file_writer: *Io.File.Writer) 
Io.Writer.Error!void { if (noop_impl or !global_progress.need_clear) return; try file_writer.writeAllUnescaped(clear ++ progress_remove); global_progress.need_clear = false; diff --git a/lib/std/json/dynamic.zig b/lib/std/json/dynamic.zig index c3cccd1a91..f85916670b 100644 --- a/lib/std/json/dynamic.zig +++ b/lib/std/json/dynamic.zig @@ -47,10 +47,9 @@ pub const Value = union(enum) { } pub fn dump(v: Value) void { - const w, _ = std.debug.lockStderrWriter(&.{}); + const stderr = std.debug.lockStderrWriter(&.{}); defer std.debug.unlockStderrWriter(); - - json.Stringify.value(v, .{}, w) catch return; + json.Stringify.value(v, .{}, &stderr.interface) catch return; } pub fn jsonStringify(value: @This(), jws: anytype) !void { diff --git a/lib/std/process.zig b/lib/std/process.zig index 216e793cbe..4b4c27fe20 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -1849,7 +1849,7 @@ pub fn totalSystemMemory() TotalSystemMemoryError!u64 { /// and does not return. pub fn cleanExit(io: Io) void { if (builtin.mode == .Debug) return; - _ = io.lockStderrWriter(&.{}); + _ = io.lockStderrWriter(&.{}) catch {}; exit(0); } diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 092b65d71a..64ea3d1a4d 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -368,13 +368,21 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const break :diff_index if (expected.len == actual.len) return else shortest; }; if (!backend_can_print) return error.TestExpectedEqual; - const stderr = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - failEqualSlices(T, expected, actual, diff_index, &stderr.interface, stderr.mode) catch {}; + if (io.lockStderrWriter(&.{})) |stderr| { + defer io.unlockStderrWriter(); + failEqualSlices(T, expected, actual, diff_index, &stderr.interface, stderr.mode) catch {}; + } else |_| {} return error.TestExpectedEqual; } -fn failEqualSlices(comptime T: type, expected: []const T, actual: []const T, diff_index: usize, w: *Io.Writer, fwm: Io.File.Writer.Mode) !void { +fn failEqualSlices( + comptime T: type, + expected: []const T, + actual: []const T, + diff_index: usize, + w: *Io.Writer, + fwm: Io.File.Writer.Mode, +) !void { try w.print("slices differ. first difference occurs at index {d} (0x{X})\n", .{ diff_index, diff_index }); // TODO: Should this be configurable by the caller? 
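For reference, a minimal sketch (not part of the patch) of the pattern the std.testing hunk above adopts, assuming only the `std.Io` API visible in these diffs; the helper name, its `msg` parameter, and the empty buffer are illustrative. The point is that the now-fallible `io.lockStderrWriter` is handled with an if-else so a canceled lock cannot swallow the original test failure:

const std = @import("std");

// Hypothetical helper mirroring the expectEqualSlices change above:
// diagnostics are best-effort, and a failure to lock stderr must not
// mask the error the caller actually wants to return.
fn reportThenFail(io: std.Io, msg: []const u8) error{TestExpectedEqual}!void {
    if (io.lockStderrWriter(&.{})) |stderr| {
        defer io.unlockStderrWriter();
        // Best-effort write; ignore write errors so the original
        // failure is still reported to the caller.
        stderr.interface.writeAll(msg) catch {};
    } else |_| {
        // Lock was canceled; skip diagnostics entirely.
    }
    return error.TestExpectedEqual;
}
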
diff --git a/lib/std/zig.zig b/lib/std/zig.zig index b6203326f2..a094dee83f 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -639,7 +639,7 @@ pub fn readSourceFileToEndAlloc(gpa: Allocator, file_reader: *Io.File.Reader) ![ return buffer.toOwnedSliceSentinel(gpa, 0); } -pub fn printAstErrorsToStderr(gpa: Allocator, tree: Ast, path: []const u8, color: Color) !void { +pub fn printAstErrorsToStderr(gpa: Allocator, io: Io, tree: Ast, path: []const u8, color: Color) !void { var wip_errors: std.zig.ErrorBundle.Wip = undefined; try wip_errors.init(gpa); defer wip_errors.deinit(); @@ -648,7 +648,7 @@ pub fn printAstErrorsToStderr(gpa: Allocator, tree: Ast, path: []const u8, color var error_bundle = try wip_errors.toOwnedBundle(""); defer error_bundle.deinit(gpa); - error_bundle.renderToStdErr(.{}, color); + error_bundle.renderToStderr(io, .{}, color); } pub fn putAstErrorsIntoBundle( diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig index cc6182c8aa..0342874cf9 100644 --- a/lib/std/zig/ErrorBundle.zig +++ b/lib/std/zig/ErrorBundle.zig @@ -162,11 +162,13 @@ pub const RenderOptions = struct { include_log_text: bool = true, }; -pub fn renderToStdErr(eb: ErrorBundle, options: RenderOptions, color: std.zig.Color) void { +pub const RenderToStderrError = Io.Cancelable || Io.File.Writer.Mode.SetColorError; + +pub fn renderToStderr(eb: ErrorBundle, io: Io, options: RenderOptions, color: std.zig.Color) RenderToStderrError!void { var buffer: [256]u8 = undefined; - const stderr = std.debug.lockStderrWriter(&buffer); - defer std.debug.unlockStderrWriter(); - renderToWriter(eb, options, &stderr.interface, color.getTtyConf(stderr.mode)) catch return; + const stderr = try io.lockStderrWriter(&buffer); + defer io.unlockStderrWriter(); + try renderToWriter(eb, options, &stderr.interface, color.getTtyConf(stderr.mode)); } pub fn renderToWriter( diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index 9d9a6a48ce..5318eab2df 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -1,7 +1,6 @@ const std = @import("std"); -const mem = std.mem; -const print = std.debug.print; -const maxInt = std.math.maxInt; +const Io = std.Io; +const Allocator = std.mem.Allocator; test "zig fmt: remove extra whitespace at start and end of file with comment between" { try testTransform( @@ -6332,10 +6331,10 @@ test "ampersand" { var fixed_buffer_mem: [100 * 1024]u8 = undefined; -fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 { +fn testParse(io: Io, source: [:0]const u8, allocator: Allocator, anything_changed: *bool) ![]u8 { var buffer: [64]u8 = undefined; - const stderr = std.debug.lockStderrWriter(&buffer); - defer std.debug.unlockStderrWriter(); + const stderr = try io.lockStderrWriter(&buffer); + defer io.unlockStderrWriter(); var tree = try std.zig.Ast.parse(allocator, source, .zig); defer tree.deinit(allocator); @@ -6359,27 +6358,36 @@ fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: * } const formatted = try tree.renderAlloc(allocator); - anything_changed.* = !mem.eql(u8, formatted, source); + anything_changed.* = !std.mem.eql(u8, formatted, source); return formatted; } -fn testTransformImpl(allocator: mem.Allocator, fba: *std.heap.FixedBufferAllocator, source: [:0]const u8, expected_source: []const u8) !void { +fn testTransformImpl( + io: Io, + allocator: Allocator, + fba: *std.heap.FixedBufferAllocator, + source: [:0]const u8, + expected_source: []const u8, +) !void { // reset the fixed 
buffer allocator each run so that it can be re-used for each // iteration of the failing index fba.reset(); var anything_changed: bool = undefined; - const result_source = try testParse(source, allocator, &anything_changed); + const result_source = try testParse(io, source, allocator, &anything_changed); try std.testing.expectEqualStrings(expected_source, result_source); const changes_expected = source.ptr != expected_source.ptr; if (anything_changed != changes_expected) { - print("std.zig.render returned {} instead of {}\n", .{ anything_changed, changes_expected }); + std.debug.print("std.zig.render returned {} instead of {}\n", .{ anything_changed, changes_expected }); return error.TestFailed; } try std.testing.expect(anything_changed == changes_expected); allocator.free(result_source); } fn testTransform(source: [:0]const u8, expected_source: []const u8) !void { + const io = std.testing.io; var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]); - return std.testing.checkAllAllocationFailures(fixed_allocator.allocator(), testTransformImpl, .{ &fixed_allocator, source, expected_source }); + return std.testing.checkAllAllocationFailures(fixed_allocator.allocator(), testTransformImpl, .{ + io, &fixed_allocator, source, expected_source, + }); } fn testCanonical(source: [:0]const u8) !void { return testTransform(source, source); diff --git a/src/Air/print.zig b/src/Air/print.zig index 95c8a1fcda..05614a0ed3 100644 --- a/src/Air/print.zig +++ b/src/Air/print.zig @@ -9,7 +9,7 @@ const Type = @import("../Type.zig"); const Air = @import("../Air.zig"); const InternPool = @import("../InternPool.zig"); -pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air.Liveness) void { +pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air.Liveness) !void { comptime assert(build_options.enable_debug_extensions); const instruction_bytes = air.instructions.len * // Here we don't use @sizeOf(Air.Inst.Data) because it would include @@ -24,7 +24,7 @@ pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air liveness_special_bytes + tomb_bytes; // zig fmt: off - stream.print( + try stream.print( \\# Total AIR+Liveness bytes: {Bi} \\# AIR Instructions: {d} ({Bi}) \\# AIR Extra Data: {d} ({Bi}) @@ -39,7 +39,7 @@ pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air tomb_bytes, if (liveness) |l| l.extra.len else 0, liveness_extra_bytes, if (liveness) |l| l.special.count() else 0, liveness_special_bytes, - }) catch return; + }); // zig fmt: on var writer: Writer = .{ @@ -50,7 +50,7 @@ pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air .indent = 2, .skip_body = false, }; - writer.writeBody(stream, air.getMainBody()) catch return; + try writer.writeBody(stream, air.getMainBody()); } pub fn writeInst( @@ -73,15 +73,23 @@ pub fn writeInst( } pub fn dump(air: Air, pt: Zcu.PerThread, liveness: ?Air.Liveness) void { - const stderr_bw, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - air.write(stderr_bw, pt, liveness); + const comp = pt.zcu.comp; + const io = comp.io; + var buffer: [512]u8 = undefined; + const stderr = try io.lockStderrWriter(&buffer); + defer io.unlockStderrWriter(); + const w = &stderr.interface; + air.write(w, pt, liveness); } pub fn dumpInst(air: Air, inst: Air.Inst.Index, pt: Zcu.PerThread, liveness: ?Air.Liveness) void { - const stderr_bw, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - 
air.writeInst(stderr_bw, inst, pt, liveness); + const comp = pt.zcu.comp; + const io = comp.io; + var buffer: [512]u8 = undefined; + const stderr = try io.lockStderrWriter(&buffer); + defer io.unlockStderrWriter(); + const w = &stderr.interface; + air.writeInst(w, inst, pt, liveness); } const Writer = struct { diff --git a/src/Compilation.zig b/src/Compilation.zig index 9e76b8feca..47098317b5 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2088,12 +2088,13 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, if (options.verbose_llvm_cpu_features) { if (options.root_mod.resolved_target.llvm_cpu_features) |cf| print: { - const stderr_w, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - stderr_w.print("compilation: {s}\n", .{options.root_name}) catch break :print; - stderr_w.print(" target: {s}\n", .{try target.zigTriple(arena)}) catch break :print; - stderr_w.print(" cpu: {s}\n", .{target.cpu.model.name}) catch break :print; - stderr_w.print(" features: {s}\n", .{cf}) catch {}; + const stderr = try io.lockStderrWriter(&.{}); + defer io.unlockStderrWriter(); + const w = &stderr.interface; + w.print("compilation: {s}\n", .{options.root_name}) catch break :print; + w.print(" target: {s}\n", .{try target.zigTriple(arena)}) catch break :print; + w.print(" cpu: {s}\n", .{target.cpu.model.name}) catch break :print; + w.print(" features: {s}\n", .{cf}) catch {}; } } @@ -4257,12 +4258,13 @@ pub fn getAllErrorsAlloc(comp: *Compilation) error{OutOfMemory}!ErrorBundle { // However, we haven't reported any such error. // This is a compiler bug. print_ctx: { - var stderr_w, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - stderr_w.writeAll("referenced transitive analysis errors, but none actually emitted\n") catch break :print_ctx; - stderr_w.print("{f} [transitive failure]\n", .{zcu.fmtAnalUnit(failed_unit)}) catch break :print_ctx; + const stderr = try io.lockStderrWriter(&.{}); + defer io.unlockStderrWriter(); + const w = &stderr.interface; + w.writeAll("referenced transitive analysis errors, but none actually emitted\n") catch break :print_ctx; + w.print("{f} [transitive failure]\n", .{zcu.fmtAnalUnit(failed_unit)}) catch break :print_ctx; while (ref) |r| { - stderr_w.print("referenced by: {f}{s}\n", .{ + w.print("referenced by: {f}{s}\n", .{ zcu.fmtAnalUnit(r.referencer), if (zcu.transitive_failed_analysis.contains(r.referencer)) " [transitive failure]" else "", }) catch break :print_ctx; @@ -5756,7 +5758,7 @@ pub fn translateC( try argv.appendSlice(comp.global_cc_argv); try argv.appendSlice(owner_mod.cc_argv); try argv.appendSlice(&.{ source_path, "-o", translated_path }); - if (comp.verbose_cimport) dump_argv(argv.items); + if (comp.verbose_cimport) dumpArgv(io, argv.items); } var stdout: []u8 = undefined; @@ -6264,7 +6266,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr } if (comp.verbose_cc) { - dump_argv(argv.items); + dumpArgv(io, argv.items); } const err = std.process.execv(arena, argv.items); @@ -6310,7 +6312,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr } if (comp.verbose_cc) { - dump_argv(argv.items); + dumpArgv(io, argv.items); } // Just to save disk space, we delete the files that are never needed again. 
@@ -7773,17 +7775,22 @@ pub fn lockAndSetMiscFailure( return setMiscFailure(comp, tag, format, args); } -pub fn dump_argv(argv: []const []const u8) void { +pub fn dumpArgv(io: Io, argv: []const []const u8) Io.Cancelable!void { var buffer: [64]u8 = undefined; - const stderr, _ = std.debug.lockStderrWriter(&buffer); - defer std.debug.unlockStderrWriter(); - nosuspend { - for (argv, 0..) |arg, i| { - if (i != 0) stderr.writeByte(' ') catch return; - stderr.writeAll(arg) catch return; - } - stderr.writeByte('\n') catch return; + const stderr = try io.lockStderrWriter(&buffer); + defer io.unlockStderrWriter(); + const w = &stderr.interface; + return dumpArgvWriter(w, argv) catch |err| switch (err) { + error.WriteFailed => return stderr.err.?, + }; +} + +fn dumpArgvWriter(w: *Io.Writer, argv: []const []const u8) Io.Writer.Error!void { + for (argv, 0..) |arg, i| { + if (i != 0) try w.writeByte(' '); + try w.writeAll(arg); } + try w.writeByte('\n'); } pub fn getZigBackend(comp: Compilation) std.builtin.CompilerBackend { diff --git a/src/InternPool.zig b/src/InternPool.zig index 5568c493d9..539cb441c5 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1,8 +1,11 @@ //! All interned objects have both a value and a type. //! This data structure is self-contained. +const InternPool = @This(); const builtin = @import("builtin"); + const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const assert = std.debug.assert; const BigIntConst = std.math.big.int.Const; @@ -11,10 +14,9 @@ const Cache = std.Build.Cache; const Io = std.Io; const Limb = std.math.big.Limb; const Hash = std.hash.Wyhash; +const Zir = std.zig.Zir; -const InternPool = @This(); const Zcu = @import("Zcu.zig"); -const Zir = std.zig.Zir; /// One item per thread, indexed by `tid`, which is dense and unique per thread. 
locals: []Local, @@ -11165,12 +11167,16 @@ pub fn mutateVarInit(ip: *InternPool, io: Io, index: Index, init_index: Index) v @atomicStore(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.Variable, "init").?], @intFromEnum(init_index), .release); } -pub fn dump(ip: *const InternPool) void { - dumpStatsFallible(ip, std.heap.page_allocator) catch return; - dumpAllFallible(ip) catch return; +pub fn dump(ip: *const InternPool, io: Io) Io.Cancelable!void { + var buffer: [4096]u8 = undefined; + const stderr_writer = try io.lockStderrWriter(&buffer); + defer io.unlockStderrWriter(); + const w = &stderr_writer.interface; + try dumpStatsFallible(ip, w, std.heap.page_allocator); + try dumpAllFallible(ip, w); } -fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { +fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) anyerror!void { var items_len: usize = 0; var extra_len: usize = 0; var limbs_len: usize = 0; @@ -11423,18 +11429,13 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { }; counts.sort(SortContext{ .map = &counts }); const len = @min(50, counts.count()); - std.debug.print(" top 50 tags:\n", .{}); + w.print(" top 50 tags:\n", .{}); for (counts.keys()[0..len], counts.values()[0..len]) |tag, stats| { - std.debug.print(" {s}: {d} occurrences, {d} total bytes\n", .{ - @tagName(tag), stats.count, stats.bytes, - }); + w.print(" {t}: {d} occurrences, {d} total bytes\n", .{ tag, stats.count, stats.bytes }); } } -fn dumpAllFallible(ip: *const InternPool) anyerror!void { - var buffer: [4096]u8 = undefined; - const stderr_bw, _ = std.debug.lockStderrWriter(&buffer); - defer std.debug.unlockStderrWriter(); +fn dumpAllFallible(ip: *const InternPool, w: *Io.Writer) anyerror!void { for (ip.locals, 0..) 
|*local, tid| { const items = local.shared.items.view(); for ( @@ -11443,12 +11444,12 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void { 0.., ) |tag, data, index| { const i = Index.Unwrapped.wrap(.{ .tid = @enumFromInt(tid), .index = @intCast(index) }, ip); - try stderr_bw.print("${d} = {s}(", .{ i, @tagName(tag) }); + try w.print("${d} = {s}(", .{ i, @tagName(tag) }); switch (tag) { .removed => {}, - .simple_type => try stderr_bw.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(@intFromEnum(i))))}), - .simple_value => try stderr_bw.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(@intFromEnum(i))))}), + .simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(@intFromEnum(i))))}), + .simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(@intFromEnum(i))))}), .type_int_signed, .type_int_unsigned, @@ -11521,23 +11522,27 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void { .func_coerced, .union_value, .memoized_call, - => try stderr_bw.print("{d}", .{data}), + => try w.print("{d}", .{data}), .opt_null, .type_slice, .only_possible_value, - => try stderr_bw.print("${d}", .{data}), + => try w.print("${d}", .{data}), } - try stderr_bw.writeAll(")\n"); + try w.writeAll(")\n"); } } } -pub fn dumpGenericInstances(ip: *const InternPool, allocator: Allocator) void { - ip.dumpGenericInstancesFallible(allocator) catch return; +pub fn dumpGenericInstances(ip: *const InternPool, io: Io, allocator: Allocator) Io.Cancelable!void { + var buffer: [4096]u8 = undefined; + const stderr_writer = try io.lockStderrWriter(&buffer); + defer io.unlockStderrWriter(); + const w = &stderr_writer.interface; + try ip.dumpGenericInstancesFallible(allocator, w); } -pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) anyerror!void { +pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator, w: *Io.Writer) !void { var arena_allocator = std.heap.ArenaAllocator.init(allocator); defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); @@ -11564,10 +11569,6 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) } } - var buffer: [4096]u8 = undefined; - const stderr_bw, _ = std.debug.lockStderrWriter(&buffer); - defer std.debug.unlockStderrWriter(); - const SortContext = struct { values: []std.ArrayList(Index), pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { @@ -11579,19 +11580,19 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) var it = instances.iterator(); while (it.next()) |entry| { const generic_fn_owner_nav = ip.getNav(ip.funcDeclInfo(entry.key_ptr.*).owner_nav); - try stderr_bw.print("{f} ({d}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len }); + try w.print("{f} ({d}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len }); for (entry.value_ptr.items) |index| { const unwrapped_index = index.unwrap(ip); const func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), unwrapped_index.getData(ip)); const owner_nav = ip.getNav(func.owner_nav); - try stderr_bw.print(" {f}: (", .{owner_nav.name.fmt(ip)}); + try w.print(" {f}: (", .{owner_nav.name.fmt(ip)}); for (func.comptime_args.get(ip)) |arg| { if (arg != .none) { const key = ip.indexToKey(arg); - try stderr_bw.print(" {} ", .{key}); + try w.print(" {} ", .{key}); } } - try stderr_bw.writeAll(")\n"); + try w.writeAll(")\n"); } } } diff --git a/src/Sema.zig b/src/Sema.zig index fec6850c4c..1f6a577f60 
100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2668,16 +2668,18 @@ fn failWithTypeMismatch(sema: *Sema, block: *Block, src: LazySrcLoc, expected: T pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Zcu.ErrorMsg) error{ AnalysisFail, OutOfMemory } { @branchHint(.cold); - const gpa = sema.gpa; const zcu = sema.pt.zcu; + const comp = zcu.comp; + const gpa = comp.gpa; + const io = comp.io; - if (build_options.enable_debug_extensions and zcu.comp.debug_compile_errors) { + if (build_options.enable_debug_extensions and comp.debug_compile_errors) { var wip_errors: std.zig.ErrorBundle.Wip = undefined; wip_errors.init(gpa) catch @panic("out of memory"); Compilation.addModuleErrorMsg(zcu, &wip_errors, err_msg.*, false) catch @panic("out of memory"); std.debug.print("compile error during Sema:\n", .{}); var error_bundle = wip_errors.toOwnedBundle("") catch @panic("out of memory"); - error_bundle.renderToStdErr(.{}, .auto); + error_bundle.renderToStderr(io, .{}, .auto); std.debug.panicExtra(@returnAddress(), "unexpected compile error occurred", .{}); } diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 2333d7f286..408f16bd74 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -4556,8 +4556,9 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e defer if (liveness) |*l| l.deinit(gpa); if (build_options.enable_debug_extensions and comp.verbose_air) { - const stderr, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); + const io = comp.io; + const stderr = try io.lockStderrWriter(&.{}); + defer io.unlockStderrWriter(); stderr.print("# Begin Function AIR: {f}:\n", .{fqn.fmt(ip)}) catch {}; air.write(stderr, pt, liveness); stderr.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)}) catch {}; diff --git a/src/codegen/aarch64/Select.zig b/src/codegen/aarch64/Select.zig index f390d83f03..138c70fecf 100644 --- a/src/codegen/aarch64/Select.zig +++ b/src/codegen/aarch64/Select.zig @@ -11273,15 +11273,18 @@ fn initValueAdvanced( return @enumFromInt(isel.values.items.len); } pub fn dumpValues(isel: *Select, which: enum { only_referenced, all }) void { - errdefer |err| @panic(@errorName(err)); - const stderr, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - const zcu = isel.pt.zcu; + const io = zcu.comp.io; const gpa = zcu.gpa; const ip = &zcu.intern_pool; const nav = ip.getNav(isel.nav_index); + errdefer |err| @panic(@errorName(err)); + + const stderr_writer = io.lockStderrWriter(&.{}) catch return; + defer io.unlockStderrWriter(); + const stderr = &stderr_writer.interface; + var reverse_live_values: std.AutoArrayHashMapUnmanaged(Value.Index, std.ArrayList(Air.Inst.Index)) = .empty; defer { for (reverse_live_values.values()) |*list| list.deinit(gpa); diff --git a/src/crash_report.zig b/src/crash_report.zig index 76503fdc53..f23806b758 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -97,17 +97,18 @@ fn dumpCrashContext() Io.Writer.Error!void { // and the actual panic printing, which would be quite confusing. 
const stderr = std.debug.lockStderrWriter(&.{}); defer std.debug.unlockStderrWriter(); + const w = &stderr.interface; - try stderr.interface.writeAll("Compiler crash context:\n"); + try w.writeAll("Compiler crash context:\n"); if (CodegenFunc.current) |*cg| { const func_nav = cg.zcu.funcInfo(cg.func_index).owner_nav; const func_fqn = cg.zcu.intern_pool.getNav(func_nav).fqn; - try stderr.interface.print("Generating function '{f}'\n\n", .{func_fqn.fmt(&cg.zcu.intern_pool)}); + try w.print("Generating function '{f}'\n\n", .{func_fqn.fmt(&cg.zcu.intern_pool)}); } else if (AnalyzeBody.current) |anal| { - try dumpCrashContextSema(anal, &stderr.interface, &S.crash_heap); + try dumpCrashContextSema(anal, w, &S.crash_heap); } else { - try stderr.interface.writeAll("(no context)\n\n"); + try w.writeAll("(no context)\n\n"); } } fn dumpCrashContextSema(anal: *AnalyzeBody, stderr: *Io.Writer, crash_heap: []u8) Io.Writer.Error!void { diff --git a/src/fmt.zig b/src/fmt.zig index 55a68d1dc8..b9b1cc1363 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -124,7 +124,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! try wip_errors.addZirErrorMessages(zir, tree, source_code, ""); var error_bundle = try wip_errors.toOwnedBundle(""); defer error_bundle.deinit(gpa); - error_bundle.renderToStdErr(.{}, color); + error_bundle.renderToStderr(io, .{}, color); process.exit(2); } } else { @@ -138,7 +138,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! try wip_errors.addZoirErrorMessages(zoir, tree, source_code, ""); var error_bundle = try wip_errors.toOwnedBundle(""); defer error_bundle.deinit(gpa); - error_bundle.renderToStdErr(.{}, color); + error_bundle.renderToStderr(io, .{}, color); process.exit(2); } } @@ -319,7 +319,7 @@ fn fmtPathFile( try wip_errors.addZirErrorMessages(zir, tree, source_code, file_path); var error_bundle = try wip_errors.toOwnedBundle(""); defer error_bundle.deinit(gpa); - error_bundle.renderToStdErr(.{}, fmt.color); + error_bundle.renderToStderr(io, .{}, fmt.color); fmt.any_error = true; } }, @@ -334,7 +334,7 @@ fn fmtPathFile( try wip_errors.addZoirErrorMessages(zoir, tree, source_code, file_path); var error_bundle = try wip_errors.toOwnedBundle(""); defer error_bundle.deinit(gpa); - error_bundle.renderToStdErr(.{}, fmt.color); + error_bundle.renderToStderr(io, .{}, fmt.color); fmt.any_error = true; } }, diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig index 05a9de71e7..a76fec9237 100644 --- a/src/libs/mingw.zig +++ b/src/libs/mingw.zig @@ -312,11 +312,17 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { const include_dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "mingw", "def-include" }); - if (comp.verbose_cc) print: { - var stderr, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - nosuspend stderr.print("def file: {s}\n", .{def_file_path}) catch break :print; - nosuspend stderr.print("include dir: {s}\n", .{include_dir}) catch break :print; + if (comp.verbose_cc) { + var buffer: [256]u8 = undefined; + const stderr = try io.lockStderrWriter(&buffer); + defer io.unlockStderrWriter(); + const w = &stderr.interface; + w.print("def file: {s}\n", .{def_file_path}) catch |err| switch (err) { + error.WriteFailed => return stderr.err.?, + }; + w.print("include dir: {s}\n", .{include_dir}) catch |err| switch (err) { + error.WriteFailed => return stderr.err.?, + }; } try aro_comp.search_path.append(gpa, .{ .path = include_dir, .kind = .normal }); @@ -333,11 +339,13 @@ 
pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { if (aro_comp.diagnostics.output.to_list.messages.items.len != 0) { var buffer: [64]u8 = undefined; - const w, const ttyconf = std.debug.lockStderrWriter(&buffer); - defer std.debug.unlockStderrWriter(); + const stderr = try io.lockStderrWriter(&buffer); + defer io.unlockStderrWriter(); for (aro_comp.diagnostics.output.to_list.messages.items) |msg| { if (msg.kind == .@"fatal error" or msg.kind == .@"error") { - msg.write(w, ttyconf, true) catch {}; + msg.write(&stderr.interface, stderr.mode, true) catch |err| switch (err) { + error.WriteFailed => return stderr.err.?, + }; return error.AroPreprocessorFailed; } } @@ -357,8 +365,9 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { error.OutOfMemory => |e| return e, error.ParseError => { var buffer: [64]u8 = undefined; - const w, _ = std.debug.lockStderrWriter(&buffer); - defer std.debug.unlockStderrWriter(); + const stderr = try io.lockStderrWriter(&buffer); + defer io.unlockStderrWriter(); + const w = &stderr.interface; try w.writeAll("error: "); try def_diagnostics.writeMsg(w, input); try w.writeByte('\n'); diff --git a/src/libs/mingw/def.zig b/src/libs/mingw/def.zig index 24dc95c13c..9a105b6182 100644 --- a/src/libs/mingw/def.zig +++ b/src/libs/mingw/def.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; pub const ModuleDefinitionType = enum { mingw, @@ -663,7 +664,9 @@ test parse { \\ ; - try testParse(.AMD64, source, "foo.dll", &[_]ModuleDefinition.Export{ + const io = std.testing.io; + + try testParse(io, .AMD64, source, "foo.dll", &[_]ModuleDefinition.Export{ .{ .name = "foo", .mangled_symbol_name = null, @@ -743,7 +746,7 @@ test parse { }, }); - try testParse(.I386, source, "foo.dll", &[_]ModuleDefinition.Export{ + try testParse(io, .I386, source, "foo.dll", &[_]ModuleDefinition.Export{ .{ .name = "_foo", .mangled_symbol_name = null, @@ -823,7 +826,7 @@ test parse { }, }); - try testParse(.ARMNT, source, "foo.dll", &[_]ModuleDefinition.Export{ + try testParse(io, .ARMNT, source, "foo.dll", &[_]ModuleDefinition.Export{ .{ .name = "foo", .mangled_symbol_name = null, @@ -903,7 +906,7 @@ test parse { }, }); - try testParse(.ARM64, source, "foo.dll", &[_]ModuleDefinition.Export{ + try testParse(io, .ARM64, source, "foo.dll", &[_]ModuleDefinition.Export{ .{ .name = "foo", .mangled_symbol_name = null, @@ -997,7 +1000,9 @@ test "ntdll" { \\RtlActivateActivationContextUnsafeFast@0 ; - try testParse(.AMD64, source, "ntdll.dll", &[_]ModuleDefinition.Export{ + const io = std.testing.io; + + try testParse(io, .AMD64, source, "ntdll.dll", &[_]ModuleDefinition.Export{ .{ .name = "RtlDispatchAPC@12", .mangled_symbol_name = null, @@ -1023,15 +1028,22 @@ test "ntdll" { }); } -fn testParse(machine_type: std.coff.IMAGE.FILE.MACHINE, source: [:0]const u8, expected_module_name: []const u8, expected_exports: []const ModuleDefinition.Export) !void { +fn testParse( + io: Io, + machine_type: std.coff.IMAGE.FILE.MACHINE, + source: [:0]const u8, + expected_module_name: []const u8, + expected_exports: []const ModuleDefinition.Export, +) !void { var diagnostics: Diagnostics = undefined; const module = parse(std.testing.allocator, source, machine_type, .mingw, &diagnostics) catch |err| switch (err) { error.OutOfMemory => |e| return e, error.ParseError => { - const stderr, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - try diagnostics.writeMsg(stderr, source); - try stderr.writeByte('\n'); + const stderr = try 
io.lockStderrWriter(&.{}); + defer io.unlockStderrWriter(); + const w = &stderr.interface; + try diagnostics.writeMsg(w, source); + try w.writeByte('\n'); return err; }, }; diff --git a/src/link.zig b/src/link.zig index e37ba98cd9..a4729f296c 100644 --- a/src/link.zig +++ b/src/link.zig @@ -2246,7 +2246,7 @@ fn resolvePathInputLib( var error_bundle = try wip_errors.toOwnedBundle(""); defer error_bundle.deinit(gpa); - error_bundle.renderToStdErr(.{}, color); + error_bundle.renderToStderr(io, .{}, color); std.process.exit(1); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 8954266e3f..d379669613 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -4,6 +4,7 @@ const builtin = @import("builtin"); const native_endian = builtin.cpu.arch.endian(); const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const log = std.log.scoped(.link); @@ -2377,10 +2378,16 @@ pub fn deleteExport(coff: *Coff, exported: Zcu.Exported, name: InternPool.NullTe _ = name; } -pub fn dump(coff: *Coff, tid: Zcu.PerThread.Id) void { - const w, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - coff.printNode(tid, w, .root, 0) catch {}; +pub fn dump(coff: *Coff, tid: Zcu.PerThread.Id) Io.Cancelable!void { + const comp = coff.base.comp; + const io = comp.io; + var buffer: [512]u8 = undefined; + const stderr = try io.lockStderrWriter(&buffer); + defer io.unlockStderrWriter(); + const w = &stderr.interface; + coff.printNode(tid, w, .root, 0) catch |err| switch (err) { + error.WriteFailed => return stderr.err.?, + }; } pub fn printNode( diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig index fc4794a7b0..3c511f1ee9 100644 --- a/src/link/Elf2.zig +++ b/src/link/Elf2.zig @@ -3729,10 +3729,16 @@ pub fn deleteExport(elf: *Elf, exported: Zcu.Exported, name: InternPool.NullTerm _ = name; } -pub fn dump(elf: *Elf, tid: Zcu.PerThread.Id) void { - const w, _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); - elf.printNode(tid, w, .root, 0) catch {}; +pub fn dump(elf: *Elf, tid: Zcu.PerThread.Id) Io.Cancelable!void { + const comp = elf.base.comp; + const io = comp.io; + var buffer: [512]u8 = undefined; + const stderr = try io.lockStderrWriter(&buffer); + defer io.unlockStderrWriter(); + const w = &stderr.interface; + elf.printNode(tid, w, .root, 0) catch |err| switch (err) { + error.WriteFailed => return stderr.err.?, + }; } pub fn printNode( diff --git a/src/main.zig b/src/main.zig index 115c1b5212..f735f671c6 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4429,9 +4429,9 @@ fn runOrTest( // the error message and invocation below. if (process.can_execv and arg_mode == .run) { // execv releases the locks; no need to destroy the Compilation here. 
- _ = std.debug.lockStderrWriter(&.{}); + _ = try io.lockStderrWriter(&.{}); const err = process.execve(gpa, argv.items, &env_map); - std.debug.unlockStderrWriter(); + io.unlockStderrWriter(); try warnAboutForeignBinaries(io, arena, arg_mode, target, link_libc); const cmd = try std.mem.join(arena, " ", argv.items); fatal("the following command failed to execve with '{t}':\n{s}", .{ err, cmd }); @@ -4448,8 +4448,8 @@ fn runOrTest( comp_destroyed.* = true; const term_result = t: { - _ = std.debug.lockStderrWriter(); - defer std.debug.unlockStderrWriter(); + _ = try io.lockStderrWriter(&.{}); + defer io.unlockStderrWriter(); break :t child.spawnAndWait(io); }; const term = term_result catch |err| { @@ -4606,7 +4606,8 @@ fn updateModule(comp: *Compilation, color: Color, prog_node: std.Progress.Node) defer errors.deinit(comp.gpa); if (errors.errorMessageCount() > 0) { - errors.renderToStdErr(.{}, color); + const io = comp.io; + errors.renderToStderr(io, .{}, color); return error.CompileErrorsReported; } } @@ -4659,7 +4660,7 @@ fn cmdTranslateC( return; } else { const color: Color = .auto; - result.errors.renderToStdErr(.{}, color); + result.errors.renderToStderr(io, .{}, color); process.exit(1); } } @@ -5280,7 +5281,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) if (fetch.error_bundle.root_list.items.len > 0) { var errors = try fetch.error_bundle.toOwnedBundle(""); - errors.renderToStdErr(.{}, color); + errors.renderToStderr(io, .{}, color); process.exit(1); } @@ -5412,8 +5413,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) child.stderr_behavior = .Inherit; const term = t: { - _ = std.debug.lockStderrWriter(&.{}); - defer std.debug.unlockStderrWriter(); + _ = try io.lockStderrWriter(&.{}); + defer io.unlockStderrWriter(); break :t child.spawnAndWait(io) catch |err| fatal("failed to spawn build runner {s}: {t}", .{ child_argv.items[0], err }); }; @@ -6212,7 +6213,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void { try wip_errors.init(arena); try wip_errors.addZirErrorMessages(zir, tree, source, display_path); var error_bundle = try wip_errors.toOwnedBundle(""); - error_bundle.renderToStdErr(.{}, color); + error_bundle.renderToStderr(io, .{}, color); if (zir.loweringFailed()) { process.exit(1); } @@ -6283,7 +6284,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void { try wip_errors.init(arena); try wip_errors.addZoirErrorMessages(zoir, tree, source, display_path); var error_bundle = try wip_errors.toOwnedBundle(""); - error_bundle.renderToStdErr(.{}, color); + error_bundle.renderToStderr(io, .{}, color); process.exit(1); } @@ -6557,7 +6558,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { try wip_errors.init(arena); try wip_errors.addZirErrorMessages(old_zir, old_tree, old_source, old_source_path); var error_bundle = try wip_errors.toOwnedBundle(""); - error_bundle.renderToStdErr(.{}, color); + error_bundle.renderToStderr(io, .{}, color); process.exit(1); } @@ -6569,7 +6570,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { try wip_errors.init(arena); try wip_errors.addZirErrorMessages(new_zir, new_tree, new_source, new_source_path); var error_bundle = try wip_errors.toOwnedBundle(""); - error_bundle.renderToStdErr(.{}, color); + error_bundle.renderToStderr(io, .{}, color); process.exit(1); } @@ -7005,7 +7006,7 @@ fn cmdFetch( if (fetch.error_bundle.root_list.items.len > 0) { var errors = try 
fetch.error_bundle.toOwnedBundle(""); - errors.renderToStdErr(.{}, color); + errors.renderToStderr(io, .{}, color); process.exit(1); } @@ -7345,7 +7346,7 @@ fn loadManifest( errdefer ast.deinit(gpa); if (ast.errors.len > 0) { - try std.zig.printAstErrorsToStderr(gpa, ast, Package.Manifest.basename, options.color); + try std.zig.printAstErrorsToStderr(gpa, io, ast, Package.Manifest.basename, options.color); process.exit(2); } @@ -7362,7 +7363,7 @@ fn loadManifest( var error_bundle = try wip_errors.toOwnedBundle(""); defer error_bundle.deinit(gpa); - error_bundle.renderToStdErr(.{}, options.color); + error_bundle.renderToStderr(io, .{}, options.color); process.exit(2); } -- cgit v1.2.3
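A minimal sketch (not part of the patch series) of the two stderr conventions these commits converge on, based only on the APIs shown in the hunks above (`Io.lockStderrWriter`/`unlockStderrWriter` and `ErrorBundle.renderToStderr`); the `reportErrors` function, its parameters, and the buffer size are illustrative assumptions:

const std = @import("std");
const Io = std.Io;

// Hypothetical caller: error bundles render through an explicit Io
// instance, and ad-hoc diagnostics lock stderr via `io` (writing through
// `stderr.interface`) rather than through std.debug.
fn reportErrors(io: Io, error_bundle: std.zig.ErrorBundle) !void {
    // ErrorBundle rendering now takes the Io instance and is fallible.
    try error_bundle.renderToStderr(io, .{}, .auto);

    // Direct diagnostics: lock stderr through io, write via the interface.
    var buffer: [256]u8 = undefined;
    const stderr = try io.lockStderrWriter(&buffer);
    defer io.unlockStderrWriter();
    try stderr.interface.writeAll("done\n");
}
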