From aafddc2ea13e40a8262d9378aeca2e097a37ac03 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 Dec 2025 19:08:37 -0800 Subject: update all occurrences of close() to close(io) --- lib/std/debug/ElfFile.zig | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) (limited to 'lib/std/debug/ElfFile.zig') diff --git a/lib/std/debug/ElfFile.zig b/lib/std/debug/ElfFile.zig index e81943ab49..92bcca1bcf 100644 --- a/lib/std/debug/ElfFile.zig +++ b/lib/std/debug/ElfFile.zig @@ -1,5 +1,13 @@ //! A helper type for loading an ELF file and collecting its DWARF debug information, unwind //! information, and symbol table. +const ElfFile = @This(); + +const std = @import("std"); +const Io = std.Io; +const Endian = std.builtin.Endian; +const Dwarf = std.debug.Dwarf; +const Allocator = std.mem.Allocator; +const elf = std.elf; is_64: bool, endian: Endian, @@ -358,10 +366,17 @@ const Section = struct { const Array = std.enums.EnumArray(Section.Id, ?Section); }; -fn loadSeparateDebugFile(arena: Allocator, main_loaded: *LoadInnerResult, opt_crc: ?u32, comptime fmt: []const u8, args: anytype) Allocator.Error!?[]align(std.heap.page_size_min) const u8 { +fn loadSeparateDebugFile( + arena: Allocator, + io: Io, + main_loaded: *LoadInnerResult, + opt_crc: ?u32, + comptime fmt: []const u8, + args: anytype, +) Allocator.Error!?[]align(std.heap.page_size_min) const u8 { const path = try std.fmt.allocPrint(arena, fmt, args); const elf_file = std.fs.cwd().openFile(path, .{}) catch return null; - defer elf_file.close(); + defer elf_file.close(io); const result = loadInner(arena, elf_file, opt_crc) catch |err| switch (err) { error.OutOfMemory => |e| return e, @@ -529,10 +544,3 @@ fn loadInner( .mapped_mem = mapped_mem, }; } - -const std = @import("std"); -const Endian = std.builtin.Endian; -const Dwarf = std.debug.Dwarf; -const ElfFile = @This(); -const Allocator = std.mem.Allocator; -const elf = std.elf; -- cgit v1.2.3 From 3204fb756980c19b7a95534acdd7a1bba837fbc3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 6 Dec 2025 17:23:07 -0800 Subject: update all occurrences of std.fs.File to std.Io.File --- lib/compiler/aro/aro/Compilation.zig | 4 +- lib/compiler/aro/aro/Driver.zig | 15 +++---- lib/compiler/aro/aro/Preprocessor.zig | 3 +- lib/compiler/aro/backend/Assembly.zig | 3 +- lib/compiler/aro/main.zig | 3 +- lib/compiler/build_runner.zig | 6 +-- lib/compiler/libc.zig | 3 +- lib/compiler/objcopy.zig | 13 +++--- lib/compiler/reduce.zig | 3 +- lib/compiler/resinator/cli.zig | 3 +- lib/compiler/resinator/errors.zig | 8 ++-- lib/compiler/resinator/main.zig | 10 ++--- lib/compiler/resinator/utils.zig | 4 +- lib/compiler/test_runner.zig | 10 ++--- lib/compiler/translate-c/main.zig | 13 +++--- lib/std/Build.zig | 6 +-- lib/std/Build/Step.zig | 2 +- lib/std/Build/Step/ObjCopy.zig | 2 +- lib/std/Build/Step/Run.zig | 21 +++++----- lib/std/Io.zig | 2 +- lib/std/Io/Writer.zig | 9 ++-- lib/std/Io/test.zig | 2 +- lib/std/Io/tty.zig | 7 ++-- lib/std/Progress.zig | 18 ++++---- lib/std/Random/benchmark.zig | 6 ++- lib/std/Thread.zig | 4 +- lib/std/crypto/Certificate/Bundle.zig | 8 ++-- lib/std/crypto/Certificate/Bundle/macos.zig | 2 +- lib/std/crypto/benchmark.zig | 8 ++-- lib/std/debug.zig | 6 +-- lib/std/debug/ElfFile.zig | 4 +- lib/std/debug/Info.zig | 2 +- lib/std/debug/Pdb.zig | 2 +- lib/std/debug/SelfInfo/Windows.zig | 2 +- lib/std/debug/simple_panic.zig | 2 +- lib/std/dynamic_library.zig | 2 +- lib/std/fs/test.zig | 8 ++-- lib/std/hash/benchmark.zig | 5 ++- lib/std/http.zig | 2 +- 
lib/std/pdb.zig | 2 +- lib/std/posix/test.zig | 18 ++++---- lib/std/process/Child.zig | 4 +- lib/std/unicode/throughput_test.zig | 6 +-- lib/std/zig/Zir.zig | 8 ++-- lib/std/zig/Zoir.zig | 15 +++---- lib/std/zig/llvm/Builder.zig | 4 +- lib/std/zig/perf_test.zig | 3 +- lib/std/zip.zig | 5 ++- src/Compilation.zig | 4 +- src/Package/Fetch.zig | 16 +++---- src/Package/Fetch/git.zig | 22 +++++----- src/Zcu.zig | 8 ++-- src/Zcu/PerThread.zig | 6 +-- src/fmt.zig | 16 +++---- src/link.zig | 20 ++++----- src/link/Dwarf.zig | 53 ++++++++++++----------- src/link/Elf.zig | 5 ++- src/link/Elf/Object.zig | 65 +++++++++++++++-------------- src/link/Elf/SharedObject.zig | 37 ++++++++-------- src/link/Elf/file.zig | 35 ++++++++-------- src/link/Elf2.zig | 46 ++++++++++---------- src/link/MachO.zig | 23 +++++----- src/link/MachO/CodeSignature.zig | 8 ++-- src/link/MachO/fat.zig | 12 +++--- src/link/MachO/file.zig | 3 +- src/link/MachO/hasher.zig | 22 +++++----- src/link/MachO/uuid.zig | 19 ++++----- src/link/MappedFile.zig | 25 ++++++----- src/link/Wasm.zig | 5 ++- src/link/tapi.zig | 10 ++--- 70 files changed, 399 insertions(+), 359 deletions(-) (limited to 'lib/std/debug/ElfFile.zig') diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index 9fb8123146..06b27c33bb 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -1646,7 +1646,7 @@ fn addSourceFromPathExtra(comp: *Compilation, path: []const u8, kind: Source.Kin return comp.addSourceFromFile(file, path, kind); } -pub fn addSourceFromFile(comp: *Compilation, file: std.fs.File, path: []const u8, kind: Source.Kind) !Source { +pub fn addSourceFromFile(comp: *Compilation, file: Io.File, path: []const u8, kind: Source.Kind) !Source { const contents = try comp.getFileContents(file, .unlimited); errdefer comp.gpa.free(contents); return comp.addSourceFromOwnedBuffer(path, contents, kind); @@ -1980,7 +1980,7 @@ fn getPathContents(comp: *Compilation, path: []const u8, limit: Io.Limit) ![]u8 return comp.getFileContents(file, limit); } -fn getFileContents(comp: *Compilation, file: std.fs.File, limit: Io.Limit) ![]u8 { +fn getFileContents(comp: *Compilation, file: Io.File, limit: Io.Limit) ![]u8 { var file_buf: [4096]u8 = undefined; var file_reader = file.reader(comp.io, &file_buf); diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig index 888ade2be4..f933e3ce52 100644 --- a/lib/compiler/aro/aro/Driver.zig +++ b/lib/compiler/aro/aro/Driver.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const mem = std.mem; const Allocator = mem.Allocator; const process = std.process; @@ -1061,7 +1062,7 @@ pub fn printDiagnosticsStats(d: *Driver) void { } } -pub fn detectConfig(d: *Driver, file: std.fs.File) std.Io.tty.Config { +pub fn detectConfig(d: *Driver, file: Io.File) std.Io.tty.Config { if (d.diagnostics.color == false) return .no_color; const force_color = d.diagnostics.color == true; @@ -1109,7 +1110,7 @@ pub fn main(d: *Driver, tc: *Toolchain, args: []const []const u8, comptime fast_ defer macro_buf.deinit(d.comp.gpa); var stdout_buf: [256]u8 = undefined; - var stdout = std.fs.File.stdout().writer(&stdout_buf); + var stdout = Io.File.stdout().writer(&stdout_buf); if (parseArgs(d, &stdout.interface, ¯o_buf, args) catch |er| switch (er) { error.WriteFailed => return d.fatal("failed to write to stdout: {s}", .{errorDescription(er)}), error.OutOfMemory => return error.OutOfMemory, @@ -1329,7 +1330,7 @@ fn processSource( d.comp.cwd.createFile(path, .{}) 
catch |er| return d.fatal("unable to create dependency file '{s}': {s}", .{ path, errorDescription(er) }) else - std.fs.File.stdout(); + Io.File.stdout(); defer if (dep_file_name != null) file.close(io); var file_writer = file.writer(&writer_buf); @@ -1354,7 +1355,7 @@ fn processSource( d.comp.cwd.createFile(some, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ some, errorDescription(er) }) else - std.fs.File.stdout(); + Io.File.stdout(); defer if (d.output_name != null) file.close(io); var file_writer = file.writer(&writer_buf); @@ -1369,7 +1370,7 @@ fn processSource( defer tree.deinit(); if (d.verbose_ast) { - var stdout = std.fs.File.stdout().writer(&writer_buf); + var stdout = Io.File.stdout().writer(&writer_buf); tree.dump(d.detectConfig(stdout.file), &stdout.interface) catch {}; } @@ -1433,7 +1434,7 @@ fn processSource( defer ir.deinit(gpa); if (d.verbose_ir) { - var stdout = std.fs.File.stdout().writer(&writer_buf); + var stdout = Io.File.stdout().writer(&writer_buf); ir.dump(gpa, d.detectConfig(stdout.file), &stdout.interface) catch {}; } @@ -1499,7 +1500,7 @@ pub fn invokeLinker(d: *Driver, tc: *Toolchain, comptime fast_exit: bool) Compil if (d.verbose_linker_args) { var stdout_buf: [4096]u8 = undefined; - var stdout = std.fs.File.stdout().writer(&stdout_buf); + var stdout = Io.File.stdout().writer(&stdout_buf); dumpLinkerArgs(&stdout.interface, argv.items) catch { return d.fatal("unable to dump linker args: {s}", .{errorDescription(stdout.err.?)}); }; diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig index 6bd1206aff..d47727cbf0 100644 --- a/lib/compiler/aro/aro/Preprocessor.zig +++ b/lib/compiler/aro/aro/Preprocessor.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const mem = std.mem; const Allocator = mem.Allocator; const assert = std.debug.assert; @@ -1068,7 +1069,7 @@ fn verboseLog(pp: *Preprocessor, raw: RawToken, comptime fmt: []const u8, args: const line_col = source.lineCol(.{ .id = raw.source, .line = raw.line, .byte_offset = raw.start }); var stderr_buf: [4096]u8 = undefined; - var stderr = std.fs.File.stderr().writer(&stderr_buf); + var stderr = Io.File.stderr().writer(&stderr_buf); const w = &stderr.interface; w.print("{s}:{d}:{d}: ", .{ source.path, line_col.line_no, line_col.col }) catch return; diff --git a/lib/compiler/aro/backend/Assembly.zig b/lib/compiler/aro/backend/Assembly.zig index d0d14bdd77..80143bf97f 100644 --- a/lib/compiler/aro/backend/Assembly.zig +++ b/lib/compiler/aro/backend/Assembly.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; data: []const u8, @@ -11,7 +12,7 @@ pub fn deinit(self: *const Assembly, gpa: Allocator) void { gpa.free(self.text); } -pub fn writeToFile(self: Assembly, file: std.fs.File) !void { +pub fn writeToFile(self: Assembly, file: Io.File) !void { var file_writer = file.writer(&.{}); var buffers = [_][]const u8{ self.data, self.text }; diff --git a/lib/compiler/aro/main.zig b/lib/compiler/aro/main.zig index d3655e43da..66c8add4a3 100644 --- a/lib/compiler/aro/main.zig +++ b/lib/compiler/aro/main.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const Allocator = mem.Allocator; const mem = std.mem; const process = std.process; @@ -50,7 +51,7 @@ pub fn main() u8 { defer gpa.free(aro_name); var stderr_buf: [1024]u8 = undefined; - var stderr = std.fs.File.stderr().writer(&stderr_buf); + var stderr = Io.File.stderr().writer(&stderr_buf); var diagnostics: Diagnostics = .{ 
.output = .{ .to_writer = .{ .color = .detect(stderr.file), diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 2c8d71e3e2..6e0e2d8eca 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -7,7 +7,7 @@ const assert = std.debug.assert; const fmt = std.fmt; const mem = std.mem; const process = std.process; -const File = std.fs.File; +const File = std.Io.File; const Step = std.Build.Step; const Watch = std.Build.Watch; const WebServer = std.Build.WebServer; @@ -1845,9 +1845,9 @@ fn createModuleDependenciesForStep(step: *Step) Allocator.Error!void { } var stdio_buffer_allocation: [256]u8 = undefined; -var stdout_writer_allocation: std.fs.File.Writer = undefined; +var stdout_writer_allocation: Io.File.Writer = undefined; fn initStdoutWriter() *Writer { - stdout_writer_allocation = std.fs.File.stdout().writerStreaming(&stdio_buffer_allocation); + stdout_writer_allocation = Io.File.stdout().writerStreaming(&stdio_buffer_allocation); return &stdout_writer_allocation.interface; } diff --git a/lib/compiler/libc.zig b/lib/compiler/libc.zig index a18a7a0e06..142b87062e 100644 --- a/lib/compiler/libc.zig +++ b/lib/compiler/libc.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const mem = std.mem; const LibCInstallation = std.zig.LibCInstallation; @@ -39,7 +40,7 @@ pub fn main() !void { var input_file: ?[]const u8 = null; var target_arch_os_abi: []const u8 = "native"; var print_includes: bool = false; - var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(&stdout_buffer); const stdout = &stdout_writer.interface; { var i: usize = 2; diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index 1608c121b1..c360ea8df0 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -1,12 +1,13 @@ const builtin = @import("builtin"); + const std = @import("std"); +const Io = std.Io; const mem = std.mem; const fs = std.fs; const elf = std.elf; const Allocator = std.mem.Allocator; -const File = std.fs.File; +const File = std.Io.File; const assert = std.debug.assert; - const fatal = std.process.fatal; const Server = std.zig.Server; @@ -56,7 +57,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void fatal("unexpected positional argument: '{s}'", .{arg}); } } else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { - return std.fs.File.stdout().writeAll(usage); + return Io.File.stdout().writeAll(usage); } else if (mem.eql(u8, arg, "-O") or mem.eql(u8, arg, "--output-target")) { i += 1; if (i >= args.len) fatal("expected another argument after '{s}'", .{arg}); @@ -177,7 +178,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void } }; - const mode = if (out_fmt != .elf or only_keep_debug) fs.File.default_mode else stat.mode; + const mode = if (out_fmt != .elf or only_keep_debug) Io.File.default_mode else stat.mode; var output_file = try fs.cwd().createFile(output, .{ .mode = mode }); defer output_file.close(io); @@ -221,8 +222,8 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void try out.end(); if (listen) { - var stdin_reader = fs.File.stdin().reader(io, &stdin_buffer); - var stdout_writer = fs.File.stdout().writer(&stdout_buffer); + var stdin_reader = Io.File.stdin().reader(io, &stdin_buffer); + var stdout_writer = Io.File.stdout().writer(&stdout_buffer); var server = try Server.init(.{ .in = &stdin_reader.interface, .out = &stdout_writer.interface, diff --git 
a/lib/compiler/reduce.zig b/lib/compiler/reduce.zig index 28305e801b..bbd3d172b4 100644 --- a/lib/compiler/reduce.zig +++ b/lib/compiler/reduce.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -68,7 +69,7 @@ pub fn main() !void { const arg = args[i]; if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { - const stdout = std.fs.File.stdout(); + const stdout = Io.File.stdout(); try stdout.writeAll(usage); return std.process.cleanExit(); } else if (mem.eql(u8, arg, "--")) { diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig index ffaa62e7ca..17da5c7b75 100644 --- a/lib/compiler/resinator/cli.zig +++ b/lib/compiler/resinator/cli.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const code_pages = @import("code_pages.zig"); const SupportedCodePage = code_pages.SupportedCodePage; const lang = @import("lang.zig"); @@ -169,7 +170,7 @@ pub const Options = struct { coff_options: cvtres.CoffOptions = .{}, pub const IoSource = union(enum) { - stdio: std.fs.File, + stdio: Io.File, filename: []const u8, }; pub const AutoIncludes = enum { any, msvc, gnu, none }; diff --git a/lib/compiler/resinator/errors.zig b/lib/compiler/resinator/errors.zig index 8509aa610f..61fd00e683 100644 --- a/lib/compiler/resinator/errors.zig +++ b/lib/compiler/resinator/errors.zig @@ -169,9 +169,9 @@ pub const ErrorDetails = struct { filename_string_index: FilenameStringIndex, pub const FilenameStringIndex = std.meta.Int(.unsigned, 32 - @bitSizeOf(FileOpenErrorEnum)); - pub const FileOpenErrorEnum = std.meta.FieldEnum(std.fs.File.OpenError || std.fs.File.StatError); + pub const FileOpenErrorEnum = std.meta.FieldEnum(Io.File.OpenError || Io.File.StatError); - pub fn enumFromError(err: (std.fs.File.OpenError || std.fs.File.StatError)) FileOpenErrorEnum { + pub fn enumFromError(err: (Io.File.OpenError || Io.File.StatError)) FileOpenErrorEnum { return switch (err) { inline else => |e| @field(ErrorDetails.FileOpenError.FileOpenErrorEnum, @errorName(e)), }; @@ -1094,8 +1094,8 @@ const CorrespondingLines = struct { last_byte: u8 = 0, at_eof: bool = false, span: SourceMappings.CorrespondingSpan, - file: std.fs.File, - file_reader: std.fs.File.Reader, + file: Io.File, + file_reader: Io.File.Reader, code_page: SupportedCodePage, pub fn init( diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index 42308a8987..e171d8199c 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -45,7 +45,7 @@ pub fn main() !void { } var stdout_buffer: [1024]u8 = undefined; - var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(&stdout_buffer); const stdout = &stdout_writer.interface; var error_handler: ErrorHandler = switch (zig_integration) { true => .{ @@ -447,8 +447,8 @@ const IoStream = struct { } pub const Source = union(enum) { - file: std.fs.File, - stdio: std.fs.File, + file: Io.File, + stdio: Io.File, memory: std.ArrayList(u8), /// The source has been closed and any usage of the Source in this state is illegal (except deinit). 
closed: void, @@ -500,10 +500,10 @@ const IoStream = struct { } pub const Writer = union(enum) { - file: std.fs.File.Writer, + file: Io.File.Writer, allocating: std.Io.Writer.Allocating, - pub const Error = Allocator.Error || std.fs.File.WriteError; + pub const Error = Allocator.Error || Io.File.WriteError; pub fn interface(this: *@This()) *std.Io.Writer { return switch (this.*) { diff --git a/lib/compiler/resinator/utils.zig b/lib/compiler/resinator/utils.zig index f8080539cb..e8565f3af9 100644 --- a/lib/compiler/resinator/utils.zig +++ b/lib/compiler/resinator/utils.zig @@ -32,8 +32,8 @@ pub fn openFileNotDir( cwd: std.fs.Dir, io: Io, path: []const u8, - flags: std.fs.File.OpenFlags, -) (std.fs.File.OpenError || std.fs.File.StatError)!std.fs.File { + flags: Io.File.OpenFlags, +) (Io.File.OpenError || Io.File.StatError)!Io.File { const file = try cwd.openFile(io, path, flags); errdefer file.close(io); // https://github.com/ziglang/zig/issues/5732 diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index 72ed3e7677..07a6724ec0 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -74,8 +74,8 @@ pub fn main() void { fn mainServer() !void { @disableInstrumentation(); - var stdin_reader = std.fs.File.stdin().readerStreaming(runner_threaded_io.io(), &stdin_buffer); - var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer); + var stdin_reader = Io.File.stdin().readerStreaming(runner_threaded_io.io(), &stdin_buffer); + var stdout_writer = Io.File.stdout().writerStreaming(&stdout_buffer); var server = try std.zig.Server.init(.{ .in = &stdin_reader.interface, .out = &stdout_writer.interface, @@ -228,7 +228,7 @@ fn mainTerminal() void { .root_name = "Test", .estimated_total_items = test_fn_list.len, }); - const have_tty = std.fs.File.stderr().isTty(); + const have_tty = Io.File.stderr().isTty(); var leaks: usize = 0; for (test_fn_list, 0..) |test_fn, i| { @@ -318,7 +318,7 @@ pub fn log( /// work-in-progress backends can handle it. pub fn mainSimple() anyerror!void { @disableInstrumentation(); - // is the backend capable of calling `std.fs.File.writeAll`? + // is the backend capable of calling `Io.File.writeAll`? 
const enable_write = switch (builtin.zig_backend) { .stage2_aarch64, .stage2_riscv64 => true, else => false, @@ -334,7 +334,7 @@ pub fn mainSimple() anyerror!void { var failed: u64 = 0; // we don't want to bring in File and Writer if the backend doesn't support it - const stdout = if (enable_write) std.fs.File.stdout() else {}; + const stdout = if (enable_write) Io.File.stdout() else {}; for (builtin.test_functions) |test_fn| { if (enable_write) { diff --git a/lib/compiler/translate-c/main.zig b/lib/compiler/translate-c/main.zig index 0c72298b30..830c70e424 100644 --- a/lib/compiler/translate-c/main.zig +++ b/lib/compiler/translate-c/main.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const mem = std.mem; const process = std.process; @@ -34,7 +35,7 @@ pub fn main() u8 { } var stderr_buf: [1024]u8 = undefined; - var stderr = std.fs.File.stderr().writer(&stderr_buf); + var stderr = Io.File.stderr().writer(&stderr_buf); var diagnostics: aro.Diagnostics = switch (zig_integration) { false => .{ .output = .{ .to_writer = .{ .color = .detect(stderr.file), @@ -99,7 +100,7 @@ fn serveErrorBundle(arena: std.mem.Allocator, diagnostics: *const aro.Diagnostic "translation failure", ); var stdout_buffer: [1024]u8 = undefined; - var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(&stdout_buffer); var server: std.zig.Server = .{ .out = &stdout_writer.interface, .in = undefined, @@ -129,13 +130,13 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration args[i] = arg; if (mem.eql(u8, arg, "--help")) { var stdout_buf: [512]u8 = undefined; - var stdout = std.fs.File.stdout().writer(&stdout_buf); + var stdout = Io.File.stdout().writer(&stdout_buf); try stdout.interface.print(usage, .{args[0]}); try stdout.interface.flush(); return; } else if (mem.eql(u8, arg, "--version")) { var stdout_buf: [512]u8 = undefined; - var stdout = std.fs.File.stdout().writer(&stdout_buf); + var stdout = Io.File.stdout().writer(&stdout_buf); // TODO add version try stdout.interface.writeAll("0.0.0-dev\n"); try stdout.interface.flush(); @@ -228,7 +229,7 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration d.comp.cwd.createFile(path, .{}) catch |er| return d.fatal("unable to create dependency file '{s}': {s}", .{ path, aro.Driver.errorDescription(er) }) else - std.fs.File.stdout(); + Io.File.stdout(); defer if (dep_file_name != null) file.close(io); var file_writer = file.writer(&out_buf); @@ -246,7 +247,7 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration var close_out_file = false; var out_file_path: []const u8 = ""; - var out_file: std.fs.File = .stdout(); + var out_file: Io.File = .stdout(); defer if (close_out_file) out_file.close(io); if (d.output_name) |path| blk: { diff --git a/lib/std/Build.zig b/lib/std/Build.zig index fcd94ce134..2755b895c2 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1,3 +1,4 @@ +const Build = @This(); const builtin = @import("builtin"); const std = @import("std.zig"); @@ -9,13 +10,12 @@ const panic = std.debug.panic; const assert = debug.assert; const log = std.log; const StringHashMap = std.StringHashMap; -const Allocator = mem.Allocator; +const Allocator = std.mem.Allocator; const Target = std.Target; const process = std.process; const EnvMap = std.process.EnvMap; -const File = fs.File; +const File = std.Io.File; const Sha256 = std.crypto.hash.sha2.Sha256; -const Build = @This(); const 
ArrayList = std.ArrayList; pub const Cache = @import("Build/Cache.zig"); diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index acde47071d..2b1e6d8ace 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -667,7 +667,7 @@ fn clearZigProcess(s: *Step, gpa: Allocator) void { } } -fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void { +fn sendMessage(file: Io.File, tag: std.zig.Client.Message.Tag) !void { const header: std.zig.Client.Message.Header = .{ .tag = tag, .bytes_len = 0, diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig index b5f058ddfc..4aa1c0a9dc 100644 --- a/lib/std/Build/Step/ObjCopy.zig +++ b/lib/std/Build/Step/ObjCopy.zig @@ -3,7 +3,7 @@ const ObjCopy = @This(); const Allocator = std.mem.Allocator; const ArenaAllocator = std.heap.ArenaAllocator; -const File = std.fs.File; +const File = std.Io.File; const InstallDir = std.Build.InstallDir; const Step = std.Build.Step; const elf = std.elf; diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index e66e30cc79..1df6f42a35 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1,15 +1,16 @@ -const std = @import("std"); +const Run = @This(); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const Build = std.Build; -const Step = Build.Step; +const Step = std.Build.Step; const fs = std.fs; const mem = std.mem; const process = std.process; -const EnvMap = process.EnvMap; +const EnvMap = std.process.EnvMap; const assert = std.debug.assert; -const Path = Build.Cache.Path; - -const Run = @This(); +const Path = std.Build.Cache.Path; pub const base_id: Step.Id = .run; @@ -2095,7 +2096,7 @@ pub const CachedTestMetadata = struct { } }; -fn requestNextTest(in: fs.File, metadata: *TestMetadata, sub_prog_node: *?std.Progress.Node) !void { +fn requestNextTest(in: Io.File, metadata: *TestMetadata, sub_prog_node: *?std.Progress.Node) !void { while (metadata.next_index < metadata.names.len) { const i = metadata.next_index; metadata.next_index += 1; @@ -2114,7 +2115,7 @@ fn requestNextTest(in: fs.File, metadata: *TestMetadata, sub_prog_node: *?std.Pr } } -fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void { +fn sendMessage(file: Io.File, tag: std.zig.Client.Message.Tag) !void { const header: std.zig.Client.Message.Header = .{ .tag = tag, .bytes_len = 0, @@ -2125,7 +2126,7 @@ fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void { }; } -fn sendRunTestMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag, index: u32) !void { +fn sendRunTestMessage(file: Io.File, tag: std.zig.Client.Message.Tag, index: u32) !void { const header: std.zig.Client.Message.Header = .{ .tag = tag, .bytes_len = 4, @@ -2140,7 +2141,7 @@ fn sendRunTestMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag, index: } fn sendRunFuzzTestMessage( - file: std.fs.File, + file: Io.File, index: u32, kind: std.Build.abi.fuzz.LimitKind, amount_or_instance: u64, diff --git a/lib/std/Io.zig b/lib/std/Io.zig index 0727e3efd3..f783718cef 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -528,7 +528,7 @@ pub fn Poller(comptime StreamEnum: type) type { /// Given an enum, returns a struct with fields of that enum, each field /// representing an I/O stream for polling. 
pub fn PollFiles(comptime StreamEnum: type) type { - return @Struct(.auto, null, std.meta.fieldNames(StreamEnum), &@splat(std.fs.File), &@splat(.{})); + return @Struct(.auto, null, std.meta.fieldNames(StreamEnum), &@splat(Io.File), &@splat(.{})); } test { diff --git a/lib/std/Io/Writer.zig b/lib/std/Io/Writer.zig index 5601293cfb..3f25bc6a26 100644 --- a/lib/std/Io/Writer.zig +++ b/lib/std/Io/Writer.zig @@ -1,7 +1,8 @@ +const Writer = @This(); + const builtin = @import("builtin"); const native_endian = builtin.target.cpu.arch.endian(); -const Writer = @This(); const std = @import("../std.zig"); const assert = std.debug.assert; const Limit = std.Io.Limit; @@ -2837,7 +2838,7 @@ test "discarding sendFile" { const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); defer file.close(io); var r_buffer: [256]u8 = undefined; - var file_writer: std.fs.File.Writer = .init(file, &r_buffer); + var file_writer: File.Writer = .init(file, &r_buffer); try file_writer.interface.writeByte('h'); try file_writer.interface.flush(); @@ -2859,7 +2860,7 @@ test "allocating sendFile" { const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); defer file.close(io); var r_buffer: [2]u8 = undefined; - var file_writer: std.fs.File.Writer = .init(file, &r_buffer); + var file_writer: File.Writer = .init(file, &r_buffer); try file_writer.interface.writeAll("abcd"); try file_writer.interface.flush(); @@ -2883,7 +2884,7 @@ test sendFileReading { const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); defer file.close(io); var r_buffer: [2]u8 = undefined; - var file_writer: std.fs.File.Writer = .init(file, &r_buffer); + var file_writer: File.Writer = .init(file, &r_buffer); try file_writer.interface.writeAll("abcd"); try file_writer.interface.flush(); diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index 9ea2d48ee5..b922acc333 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -10,7 +10,7 @@ const expectError = std.testing.expectError; const DefaultPrng = std.Random.DefaultPrng; const mem = std.mem; const fs = std.fs; -const File = std.fs.File; +const File = std.Io.File; const assert = std.debug.assert; const tmpDir = std.testing.tmpDir; diff --git a/lib/std/Io/tty.zig b/lib/std/Io/tty.zig index 08e0bd71f0..d1f8b576c2 100644 --- a/lib/std/Io/tty.zig +++ b/lib/std/Io/tty.zig @@ -1,9 +1,10 @@ -const std = @import("std"); const builtin = @import("builtin"); -const File = std.fs.File; +const native_os = builtin.os.tag; + +const std = @import("std"); +const File = std.Io.File; const process = std.process; const windows = std.os.windows; -const native_os = builtin.os.tag; pub const Color = enum { black, diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 467d4754ff..d8b22c2db0 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -1,19 +1,21 @@ //! This API is non-allocating, non-fallible, thread-safe, and lock-free. 
+const Progress = @This(); -const std = @import("std"); const builtin = @import("builtin"); +const is_big_endian = builtin.cpu.arch.endian() == .big; +const is_windows = builtin.os.tag == .windows; + +const std = @import("std"); +const Io = std.Io; const windows = std.os.windows; const testing = std.testing; const assert = std.debug.assert; -const Progress = @This(); const posix = std.posix; -const is_big_endian = builtin.cpu.arch.endian() == .big; -const is_windows = builtin.os.tag == .windows; const Writer = std.Io.Writer; /// `null` if the current node (and its children) should /// not print on update() -terminal: std.fs.File, +terminal: Io.File, terminal_mode: TerminalMode, @@ -472,7 +474,7 @@ pub fn start(options: Options) Node { if (options.disable_printing) { return Node.none; } - const stderr: std.fs.File = .stderr(); + const stderr: Io.File = .stderr(); global_progress.terminal = stderr; if (stderr.enableAnsiEscapeCodes()) |_| { global_progress.terminal_mode = .ansi_escape_codes; @@ -633,8 +635,8 @@ pub fn unlockStdErr() void { /// Protected by `stderr_mutex`. const stderr_writer: *Writer = &stderr_file_writer.interface; /// Protected by `stderr_mutex`. -var stderr_file_writer: std.fs.File.Writer = .{ - .interface = std.fs.File.Writer.initInterface(&.{}), +var stderr_file_writer: Io.File.Writer = .{ + .interface = Io.File.Writer.initInterface(&.{}), .file = if (is_windows) undefined else .stderr(), .mode = .streaming, }; diff --git a/lib/std/Random/benchmark.zig b/lib/std/Random/benchmark.zig index 57dc69051e..97afe23b95 100644 --- a/lib/std/Random/benchmark.zig +++ b/lib/std/Random/benchmark.zig @@ -1,7 +1,9 @@ // zig run -O ReleaseFast --zig-lib-dir ../.. benchmark.zig -const std = @import("std"); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const time = std.time; const Timer = time.Timer; const Random = std.Random; @@ -123,7 +125,7 @@ fn mode(comptime x: comptime_int) comptime_int { pub fn main() !void { var stdout_buffer: [0x100]u8 = undefined; - var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(&stdout_buffer); const stdout = &stdout_writer.interface; var buffer: [1024]u8 = undefined; diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 9f532c3bec..8d8e5979df 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -175,7 +175,7 @@ pub const SetNameError = error{ Unsupported, Unexpected, InvalidWtf8, -} || posix.PrctlError || posix.WriteError || std.fs.File.OpenError || std.fmt.BufPrintError; +} || posix.PrctlError || posix.WriteError || Io.File.OpenError || std.fmt.BufPrintError; pub fn setName(self: Thread, io: Io, name: []const u8) SetNameError!void { if (name.len > max_name_len) return error.NameTooLong; @@ -293,7 +293,7 @@ pub fn setName(self: Thread, io: Io, name: []const u8) SetNameError!void { pub const GetNameError = error{ Unsupported, Unexpected, -} || posix.PrctlError || posix.ReadError || std.fs.File.OpenError || std.fmt.BufPrintError; +} || posix.PrctlError || posix.ReadError || Io.File.OpenError || std.fmt.BufPrintError; /// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/). /// On other platforms, the result is an opaque sequence of bytes with no particular encoding. 
diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig index 9541e01db5..1d21918b5c 100644 --- a/lib/std/crypto/Certificate/Bundle.zig +++ b/lib/std/crypto/Certificate/Bundle.zig @@ -171,7 +171,7 @@ fn rescanWindows(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanW cb.bytes.shrinkAndFree(gpa, cb.bytes.items.len); } -pub const AddCertsFromDirPathError = fs.File.OpenError || AddCertsFromDirError; +pub const AddCertsFromDirPathError = Io.File.OpenError || AddCertsFromDirError; pub fn addCertsFromDirPath( cb: *Bundle, @@ -212,7 +212,7 @@ pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp, i } } -pub const AddCertsFromFilePathError = fs.File.OpenError || AddCertsFromFileError || Io.Clock.Error; +pub const AddCertsFromFilePathError = Io.File.OpenError || AddCertsFromFileError || Io.Clock.Error; pub fn addCertsFromFilePathAbsolute( cb: *Bundle, @@ -242,8 +242,8 @@ pub fn addCertsFromFilePath( } pub const AddCertsFromFileError = Allocator.Error || - fs.File.GetSeekPosError || - fs.File.ReadError || + Io.File.GetSeekPosError || + Io.File.ReadError || ParseCertError || std.base64.Error || error{ CertificateAuthorityBundleTooBig, MissingEndCertificateMarker, Streaming }; diff --git a/lib/std/crypto/Certificate/Bundle/macos.zig b/lib/std/crypto/Certificate/Bundle/macos.zig index d32f1be8e0..473505ac51 100644 --- a/lib/std/crypto/Certificate/Bundle/macos.zig +++ b/lib/std/crypto/Certificate/Bundle/macos.zig @@ -6,7 +6,7 @@ const mem = std.mem; const Allocator = std.mem.Allocator; const Bundle = @import("../Bundle.zig"); -pub const RescanMacError = Allocator.Error || fs.File.OpenError || fs.File.ReadError || fs.File.SeekError || Bundle.ParseCertError || error{EndOfStream}; +pub const RescanMacError = Allocator.Error || Io.File.OpenError || Io.File.ReadError || Io.File.SeekError || Bundle.ParseCertError || error{EndOfStream}; pub fn rescanMac(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanMacError!void { cb.bytes.clearRetainingCapacity(); diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig index 54024f070e..1b71110be5 100644 --- a/lib/std/crypto/benchmark.zig +++ b/lib/std/crypto/benchmark.zig @@ -1,10 +1,12 @@ // zig run -O ReleaseFast --zig-lib-dir ../.. benchmark.zig -const std = @import("std"); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const mem = std.mem; const time = std.time; -const Timer = time.Timer; +const Timer = std.time.Timer; const crypto = std.crypto; const KiB = 1024; @@ -504,7 +506,7 @@ fn mode(comptime x: comptime_int) comptime_int { pub fn main() !void { // Size of buffer is about size of printed message. var stdout_buffer: [0x100]u8 = undefined; - var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(&stdout_buffer); const stdout = &stdout_writer.interface; var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 0cb96ed593..97741ecb40 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -8,7 +8,7 @@ const posix = std.posix; const fs = std.fs; const testing = std.testing; const Allocator = mem.Allocator; -const File = std.fs.File; +const File = std.Io.File; const windows = std.os.windows; const builtin = @import("builtin"); @@ -575,7 +575,7 @@ pub fn defaultPanic( // A panic happened while trying to print a previous panic message. 
// We're still holding the mutex but that's fine as we're going to // call abort(). - fs.File.stderr().writeAll("aborting due to recursive panic\n") catch {}; + File.stderr().writeAll("aborting due to recursive panic\n") catch {}; }, else => {}, // Panicked while printing the recursive panic message. } @@ -1596,7 +1596,7 @@ pub fn defaultHandleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?CpuContex // A segfault happened while trying to print a previous panic message. // We're still holding the mutex but that's fine as we're going to // call abort(). - fs.File.stderr().writeAll("aborting due to recursive panic\n") catch {}; + File.stderr().writeAll("aborting due to recursive panic\n") catch {}; }, else => {}, // Panicked while printing the recursive panic message. } diff --git a/lib/std/debug/ElfFile.zig b/lib/std/debug/ElfFile.zig index 92bcca1bcf..5dbae18130 100644 --- a/lib/std/debug/ElfFile.zig +++ b/lib/std/debug/ElfFile.zig @@ -123,7 +123,7 @@ pub const LoadError = error{ pub fn load( gpa: Allocator, - elf_file: std.fs.File, + elf_file: Io.File, opt_build_id: ?[]const u8, di_search_paths: *const DebugInfoSearchPaths, ) LoadError!ElfFile { @@ -423,7 +423,7 @@ const LoadInnerResult = struct { }; fn loadInner( arena: Allocator, - elf_file: std.fs.File, + elf_file: Io.File, opt_crc: ?u32, ) (LoadError || error{ CrcMismatch, Streaming, Canceled })!LoadInnerResult { const mapped_mem: []align(std.heap.page_size_min) const u8 = mapped: { diff --git a/lib/std/debug/Info.zig b/lib/std/debug/Info.zig index 9268ca0247..da7656e626 100644 --- a/lib/std/debug/Info.zig +++ b/lib/std/debug/Info.zig @@ -27,7 +27,7 @@ coverage: *Coverage, pub const LoadError = error{ MissingDebugInfo, UnsupportedDebugInfo, -} || std.fs.File.OpenError || ElfFile.LoadError || MachOFile.Error || std.debug.Dwarf.ScanError; +} || Io.File.OpenError || ElfFile.LoadError || MachOFile.Error || std.debug.Dwarf.ScanError; pub fn load( gpa: Allocator, diff --git a/lib/std/debug/Pdb.zig b/lib/std/debug/Pdb.zig index c10b361f72..3ecfd1b363 100644 --- a/lib/std/debug/Pdb.zig +++ b/lib/std/debug/Pdb.zig @@ -1,5 +1,5 @@ const std = @import("../std.zig"); -const File = std.fs.File; +const File = std.Io.File; const Allocator = std.mem.Allocator; const pdb = std.pdb; const assert = std.debug.assert; diff --git a/lib/std/debug/SelfInfo/Windows.zig b/lib/std/debug/SelfInfo/Windows.zig index 557f3901eb..3af7223293 100644 --- a/lib/std/debug/SelfInfo/Windows.zig +++ b/lib/std/debug/SelfInfo/Windows.zig @@ -204,7 +204,7 @@ const Module = struct { coff_section_headers: []coff.SectionHeader, const MappedFile = struct { - file: fs.File, + file: Io.File, section_handle: windows.HANDLE, section_view: []const u8, fn deinit(mf: *const MappedFile, io: Io) void { diff --git a/lib/std/debug/simple_panic.zig b/lib/std/debug/simple_panic.zig index 45e97777c4..f6ff77e04f 100644 --- a/lib/std/debug/simple_panic.zig +++ b/lib/std/debug/simple_panic.zig @@ -15,7 +15,7 @@ pub fn call(msg: []const u8, ra: ?usize) noreturn { @branchHint(.cold); _ = ra; std.debug.lockStdErr(); - const stderr: std.fs.File = .stderr(); + const stderr: std.Io.File = .stderr(); stderr.writeAll(msg) catch {}; @trap(); } diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index c91056b0ab..ca36d5cbb9 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -225,7 +225,7 @@ pub const ElfDynLib = struct { const fd = try resolveFromName(io, path); defer posix.close(fd); - const file: std.fs.File = .{ .handle = fd }; + const file: Io.File = .{ 
.handle = fd }; const stat = try file.stat(); const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig; diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 15b8e9b558..f770ddd30e 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -12,7 +12,7 @@ const posix = std.posix; const ArenaAllocator = std.heap.ArenaAllocator; const Dir = std.fs.Dir; -const File = std.fs.File; +const File = std.Io.File; const tmpDir = testing.tmpDir; const SymLinkFlags = std.fs.Dir.SymLinkFlags; @@ -2231,7 +2231,7 @@ test "read file non vectored" { const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); defer file.close(io); { - var file_writer: std.fs.File.Writer = .init(file, &.{}); + var file_writer: File.Writer = .init(file, &.{}); try file_writer.interface.writeAll(contents); try file_writer.interface.flush(); } @@ -2263,7 +2263,7 @@ test "seek keeping partial buffer" { const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); defer file.close(io); { - var file_writer: std.fs.File.Writer = .init(file, &.{}); + var file_writer: File.Writer = .init(file, &.{}); try file_writer.interface.writeAll(contents); try file_writer.interface.flush(); } @@ -2325,7 +2325,7 @@ test "seekTo flushes buffered data" { defer file.close(io); { var buf: [16]u8 = undefined; - var file_writer = std.fs.File.writer(file, &buf); + var file_writer = File.writer(file, &buf); try file_writer.interface.writeAll(contents); try file_writer.seekTo(8); diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig index a21d6e9ada..6744b87fac 100644 --- a/lib/std/hash/benchmark.zig +++ b/lib/std/hash/benchmark.zig @@ -1,7 +1,8 @@ // zig run -O ReleaseFast --zig-lib-dir ../.. benchmark.zig +const builtin = @import("builtin"); const std = @import("std"); -const builtin = @import("builtin"); +const Io = std.Io; const time = std.time; const Timer = time.Timer; const hash = std.hash; @@ -354,7 +355,7 @@ fn mode(comptime x: comptime_int) comptime_int { pub fn main() !void { var stdout_buffer: [0x100]u8 = undefined; - var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(&stdout_buffer); const stdout = &stdout_writer.interface; var buffer: [1024]u8 = undefined; diff --git a/lib/std/http.zig b/lib/std/http.zig index a768372ecc..291e22539b 100644 --- a/lib/std/http.zig +++ b/lib/std/http.zig @@ -2,7 +2,7 @@ const builtin = @import("builtin"); const std = @import("std.zig"); const assert = std.debug.assert; const Writer = std.Io.Writer; -const File = std.fs.File; +const File = std.Io.File; pub const Client = @import("http/Client.zig"); pub const Server = @import("http/Server.zig"); diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig index 36b0e04e5c..7e479de8d4 100644 --- a/lib/std/pdb.zig +++ b/lib/std/pdb.zig @@ -12,7 +12,7 @@ const math = std.math; const mem = std.mem; const coff = std.coff; const fs = std.fs; -const File = std.fs.File; +const File = std.Io.File; const debug = std.debug; const ArrayList = std.ArrayList; diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 8889e50ea3..82fa2c41d1 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -1,20 +1,20 @@ +const builtin = @import("builtin"); +const native_os = builtin.target.os.tag; + const std = @import("../std.zig"); +const Io = std.Io; const posix = std.posix; const testing = std.testing; -const expect = testing.expect; -const expectEqual = testing.expectEqual; -const expectError = testing.expectError; +const expect = std.testing.expect; 
+const expectEqual = std.testing.expectEqual; +const expectError = std.testing.expectError; const fs = std.fs; const mem = std.mem; const elf = std.elf; const linux = std.os.linux; - const a = std.testing.allocator; - -const builtin = @import("builtin"); const AtomicRmwOp = std.builtin.AtomicRmwOp; const AtomicOrder = std.builtin.AtomicOrder; -const native_os = builtin.target.os.tag; const tmpDir = std.testing.tmpDir; const AT = posix.AT; @@ -663,14 +663,14 @@ test "dup & dup2" { var file = try tmp.dir.createFile("os_dup_test", .{}); defer file.close(io); - var duped = std.fs.File{ .handle = try posix.dup(file.handle) }; + var duped = Io.File{ .handle = try posix.dup(file.handle) }; defer duped.close(io); try duped.writeAll("dup"); // Tests aren't run in parallel so using the next fd shouldn't be an issue. const new_fd = duped.handle + 1; try posix.dup2(file.handle, new_fd); - var dup2ed = std.fs.File{ .handle = new_fd }; + var dup2ed = Io.File{ .handle = new_fd }; defer dup2ed.close(io); try dup2ed.writeAll("dup2"); } diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index be3026ff10..87d2fe3ba9 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -8,7 +8,7 @@ const Io = std.Io; const unicode = std.unicode; const fs = std.fs; const process = std.process; -const File = std.fs.File; +const File = std.Io.File; const windows = std.os.windows; const linux = std.os.linux; const posix = std.posix; @@ -1055,7 +1055,7 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn { fn writeIntFd(fd: i32, value: ErrInt) !void { var buffer: [8]u8 = undefined; - var fw: std.fs.File.Writer = .initStreaming(.{ .handle = fd }, &buffer); + var fw: File.Writer = .initStreaming(.{ .handle = fd }, &buffer); fw.interface.writeInt(u64, value, .little) catch unreachable; fw.interface.flush() catch return error.SystemResources; } diff --git a/lib/std/unicode/throughput_test.zig b/lib/std/unicode/throughput_test.zig index fd3f46ec58..c02f550a4a 100644 --- a/lib/std/unicode/throughput_test.zig +++ b/lib/std/unicode/throughput_test.zig @@ -1,8 +1,8 @@ const std = @import("std"); +const Io = std.Io; const time = std.time; const unicode = std.unicode; - -const Timer = time.Timer; +const Timer = std.time.Timer; const N = 1_000_000; @@ -41,7 +41,7 @@ fn benchmarkCodepointCount(buf: []const u8) !ResultCount { pub fn main() !void { // Size of buffer is about size of printed message. var stdout_buffer: [0x100]u8 = undefined; - var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(&stdout_buffer); const stdout = &stdout_writer.interface; try stdout.print("short ASCII strings\n", .{}); diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig index 09c785613f..37ce7b4cfa 100644 --- a/lib/std/zig/Zir.zig +++ b/lib/std/zig/Zir.zig @@ -11,9 +11,11 @@ //! * In the future, possibly inline assembly, which needs to get parsed and //! handled by the codegen backend, and errors reported there. However for now, //! inline assembly is not an exception. 
+const Zir = @This(); +const builtin = @import("builtin"); const std = @import("std"); -const builtin = @import("builtin"); +const Io = std.Io; const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -21,8 +23,6 @@ const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const Ast = std.zig.Ast; -const Zir = @This(); - instructions: std.MultiArrayList(Inst).Slice, /// In order to store references to strings in fewer bytes, we copy all /// string bytes into here. String bytes can be null. It is up to whomever @@ -45,7 +45,7 @@ pub const Header = extern struct { /// it's essentially free to have a zero field here and makes the warning go away, /// making it more likely that following Valgrind warnings will be taken seriously. unused: u32 = 0, - stat_inode: std.fs.File.INode, + stat_inode: Io.File.INode, stat_size: u64, stat_mtime: i128, }; diff --git a/lib/std/zig/Zoir.zig b/lib/std/zig/Zoir.zig index 08a7fc9639..d82b8f1861 100644 --- a/lib/std/zig/Zoir.zig +++ b/lib/std/zig/Zoir.zig @@ -1,6 +1,13 @@ //! Zig Object Intermediate Representation. //! Simplified AST for the ZON (Zig Object Notation) format. //! `ZonGen` converts `Ast` to `Zoir`. +const Zoir = @This(); + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const Allocator = std.mem.Allocator; +const Ast = std.zig.Ast; nodes: std.MultiArrayList(Node.Repr).Slice, extra: []u32, @@ -25,7 +32,7 @@ pub const Header = extern struct { /// making it more likely that following Valgrind warnings will be taken seriously. unused: u64 = 0, - stat_inode: std.fs.File.INode, + stat_inode: Io.File.INode, stat_size: u64, stat_mtime: i128, @@ -254,9 +261,3 @@ pub const CompileError = extern struct { assert(std.meta.hasUniqueRepresentation(Note)); } }; - -const std = @import("std"); -const assert = std.debug.assert; -const Allocator = std.mem.Allocator; -const Ast = std.zig.Ast; -const Zoir = @This(); diff --git a/lib/std/zig/llvm/Builder.zig b/lib/std/zig/llvm/Builder.zig index 9a52ae2c81..9ca0124f4f 100644 --- a/lib/std/zig/llvm/Builder.zig +++ b/lib/std/zig/llvm/Builder.zig @@ -9578,7 +9578,7 @@ pub fn asmValue( pub fn dump(b: *Builder) void { var buffer: [4000]u8 = undefined; - const stderr: std.fs.File = .stderr(); + const stderr: Io.File = .stderr(); b.printToFile(stderr, &buffer) catch {}; } @@ -9589,7 +9589,7 @@ pub fn printToFilePath(b: *Builder, io: Io, dir: std.fs.Dir, path: []const u8) ! 
try b.printToFile(io, file, &buffer); } -pub fn printToFile(b: *Builder, file: std.fs.File, buffer: []u8) !void { +pub fn printToFile(b: *Builder, file: Io.File, buffer: []u8) !void { var fw = file.writer(buffer); try print(b, &fw.interface); try fw.interface.flush(); diff --git a/lib/std/zig/perf_test.zig b/lib/std/zig/perf_test.zig index 1566a15d2d..da3dd42f15 100644 --- a/lib/std/zig/perf_test.zig +++ b/lib/std/zig/perf_test.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const mem = std.mem; const Tokenizer = std.zig.Tokenizer; const fmtIntSizeBin = std.fmt.fmtIntSizeBin; @@ -22,7 +23,7 @@ pub fn main() !void { const bytes_per_sec = @as(u64, @intFromFloat(@floor(bytes_per_sec_float))); var stdout_buffer: [1024]u8 = undefined; - var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(&stdout_buffer); const stdout = &stdout_writer.interface; try stdout.print("parsing speed: {Bi:.2}/s, {Bi:.2} used \n", .{ bytes_per_sec, memory_used }); try stdout.flush(); diff --git a/lib/std/zip.zig b/lib/std/zip.zig index c2dbaf5b81..9588651e7f 100644 --- a/lib/std/zip.zig +++ b/lib/std/zip.zig @@ -4,9 +4,10 @@ //! Note that this file uses the abbreviation "cd" for "central directory" const builtin = @import("builtin"); -const std = @import("std"); -const File = std.fs.File; const is_le = builtin.target.cpu.arch.endian() == .little; + +const std = @import("std"); +const File = std.Io.File; const Writer = std.Io.Writer; const Reader = std.Io.Reader; const flate = std.compress.flate; diff --git a/src/Compilation.zig b/src/Compilation.zig index df64dee19f..582c9dff3c 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5325,7 +5325,7 @@ fn docsCopyModule( comp: *Compilation, module: *Package.Module, name: []const u8, - tar_file_writer: *fs.File.Writer, + tar_file_writer: *Io.File.Writer, ) !void { const io = comp.io; const root = module.root; @@ -5361,7 +5361,7 @@ fn docsCopyModule( }; defer file.close(io); const stat = try file.stat(); - var file_reader: fs.File.Reader = .initSize(file.adaptToNewApi(), io, &buffer, stat.size); + var file_reader: Io.File.Reader = .initSize(file.adaptToNewApi(), io, &buffer, stat.size); archiver.writeFileTimestamp(entry.path, &file_reader, stat.mtime) catch |err| { return comp.lockAndSetMiscFailure(.docs_copy, "unable to archive {f}{s}: {t}", .{ diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 58f970abe5..988282097b 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -882,7 +882,7 @@ fn fail(f: *Fetch, msg_tok: std.zig.Ast.TokenIndex, msg_str: u32) RunError { } const Resource = union(enum) { - file: fs.File.Reader, + file: Io.File.Reader, http_request: HttpRequest, git: Git, dir: Io.Dir, @@ -1653,7 +1653,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute fn dumpHashInfo(all_files: []const *const HashedFile) !void { var stdout_buffer: [1024]u8 = undefined; - var stdout_writer: fs.File.Writer = .initStreaming(.stdout(), &stdout_buffer); + var stdout_writer: Io.File.Writer = .initStreaming(.stdout(), &stdout_buffer); const w = &stdout_writer.interface; for (all_files) |hashed_file| { try w.print("{t}: {x}: {s}\n", .{ hashed_file.kind, &hashed_file.hash, hashed_file.normalized_path }); @@ -1712,11 +1712,11 @@ fn deleteFileFallible(dir: Io.Dir, deleted_file: *DeletedFile) DeletedFile.Error try dir.deleteFile(deleted_file.fs_path); } -fn setExecutable(file: fs.File) !void { +fn setExecutable(file: Io.File) !void { if 
(!std.fs.has_executable_bit) return; const S = std.posix.S; - const mode = fs.File.default_mode | S.IXUSR | S.IXGRP | S.IXOTH; + const mode = Io.File.default_mode | S.IXUSR | S.IXGRP | S.IXOTH; try file.chmod(mode); } @@ -1738,10 +1738,10 @@ const HashedFile = struct { size: u64, const Error = - fs.File.OpenError || - fs.File.ReadError || - fs.File.StatError || - fs.File.ChmodError || + Io.File.OpenError || + Io.File.ReadError || + Io.File.StatError || + Io.File.ChmodError || Io.Dir.ReadLinkError; const Kind = enum { file, link }; diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index 864865bd19..0fca3a0ee3 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -198,8 +198,8 @@ pub const Repository = struct { repo: *Repository, allocator: Allocator, format: Oid.Format, - pack_file: *std.fs.File.Reader, - index_file: *std.fs.File.Reader, + pack_file: *Io.File.Reader, + index_file: *Io.File.Reader, ) !void { repo.* = .{ .odb = undefined }; try repo.odb.init(allocator, format, pack_file, index_file); @@ -372,9 +372,9 @@ pub const Repository = struct { /// [pack-format](https://git-scm.com/docs/pack-format). const Odb = struct { format: Oid.Format, - pack_file: *std.fs.File.Reader, + pack_file: *Io.File.Reader, index_header: IndexHeader, - index_file: *std.fs.File.Reader, + index_file: *Io.File.Reader, cache: ObjectCache = .{}, allocator: Allocator, @@ -383,8 +383,8 @@ const Odb = struct { odb: *Odb, allocator: Allocator, format: Oid.Format, - pack_file: *std.fs.File.Reader, - index_file: *std.fs.File.Reader, + pack_file: *Io.File.Reader, + index_file: *Io.File.Reader, ) !void { try pack_file.seekTo(0); try index_file.seekTo(0); @@ -1272,8 +1272,8 @@ const IndexEntry = struct { pub fn indexPack( allocator: Allocator, format: Oid.Format, - pack: *std.fs.File.Reader, - index_writer: *std.fs.File.Writer, + pack: *Io.File.Reader, + index_writer: *Io.File.Writer, ) !void { try pack.seekTo(0); @@ -1372,7 +1372,7 @@ pub fn indexPack( fn indexPackFirstPass( allocator: Allocator, format: Oid.Format, - pack: *std.fs.File.Reader, + pack: *Io.File.Reader, index_entries: *std.AutoHashMapUnmanaged(Oid, IndexEntry), pending_deltas: *std.ArrayList(IndexEntry), ) !Oid { @@ -1425,7 +1425,7 @@ fn indexPackFirstPass( fn indexPackHashDelta( allocator: Allocator, format: Oid.Format, - pack: *std.fs.File.Reader, + pack: *Io.File.Reader, delta: IndexEntry, index_entries: std.AutoHashMapUnmanaged(Oid, IndexEntry), cache: *ObjectCache, @@ -1477,7 +1477,7 @@ fn indexPackHashDelta( fn resolveDeltaChain( allocator: Allocator, format: Oid.Format, - pack: *std.fs.File.Reader, + pack: *Io.File.Reader, base_object: Object, delta_offsets: []const u64, cache: *ObjectCache, diff --git a/src/Zcu.zig b/src/Zcu.zig index 58d884afe3..cd4a8c7783 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -1200,7 +1200,7 @@ pub const EmbedFile = struct { /// `.none` means the file was not loaded, so `stat` is undefined. val: InternPool.Index, /// If this is `null` and `val` is `.none`, the file has never been loaded. 
- err: ?(std.fs.File.OpenError || std.fs.File.StatError || std.fs.File.ReadError || error{UnexpectedEof}), + err: ?(Io.File.OpenError || Io.File.StatError || Io.File.ReadError || error{UnexpectedEof}), stat: Cache.File.Stat, pub const Index = enum(u32) { @@ -2927,7 +2927,7 @@ comptime { } } -pub fn loadZirCache(gpa: Allocator, io: Io, cache_file: std.fs.File) !Zir { +pub fn loadZirCache(gpa: Allocator, io: Io, cache_file: Io.File) !Zir { var buffer: [2000]u8 = undefined; var file_reader = cache_file.reader(io, &buffer); return result: { @@ -2986,7 +2986,7 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *Io.Reader return zir; } -pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.Stat, zir: Zir) (std.fs.File.WriteError || Allocator.Error)!void { +pub fn saveZirCache(gpa: Allocator, cache_file: Io.File, stat: Io.File.Stat, zir: Zir) (Io.File.WriteError || Allocator.Error)!void { const safety_buffer = if (data_has_safety_tag) try gpa.alloc([8]u8, zir.instructions.len) else @@ -3026,7 +3026,7 @@ pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.S }; } -pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir) std.fs.File.WriteError!void { +pub fn saveZoirCache(cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir) Io.File.WriteError!void { const header: Zoir.Header = .{ .nodes_len = @intCast(zoir.nodes.len), .extra_len = @intCast(zoir.extra.len), diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index d2ca004058..55d6a3861f 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -118,7 +118,7 @@ pub fn updateFile( const zir_dir = cache_directory.handle; // Determine whether we need to reload the file from disk and redo parsing and AstGen. - var lock: std.fs.File.Lock = switch (file.status) { + var lock: Io.File.Lock = switch (file.status) { .never_loaded, .retryable_failure => lock: { // First, load the cached ZIR code, if any. log.debug("AstGen checking cache: {f} (local={}, digest={s})", .{ @@ -346,8 +346,8 @@ pub fn updateFile( fn loadZirZoirCache( zcu: *Zcu, - cache_file: std.fs.File, - stat: std.fs.File.Stat, + cache_file: Io.File, + stat: Io.File.Stat, file: *Zcu.File, comptime mode: Ast.Mode, ) !enum { success, invalid, truncated, stale } { diff --git a/src/fmt.zig b/src/fmt.zig index 907c7885ad..7bdc24054e 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -37,9 +37,9 @@ const Fmt = struct { arena: Allocator, io: Io, out_buffer: std.Io.Writer.Allocating, - stdout_writer: *fs.File.Writer, + stdout_writer: *Io.File.Writer, - const SeenMap = std.AutoHashMap(fs.File.INode, void); + const SeenMap = std.AutoHashMap(Io.File.INode, void); }; pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !void { @@ -59,7 +59,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! const arg = args[i]; if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { - try fs.File.stdout().writeAll(usage_fmt); + try Io.File.stdout().writeAll(usage_fmt); return process.cleanExit(); } else if (mem.eql(u8, arg, "--color")) { if (i + 1 >= args.len) { @@ -99,9 +99,9 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! 
fatal("cannot use --stdin with positional arguments", .{}); } - const stdin: fs.File = .stdin(); + const stdin: Io.File = .stdin(); var stdio_buffer: [1024]u8 = undefined; - var file_reader: fs.File.Reader = stdin.reader(io, &stdio_buffer); + var file_reader: Io.File.Reader = stdin.reader(io, &stdio_buffer); const source_code = std.zig.readSourceFileToEndAlloc(gpa, &file_reader) catch |err| { fatal("unable to read stdin: {}", .{err}); }; @@ -154,7 +154,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! process.exit(code); } - return fs.File.stdout().writeAll(formatted); + return Io.File.stdout().writeAll(formatted); } if (input_files.items.len == 0) { @@ -162,7 +162,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! } var stdout_buffer: [4096]u8 = undefined; - var stdout_writer = fs.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(&stdout_buffer); var fmt: Fmt = .{ .gpa = gpa, @@ -272,7 +272,7 @@ fn fmtPathFile( return error.IsDir; var read_buffer: [1024]u8 = undefined; - var file_reader: fs.File.Reader = source_file.reader(io, &read_buffer); + var file_reader: Io.File.Reader = source_file.reader(io, &read_buffer); file_reader.size = stat.size; const gpa = fmt.gpa; diff --git a/src/link.zig b/src/link.zig index ef095987c9..d5daf6fca7 100644 --- a/src/link.zig +++ b/src/link.zig @@ -393,7 +393,7 @@ pub const File = struct { comp: *Compilation, emit: Path, - file: ?fs.File, + file: ?Io.File, /// When using the LLVM backend, the emitted object is written to a file with this name. This /// object file then becomes a normal link input to LLD or a self-hosted linker. /// @@ -1110,7 +1110,7 @@ pub const File = struct { }; } - fn loadGnuLdScript(base: *File, path: Path, parent_query: UnresolvedInput.Query, file: fs.File) anyerror!void { + fn loadGnuLdScript(base: *File, path: Path, parent_query: UnresolvedInput.Query, file: Io.File) anyerror!void { const comp = base.comp; const diags = &comp.link_diags; const gpa = comp.gpa; @@ -1238,7 +1238,7 @@ pub const File = struct { pub fn determineMode( output_mode: std.builtin.OutputMode, link_mode: std.builtin.LinkMode, - ) fs.File.Mode { + ) Io.File.Mode { // On common systems with a 0o022 umask, 0o777 will still result in a file created // with 0o755 permissions, but it works appropriately if the system is configured // more leniently. As another data point, C's fopen seems to open files with the @@ -1247,10 +1247,10 @@ pub const File = struct { switch (output_mode) { .Lib => return switch (link_mode) { .dynamic => executable_mode, - .static => fs.File.default_mode, + .static => Io.File.default_mode, }, .Exe => return executable_mode, - .Obj => return fs.File.default_mode, + .Obj => return Io.File.default_mode, } } @@ -1660,19 +1660,19 @@ pub const Input = union(enum) { pub const Object = struct { path: Path, - file: fs.File, + file: Io.File, must_link: bool, hidden: bool, }; pub const Res = struct { path: Path, - file: fs.File, + file: Io.File, }; pub const Dso = struct { path: Path, - file: fs.File, + file: Io.File, needed: bool, weak: bool, reexport: bool, @@ -1694,7 +1694,7 @@ pub const Input = union(enum) { } /// Returns `null` in the case of `dso_exact`. 
- pub fn pathAndFile(input: Input) ?struct { Path, fs.File } { + pub fn pathAndFile(input: Input) ?struct { Path, Io.File } { return switch (input) { .object, .archive => |obj| .{ obj.path, obj.file }, inline .res, .dso => |x| .{ x.path, x.file }, @@ -2075,7 +2075,7 @@ fn resolveLibInput( fn finishResolveLibInput( resolved_inputs: *std.ArrayList(Input), path: Path, - file: std.fs.File, + file: Io.File, link_mode: std.builtin.LinkMode, query: UnresolvedInput.Query, ) ResolveLibInputResult { diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 95f4ca8bbd..cfb02fba38 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -1,3 +1,24 @@ +const Dwarf = @This(); + +const std = @import("std"); +const Io = std.Io; +const Allocator = std.mem.Allocator; +const DW = std.dwarf; +const Zir = std.zig.Zir; +const assert = std.debug.assert; +const log = std.log.scoped(.dwarf); +const Writer = std.Io.Writer; + +const InternPool = @import("../InternPool.zig"); +const Module = @import("../Package.zig").Module; +const Type = @import("../Type.zig"); +const Value = @import("../Value.zig"); +const Zcu = @import("../Zcu.zig"); +const codegen = @import("../codegen.zig"); +const dev = @import("../dev.zig"); +const link = @import("../link.zig"); +const target_info = @import("../target.zig"); + gpa: Allocator, bin_file: *link.File, format: DW.Format, @@ -29,16 +50,16 @@ pub const UpdateError = error{ UnexpectedEndOfFile, } || codegen.GenerateSymbolError || - std.fs.File.OpenError || - std.fs.File.SetEndPosError || - std.fs.File.CopyRangeError || - std.fs.File.PReadError || - std.fs.File.PWriteError; + Io.File.OpenError || + Io.File.SetEndPosError || + Io.File.CopyRangeError || + Io.File.PReadError || + Io.File.PWriteError; pub const FlushError = UpdateError; pub const RelocError = - std.fs.File.PWriteError; + Io.File.PWriteError; pub const AddressSize = enum(u8) { @"32" = 4, @@ -6350,7 +6371,7 @@ const AbbrevCode = enum { }); }; -fn getFile(dwarf: *Dwarf) ?std.fs.File { +fn getFile(dwarf: *Dwarf) ?Io.File { if (dwarf.bin_file.cast(.macho)) |macho_file| if (macho_file.d_sym) |*d_sym| return d_sym.file; return dwarf.bin_file.file; } @@ -6429,21 +6450,3 @@ const force_incremental = false; inline fn incremental(dwarf: Dwarf) bool { return force_incremental or dwarf.bin_file.comp.config.incremental; } - -const Allocator = std.mem.Allocator; -const DW = std.dwarf; -const Dwarf = @This(); -const InternPool = @import("../InternPool.zig"); -const Module = @import("../Package.zig").Module; -const Type = @import("../Type.zig"); -const Value = @import("../Value.zig"); -const Zcu = @import("../Zcu.zig"); -const Zir = std.zig.Zir; -const assert = std.debug.assert; -const codegen = @import("../codegen.zig"); -const dev = @import("../dev.zig"); -const link = @import("../link.zig"); -const log = std.log.scoped(.dwarf); -const std = @import("std"); -const target_info = @import("../target.zig"); -const Writer = std.Io.Writer; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index ae7d631f09..584a50c7f2 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3651,7 +3651,7 @@ fn fileLookup(files: std.MultiArrayList(File.Entry), index: File.Index, zig_obje pub fn addFileHandle( gpa: Allocator, file_handles: *std.ArrayList(File.Handle), - handle: fs.File, + handle: Io.File, ) Allocator.Error!File.HandleIndex { try file_handles.append(gpa, handle); return @intCast(file_handles.items.len - 1); @@ -4068,7 +4068,7 @@ fn fmtDumpState(self: *Elf, writer: *std.Io.Writer) std.Io.Writer.Error!void { } /// Caller owns the memory. 
-pub fn preadAllAlloc(allocator: Allocator, handle: fs.File, offset: u64, size: u64) ![]u8 { +pub fn preadAllAlloc(allocator: Allocator, handle: Io.File, offset: u64, size: u64) ![]u8 { const buffer = try allocator.alloc(u8, math.cast(usize, size) orelse return error.Overflow); errdefer allocator.free(buffer); const amt = try handle.preadAll(buffer, offset); @@ -4460,6 +4460,7 @@ pub fn cast(elf_file: *Elf, comptime T: type, x: anytype) error{LinkFailure}!T { } const std = @import("std"); +const Io = std.Io; const build_options = @import("build_options"); const builtin = @import("builtin"); const assert = std.debug.assert; diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index d51a82b266..c0dde4176a 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -1,3 +1,30 @@ +const Object = @This(); + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const eh_frame = @import("eh_frame.zig"); +const elf = std.elf; +const fs = std.fs; +const log = std.log.scoped(.link); +const math = std.math; +const mem = std.mem; +const Path = std.Build.Cache.Path; +const Allocator = std.mem.Allocator; + +const Diags = @import("../../link.zig").Diags; +const Archive = @import("Archive.zig"); +const Atom = @import("Atom.zig"); +const AtomList = @import("AtomList.zig"); +const Cie = eh_frame.Cie; +const Elf = @import("../Elf.zig"); +const Fde = eh_frame.Fde; +const File = @import("file.zig").File; +const Merge = @import("Merge.zig"); +const Symbol = @import("Symbol.zig"); +const Alignment = Atom.Alignment; +const riscv = @import("../riscv.zig"); + archive: ?InArchive = null, /// Archive files cannot contain subdirectories, so only the basename is needed /// for output. However, the full path is kept for error reporting. @@ -68,7 +95,7 @@ pub fn parse( diags: *Diags, /// For error reporting purposes only. path: Path, - handle: fs.File, + handle: Io.File, target: *const std.Target, debug_fmt_strip: bool, default_sym_version: elf.Versym, @@ -97,7 +124,7 @@ pub fn parseCommon( gpa: Allocator, diags: *Diags, path: Path, - handle: fs.File, + handle: Io.File, target: *const std.Target, ) !void { const offset = if (self.archive) |ar| ar.offset else 0; @@ -264,7 +291,7 @@ fn initAtoms( gpa: Allocator, diags: *Diags, path: Path, - handle: fs.File, + handle: Io.File, debug_fmt_strip: bool, target: *const std.Target, ) !void { @@ -421,7 +448,7 @@ fn initSymbols( fn parseEhFrame( self: *Object, gpa: Allocator, - handle: fs.File, + handle: Io.File, shndx: u32, target: *const std.Target, ) !void { @@ -1310,7 +1337,7 @@ fn addString(self: *Object, gpa: Allocator, str: []const u8) !u32 { } /// Caller owns the memory. -fn preadShdrContentsAlloc(self: Object, gpa: Allocator, handle: fs.File, index: u32) ![]u8 { +fn preadShdrContentsAlloc(self: Object, gpa: Allocator, handle: Io.File, index: u32) ![]u8 { assert(index < self.shdrs.items.len); const offset = if (self.archive) |ar| ar.offset else 0; const shdr = self.shdrs.items[index]; @@ -1320,7 +1347,7 @@ fn preadShdrContentsAlloc(self: Object, gpa: Allocator, handle: fs.File, index: } /// Caller owns the memory. 
-fn preadRelocsAlloc(self: Object, gpa: Allocator, handle: fs.File, shndx: u32) ![]align(1) const elf.Elf64_Rela { +fn preadRelocsAlloc(self: Object, gpa: Allocator, handle: Io.File, shndx: u32) ![]align(1) const elf.Elf64_Rela { const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx); const num = @divExact(raw.len, @sizeOf(elf.Elf64_Rela)); return @as([*]align(1) const elf.Elf64_Rela, @ptrCast(raw.ptr))[0..num]; @@ -1552,29 +1579,3 @@ const InArchive = struct { offset: u64, size: u32, }; - -const Object = @This(); - -const std = @import("std"); -const assert = std.debug.assert; -const eh_frame = @import("eh_frame.zig"); -const elf = std.elf; -const fs = std.fs; -const log = std.log.scoped(.link); -const math = std.math; -const mem = std.mem; -const Path = std.Build.Cache.Path; -const Allocator = std.mem.Allocator; - -const Diags = @import("../../link.zig").Diags; -const Archive = @import("Archive.zig"); -const Atom = @import("Atom.zig"); -const AtomList = @import("AtomList.zig"); -const Cie = eh_frame.Cie; -const Elf = @import("../Elf.zig"); -const Fde = eh_frame.Fde; -const File = @import("file.zig").File; -const Merge = @import("Merge.zig"); -const Symbol = @import("Symbol.zig"); -const Alignment = Atom.Alignment; -const riscv = @import("../riscv.zig"); diff --git a/src/link/Elf/SharedObject.zig b/src/link/Elf/SharedObject.zig index 1e17aa34a8..3720fe53d6 100644 --- a/src/link/Elf/SharedObject.zig +++ b/src/link/Elf/SharedObject.zig @@ -1,3 +1,20 @@ +const SharedObject = @This(); + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const elf = std.elf; +const log = std.log.scoped(.elf); +const mem = std.mem; +const Path = std.Build.Cache.Path; +const Stat = std.Build.Cache.File.Stat; +const Allocator = mem.Allocator; + +const Elf = @import("../Elf.zig"); +const File = @import("file.zig").File; +const Symbol = @import("Symbol.zig"); +const Diags = @import("../../link.zig").Diags; + path: Path, index: File.Index, @@ -94,7 +111,7 @@ pub fn parseHeader( gpa: Allocator, diags: *Diags, file_path: Path, - fs_file: std.fs.File, + fs_file: Io.File, stat: Stat, target: *const std.Target, ) !Header { @@ -192,7 +209,7 @@ pub fn parse( gpa: Allocator, /// Moves resources from header. Caller may unconditionally deinit. 
header: *Header, - fs_file: std.fs.File, + fs_file: Io.File, ) !Parsed { const symtab = if (header.dynsym_sect_index) |index| st: { const shdr = header.sections[index]; @@ -534,19 +551,3 @@ const Format = struct { } } }; - -const SharedObject = @This(); - -const std = @import("std"); -const assert = std.debug.assert; -const elf = std.elf; -const log = std.log.scoped(.elf); -const mem = std.mem; -const Path = std.Build.Cache.Path; -const Stat = std.Build.Cache.File.Stat; -const Allocator = mem.Allocator; - -const Elf = @import("../Elf.zig"); -const File = @import("file.zig").File; -const Symbol = @import("Symbol.zig"); -const Diags = @import("../../link.zig").Diags; diff --git a/src/link/Elf/file.zig b/src/link/Elf/file.zig index 50f5159d18..52d3c6e6f0 100644 --- a/src/link/Elf/file.zig +++ b/src/link/Elf/file.zig @@ -1,3 +1,20 @@ +const std = @import("std"); +const Io = std.Io; +const elf = std.elf; +const log = std.log.scoped(.link); +const Path = std.Build.Cache.Path; +const Allocator = std.mem.Allocator; + +const Archive = @import("Archive.zig"); +const Atom = @import("Atom.zig"); +const Cie = @import("eh_frame.zig").Cie; +const Elf = @import("../Elf.zig"); +const LinkerDefined = @import("LinkerDefined.zig"); +const Object = @import("Object.zig"); +const SharedObject = @import("SharedObject.zig"); +const Symbol = @import("Symbol.zig"); +const ZigObject = @import("ZigObject.zig"); + pub const File = union(enum) { zig_object: *ZigObject, linker_defined: *LinkerDefined, @@ -279,22 +296,6 @@ pub const File = union(enum) { shared_object: SharedObject, }; - pub const Handle = std.fs.File; + pub const Handle = Io.File; pub const HandleIndex = Index; }; - -const std = @import("std"); -const elf = std.elf; -const log = std.log.scoped(.link); -const Path = std.Build.Cache.Path; -const Allocator = std.mem.Allocator; - -const Archive = @import("Archive.zig"); -const Atom = @import("Atom.zig"); -const Cie = @import("eh_frame.zig").Cie; -const Elf = @import("../Elf.zig"); -const LinkerDefined = @import("LinkerDefined.zig"); -const Object = @import("Object.zig"); -const SharedObject = @import("SharedObject.zig"); -const Symbol = @import("Symbol.zig"); -const ZigObject = @import("ZigObject.zig"); diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig index 7d12ccedb2..e35444bc02 100644 --- a/src/link/Elf2.zig +++ b/src/link/Elf2.zig @@ -1,3 +1,23 @@ +const Elf = @This(); + +const builtin = @import("builtin"); +const native_endian = builtin.cpu.arch.endian(); + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const log = std.log.scoped(.link); + +const codegen = @import("../codegen.zig"); +const Compilation = @import("../Compilation.zig"); +const InternPool = @import("../InternPool.zig"); +const link = @import("../link.zig"); +const MappedFile = @import("MappedFile.zig"); +const target_util = @import("../target.zig"); +const Type = @import("../Type.zig"); +const Value = @import("../Value.zig"); +const Zcu = @import("../Zcu.zig"); + base: link.File, options: link.File.OpenOptions, mf: MappedFile, @@ -1973,8 +1993,8 @@ pub fn lazySymbol(elf: *Elf, lazy: link.File.LazySymbol) !Symbol.Index { return lazy_gop.value_ptr.*; } -pub fn loadInput(elf: *Elf, input: link.Input) (std.fs.File.Reader.SizeError || - std.Io.File.Reader.Error || MappedFile.Error || error{ EndOfStream, BadMagic, LinkFailure })!void { +pub fn loadInput(elf: *Elf, input: link.Input) (Io.File.Reader.SizeError || + Io.File.Reader.Error || MappedFile.Error || error{ EndOfStream, BadMagic, LinkFailure })!void { 
const io = elf.base.comp.io; var buf: [4096]u8 = undefined; switch (input) { @@ -2007,7 +2027,7 @@ pub fn loadInput(elf: *Elf, input: link.Input) (std.fs.File.Reader.SizeError || .dso_exact => |dso_exact| try elf.loadDsoExact(dso_exact.name), } } -fn loadArchive(elf: *Elf, path: std.Build.Cache.Path, fr: *std.Io.File.Reader) !void { +fn loadArchive(elf: *Elf, path: std.Build.Cache.Path, fr: *Io.File.Reader) !void { const comp = elf.base.comp; const gpa = comp.gpa; const diags = &comp.link_diags; @@ -2067,7 +2087,7 @@ fn loadObject( elf: *Elf, path: std.Build.Cache.Path, member: ?[]const u8, - fr: *std.Io.File.Reader, + fr: *Io.File.Reader, fl: MappedFile.Node.FileLocation, ) !void { const comp = elf.base.comp; @@ -2310,7 +2330,7 @@ fn loadObject( }, } } -fn loadDso(elf: *Elf, path: std.Build.Cache.Path, fr: *std.Io.File.Reader) !void { +fn loadDso(elf: *Elf, path: std.Build.Cache.Path, fr: *Io.File.Reader) !void { const comp = elf.base.comp; const diags = &comp.link_diags; const r = &fr.interface; @@ -3822,19 +3842,3 @@ pub fn printNode( try w.writeByte('\n'); } } - -const assert = std.debug.assert; -const builtin = @import("builtin"); -const codegen = @import("../codegen.zig"); -const Compilation = @import("../Compilation.zig"); -const Elf = @This(); -const InternPool = @import("../InternPool.zig"); -const link = @import("../link.zig"); -const log = std.log.scoped(.link); -const MappedFile = @import("MappedFile.zig"); -const native_endian = builtin.cpu.arch.endian(); -const std = @import("std"); -const target_util = @import("../target.zig"); -const Type = @import("../Type.zig"); -const Value = @import("../Value.zig"); -const Zcu = @import("../Zcu.zig"); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 471465cea1..72a49c0c9e 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -890,7 +890,7 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void { _ = try self.addTbd(.fromLinkInput(input), true, fh); } -fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch { +fn parseFatFile(self: *MachO, file: Io.File, path: Path) !?fat.Arch { const diags = &self.base.comp.link_diags; const fat_h = fat.readFatHeader(file) catch return null; if (fat_h.magic != macho.FAT_MAGIC and fat_h.magic != macho.FAT_MAGIC_64) return null; @@ -903,7 +903,7 @@ fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch { return diags.failParse(path, "missing arch in universal file: expected {s}", .{@tagName(cpu_arch)}); } -pub fn readMachHeader(file: std.fs.File, offset: usize) !macho.mach_header_64 { +pub fn readMachHeader(file: Io.File, offset: usize) !macho.mach_header_64 { var buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined; const nread = try file.preadAll(&buffer, offset); if (nread != buffer.len) return error.InputOutput; @@ -911,7 +911,7 @@ pub fn readMachHeader(file: std.fs.File, offset: usize) !macho.mach_header_64 { return hdr; } -pub fn readArMagic(file: std.fs.File, offset: usize, buffer: *[Archive.SARMAG]u8) ![]const u8 { +pub fn readArMagic(file: Io.File, offset: usize, buffer: *[Archive.SARMAG]u8) ![]const u8 { const nread = try file.preadAll(buffer, offset); if (nread != buffer.len) return error.InputOutput; return buffer[0..Archive.SARMAG]; @@ -3768,7 +3768,7 @@ pub fn getInternalObject(self: *MachO) ?*InternalObject { return self.getFile(index).?.internal; } -pub fn addFileHandle(self: *MachO, file: fs.File) !File.HandleIndex { +pub fn addFileHandle(self: *MachO, file: Io.File) !File.HandleIndex { const gpa = self.base.comp.gpa; const 
index: File.HandleIndex = @intCast(self.file_handles.items.len); const fh = try self.file_handles.addOne(gpa); @@ -5373,10 +5373,11 @@ const max_distance = (1 << (jump_bits - 1)); const max_allowed_distance = max_distance - 0x500_000; const MachO = @This(); - -const std = @import("std"); const build_options = @import("build_options"); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const fs = std.fs; const log = std.log.scoped(.link); @@ -5386,6 +5387,11 @@ const math = std.math; const mem = std.mem; const meta = std.meta; const Writer = std.Io.Writer; +const AtomicBool = std.atomic.Value(bool); +const Cache = std.Build.Cache; +const Hash = std.hash.Wyhash; +const Md5 = std.crypto.hash.Md5; +const Allocator = std.mem.Allocator; const aarch64 = codegen.aarch64.encoding; const bind = @import("MachO/dyld_info/bind.zig"); @@ -5403,11 +5409,8 @@ const trace = @import("../tracy.zig").trace; const synthetic = @import("MachO/synthetic.zig"); const Alignment = Atom.Alignment; -const Allocator = mem.Allocator; const Archive = @import("MachO/Archive.zig"); -const AtomicBool = std.atomic.Value(bool); const Bind = bind.Bind; -const Cache = std.Build.Cache; const CodeSignature = @import("MachO/CodeSignature.zig"); const Compilation = @import("../Compilation.zig"); const DataInCode = synthetic.DataInCode; @@ -5417,14 +5420,12 @@ const ExportTrie = @import("MachO/dyld_info/Trie.zig"); const Path = Cache.Path; const File = @import("MachO/file.zig").File; const GotSection = synthetic.GotSection; -const Hash = std.hash.Wyhash; const Indsymtab = synthetic.Indsymtab; const InternalObject = @import("MachO/InternalObject.zig"); const ObjcStubsSection = synthetic.ObjcStubsSection; const Object = @import("MachO/Object.zig"); const LazyBind = bind.LazyBind; const LaSymbolPtrSection = synthetic.LaSymbolPtrSection; -const Md5 = std.crypto.hash.Md5; const Zcu = @import("../Zcu.zig"); const InternPool = @import("../InternPool.zig"); const Rebase = @import("MachO/dyld_info/Rebase.zig"); diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 5bded3b9e3..5f9a9ecac9 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -1,17 +1,19 @@ const CodeSignature = @This(); const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const fs = std.fs; const log = std.log.scoped(.link); const macho = std.macho; const mem = std.mem; const testing = std.testing; +const Sha256 = std.crypto.hash.sha2.Sha256; +const Allocator = std.mem.Allocator; + const trace = @import("../../tracy.zig").trace; -const Allocator = mem.Allocator; const Hasher = @import("hasher.zig").ParallelHasher; const MachO = @import("../MachO.zig"); -const Sha256 = std.crypto.hash.sha2.Sha256; const hash_size = Sha256.digest_length; @@ -250,7 +252,7 @@ pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, path: []const } pub const WriteOpts = struct { - file: fs.File, + file: Io.File, exec_seg_base: u64, exec_seg_limit: u64, file_size: u32, diff --git a/src/link/MachO/fat.zig b/src/link/MachO/fat.zig index 7772f7a4de..fd9a302531 100644 --- a/src/link/MachO/fat.zig +++ b/src/link/MachO/fat.zig @@ -1,18 +1,20 @@ +const builtin = @import("builtin"); +const native_endian = builtin.target.cpu.arch.endian(); + const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; -const builtin = @import("builtin"); const log = std.log.scoped(.macho); const macho = std.macho; const mem = std.mem; 
-const native_endian = builtin.target.cpu.arch.endian(); const MachO = @import("../MachO.zig"); -pub fn readFatHeader(file: std.fs.File) !macho.fat_header { +pub fn readFatHeader(file: Io.File) !macho.fat_header { return readFatHeaderGeneric(macho.fat_header, file, 0); } -fn readFatHeaderGeneric(comptime Hdr: type, file: std.fs.File, offset: usize) !Hdr { +fn readFatHeaderGeneric(comptime Hdr: type, file: Io.File, offset: usize) !Hdr { var buffer: [@sizeOf(Hdr)]u8 = undefined; const nread = try file.preadAll(&buffer, offset); if (nread != buffer.len) return error.InputOutput; @@ -27,7 +29,7 @@ pub const Arch = struct { size: u32, }; -pub fn parseArchs(file: std.fs.File, fat_header: macho.fat_header, out: *[2]Arch) ![]const Arch { +pub fn parseArchs(file: Io.File, fat_header: macho.fat_header, out: *[2]Arch) ![]const Arch { var count: usize = 0; var fat_arch_index: u32 = 0; while (fat_arch_index < fat_header.nfat_arch and count < out.len) : (fat_arch_index += 1) { diff --git a/src/link/MachO/file.zig b/src/link/MachO/file.zig index 05b43de181..cd687a4941 100644 --- a/src/link/MachO/file.zig +++ b/src/link/MachO/file.zig @@ -355,11 +355,12 @@ pub const File = union(enum) { dylib: Dylib, }; - pub const Handle = std.fs.File; + pub const Handle = Io.File; pub const HandleIndex = Index; }; const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const log = std.log.scoped(.link); const macho = std.macho; diff --git a/src/link/MachO/hasher.zig b/src/link/MachO/hasher.zig index 78cd847c40..8cf53071c8 100644 --- a/src/link/MachO/hasher.zig +++ b/src/link/MachO/hasher.zig @@ -1,3 +1,9 @@ +const std = @import("std"); +const Io = std.Io; +const Allocator = std.mem.Allocator; + +const trace = @import("../../tracy.zig").trace; + pub fn ParallelHasher(comptime Hasher: type) type { const hash_size = Hasher.digest_length; @@ -5,7 +11,7 @@ pub fn ParallelHasher(comptime Hasher: type) type { allocator: Allocator, io: std.Io, - pub fn hash(self: Self, file: fs.File, out: [][hash_size]u8, opts: struct { + pub fn hash(self: Self, file: Io.File, out: [][hash_size]u8, opts: struct { chunk_size: u64 = 0x4000, max_file_size: ?u64 = null, }) !void { @@ -23,7 +29,7 @@ pub fn ParallelHasher(comptime Hasher: type) type { const buffer = try self.allocator.alloc(u8, chunk_size * out.len); defer self.allocator.free(buffer); - const results = try self.allocator.alloc(fs.File.PReadError!usize, out.len); + const results = try self.allocator.alloc(Io.File.PReadError!usize, out.len); defer self.allocator.free(results); { @@ -51,11 +57,11 @@ pub fn ParallelHasher(comptime Hasher: type) type { } fn worker( - file: fs.File, + file: Io.File, fstart: usize, buffer: []u8, out: *[hash_size]u8, - err: *fs.File.PReadError!usize, + err: *Io.File.PReadError!usize, ) void { const tracy = trace(@src()); defer tracy.end(); @@ -66,11 +72,3 @@ pub fn ParallelHasher(comptime Hasher: type) type { const Self = @This(); }; } - -const assert = std.debug.assert; -const fs = std.fs; -const mem = std.mem; -const std = @import("std"); -const trace = @import("../../tracy.zig").trace; - -const Allocator = mem.Allocator; diff --git a/src/link/MachO/uuid.zig b/src/link/MachO/uuid.zig index d08ac0c5b8..4d8eac7523 100644 --- a/src/link/MachO/uuid.zig +++ b/src/link/MachO/uuid.zig @@ -1,10 +1,18 @@ +const std = @import("std"); +const Io = std.Io; +const Md5 = std.crypto.hash.Md5; + +const trace = @import("../../tracy.zig").trace; +const Compilation = @import("../../Compilation.zig"); +const Hasher = 
@import("hasher.zig").ParallelHasher; + /// Calculates Md5 hash of each chunk in parallel and then hashes all Md5 hashes to produce /// the final digest. /// While this is NOT a correct MD5 hash of the contents, this methodology is used by LLVM/LLD /// and we will use it too as it seems accepted by Apple OSes. /// TODO LLD also hashes the output filename to disambiguate between same builds with different /// output files. Should we also do that? -pub fn calcUuid(comp: *const Compilation, file: fs.File, file_size: u64, out: *[Md5.digest_length]u8) !void { +pub fn calcUuid(comp: *const Compilation, file: Io.File, file_size: u64, out: *[Md5.digest_length]u8) !void { const tracy = trace(@src()); defer tracy.end(); @@ -37,12 +45,3 @@ inline fn conform(out: *[Md5.digest_length]u8) void { out[6] = (out[6] & 0x0F) | (3 << 4); out[8] = (out[8] & 0x3F) | 0x80; } - -const fs = std.fs; -const mem = std.mem; -const std = @import("std"); -const trace = @import("../../tracy.zig").trace; - -const Compilation = @import("../../Compilation.zig"); -const Md5 = std.crypto.hash.Md5; -const Hasher = @import("hasher.zig").ParallelHasher; diff --git a/src/link/MappedFile.zig b/src/link/MappedFile.zig index 975b94578b..7d4134ccaf 100644 --- a/src/link/MappedFile.zig +++ b/src/link/MappedFile.zig @@ -1,3 +1,15 @@ +const MappedFile = @This(); + +const builtin = @import("builtin"); +const is_linux = builtin.os.tag == .linux; +const is_windows = builtin.os.tag == .windows; + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const linux = std.os.linux; +const windows = std.os.windows; + file: std.Io.File, flags: packed struct { block_size: std.mem.Alignment, @@ -16,7 +28,7 @@ writers: std.SinglyLinkedList, pub const growth_factor = 4; -pub const Error = std.posix.MMapError || std.posix.MRemapError || std.fs.File.SetEndPosError || error{ +pub const Error = std.posix.MMapError || std.posix.MRemapError || Io.File.SetEndPosError || error{ NotFile, SystemResources, IsDir, @@ -618,7 +630,7 @@ fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested // Resize the entire file if (ni == Node.Index.root) { try mf.ensureCapacityForSetLocation(gpa); - try std.fs.File.adaptFromNewApi(mf.file).setEndPos(new_size); + try Io.File.adaptFromNewApi(mf.file).setEndPos(new_size); try mf.ensureTotalCapacity(@intCast(new_size)); ni.setLocationAssumeCapacity(mf, old_offset, new_size); return; @@ -1059,12 +1071,3 @@ fn verifyNode(mf: *MappedFile, parent_ni: Node.Index) void { ni = node.next; } } - -const assert = std.debug.assert; -const builtin = @import("builtin"); -const is_linux = builtin.os.tag == .linux; -const is_windows = builtin.os.tag == .windows; -const linux = std.os.linux; -const MappedFile = @This(); -const std = @import("std"); -const windows = std.os.windows; diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 160e6cdcc6..7ab1e0eb4b 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -20,6 +20,7 @@ const native_endian = builtin.cpu.arch.endian(); const build_options = @import("build_options"); const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const Cache = std.Build.Cache; const Path = Cache.Path; @@ -3001,9 +3002,9 @@ pub fn createEmpty( .read = true, .mode = if (fs.has_executable_bit) if (target.os.tag == .wasi and output_mode == .Exe) - fs.File.default_mode | 0b001_000_000 + Io.File.default_mode | 0b001_000_000 else - fs.File.default_mode + Io.File.default_mode else 0, }); diff --git a/src/link/tapi.zig b/src/link/tapi.zig 
index 4c1471a6b4..fff25b7544 100644 --- a/src/link/tapi.zig +++ b/src/link/tapi.zig @@ -1,10 +1,10 @@ const std = @import("std"); -const fs = std.fs; +const Io = std.Io; const mem = std.mem; const log = std.log.scoped(.tapi); -const yaml = @import("tapi/yaml.zig"); +const Allocator = std.mem.Allocator; -const Allocator = mem.Allocator; +const yaml = @import("tapi/yaml.zig"); const Yaml = yaml.Yaml; const VersionField = union(enum) { @@ -130,7 +130,7 @@ pub const Tbd = union(enum) { pub const TapiError = error{ NotLibStub, InputOutput, -} || yaml.YamlError || std.fs.File.PReadError; +} || yaml.YamlError || Io.File.PReadError; pub const LibStub = struct { /// Underlying memory for stub's contents. @@ -139,7 +139,7 @@ pub const LibStub = struct { /// Typed contents of the tbd file. inner: []Tbd, - pub fn loadFromFile(allocator: Allocator, file: fs.File) TapiError!LibStub { + pub fn loadFromFile(allocator: Allocator, file: Io.File) TapiError!LibStub { const filesize = blk: { const stat = file.stat() catch break :blk std.math.maxInt(u32); break :blk @min(stat.size, std.math.maxInt(u32)); -- cgit v1.2.3 From 8328de24f13e21e325207b19288a143854df50df Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 6 Dec 2025 17:52:57 -0800 Subject: update all occurrences of openFile to receive an io instance --- lib/compiler/aro/aro/Compilation.zig | 4 +-- lib/compiler/aro/aro/Driver/Filesystem.zig | 2 +- lib/compiler/aro/aro/Toolchain.zig | 6 ++-- lib/compiler/objcopy.zig | 2 +- lib/compiler/std-docs.zig | 2 +- lib/std/Build/Cache.zig | 8 ++++-- lib/std/Build/Cache/Path.zig | 8 ++---- lib/std/Build/Fuzz.zig | 4 +-- lib/std/Build/Step/Run.zig | 6 ++-- lib/std/Build/Step/UpdateSourceFiles.zig | 2 +- lib/std/Build/Step/WriteFile.zig | 6 ++-- lib/std/Build/WebServer.zig | 4 +-- lib/std/Io/Dir.zig | 2 +- lib/std/Io/test.zig | 2 +- lib/std/Thread.zig | 4 +-- lib/std/crypto/Certificate/Bundle.zig | 2 +- lib/std/debug/ElfFile.zig | 2 +- lib/std/debug/Info.zig | 2 +- lib/std/debug/MachOFile.zig | 2 +- lib/std/debug/SelfInfo/Elf.zig | 4 +-- lib/std/debug/SelfInfo/MachO.zig | 2 +- lib/std/debug/SelfInfo/Windows.zig | 6 ++-- lib/std/fs.zig | 17 ----------- lib/std/fs/test.zig | 46 +++++++++++++++--------------- lib/std/os/linux/IoUring.zig | 6 ++-- lib/std/posix/test.zig | 8 +++--- lib/std/zig/system.zig | 6 ++-- src/Compilation.zig | 8 +++--- src/Package/Fetch.zig | 6 ++-- src/Package/Fetch/git.zig | 2 +- src/Zcu.zig | 2 +- src/Zcu/PerThread.zig | 4 +-- src/fmt.zig | 2 +- src/introspect.zig | 4 +-- src/link.zig | 24 ++++++++-------- src/link/Coff.zig | 4 +-- src/link/Elf2.zig | 14 +++++---- src/link/MachO.zig | 8 ++++-- src/link/MachO/relocatable.zig | 7 +++-- src/link/MappedFile.zig | 2 +- src/main.zig | 10 +++---- 41 files changed, 124 insertions(+), 138 deletions(-) (limited to 'lib/std/debug/ElfFile.zig') diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index c31caefb0f..09e4861d13 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -1641,7 +1641,7 @@ fn addSourceFromPathExtra(comp: *Compilation, path: []const u8, kind: Source.Kin const io = comp.io; - const file = try comp.cwd.openFile(path, .{}); + const file = try comp.cwd.openFile(io, path, .{}); defer file.close(io); return comp.addSourceFromFile(file, path, kind); } @@ -1975,7 +1975,7 @@ fn getPathContents(comp: *Compilation, path: []const u8, limit: Io.Limit) ![]u8 const io = comp.io; - const file = try comp.cwd.openFile(path, .{}); + const file = try comp.cwd.openFile(io, 
path, .{}); defer file.close(io); return comp.getFileContents(file, limit); } diff --git a/lib/compiler/aro/aro/Driver/Filesystem.zig b/lib/compiler/aro/aro/Driver/Filesystem.zig index 19ac9bfe41..b0bdbb7e21 100644 --- a/lib/compiler/aro/aro/Driver/Filesystem.zig +++ b/lib/compiler/aro/aro/Driver/Filesystem.zig @@ -213,7 +213,7 @@ pub const Filesystem = union(enum) { pub fn readFile(fs: Filesystem, io: Io, path: []const u8, buf: []u8) ?[]const u8 { return switch (fs) { .real => |cwd| { - const file = cwd.openFile(path, .{}) catch return null; + const file = cwd.openFile(io, path, .{}) catch return null; defer file.close(io); const bytes_read = file.readAll(buf) catch return null; diff --git a/lib/compiler/aro/aro/Toolchain.zig b/lib/compiler/aro/aro/Toolchain.zig index ae84369205..95a8baba77 100644 --- a/lib/compiler/aro/aro/Toolchain.zig +++ b/lib/compiler/aro/aro/Toolchain.zig @@ -524,12 +524,12 @@ pub fn addBuiltinIncludeDir(tc: *const Toolchain) !void { /// Otherwise returns a slice of `buf`. If the file is larger than `buf` partial contents are returned pub fn readFile(tc: *const Toolchain, path: []const u8, buf: []u8) ?[]const u8 { const comp = tc.driver.comp; - return comp.cwd.adaptToNewApi().readFile(comp.io, path, buf) catch null; + return comp.cwd.readFile(comp.io, path, buf) catch null; } pub fn exists(tc: *const Toolchain, path: []const u8) bool { const comp = tc.driver.comp; - comp.cwd.adaptToNewApi().access(comp.io, path, .{}) catch return false; + comp.cwd.access(comp.io, path, .{}) catch return false; return true; } @@ -547,7 +547,7 @@ pub fn canExecute(tc: *const Toolchain, path: []const u8) bool { } const comp = tc.driver.comp; - comp.cwd.adaptToNewApi().access(comp.io, path, .{ .execute = true }) catch return false; + comp.cwd.access(comp.io, path, .{ .execute = true }) catch return false; // Todo: ensure path is not a directory return true; } diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index c360ea8df0..485e644daa 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -157,7 +157,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void const stat = input_file.stat() catch |err| fatal("failed to stat {s}: {t}", .{ input, err }); - var in: File.Reader = .initSize(input_file.adaptToNewApi(), io, &input_buffer, stat.size); + var in: File.Reader = .initSize(input_file, io, &input_buffer, stat.size); const elf_hdr = std.elf.Header.read(&in.interface) catch |err| switch (err) { error.ReadFailed => fatal("unable to read {s}: {t}", .{ input, in.err.? 
}), diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index f1ca7fb5bb..e4efac28cd 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -225,7 +225,7 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void { }, else => continue, } - var file = try entry.dir.openFile(entry.basename, .{}); + var file = try entry.dir.openFile(io, entry.basename, .{}); defer file.close(io); const stat = try file.stat(); var file_reader: std.Io.File.Reader = .{ diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index b06547dc53..42459c033d 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -502,6 +502,8 @@ pub const Manifest = struct { @memcpy(manifest_file_path[0..self.hex_digest.len], &self.hex_digest); manifest_file_path[hex_digest_len..][0..ext.len].* = ext.*; + const io = self.cache.io; + // We'll try to open the cache with an exclusive lock, but if that would block // and `want_shared_lock` is set, a shared lock might be sufficient, so we'll // open with a shared lock instead. @@ -517,7 +519,7 @@ pub const Manifest = struct { break; } else |err| switch (err) { error.WouldBlock => { - self.manifest_file = self.cache.manifest_dir.openFile(&manifest_file_path, .{ + self.manifest_file = self.cache.manifest_dir.openFile(io, &manifest_file_path, .{ .mode = .read_write, .lock = .shared, }) catch |e| { @@ -757,7 +759,7 @@ pub const Manifest = struct { const pp = cache_hash_file.prefixed_path; const dir = self.cache.prefixes()[pp.prefix].handle; - const this_file = dir.openFile(pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) { + const this_file = dir.openFile(io, pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) { error.FileNotFound => { // Every digest before this one has been populated successfully. 
return .{ .miss = .{ .file_digests_populated = idx } }; @@ -900,7 +902,7 @@ pub const Manifest = struct { } else { const pp = ch_file.prefixed_path; const dir = self.cache.prefixes()[pp.prefix].handle; - const handle = try dir.openFile(pp.sub_path, .{}); + const handle = try dir.openFile(io, pp.sub_path, .{}); defer handle.close(io); return populateFileHashHandle(self, ch_file, handle); } diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index f6f76c1e8f..60211670de 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -59,18 +59,14 @@ pub fn joinStringZ(p: Path, gpa: Allocator, sub_path: []const u8) Allocator.Erro return p.root_dir.joinZ(gpa, parts); } -pub fn openFile( - p: Path, - sub_path: []const u8, - flags: Io.File.OpenFlags, -) !Io.File { +pub fn openFile(p: Path, io: Io, sub_path: []const u8, flags: Io.File.OpenFlags) !Io.File { var buf: [fs.max_path_bytes]u8 = undefined; const joined_path = if (p.sub_path.len == 0) sub_path else p: { break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{ p.sub_path, sub_path, }) catch return error.NameTooLong; }; - return p.root_dir.handle.openFile(joined_path, flags); + return p.root_dir.handle.openFile(io, joined_path, flags); } pub fn openDir( diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig index db83f393fd..c95c9bd354 100644 --- a/lib/std/Build/Fuzz.zig +++ b/lib/std/Build/Fuzz.zig @@ -405,7 +405,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO .root_dir = run_step.step.owner.cache_root, .sub_path = "v/" ++ std.fmt.hex(coverage_id), }; - var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| { + var coverage_file = coverage_file_path.root_dir.handle.openFile(io, coverage_file_path.sub_path, .{}) catch |err| { log.err("step '{s}': failed to load coverage file '{f}': {t}", .{ run_step.step.name, coverage_file_path, err, }); @@ -528,7 +528,7 @@ pub fn waitAndPrintReport(fuzz: *Fuzz) void { .root_dir = cov.run.step.owner.cache_root, .sub_path = "v/" ++ std.fmt.hex(cov.id), }; - var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| { + var coverage_file = coverage_file_path.root_dir.handle.openFile(io, coverage_file_path.sub_path, .{}) catch |err| { fatal("step '{s}': failed to load coverage file '{f}': {t}", .{ cov.run.step.name, coverage_file_path, err, }); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 1df6f42a35..7c54c8048e 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -846,7 +846,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { errdefer result.deinit(); result.writer.writeAll(file_plp.prefix) catch return error.OutOfMemory; - const file = file_path.root_dir.handle.openFile(file_path.subPathOrDot(), .{}) catch |err| { + const file = file_path.root_dir.handle.openFile(io, file_path.subPathOrDot(), .{}) catch |err| { return step.fail( "unable to open input file '{f}': {t}", .{ file_path, err }, @@ -1111,7 +1111,7 @@ pub fn rerunInFuzzMode( errdefer result.deinit(); result.writer.writeAll(file_plp.prefix) catch return error.OutOfMemory; - const file = try file_path.root_dir.handle.openFile(file_path.subPathOrDot(), .{}); + const file = try file_path.root_dir.handle.openFile(io, file_path.subPathOrDot(), .{}); defer file.close(io); var buf: [1024]u8 = undefined; @@ -2185,7 +2185,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult { 
}, .lazy_path => |lazy_path| { const path = lazy_path.getPath3(b, &run.step); - const file = path.root_dir.handle.openFile(path.subPathOrDot(), .{}) catch |err| { + const file = path.root_dir.handle.openFile(io, path.subPathOrDot(), .{}) catch |err| { return run.step.fail("unable to open stdin file: {s}", .{@errorName(err)}); }; defer file.close(io); diff --git a/lib/std/Build/Step/UpdateSourceFiles.zig b/lib/std/Build/Step/UpdateSourceFiles.zig index 7cdb521d21..f5d95182e9 100644 --- a/lib/std/Build/Step/UpdateSourceFiles.zig +++ b/lib/std/Build/Step/UpdateSourceFiles.zig @@ -99,7 +99,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .cwd(), io, source_path, - b.build_root.handle.adaptToNewApi(), + b.build_root.handle, output_source_file.sub_path, .{}, ) catch |err| { diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 201b132271..2834f18564 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -284,7 +284,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { }, .copy => |file_source| { const source_path = file_source.getPath2(b, step); - const prev_status = Io.Dir.updateFile(.cwd(), io, source_path, cache_dir.adaptToNewApi(), file.sub_path, .{}) catch |err| { + const prev_status = Io.Dir.updateFile(.cwd(), io, source_path, cache_dir, file.sub_path, .{}) catch |err| { return step.fail("unable to update file from '{s}' to '{f}{s}{c}{s}': {t}", .{ source_path, b.cache_root, cache_path, fs.path.sep, file.sub_path, err, }); @@ -321,10 +321,10 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .directory => try cache_dir.makePath(dest_path), .file => { const prev_status = Io.Dir.updateFile( - src_entry_path.root_dir.handle.adaptToNewApi(), + src_entry_path.root_dir.handle, io, src_entry_path.sub_path, - cache_dir.adaptToNewApi(), + cache_dir, dest_path, .{}, ) catch |err| { diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index f91075b444..9938d5e1b0 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -504,14 +504,14 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons var archiver: std.tar.Writer = .{ .underlying_writer = &response.writer }; for (paths) |path| { - var file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| { + var file = path.root_dir.handle.openFile(io, path.sub_path, .{}) catch |err| { log.err("failed to open '{f}': {s}", .{ path, @errorName(err) }); continue; }; defer file.close(io); const stat = try file.stat(); var read_buffer: [1024]u8 = undefined; - var file_reader: Io.File.Reader = .initSize(file.adaptToNewApi(), io, &read_buffer, stat.size); + var file_reader: Io.File.Reader = .initSize(file, io, &read_buffer, stat.size); // TODO: this logic is completely bogus -- obviously so, because `path.root_dir.path` can // be cwd-relative. This is also related to why linkification doesn't work in the fuzzer UI: diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig index 1c28c2f9b3..9ae636d4a6 100644 --- a/lib/std/Io/Dir.zig +++ b/lib/std/Io/Dir.zig @@ -481,7 +481,7 @@ pub fn updateFile( } var buffer: [1000]u8 = undefined; // Used only when direct fd-to-fd is not available. 
- var atomic_file = try Dir.atomicFile(.adaptFromNewApi(dest_dir), dest_path, .{ + var atomic_file = try Dir.atomicFile(dest_dir, dest_path, .{ .permissions = actual_permissions, .write_buffer = &buffer, }); diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index b922acc333..9f21fe50e7 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -44,7 +44,7 @@ test "write a file, read it, then delete it" { } { - var file = try tmp.dir.openFile(tmp_file_name, .{}); + var file = try tmp.dir.openFile(io, tmp_file_name, .{}); defer file.close(io); const file_size = try file.getEndPos(); diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 8d8e5979df..102bb59415 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -208,7 +208,7 @@ pub fn setName(self: Thread, io: Io, name: []const u8) SetNameError!void { var buf: [32]u8 = undefined; const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()}); - const file = try std.fs.cwd().openFile(path, .{ .mode = .write_only }); + const file = try std.fs.cwd().openFile(io, path, .{ .mode = .write_only }); defer file.close(io); try file.writeAll(name); @@ -325,7 +325,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co var threaded: std.Io.Threaded = .init_single_threaded; const io = threaded.ioBasic(); - const file = try std.fs.cwd().openFile(path, .{}); + const file = try std.fs.cwd().openFile(io, path, .{}); defer file.close(io); var file_reader = file.readerStreaming(io, &.{}); diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig index df671e6f63..eb60ad37a8 100644 --- a/lib/std/crypto/Certificate/Bundle.zig +++ b/lib/std/crypto/Certificate/Bundle.zig @@ -208,7 +208,7 @@ pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp, i else => continue, } - try addCertsFromFilePath(cb, gpa, io, now, iterable_dir.adaptToNewApi(), entry.name); + try addCertsFromFilePath(cb, gpa, io, now, iterable_dir, entry.name); } } diff --git a/lib/std/debug/ElfFile.zig b/lib/std/debug/ElfFile.zig index 5dbae18130..a0f1188ade 100644 --- a/lib/std/debug/ElfFile.zig +++ b/lib/std/debug/ElfFile.zig @@ -375,7 +375,7 @@ fn loadSeparateDebugFile( args: anytype, ) Allocator.Error!?[]align(std.heap.page_size_min) const u8 { const path = try std.fmt.allocPrint(arena, fmt, args); - const elf_file = std.fs.cwd().openFile(path, .{}) catch return null; + const elf_file = std.fs.cwd().openFile(io, path, .{}) catch return null; defer elf_file.close(io); const result = loadInner(arena, elf_file, opt_crc) catch |err| switch (err) { diff --git a/lib/std/debug/Info.zig b/lib/std/debug/Info.zig index da7656e626..6b31f03f72 100644 --- a/lib/std/debug/Info.zig +++ b/lib/std/debug/Info.zig @@ -39,7 +39,7 @@ pub fn load( ) LoadError!Info { switch (format) { .elf => { - var file = try path.root_dir.handle.openFile(path.sub_path, .{}); + var file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); defer file.close(io); var elf_file: ElfFile = try .load(gpa, file, null, &.none); diff --git a/lib/std/debug/MachOFile.zig b/lib/std/debug/MachOFile.zig index 3f0f620a90..ae904c0aec 100644 --- a/lib/std/debug/MachOFile.zig +++ b/lib/std/debug/MachOFile.zig @@ -512,7 +512,7 @@ fn loadOFile(gpa: Allocator, io: Io, o_file_name: []const u8) !OFile { /// Uses `mmap` to map the file at `path` into memory. 
fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 { - const file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) { + const file = std.fs.cwd().openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return error.ReadFailed, }; diff --git a/lib/std/debug/SelfInfo/Elf.zig b/lib/std/debug/SelfInfo/Elf.zig index 155dac6fb8..124768687c 100644 --- a/lib/std/debug/SelfInfo/Elf.zig +++ b/lib/std/debug/SelfInfo/Elf.zig @@ -325,7 +325,7 @@ const Module = struct { } fn loadElf(mod: *Module, gpa: Allocator, io: Io) Error!LoadedElf { const load_result = if (mod.name.len > 0) res: { - var file = std.fs.cwd().openFile(mod.name, .{}) catch return error.MissingDebugInfo; + var file = std.fs.cwd().openFile(io, mod.name, .{}) catch return error.MissingDebugInfo; defer file.close(io); break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(mod.name)); } else res: { @@ -334,7 +334,7 @@ const Module = struct { else => return error.ReadFailed, }; defer gpa.free(path); - var file = std.fs.cwd().openFile(path, .{}) catch return error.MissingDebugInfo; + var file = std.fs.cwd().openFile(io, path, .{}) catch return error.MissingDebugInfo; defer file.close(io); break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(path)); }; diff --git a/lib/std/debug/SelfInfo/MachO.zig b/lib/std/debug/SelfInfo/MachO.zig index 2491cf416c..15da616f3b 100644 --- a/lib/std/debug/SelfInfo/MachO.zig +++ b/lib/std/debug/SelfInfo/MachO.zig @@ -616,7 +616,7 @@ test { /// Uses `mmap` to map the file at `path` into memory. fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 { - const file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) { + const file = std.fs.cwd().openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return error.ReadFailed, }; diff --git a/lib/std/debug/SelfInfo/Windows.zig b/lib/std/debug/SelfInfo/Windows.zig index 3af7223293..c7f9d8c352 100644 --- a/lib/std/debug/SelfInfo/Windows.zig +++ b/lib/std/debug/SelfInfo/Windows.zig @@ -387,7 +387,7 @@ const Module = struct { const section_view = section_view_ptr.?[0..coff_len]; coff_obj = coff.Coff.init(section_view, false) catch return error.InvalidDebugInfo; break :mapped .{ - .file = .adaptFromNewApi(coff_file), + .file = coff_file, .section_handle = section_handle, .section_view = section_view, }; @@ -432,7 +432,7 @@ const Module = struct { break :pdb null; }; const pdb_file_open_result = if (fs.path.isAbsolute(path)) res: { - break :res std.fs.cwd().openFile(path, .{}); + break :res std.fs.cwd().openFile(io, path, .{}); } else res: { const self_dir = fs.selfExeDirPathAlloc(gpa) catch |err| switch (err) { error.OutOfMemory, error.Unexpected => |e| return e, @@ -441,7 +441,7 @@ const Module = struct { defer gpa.free(self_dir); const abs_path = try fs.path.join(gpa, &.{ self_dir, path }); defer gpa.free(abs_path); - break :res std.fs.cwd().openFile(abs_path, .{}); + break :res std.fs.cwd().openFile(io, abs_path, .{}); }; const pdb_file = pdb_file_open_result catch |err| switch (err) { error.FileNotFound, error.IsDir => break :pdb null, diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 9472e5d2a5..cb4daf7c50 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -287,23 +287,6 @@ pub fn symLinkAbsoluteW( return windows.CreateSymbolicLink(null, mem.span(sym_link_path_w), mem.span(target_path_w), flags.is_directory); } -pub const OpenSelfExeError = 
Io.File.OpenSelfExeError; - -/// Deprecated in favor of `Io.File.openSelfExe`. -pub fn openSelfExe(flags: File.OpenFlags) OpenSelfExeError!File { - if (native_os == .linux or native_os == .serenity or native_os == .windows) { - var threaded: Io.Threaded = .init_single_threaded; - const io = threaded.ioBasic(); - return .adaptFromNewApi(try Io.File.openSelfExe(io, flags)); - } - // Use of max_path_bytes here is valid as the resulting path is immediately - // opened with no modification. - var buf: [max_path_bytes]u8 = undefined; - const self_exe_path = try selfExePath(&buf); - buf[self_exe_path.len] = 0; - return openFileAbsolute(buf[0..self_exe_path.len :0], flags); -} - // This is `posix.ReadLinkError || posix.RealPathError` with impossible errors excluded pub const SelfExePathError = error{ FileNotFound, diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 36ccd3a6be..7d566da0e9 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -855,7 +855,7 @@ test "directory operations on files" { } // ensure the file still exists and is a file as a sanity check - file = try ctx.dir.openFile(test_file_name, .{}); + file = try ctx.dir.openFile(io, test_file_name, .{}); const stat = try file.stat(); try testing.expectEqual(File.Kind.file, stat.kind); file.close(io); @@ -895,12 +895,12 @@ test "file operations on directories" { if (native_os == .wasi and builtin.link_libc) { // wasmtime unexpectedly succeeds here, see https://github.com/ziglang/zig/issues/20747 - const handle = try ctx.dir.openFile(test_dir_name, .{ .mode = .read_write }); + const handle = try ctx.dir.openFile(io, test_dir_name, .{ .mode = .read_write }); handle.close(io); } else { // Note: The `.mode = .read_write` is necessary to ensure the error occurs on all platforms. // TODO: Add a read-only test as well, see https://github.com/ziglang/zig/issues/5732 - try testing.expectError(error.IsDir, ctx.dir.openFile(test_dir_name, .{ .mode = .read_write })); + try testing.expectError(error.IsDir, ctx.dir.openFile(io, test_dir_name, .{ .mode = .read_write })); } if (ctx.path_type == .absolute and comptime PathType.absolute.isSupported(builtin.os)) { @@ -973,8 +973,8 @@ test "Dir.rename files" { try ctx.dir.rename(test_file_name, renamed_test_file_name); // Ensure the file was renamed - try testing.expectError(error.FileNotFound, ctx.dir.openFile(test_file_name, .{})); - file = try ctx.dir.openFile(renamed_test_file_name, .{}); + try testing.expectError(error.FileNotFound, ctx.dir.openFile(io, test_file_name, .{})); + file = try ctx.dir.openFile(io, renamed_test_file_name, .{}); file.close(io); // Rename to self succeeds @@ -986,8 +986,8 @@ test "Dir.rename files" { existing_file.close(io); try ctx.dir.rename(renamed_test_file_name, existing_file_path); - try testing.expectError(error.FileNotFound, ctx.dir.openFile(renamed_test_file_name, .{})); - file = try ctx.dir.openFile(existing_file_path, .{}); + try testing.expectError(error.FileNotFound, ctx.dir.openFile(io, renamed_test_file_name, .{})); + file = try ctx.dir.openFile(io, existing_file_path, .{}); file.close(io); } }.impl); @@ -1026,7 +1026,7 @@ test "Dir.rename directories" { // Ensure the directory was renamed and the file still exists in it try testing.expectError(error.FileNotFound, ctx.dir.openDir(test_dir_renamed_path, .{})); dir = try ctx.dir.openDir(test_dir_renamed_again_path, .{}); - file = try dir.openFile("test_file", .{}); + file = try dir.openFile(io, "test_file", .{}); file.close(io); dir.close(io); } @@ -1119,8 +1119,8 @@ test "rename" { try 
fs.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name); // ensure the file was renamed - try testing.expectError(error.FileNotFound, tmp_dir1.dir.openFile(test_file_name, .{})); - file = try tmp_dir2.dir.openFile(renamed_test_file_name, .{}); + try testing.expectError(error.FileNotFound, tmp_dir1.dir.openFile(io, test_file_name, .{})); + file = try tmp_dir2.dir.openFile(io, renamed_test_file_name, .{}); file.close(io); } @@ -1156,8 +1156,8 @@ test "renameAbsolute" { ); // ensure the file was renamed - try testing.expectError(error.FileNotFound, tmp_dir.dir.openFile(test_file_name, .{})); - file = try tmp_dir.dir.openFile(renamed_test_file_name, .{}); + try testing.expectError(error.FileNotFound, tmp_dir.dir.openFile(io, test_file_name, .{})); + file = try tmp_dir.dir.openFile(io, renamed_test_file_name, .{}); const stat = try file.stat(); try testing.expectEqual(File.Kind.file, stat.kind); file.close(io); @@ -1512,7 +1512,7 @@ test "setEndPos" { const file_name = "afile.txt"; try tmp.dir.writeFile(.{ .sub_path = file_name, .data = "ninebytes" }); - const f = try tmp.dir.openFile(file_name, .{ .mode = .read_write }); + const f = try tmp.dir.openFile(io, file_name, .{ .mode = .read_write }); defer f.close(io); const initial_size = try f.getEndPos(); @@ -1856,7 +1856,7 @@ test "read from locked file" { .lock = .exclusive, }); defer f.close(io); - const f2 = try ctx.dir.openFile(filename, .{}); + const f2 = try ctx.dir.openFile(io, filename, .{}); defer f2.close(io); var buffer: [1]u8 = undefined; if (builtin.os.tag == .windows) { @@ -2041,12 +2041,12 @@ test "'.' and '..' in Io.Dir functions" { try ctx.dir.copyFile(file_path, ctx.dir, copy_path, .{}); try ctx.dir.rename(copy_path, rename_path); - const renamed_file = try ctx.dir.openFile(rename_path, .{}); + const renamed_file = try ctx.dir.openFile(io, rename_path, .{}); renamed_file.close(io); try ctx.dir.deleteFile(rename_path); try ctx.dir.writeFile(.{ .sub_path = update_path, .data = "something" }); - var dir = ctx.dir.adaptToNewApi(); + var dir = ctx.dir; const prev_status = try dir.updateFile(io, file_path, dir, update_path, .{}); try testing.expectEqual(Io.Dir.PrevStatus.stale, prev_status); @@ -2186,7 +2186,7 @@ test "invalid UTF-8/WTF-8 paths" { try testing.expectError(expected_err, ctx.dir.access(invalid_path, .{})); - var dir = ctx.dir.adaptToNewApi(); + var dir = ctx.dir; try testing.expectError(expected_err, dir.updateFile(io, invalid_path, dir, invalid_path, .{})); try testing.expectError(expected_err, ctx.dir.copyFile(invalid_path, ctx.dir, invalid_path, .{})); @@ -2235,7 +2235,7 @@ test "read file non vectored" { try file_writer.interface.flush(); } - var file_reader: std.Io.File.Reader = .initAdapted(file, io, &.{}); + var file_reader: std.Io.File.Reader = .init(file, io, &.{}); var write_buffer: [100]u8 = undefined; var w: std.Io.Writer = .fixed(&write_buffer); @@ -2268,7 +2268,7 @@ test "seek keeping partial buffer" { } var read_buffer: [3]u8 = undefined; - var file_reader: Io.File.Reader = .initAdapted(file, io, &read_buffer); + var file_reader: Io.File.Reader = .init(file, io, &read_buffer); try testing.expectEqual(0, file_reader.logicalPos()); @@ -2301,7 +2301,7 @@ test "seekBy" { defer tmp_dir.cleanup(); try tmp_dir.dir.writeFile(.{ .sub_path = "blah.txt", .data = "let's test seekBy" }); - const f = try tmp_dir.dir.openFile("blah.txt", .{ .mode = .read_only }); + const f = try tmp_dir.dir.openFile(io, "blah.txt", .{ .mode = .read_only }); defer f.close(io); var reader = f.readerStreaming(io, 
&.{}); try reader.seekBy(2); @@ -2332,7 +2332,7 @@ test "seekTo flushes buffered data" { } var read_buffer: [16]u8 = undefined; - var file_reader: std.Io.File.Reader = .initAdapted(file, io, &read_buffer); + var file_reader: std.Io.File.Reader = .init(file, io, &read_buffer); var buf: [4]u8 = undefined; try file_reader.interface.readSliceAll(&buf); @@ -2347,7 +2347,7 @@ test "File.Writer sendfile with buffered contents" { { try tmp_dir.dir.writeFile(.{ .sub_path = "a", .data = "bcd" }); - const in = try tmp_dir.dir.openFile("a", .{}); + const in = try tmp_dir.dir.openFile(io, "a", .{}); defer in.close(io); const out = try tmp_dir.dir.createFile("b", .{}); defer out.close(io); @@ -2364,7 +2364,7 @@ test "File.Writer sendfile with buffered contents" { try out_w.interface.flush(); } - var check = try tmp_dir.dir.openFile("b", .{}); + var check = try tmp_dir.dir.openFile(io, "b", .{}); defer check.close(io); var check_buf: [4]u8 = undefined; var check_r = check.reader(io, &check_buf); diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig index e4a5bd3738..c7d3f35d40 100644 --- a/lib/std/os/linux/IoUring.zig +++ b/lib/std/os/linux/IoUring.zig @@ -3002,7 +3002,7 @@ test "renameat" { }, cqe); // Validate that the old file doesn't exist anymore - try testing.expectError(error.FileNotFound, tmp.dir.openFile(old_path, .{})); + try testing.expectError(error.FileNotFound, tmp.dir.openFile(io, old_path, .{})); // Validate that the new file exists with the proper content var new_file_data: [16]u8 = undefined; @@ -3057,7 +3057,7 @@ test "unlinkat" { }, cqe); // Validate that the file doesn't exist anymore - _ = tmp.dir.openFile(path, .{}) catch |err| switch (err) { + _ = tmp.dir.openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => {}, else => std.debug.panic("unexpected error: {}", .{err}), }; @@ -3154,7 +3154,7 @@ test "symlinkat" { }, cqe); // Validate that the symlink exist - _ = try tmp.dir.openFile(link_path, .{}); + _ = try tmp.dir.openFile(io, link_path, .{}); } test "linkat" { diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 82fa2c41d1..19313e3ff7 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -164,10 +164,10 @@ test "linkat with different directories" { // Test 1: link from file in subdir back up to target in parent directory try posix.linkat(tmp.dir.fd, target_name, subdir.fd, link_name, 0); - const efd = try tmp.dir.openFile(target_name, .{}); + const efd = try tmp.dir.openFile(io, target_name, .{}); defer efd.close(io); - const nfd = try subdir.openFile(link_name, .{}); + const nfd = try subdir.openFile(io, link_name, .{}); defer nfd.close(io); { @@ -429,7 +429,7 @@ test "mmap" { // Map the whole file { - const file = try tmp.dir.openFile(test_out_file, .{}); + const file = try tmp.dir.openFile(io, test_out_file, .{}); defer file.close(io); const data = try posix.mmap( @@ -454,7 +454,7 @@ test "mmap" { // Map the upper half of the file { - const file = try tmp.dir.openFile(test_out_file, .{}); + const file = try tmp.dir.openFile(io, test_out_file, .{}); defer file.close(io); const data = try posix.mmap( diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index 4f0c11797d..cc74da956a 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -817,7 +817,7 @@ fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion { // .dynstr section, and finding the max version number of symbols // that start with "GLIBC_2.". 
const glibc_so_basename = "libc.so.6"; - var file = dir.openFile(glibc_so_basename, .{}) catch |err| switch (err) { + var file = dir.openFile(io, glibc_so_basename, .{}) catch |err| switch (err) { error.NameTooLong => return error.Unexpected, error.BadPathName => return error.Unexpected, error.PipeBusy => return error.Unexpected, // Windows-only @@ -851,7 +851,7 @@ fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion { // Empirically, glibc 2.34 libc.so .dynstr section is 32441 bytes on my system. var buffer: [8000]u8 = undefined; - var file_reader: Io.File.Reader = .initAdapted(file, io, &buffer); + var file_reader: Io.File.Reader = .init(file, io, &buffer); return glibcVerFromSoFile(&file_reader) catch |err| switch (err) { error.InvalidElfMagic, @@ -1053,7 +1053,7 @@ fn detectAbiAndDynamicLinker(io: Io, cpu: Target.Cpu, os: Target.Os, query: Targ var is_elf_file = false; defer if (!is_elf_file) file.close(io); - file_reader = .initAdapted(file, io, &file_reader_buffer); + file_reader = .init(file, io, &file_reader_buffer); file_name = undefined; // it aliases file_reader_buffer const header = elf.Header.read(&file_reader.interface) catch |hdr_err| switch (hdr_err) { diff --git a/src/Compilation.zig b/src/Compilation.zig index d75cba5a11..24b994f608 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1104,7 +1104,7 @@ pub const CObject = struct { const source_line = source_line: { if (diag.src_loc.offset == 0 or diag.src_loc.column == 0) break :source_line 0; - const file = fs.cwd().openFile(file_name, .{}) catch break :source_line 0; + const file = fs.cwd().openFile(io, file_name, .{}) catch break :source_line 0; defer file.close(io); var buffer: [1024]u8 = undefined; var file_reader = file.reader(io, &buffer); @@ -1179,7 +1179,7 @@ pub const CObject = struct { }; var buffer: [1024]u8 = undefined; - const file = try fs.cwd().openFile(path, .{}); + const file = try fs.cwd().openFile(io, path, .{}); defer file.close(io); var file_reader = file.reader(io, &buffer); var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = &file_reader.interface }); @@ -5354,14 +5354,14 @@ fn docsCopyModule( }, else => continue, } - var file = mod_dir.openFile(entry.path, .{}) catch |err| { + var file = mod_dir.openFile(io, entry.path, .{}) catch |err| { return comp.lockAndSetMiscFailure(.docs_copy, "unable to open {f}{s}: {t}", .{ root.fmt(comp), entry.path, err, }); }; defer file.close(io); const stat = try file.stat(); - var file_reader: Io.File.Reader = .initSize(file.adaptToNewApi(), io, &buffer, stat.size); + var file_reader: Io.File.Reader = .initSize(file, io, &buffer, stat.size); archiver.writeFileTimestamp(entry.path, &file_reader, stat.mtime) catch |err| { return comp.lockAndSetMiscFailure(.docs_copy, "unable to archive {f}{s}: {t}", .{ diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 988282097b..3bd05120ff 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -390,7 +390,7 @@ pub fn run(f: *Fetch) RunError!void { var server_header_buffer: [init_resource_buffer_size]u8 = undefined; const file_err = if (dir_err == error.NotDir) e: { - if (fs.cwd().openFile(path_or_url, .{})) |file| { + if (fs.cwd().openFile(io, path_or_url, .{})) |file| { var resource: Resource = .{ .file = file.reader(io, &server_header_buffer) }; return f.runResource(path_or_url, &resource, null); } else |err| break :e err; @@ -995,7 +995,7 @@ fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u if (ascii.eqlIgnoreCase(uri.scheme, "file")) { const 
path = try uri.path.toRawMaybeAlloc(arena); - const file = f.parent_package_root.openFile(path, .{}) catch |err| { + const file = f.parent_package_root.openFile(io, path, .{}) catch |err| { return f.fail(f.location_tok, try eb.printString("unable to open '{f}{s}': {t}", .{ f.parent_package_root, path, err, })); @@ -1677,7 +1677,7 @@ fn hashFileFallible(io: Io, dir: Io.Dir, hashed_file: *HashedFile) HashedFile.Er switch (hashed_file.kind) { .file => { - var file = try dir.openFile(hashed_file.fs_path, .{}); + var file = try dir.openFile(io, hashed_file.fs_path, .{}); defer file.close(io); // Hard-coded false executable bit: https://github.com/ziglang/zig/issues/17463 hasher.update(&.{ 0, 0 }); diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index abaa8fef73..ccae9440e2 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -1714,7 +1714,7 @@ pub fn main() !void { const format = std.meta.stringToEnum(Oid.Format, args[1]) orelse return error.InvalidFormat; - var pack_file = try std.fs.cwd().openFile(args[2], .{}); + var pack_file = try std.fs.cwd().openFile(io, args[2], .{}); defer pack_file.close(io); var pack_file_buffer: [4096]u8 = undefined; var pack_file_reader = pack_file.reader(io, &pack_file_buffer); diff --git a/src/Zcu.zig b/src/Zcu.zig index cd4a8c7783..d2634a8962 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -1076,7 +1076,7 @@ pub const File = struct { var f = f: { const dir, const sub_path = file.path.openInfo(zcu.comp.dirs); - break :f try dir.openFile(sub_path, .{}); + break :f try dir.openFile(io, sub_path, .{}); }; defer f.close(io); diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 55d6a3861f..45b1302138 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -94,7 +94,7 @@ pub fn updateFile( // In any case we need to examine the stat of the file to determine the course of action. 
var source_file = f: { const dir, const sub_path = file.path.openInfo(comp.dirs); - break :f try dir.openFile(sub_path, .{}); + break :f try dir.openFile(io, sub_path, .{}); }; defer source_file.close(io); @@ -2466,7 +2466,7 @@ fn updateEmbedFileInner( var file = f: { const dir, const sub_path = ef.path.openInfo(zcu.comp.dirs); - break :f try dir.openFile(sub_path, .{}); + break :f try dir.openFile(io, sub_path, .{}); }; defer file.close(io); diff --git a/src/fmt.zig b/src/fmt.zig index 663d09e9cb..ce8a31fa4c 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -262,7 +262,7 @@ fn fmtPathFile( ) !void { const io = fmt.io; - const source_file = try dir.openFile(sub_path, .{}); + const source_file = try dir.openFile(io, sub_path, .{}); var file_closed = false; errdefer if (!file_closed) source_file.close(io); diff --git a/src/introspect.zig b/src/introspect.zig index 9b6797e7d8..d2faa9a55c 100644 --- a/src/introspect.zig +++ b/src/introspect.zig @@ -22,7 +22,7 @@ fn testZigInstallPrefix(io: Io, base_dir: Io.Dir) ?Cache.Directory { // Try lib/zig/std/std.zig const lib_zig = "lib" ++ fs.path.sep_str ++ "zig"; var test_zig_dir = base_dir.openDir(lib_zig, .{}) catch break :zig_dir; - const file = test_zig_dir.openFile(test_index_file, .{}) catch { + const file = test_zig_dir.openFile(io, test_index_file, .{}) catch { test_zig_dir.close(io); break :zig_dir; }; @@ -32,7 +32,7 @@ fn testZigInstallPrefix(io: Io, base_dir: Io.Dir) ?Cache.Directory { // Try lib/std/std.zig var test_zig_dir = base_dir.openDir("lib", .{}) catch return null; - const file = test_zig_dir.openFile(test_index_file, .{}) catch { + const file = test_zig_dir.openFile(io, test_index_file, .{}) catch { test_zig_dir.close(io); return null; }; diff --git a/src/link.zig b/src/link.zig index d5daf6fca7..073ec632c6 100644 --- a/src/link.zig +++ b/src/link.zig @@ -637,7 +637,7 @@ pub const File = struct { } } } - base.file = try emit.root_dir.handle.openFile(emit.sub_path, .{ .mode = .read_write }); + base.file = try emit.root_dir.handle.openFile(io, emit.sub_path, .{ .mode = .read_write }); }, .elf2, .coff2 => if (base.file == null) { const mf = if (base.cast(.elf2)) |elf| @@ -646,10 +646,10 @@ pub const File = struct { &coff.mf else unreachable; - mf.file = try base.emit.root_dir.handle.adaptToNewApi().openFile(io, base.emit.sub_path, .{ + mf.file = try base.emit.root_dir.handle.openFile(io, base.emit.sub_path, .{ .mode = .read_write, }); - base.file = .adaptFromNewApi(mf.file); + base.file = mf.file; try mf.ensureTotalCapacity(@intCast(mf.nodes.items[0].location().resolve(mf)[1])); }, .c, .spirv => dev.checkAny(&.{ .c_linker, .spirv_linker }), @@ -2007,7 +2007,7 @@ fn resolveLibInput( .sub_path = try std.fmt.allocPrint(arena, "lib{s}.tbd", .{lib_name}), }; try checked_paths.print(gpa, "\n {f}", .{test_path}); - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => break :tbd, else => |e| fatal("unable to search for tbd library '{f}': {s}", .{ test_path, @errorName(e) }), }; @@ -2043,7 +2043,7 @@ fn resolveLibInput( .sub_path = try std.fmt.allocPrint(arena, "lib{s}.so", .{lib_name}), }; try checked_paths.print(gpa, "\n {f}", .{test_path}); - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => break :so, 
else => |e| fatal("unable to search for so library '{f}': {s}", .{ test_path, @errorName(e), @@ -2061,7 +2061,7 @@ fn resolveLibInput( .sub_path = try std.fmt.allocPrint(arena, "lib{s}.a", .{lib_name}), }; try checked_paths.print(gpa, "\n {f}", .{test_path}); - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => break :mingw, else => |e| fatal("unable to search for static library '{f}': {s}", .{ test_path, @errorName(e) }), }; @@ -2115,7 +2115,7 @@ fn resolvePathInput( .static_library => return try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .static, color), .shared_library => return try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .dynamic, color), .object => { - var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err| + var file = pq.path.root_dir.handle.openFile(io, pq.path.sub_path, .{}) catch |err| fatal("failed to open object {f}: {s}", .{ pq.path, @errorName(err) }); errdefer file.close(io); try resolved_inputs.append(gpa, .{ .object = .{ @@ -2127,7 +2127,7 @@ fn resolvePathInput( return null; }, .res => { - var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err| + var file = pq.path.root_dir.handle.openFile(io, pq.path.sub_path, .{}) catch |err| fatal("failed to open windows resource {f}: {s}", .{ pq.path, @errorName(err) }); errdefer file.close(io); try resolved_inputs.append(gpa, .{ .res = .{ @@ -2164,7 +2164,7 @@ fn resolvePathInputLib( .static_library, .shared_library => true, else => false, }) { - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => return .no_match, else => |e| fatal("unable to search for {s} library '{f}': {s}", .{ @tagName(link_mode), std.fmt.alt(test_path, .formatEscapeChar), @errorName(e), @@ -2242,7 +2242,7 @@ fn resolvePathInputLib( return .ok; } - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => return .no_match, else => |e| fatal("unable to search for {s} library {f}: {s}", .{ @tagName(link_mode), test_path, @errorName(e), @@ -2253,7 +2253,7 @@ fn resolvePathInputLib( } pub fn openObject(io: Io, path: Path, must_link: bool, hidden: bool) !Input.Object { - var file = try path.root_dir.handle.openFile(path.sub_path, .{}); + var file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); errdefer file.close(io); return .{ .path = path, @@ -2264,7 +2264,7 @@ pub fn openObject(io: Io, path: Path, must_link: bool, hidden: bool) !Input.Obje } pub fn openDso(io: Io, path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso { - var file = try path.root_dir.handle.openFile(path.sub_path, .{}); + var file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); errdefer file.close(io); return .{ .path = path, diff --git a/src/link/Coff.zig b/src/link/Coff.zig index f33e0ccdea..e1d52fb7c4 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -632,7 +632,7 @@ fn create( }; const coff = try arena.create(Coff); - const file = try 
path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{ + const file = try path.root_dir.handle.createFile(comp.io, path.sub_path, .{ .read = true, .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode), }); @@ -644,7 +644,7 @@ fn create( .comp = comp, .emit = path, - .file = .adaptFromNewApi(file), + .file = file, .gc_sections = false, .print_gc_sections = false, .build_id = .none, diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig index e35444bc02..72fdb244a4 100644 --- a/src/link/Elf2.zig +++ b/src/link/Elf2.zig @@ -928,6 +928,7 @@ fn create( path: std.Build.Cache.Path, options: link.File.OpenOptions, ) !*Elf { + const io = comp.io; const target = &comp.root_mod.resolved_target.result; assert(target.ofmt == .elf); const class: std.elf.CLASS = switch (target.ptrBitWidth()) { @@ -973,11 +974,11 @@ fn create( }; const elf = try arena.create(Elf); - const file = try path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{ + const file = try path.root_dir.handle.createFile(io, path.sub_path, .{ .read = true, .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode), }); - errdefer file.close(comp.io); + errdefer file.close(io); elf.* = .{ .base = .{ .tag = .elf2, @@ -985,7 +986,7 @@ fn create( .comp = comp, .emit = path, - .file = .adaptFromNewApi(file), + .file = file, .gc_sections = false, .print_gc_sections = false, .build_id = .none, @@ -3325,12 +3326,13 @@ fn flushInputSection(elf: *Elf, isi: Node.InputSectionIndex) !void { const file_loc = isi.fileLocation(elf); if (file_loc.size == 0) return; const comp = elf.base.comp; + const io = comp.io; const gpa = comp.gpa; const ii = isi.input(elf); const path = ii.path(elf); - const file = try path.root_dir.handle.adaptToNewApi().openFile(comp.io, path.sub_path, .{}); - defer file.close(comp.io); - var fr = file.reader(comp.io, &.{}); + const file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); + defer file.close(io); + var fr = file.reader(io, &.{}); try fr.seekTo(file_loc.offset); var nw: MappedFile.Node.Writer = undefined; const si = isi.symbol(elf); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 93adc633ce..e837cc853a 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1126,7 +1126,9 @@ fn parseDependentDylibs(self: *MachO) !void { if (self.dylibs.items.len == 0) return; - const gpa = self.base.comp.gpa; + const comp = self.base.comp; + const gpa = comp.gpa; + const io = comp.io; const framework_dirs = self.framework_dirs; // TODO delete this, directories must instead be resolved by the frontend @@ -1183,7 +1185,7 @@ fn parseDependentDylibs(self: *MachO) !void { const path = if (existing_ext.len > 0) id.name[0 .. 
id.name.len - existing_ext.len] else id.name; for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| { test_path.clearRetainingCapacity(); - if (self.base.comp.sysroot) |root| { + if (comp.sysroot) |root| { try test_path.print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext }); } else { try test_path.print("{s}{s}", .{ path, ext }); @@ -1235,7 +1237,7 @@ fn parseDependentDylibs(self: *MachO) !void { .path = Path.initCwd(full_path), .weak = is_weak, }; - const file = try lib.path.root_dir.handle.openFile(lib.path.sub_path, .{}); + const file = try lib.path.root_dir.handle.openFile(io, lib.path.sub_path, .{}); const fh = try self.addFileHandle(file); const fat_arch = try self.parseFatFile(file, lib.path); const offset = if (fat_arch) |fa| fa.offset else 0; diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig index d2a6c2a3ab..0f42442640 100644 --- a/src/link/MachO/relocatable.zig +++ b/src/link/MachO/relocatable.zig @@ -1,6 +1,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void { - const gpa = macho_file.base.comp.gpa; - const diags = &macho_file.base.comp.link_diags; + const gpa = comp.gpa; + const io = comp.io; + const diags = &comp.link_diags; // TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list. var positionals = std.array_list.Managed(link.Input).init(gpa); @@ -19,7 +20,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat // debug info segments/sections (this is apparently by design by Apple), we copy // the *only* input file over. const path = positionals.items[0].path().?; - const in_file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| + const in_file = path.root_dir.handle.openFile(io, path.sub_path, .{}) catch |err| return diags.fail("failed to open {f}: {s}", .{ path, @errorName(err) }); const stat = in_file.stat() catch |err| return diags.fail("failed to stat {f}: {s}", .{ path, @errorName(err) }); diff --git a/src/link/MappedFile.zig b/src/link/MappedFile.zig index 7d4134ccaf..a61c6e764c 100644 --- a/src/link/MappedFile.zig +++ b/src/link/MappedFile.zig @@ -630,7 +630,7 @@ fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested // Resize the entire file if (ni == Node.Index.root) { try mf.ensureCapacityForSetLocation(gpa); - try Io.File.adaptFromNewApi(mf.file).setEndPos(new_size); + try mf.file.setEndPos(new_size); try mf.ensureTotalCapacity(@intCast(new_size)); ni.setLocationAssumeCapacity(mf, old_offset, new_size); return; diff --git a/src/main.zig b/src/main.zig index 3ca64881f8..b040b6c8ef 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4681,7 +4681,7 @@ fn cmdTranslateC( } else { const hex_digest = Cache.binToHex(result.digest); const out_zig_path = try fs.path.join(arena, &.{ "o", &hex_digest, translated_basename }); - const zig_file = comp.dirs.local_cache.handle.openFile(out_zig_path, .{}) catch |err| { + const zig_file = comp.dirs.local_cache.handle.openFile(io, out_zig_path, .{}) catch |err| { const path = comp.dirs.local_cache.path orelse "."; fatal("unable to open cached translated zig file '{s}{s}{s}': {s}", .{ path, @@ -6187,7 +6187,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void { const display_path = zig_source_path orelse ""; const source: [:0]const u8 = s: { var f = if (zig_source_path) |p| file: { - break :file fs.cwd().openFile(p, .{}) catch |err| { + break :file fs.cwd().openFile(io, p, .{}) catch |err| { 
fatal("unable to open file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) }); }; } else Io.File.stdin(); @@ -6494,7 +6494,7 @@ fn cmdDumpZir(arena: Allocator, io: Io, args: []const []const u8) !void { const cache_file = args[0]; - var f = fs.cwd().openFile(cache_file, .{}) catch |err| { + var f = fs.cwd().openFile(io, cache_file, .{}) catch |err| { fatal("unable to open zir cache file for dumping '{s}': {s}", .{ cache_file, @errorName(err) }); }; defer f.close(io); @@ -6541,7 +6541,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { const new_source_path = args[1]; const old_source = source: { - var f = fs.cwd().openFile(old_source_path, .{}) catch |err| + var f = fs.cwd().openFile(io, old_source_path, .{}) catch |err| fatal("unable to open old source file '{s}': {s}", .{ old_source_path, @errorName(err) }); defer f.close(io); var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer); @@ -6549,7 +6549,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { fatal("unable to read old source file '{s}': {s}", .{ old_source_path, @errorName(err) }); }; const new_source = source: { - var f = fs.cwd().openFile(new_source_path, .{}) catch |err| + var f = fs.cwd().openFile(io, new_source_path, .{}) catch |err| fatal("unable to open new source file '{s}': {s}", .{ new_source_path, @errorName(err) }); defer f.close(io); var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer); -- cgit v1.2.3 From f53248a40936ebc9aaf75ddbd16e67ebec05ab84 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 13:39:09 -0800 Subject: update all std.fs.cwd() to std.Io.Dir.cwd() --- lib/compiler/aro/aro/Compilation.zig | 8 +- lib/compiler/aro/aro/Driver.zig | 10 +-- lib/compiler/aro/aro/Parser.zig | 27 +++---- lib/compiler/aro/aro/Preprocessor.zig | 6 +- lib/compiler/aro/aro/Tokenizer.zig | 5 +- lib/compiler/aro/aro/Value.zig | 11 +-- lib/compiler/aro/main.zig | 2 +- lib/compiler/objcopy.zig | 4 +- lib/compiler/reduce.zig | 6 +- lib/compiler/resinator/cli.zig | 2 +- lib/compiler/resinator/compile.zig | 4 +- lib/compiler/resinator/main.zig | 22 +++--- lib/compiler/std-docs.zig | 2 +- lib/compiler/translate-c/main.zig | 8 +- lib/std/Build.zig | 10 +-- lib/std/Build/Cache.zig | 8 +- lib/std/Build/Step.zig | 17 ++-- lib/std/Build/Step/CheckFile.zig | 4 +- lib/std/Build/Step/ConfigHeader.zig | 8 +- lib/std/Build/Step/Options.zig | 15 ++-- lib/std/Build/Step/Run.zig | 2 +- lib/std/Build/Watch.zig | 16 ++-- lib/std/Io/File.zig | 8 ++ lib/std/Io/Threaded.zig | 9 +-- lib/std/Io/Writer.zig | 6 +- lib/std/Io/net/test.zig | 2 +- lib/std/Io/test.zig | 10 +-- lib/std/Thread.zig | 4 +- lib/std/crypto/Certificate/Bundle/macos.zig | 2 +- lib/std/crypto/codecs/asn1/test.zig | 4 +- lib/std/debug.zig | 20 ++--- lib/std/debug/ElfFile.zig | 2 +- lib/std/debug/MachOFile.zig | 2 +- lib/std/debug/SelfInfo/Elf.zig | 19 +++-- lib/std/debug/SelfInfo/MachO.zig | 2 +- lib/std/debug/SelfInfo/Windows.zig | 4 +- lib/std/dynamic_library.zig | 8 +- lib/std/fs/test.zig | 116 +++++++++++++--------------- lib/std/os/linux/IoUring.zig | 24 +++--- lib/std/os/linux/test.zig | 6 +- lib/std/os/windows.zig | 4 +- lib/std/posix.zig | 14 ++-- lib/std/posix/test.zig | 52 ++++++------- lib/std/process/Child.zig | 2 +- lib/std/std.zig | 2 +- lib/std/tar.zig | 14 ++-- lib/std/testing.zig | 2 +- lib/std/zig/LibCInstallation.zig | 12 +-- lib/std/zig/WindowsSdk.zig | 2 +- lib/std/zig/system.zig | 6 +- lib/std/zig/system/darwin/macos.zig | 7 +- lib/std/zip.zig | 4 +- 
src/Compilation.zig | 20 ++--- src/Package/Fetch.zig | 12 +-- src/Package/Fetch/git.zig | 12 +-- src/Zcu/PerThread.zig | 4 +- src/codegen/llvm.zig | 17 ++-- src/fmt.zig | 6 +- src/introspect.zig | 4 +- src/libs/freebsd.zig | 12 +-- src/libs/glibc.zig | 16 ++-- src/libs/mingw.zig | 17 ++-- src/libs/netbsd.zig | 12 +-- src/link/C.zig | 4 +- src/link/Coff.zig | 6 +- src/link/Elf.zig | 4 +- src/link/Lld.zig | 4 +- src/link/MachO.zig | 14 ++-- src/link/MachO/CodeSignature.zig | 2 +- src/link/SpirV.zig | 3 +- src/link/Wasm.zig | 4 +- src/main.zig | 36 ++++----- 72 files changed, 398 insertions(+), 377 deletions(-) (limited to 'lib/std/debug/ElfFile.zig') diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index 09e4861d13..b3e4d5544d 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -2253,7 +2253,7 @@ test "addSourceFromBuffer" { var arena: std.heap.ArenaAllocator = .init(std.testing.allocator); defer arena.deinit(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); const source = try comp.addSourceFromBuffer("path", str); @@ -2267,7 +2267,7 @@ test "addSourceFromBuffer" { var arena: std.heap.ArenaAllocator = .init(allocator); defer arena.deinit(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(allocator, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(allocator, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); _ = try comp.addSourceFromBuffer("path", "spliced\\\nbuffer\n"); @@ -2313,7 +2313,7 @@ test "addSourceFromBuffer - exhaustive check for carriage return elimination" { var buf: [alphabet.len]u8 = @splat(alphabet[0]); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); var source_count: u32 = 0; @@ -2341,7 +2341,7 @@ test "ignore BOM at beginning of file" { const Test = struct { fn run(arena: Allocator, buf: []const u8) !void { var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); const source = try comp.addSourceFromBuffer("file.c", buf); diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig index f933e3ce52..fec3cea0f8 100644 --- a/lib/compiler/aro/aro/Driver.zig +++ b/lib/compiler/aro/aro/Driver.zig @@ -1327,7 +1327,7 @@ fn processSource( const dep_file_name = try d.getDepFileName(source, writer_buf[0..std.fs.max_name_bytes]); const file = if (dep_file_name) |path| - d.comp.cwd.createFile(path, .{}) catch |er| + d.comp.cwd.createFile(io, path, .{}) catch |er| return d.fatal("unable to create dependency file '{s}': {s}", .{ path, errorDescription(er) }) else Io.File.stdout(); @@ -1352,7 +1352,7 @@ fn processSource( } const file = if (d.output_name) |some| - d.comp.cwd.createFile(some, .{}) catch |er| + d.comp.cwd.createFile(io, 
some, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ some, errorDescription(er) }) else Io.File.stdout(); @@ -1405,7 +1405,7 @@ fn processSource( defer assembly.deinit(gpa); if (d.only_preprocess_and_compile) { - const out_file = d.comp.cwd.createFile(out_file_name, .{}) catch |er| + const out_file = d.comp.cwd.createFile(io, out_file_name, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ out_file_name, errorDescription(er) }); defer out_file.close(io); @@ -1419,7 +1419,7 @@ fn processSource( // then assemble to out_file_name var assembly_name_buf: [std.fs.max_name_bytes]u8 = undefined; const assembly_out_file_name = try d.getRandomFilename(&assembly_name_buf, ".s"); - const out_file = d.comp.cwd.createFile(assembly_out_file_name, .{}) catch |er| + const out_file = d.comp.cwd.createFile(io, assembly_out_file_name, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ assembly_out_file_name, errorDescription(er) }); defer out_file.close(io); assembly.writeToFile(out_file) catch |er| @@ -1455,7 +1455,7 @@ fn processSource( }; defer obj.deinit(); - const out_file = d.comp.cwd.createFile(out_file_name, .{}) catch |er| + const out_file = d.comp.cwd.createFile(io, out_file_name, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ out_file_name, errorDescription(er) }); defer out_file.close(io); diff --git a/lib/compiler/aro/aro/Parser.zig b/lib/compiler/aro/aro/Parser.zig index 4a89e0d460..fc21ee4d0b 100644 --- a/lib/compiler/aro/aro/Parser.zig +++ b/lib/compiler/aro/aro/Parser.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const mem = std.mem; const Allocator = mem.Allocator; const assert = std.debug.assert; @@ -211,7 +212,7 @@ fn checkIdentifierCodepointWarnings(p: *Parser, codepoint: u21, loc: Source.Loca const prev_total = p.diagnostics.total; var sf = std.heap.stackFallback(1024, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var allocating: Io.Writer.Allocating = .init(sf.get()); defer allocating.deinit(); if (!char_info.isC99IdChar(codepoint)) { @@ -425,7 +426,7 @@ pub fn err(p: *Parser, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) if (p.diagnostics.effectiveKind(diagnostic) == .off) return; var sf = std.heap.stackFallback(1024, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var allocating: Io.Writer.Allocating = .init(sf.get()); defer allocating.deinit(); p.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; @@ -447,7 +448,7 @@ pub fn err(p: *Parser, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) }, p.pp.expansionSlice(tok_i), true); } -fn formatArgs(p: *Parser, w: *std.Io.Writer, fmt: []const u8, args: anytype) !void { +fn formatArgs(p: *Parser, w: *Io.Writer, fmt: []const u8, args: anytype) !void { var i: usize = 0; inline for (std.meta.fields(@TypeOf(args))) |arg_info| { const arg = @field(args, arg_info.name); @@ -476,13 +477,13 @@ fn formatArgs(p: *Parser, w: *std.Io.Writer, fmt: []const u8, args: anytype) !vo try w.writeAll(fmt[i..]); } -fn formatTokenId(w: *std.Io.Writer, fmt: []const u8, tok_id: Tree.Token.Id) !usize { +fn formatTokenId(w: *Io.Writer, fmt: []const u8, tok_id: Tree.Token.Id) !usize { const i = Diagnostics.templateIndex(w, fmt, "{tok_id}"); try w.writeAll(tok_id.symbol()); return i; } -fn formatQualType(p: *Parser, w: *std.Io.Writer, fmt: []const u8, qt: QualType) !usize { +fn formatQualType(p: *Parser, w: *Io.Writer, fmt: 
[]const u8, qt: QualType) !usize { const i = Diagnostics.templateIndex(w, fmt, "{qt}"); try w.writeByte('\''); try qt.print(p.comp, w); @@ -501,7 +502,7 @@ fn formatQualType(p: *Parser, w: *std.Io.Writer, fmt: []const u8, qt: QualType) return i; } -fn formatResult(p: *Parser, w: *std.Io.Writer, fmt: []const u8, res: Result) !usize { +fn formatResult(p: *Parser, w: *Io.Writer, fmt: []const u8, res: Result) !usize { const i = Diagnostics.templateIndex(w, fmt, "{value}"); switch (res.val.opt_ref) { .none => try w.writeAll("(none)"), @@ -524,7 +525,7 @@ const Normalized = struct { return .{ .str = str }; } - pub fn format(ctx: Normalized, w: *std.Io.Writer, fmt: []const u8) !usize { + pub fn format(ctx: Normalized, w: *Io.Writer, fmt: []const u8) !usize { const i = Diagnostics.templateIndex(w, fmt, "{normalized}"); var it: std.unicode.Utf8Iterator = .{ .bytes = ctx.str, @@ -558,7 +559,7 @@ const Codepoint = struct { return .{ .codepoint = codepoint }; } - pub fn format(ctx: Codepoint, w: *std.Io.Writer, fmt: []const u8) !usize { + pub fn format(ctx: Codepoint, w: *Io.Writer, fmt: []const u8) !usize { const i = Diagnostics.templateIndex(w, fmt, "{codepoint}"); try w.print("{X:0>4}", .{ctx.codepoint}); return i; @@ -572,7 +573,7 @@ const Escaped = struct { return .{ .str = str }; } - pub fn format(ctx: Escaped, w: *std.Io.Writer, fmt: []const u8) !usize { + pub fn format(ctx: Escaped, w: *Io.Writer, fmt: []const u8) !usize { const i = Diagnostics.templateIndex(w, fmt, "{s}"); try std.zig.stringEscape(ctx.str, w); return i; @@ -1453,7 +1454,7 @@ fn decl(p: *Parser) Error!bool { return true; } -fn staticAssertMessage(p: *Parser, cond_node: Node.Index, maybe_message: ?Result, allocating: *std.Io.Writer.Allocating) !?[]const u8 { +fn staticAssertMessage(p: *Parser, cond_node: Node.Index, maybe_message: ?Result, allocating: *Io.Writer.Allocating) !?[]const u8 { const w = &allocating.writer; const cond = cond_node.get(&p.tree); @@ -1526,7 +1527,7 @@ fn staticAssert(p: *Parser) Error!bool { } else { if (!res.val.toBool(p.comp)) { var sf = std.heap.stackFallback(1024, gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var allocating: Io.Writer.Allocating = .init(sf.get()); defer allocating.deinit(); if (p.staticAssertMessage(res_node, str, &allocating) catch return error.OutOfMemory) |message| { @@ -9719,7 +9720,7 @@ fn primaryExpr(p: *Parser) Error!?Result { qt = some.qt; } else if (p.func.qt) |func_qt| { var sf = std.heap.stackFallback(1024, gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var allocating: Io.Writer.Allocating = .init(sf.get()); defer allocating.deinit(); func_qt.printNamed(p.tokSlice(p.func.name), p.comp, &allocating.writer) catch return error.OutOfMemory; @@ -10608,7 +10609,7 @@ test "Node locations" { const arena = arena_state.allocator(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); const file = try comp.addSourceFromBuffer("file.c", diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig index d47727cbf0..e8343dc83a 100644 --- a/lib/compiler/aro/aro/Preprocessor.zig +++ b/lib/compiler/aro/aro/Preprocessor.zig @@ -3900,7 +3900,7 @@ test "Preserve pragma tokens sometimes" { defer arena.deinit(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = 
Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); try comp.addDefaultPragmaHandlers(); @@ -3967,7 +3967,7 @@ test "destringify" { var arena: std.heap.ArenaAllocator = .init(gpa); defer arena.deinit(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); var pp = Preprocessor.init(&comp, .default); defer pp.deinit(); @@ -4030,7 +4030,7 @@ test "Include guards" { const arena = arena_state.allocator(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(gpa, arena, std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(gpa, arena, std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); var pp = Preprocessor.init(&comp, .default); defer pp.deinit(); diff --git a/lib/compiler/aro/aro/Tokenizer.zig b/lib/compiler/aro/aro/Tokenizer.zig index c497c5ce82..198d49364a 100644 --- a/lib/compiler/aro/aro/Tokenizer.zig +++ b/lib/compiler/aro/aro/Tokenizer.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const Compilation = @import("Compilation.zig"); @@ -2326,7 +2327,7 @@ test "Tokenizer fuzz test" { fn testOne(_: @This(), input_bytes: []const u8) anyerror!void { var arena: std.heap.ArenaAllocator = .init(std.testing.allocator); defer arena.deinit(); - var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, Io.Dir.cwd()); defer comp.deinit(); const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes); @@ -2351,7 +2352,7 @@ test "Tokenizer fuzz test" { fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, langopts: ?LangOpts) !void { var arena: std.heap.ArenaAllocator = .init(std.testing.allocator); defer arena.deinit(); - var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, Io.Dir.cwd()); defer comp.deinit(); if (langopts) |provided| { comp.langopts = provided; diff --git a/lib/compiler/aro/aro/Value.zig b/lib/compiler/aro/aro/Value.zig index 25a2d1824f..14949ce03b 100644 --- a/lib/compiler/aro/aro/Value.zig +++ b/lib/compiler/aro/aro/Value.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; @@ -80,7 +81,7 @@ test "minUnsignedBits" { defer arena_state.deinit(); const arena = arena_state.allocator(); - var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, Io.Dir.cwd()); defer comp.deinit(); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); comp.target = .fromZigTarget(try std.zig.system.resolveTargetQuery(std.testing.io, target_query)); @@ -119,7 +120,7 @@ test "minSignedBits" { defer arena_state.deinit(); const arena = arena_state.allocator(); - var comp = 
Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, Io.Dir.cwd()); defer comp.deinit(); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); comp.target = .fromZigTarget(try std.zig.system.resolveTargetQuery(std.testing.io, target_query)); @@ -1080,7 +1081,7 @@ const NestedPrint = union(enum) { }, }; -pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void { +pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w: *Io.Writer) Io.Writer.Error!void { try w.writeByte('&'); try w.writeAll(base); if (!offset.isZero(comp)) { @@ -1089,7 +1090,7 @@ pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w } } -pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!?NestedPrint { +pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *Io.Writer) Io.Writer.Error!?NestedPrint { if (qt.is(comp, .bool)) { try w.writeAll(if (v.isZero(comp)) "false" else "true"); return null; @@ -1116,7 +1117,7 @@ pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *std.Io.Writer return null; } -pub fn printString(bytes: []const u8, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void { +pub fn printString(bytes: []const u8, qt: QualType, comp: *const Compilation, w: *Io.Writer) Io.Writer.Error!void { const size: Compilation.CharUnitSize = @enumFromInt(qt.childType(comp).sizeof(comp)); const without_null = bytes[0 .. bytes.len - @intFromEnum(size)]; try w.writeByte('"'); diff --git a/lib/compiler/aro/main.zig b/lib/compiler/aro/main.zig index 66c8add4a3..d1be1dbb21 100644 --- a/lib/compiler/aro/main.zig +++ b/lib/compiler/aro/main.zig @@ -59,7 +59,7 @@ pub fn main() u8 { } }, }; - var comp = Compilation.initDefault(gpa, arena, io, &diagnostics, std.fs.cwd()) catch |er| switch (er) { + var comp = Compilation.initDefault(gpa, arena, io, &diagnostics, Io.Dir.cwd()) catch |er| switch (er) { error.OutOfMemory => { std.debug.print("out of memory\n", .{}); if (fast_exit) process.exit(1); diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index 485e644daa..e48f76a6a6 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -152,7 +152,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void defer threaded.deinit(); const io = threaded.io(); - const input_file = fs.cwd().openFile(input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err }); + const input_file = Io.Dir.cwd().openFile(input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err }); defer input_file.close(io); const stat = input_file.stat() catch |err| fatal("failed to stat {s}: {t}", .{ input, err }); @@ -180,7 +180,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void const mode = if (out_fmt != .elf or only_keep_debug) Io.File.default_mode else stat.mode; - var output_file = try fs.cwd().createFile(output, .{ .mode = mode }); + var output_file = try Io.Dir.cwd().createFile(io, output, .{ .mode = mode }); defer output_file.close(io); var out = output_file.writer(&output_buffer); diff --git a/lib/compiler/reduce.zig b/lib/compiler/reduce.zig index bbd3d172b4..d3f33ad81a 100644 --- a/lib/compiler/reduce.zig +++ b/lib/compiler/reduce.zig @@ -233,7 +233,7 @@ pub fn main() !void { } } - 
try std.fs.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); + try Io.Dir.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); // std.debug.print("trying this code:\n{s}\n", .{rendered.items}); const interestingness = try runCheck(arena, interestingness_argv.items); @@ -274,7 +274,7 @@ pub fn main() !void { fixups.clearRetainingCapacity(); rendered.clearRetainingCapacity(); try tree.render(gpa, &rendered.writer, fixups); - try std.fs.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); + try Io.Dir.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); return std.process.cleanExit(); } @@ -398,7 +398,7 @@ fn transformationsToFixups( } fn parse(gpa: Allocator, file_path: []const u8) !Ast { - const source_code = std.fs.cwd().readFileAllocOptions( + const source_code = Io.Dir.cwd().readFileAllocOptions( file_path, gpa, .limited(std.math.maxInt(u32)), diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig index bb54289e3e..ae4ece2968 100644 --- a/lib/compiler/resinator/cli.zig +++ b/lib/compiler/resinator/cli.zig @@ -2003,7 +2003,7 @@ test "maybeAppendRC" { // Create the file so that it's found. In this scenario, .rc should not get // appended. - var file = try tmp.dir.createFile("foo", .{}); + var file = try tmp.dir.createFile(io, "foo", .{}); file.close(io); try options.maybeAppendRC(tmp.dir); try std.testing.expectEqualStrings("foo", options.input_source.filename); diff --git a/lib/compiler/resinator/compile.zig b/lib/compiler/resinator/compile.zig index 7dc77e5ee1..3e046a10c1 100644 --- a/lib/compiler/resinator/compile.zig +++ b/lib/compiler/resinator/compile.zig @@ -111,7 +111,7 @@ pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io try search_dirs.append(allocator, .{ .dir = root_dir, .path = try allocator.dupe(u8, root_dir_path) }); } } - // Re-open the passed in cwd since we want to be able to close it (std.fs.cwd() shouldn't be closed) + // Re-open the passed in cwd since we want to be able to close it (Io.Dir.cwd() shouldn't be closed) const cwd_dir = options.cwd.openDir(".", .{}) catch |err| { try options.diagnostics.append(.{ .err = .failed_to_open_cwd, @@ -406,7 +406,7 @@ pub const Compiler = struct { // `/test.bin` relative to include paths and instead only treats it as // an absolute path. 
if (std.fs.path.isAbsolute(path)) { - const file = try utils.openFileNotDir(std.fs.cwd(), path, .{}); + const file = try utils.openFileNotDir(Io.Dir.cwd(), path, .{}); errdefer file.close(io); if (self.dependencies) |dependencies| { diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index c726a06cf4..416abc2ab7 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -67,7 +67,7 @@ pub fn main() !void { }, else => |e| return e, }; - try options.maybeAppendRC(std.fs.cwd()); + try options.maybeAppendRC(Io.Dir.cwd()); if (!zig_integration) { // print any warnings/notes @@ -141,7 +141,7 @@ pub fn main() !void { if (!zig_integration) std.debug.unlockStderrWriter(); } - var comp = aro.Compilation.init(aro_arena, aro_arena, io, &diagnostics, std.fs.cwd()); + var comp = aro.Compilation.init(aro_arena, aro_arena, io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); var argv: std.ArrayList([]const u8) = .empty; @@ -196,7 +196,7 @@ pub fn main() !void { }; }, .filename => |input_filename| { - break :full_input std.fs.cwd().readFileAlloc(input_filename, gpa, .unlimited) catch |err| { + break :full_input Io.Dir.cwd().readFileAlloc(input_filename, gpa, .unlimited) catch |err| { try error_handler.emitMessage(gpa, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) }); std.process.exit(1); }; @@ -212,7 +212,7 @@ pub fn main() !void { try output_file.writeAll(full_input); }, .filename => |output_filename| { - try std.fs.cwd().writeFile(.{ .sub_path = output_filename, .data = full_input }); + try Io.Dir.cwd().writeFile(.{ .sub_path = output_filename, .data = full_input }); }, } return; @@ -277,7 +277,7 @@ pub fn main() !void { const output_buffered_stream = res_stream_writer.interface(); compile(gpa, io, final_input, output_buffered_stream, .{ - .cwd = std.fs.cwd(), + .cwd = Io.Dir.cwd(), .diagnostics = &diagnostics, .source_mappings = &mapping_results.mappings, .dependencies = maybe_dependencies, @@ -294,7 +294,7 @@ pub fn main() !void { .warn_instead_of_error_on_invalid_code_page = options.warn_instead_of_error_on_invalid_code_page, }) catch |err| switch (err) { error.ParseError, error.CompileError => { - try error_handler.emitDiagnostics(gpa, std.fs.cwd(), final_input, &diagnostics, mapping_results.mappings); + try error_handler.emitDiagnostics(gpa, Io.Dir.cwd(), final_input, &diagnostics, mapping_results.mappings); // Delete the output file on error res_stream.cleanupAfterError(io); std.process.exit(1); @@ -306,12 +306,12 @@ pub fn main() !void { // print any warnings/notes if (!zig_integration) { - diagnostics.renderToStdErr(std.fs.cwd(), final_input, mapping_results.mappings); + diagnostics.renderToStdErr(Io.Dir.cwd(), final_input, mapping_results.mappings); } // write the depfile if (options.depfile_path) |depfile_path| { - var depfile = std.fs.cwd().createFile(depfile_path, .{}) catch |err| { + var depfile = Io.Dir.cwd().createFile(io, depfile_path, .{}) catch |err| { try error_handler.emitMessage(gpa, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) }); std.process.exit(1); }; @@ -440,7 +440,7 @@ const IoStream = struct { // Delete the output file on error file.close(io); // Failing to delete is not really a big deal, so swallow any errors - std.fs.cwd().deleteFile(self.name) catch {}; + Io.Dir.cwd().deleteFile(self.name) catch {}; }, .stdio, .memory, .closed => return, } @@ -457,8 +457,8 @@ const IoStream = struct { switch (source) { .filename => |filename| return .{ .file = 
switch (io) { - .input => try openFileNotDir(std.fs.cwd(), filename, .{}), - .output => try std.fs.cwd().createFile(filename, .{}), + .input => try openFileNotDir(Io.Dir.cwd(), filename, .{}), + .output => try Io.Dir.cwd().createFile(io, filename, .{}), }, }, .stdio => |file| return .{ .stdio = file }, diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index e4efac28cd..87c4da9faa 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -40,7 +40,7 @@ pub fn main() !void { const zig_exe_path = argv.next().?; const global_cache_path = argv.next().?; - var lib_dir = try std.fs.cwd().openDir(zig_lib_directory, .{}); + var lib_dir = try Io.Dir.cwd().openDir(zig_lib_directory, .{}); defer lib_dir.close(io); var listen_port: u16 = 0; diff --git a/lib/compiler/translate-c/main.zig b/lib/compiler/translate-c/main.zig index 830c70e424..d0a873fd78 100644 --- a/lib/compiler/translate-c/main.zig +++ b/lib/compiler/translate-c/main.zig @@ -47,7 +47,7 @@ pub fn main() u8 { }; defer diagnostics.deinit(); - var comp = aro.Compilation.initDefault(gpa, arena, io, &diagnostics, std.fs.cwd()) catch |err| switch (err) { + var comp = aro.Compilation.initDefault(gpa, arena, io, &diagnostics, Io.Dir.cwd()) catch |err| switch (err) { error.OutOfMemory => { std.debug.print("ran out of memory initializing C compilation\n", .{}); if (fast_exit) process.exit(1); @@ -226,7 +226,7 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration const dep_file_name = try d.getDepFileName(source, out_buf[0..std.fs.max_name_bytes]); const file = if (dep_file_name) |path| - d.comp.cwd.createFile(path, .{}) catch |er| + d.comp.cwd.createFile(io, path, .{}) catch |er| return d.fatal("unable to create dependency file '{s}': {s}", .{ path, aro.Driver.errorDescription(er) }) else Io.File.stdout(); @@ -253,10 +253,10 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration if (d.output_name) |path| blk: { if (std.mem.eql(u8, path, "-")) break :blk; if (std.fs.path.dirname(path)) |dirname| { - std.fs.cwd().makePath(dirname) catch |err| + Io.Dir.cwd().makePath(dirname) catch |err| return d.fatal("failed to create path to '{s}': {s}", .{ path, aro.Driver.errorDescription(err) }); } - out_file = std.fs.cwd().createFile(path, .{}) catch |err| { + out_file = Io.Dir.cwd().createFile(io, path, .{}) catch |err| { return d.fatal("failed to create output file '{s}': {s}", .{ path, aro.Driver.errorDescription(err) }); }; close_out_file = true; diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 7bfdbb6449..cc2f70fd2f 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1702,13 +1702,13 @@ pub fn addCheckFile( pub fn truncateFile(b: *Build, dest_path: []const u8) (Io.Dir.MakeError || Io.Dir.StatFileError)!void { const io = b.graph.io; if (b.verbose) log.info("truncate {s}", .{dest_path}); - const cwd = fs.cwd(); - var src_file = cwd.createFile(dest_path, .{}) catch |err| switch (err) { + const cwd = Io.Dir.cwd(); + var src_file = cwd.createFile(io, dest_path, .{}) catch |err| switch (err) { error.FileNotFound => blk: { if (fs.path.dirname(dest_path)) |dirname| { try cwd.makePath(dirname); } - break :blk try cwd.createFile(dest_path, .{}); + break :blk try cwd.createFile(io, dest_path, .{}); }, else => |e| return e, }; @@ -1846,7 +1846,7 @@ pub fn runAllowFail( }; errdefer b.allocator.free(stdout); - const term = try child.wait(); + const term = try child.wait(io); switch (term) { .Exited => |code| { if (code != 0) { @@ -2193,7 +2193,7 @@ fn 
dependencyInner( const build_root: std.Build.Cache.Directory = .{ .path = build_root_string, - .handle = fs.cwd().openDir(build_root_string, .{}) catch |err| { + .handle = Io.Dir.cwd().openDir(build_root_string, .{}) catch |err| { std.debug.print("unable to open '{s}': {s}\n", .{ build_root_string, @errorName(err), }); diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 42459c033d..fdcb2ab714 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -508,7 +508,7 @@ pub const Manifest = struct { // and `want_shared_lock` is set, a shared lock might be sufficient, so we'll // open with a shared lock instead. while (true) { - if (self.cache.manifest_dir.createFile(&manifest_file_path, .{ + if (self.cache.manifest_dir.createFile(io, &manifest_file_path, .{ .read = true, .truncate = false, .lock = .exclusive, @@ -543,7 +543,7 @@ pub const Manifest = struct { return error.CacheCheckFailed; } - if (self.cache.manifest_dir.createFile(&manifest_file_path, .{ + if (self.cache.manifest_dir.createFile(io, &manifest_file_path, .{ .read = true, .truncate = false, .lock = .exclusive, @@ -873,7 +873,7 @@ pub const Manifest = struct { if (man.want_refresh_timestamp) { man.want_refresh_timestamp = false; - var file = man.cache.manifest_dir.createFile("timestamp", .{ + var file = man.cache.manifest_dir.createFile(io, "timestamp", .{ .read = true, .truncate = true, }) catch |err| switch (err) { @@ -1324,7 +1324,7 @@ fn hashFile(file: Io.File, bin_digest: *[Hasher.mac_length]u8) Io.File.PReadErro fn testGetCurrentFileTimestamp(io: Io, dir: Io.Dir) !Io.Timestamp { const test_out_file = "test-filetimestamp.tmp"; - var file = try dir.createFile(test_out_file, .{ + var file = try dir.createFile(io, test_out_file, .{ .read = true, .truncate = true, }); diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 56ef32e8d8..2ec1c0ef31 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -401,6 +401,9 @@ pub fn evalZigProcess( web_server: ?*Build.WebServer, gpa: Allocator, ) !?Path { + const b = s.owner; + const io = b.graph.io; + // If an error occurs, it's happened in this command: assert(s.result_failed_command == null); s.result_failed_command = try allocPrintCmd(gpa, null, argv); @@ -411,7 +414,7 @@ pub fn evalZigProcess( const result = zigProcessUpdate(s, zp, watch, web_server, gpa) catch |err| switch (err) { error.BrokenPipe => { // Process restart required. - const term = zp.child.wait() catch |e| { + const term = zp.child.wait(io) catch |e| { return s.fail("unable to wait for {s}: {t}", .{ argv[0], e }); }; _ = term; @@ -427,7 +430,7 @@ pub fn evalZigProcess( if (s.result_error_msgs.items.len > 0 and result == null) { // Crash detected. 
- const term = zp.child.wait() catch |e| { + const term = zp.child.wait(io) catch |e| { return s.fail("unable to wait for {s}: {t}", .{ argv[0], e }); }; s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0; @@ -439,9 +442,7 @@ pub fn evalZigProcess( return result; } assert(argv.len != 0); - const b = s.owner; const arena = b.allocator; - const io = b.graph.io; try handleChildProcUnsupported(s); try handleVerbose(s.owner, null, argv); @@ -478,7 +479,7 @@ pub fn evalZigProcess( zp.child.stdin.?.close(io); zp.child.stdin = null; - const term = zp.child.wait() catch |err| { + const term = zp.child.wait(io) catch |err| { return s.fail("unable to wait for {s}: {t}", .{ argv[0], err }); }; s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0; @@ -519,7 +520,7 @@ pub fn installFile(s: *Step, src_lazy_path: Build.LazyPath, dest_path: []const u pub fn installDir(s: *Step, dest_path: []const u8) !Io.Dir.MakePathStatus { const b = s.owner; try handleVerbose(b, null, &.{ "install", "-d", dest_path }); - return std.fs.cwd().makePathStatus(dest_path) catch |err| + return Io.Dir.cwd().makePathStatus(dest_path) catch |err| return s.fail("unable to create dir '{s}': {t}", .{ dest_path, err }); } @@ -895,7 +896,7 @@ pub fn addWatchInput(step: *Step, lazy_file: Build.LazyPath) Allocator.Error!voi try addWatchInputFromPath(step, .{ .root_dir = .{ .path = null, - .handle = std.fs.cwd(), + .handle = Io.Dir.cwd(), }, .sub_path = std.fs.path.dirname(path_string) orelse "", }, std.fs.path.basename(path_string)); @@ -920,7 +921,7 @@ pub fn addDirectoryWatchInput(step: *Step, lazy_directory: Build.LazyPath) Alloc try addDirectoryWatchInputFromPath(step, .{ .root_dir = .{ .path = null, - .handle = std.fs.cwd(), + .handle = Io.Dir.cwd(), }, .sub_path = path_string, }); diff --git a/lib/std/Build/Step/CheckFile.zig b/lib/std/Build/Step/CheckFile.zig index efeedc8b80..560b6ad050 100644 --- a/lib/std/Build/Step/CheckFile.zig +++ b/lib/std/Build/Step/CheckFile.zig @@ -3,7 +3,9 @@ //! TODO: generalize the code in std.testing.expectEqualStrings and make this //! CheckFile step produce those helpful diagnostics when there is not a match. 
const CheckFile = @This(); + const std = @import("std"); +const Io = std.Io; const Step = std.Build.Step; const fs = std.fs; const mem = std.mem; @@ -53,7 +55,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { try step.singleUnchangingWatchInput(check_file.source); const src_path = check_file.source.getPath2(b, step); - const contents = fs.cwd().readFileAlloc(src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| { return step.fail("unable to read '{s}': {s}", .{ src_path, @errorName(err), }); diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index df2419764d..ea7d9d99ff 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -1,5 +1,7 @@ -const std = @import("std"); const ConfigHeader = @This(); + +const std = @import("std"); +const Io = std.Io; const Step = std.Build.Step; const Allocator = std.mem.Allocator; const Writer = std.Io.Writer; @@ -205,7 +207,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .autoconf_undef, .autoconf_at => |file_source| { try bw.writeAll(c_generated_line); const src_path = file_source.getPath2(b, step); - const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { return step.fail("unable to read autoconf input file '{s}': {s}", .{ src_path, @errorName(err), }); @@ -219,7 +221,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .cmake => |file_source| { try bw.writeAll(c_generated_line); const src_path = file_source.getPath2(b, step); - const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { return step.fail("unable to read cmake input file '{s}': {s}", .{ src_path, @errorName(err), }); diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index 9f5665e93a..1416e0e916 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -1,12 +1,13 @@ -const std = @import("std"); +const Options = @This(); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const fs = std.fs; const Step = std.Build.Step; const GeneratedFile = std.Build.GeneratedFile; const LazyPath = std.Build.LazyPath; -const Options = @This(); - pub const base_id: Step.Id = .options; step: Step, @@ -542,11 +543,11 @@ test Options { .cache = .{ .io = io, .gpa = arena.allocator(), - .manifest_dir = std.fs.cwd(), + .manifest_dir = Io.Dir.cwd(), }, .zig_exe = "test", .env_map = std.process.EnvMap.init(arena.allocator()), - .global_cache_root = .{ .path = "test", .handle = std.fs.cwd() }, + .global_cache_root = .{ .path = "test", .handle = Io.Dir.cwd() }, .host = .{ .query = .{}, .result = try std.zig.system.resolveTargetQuery(io, .{}), @@ -557,8 +558,8 @@ test Options { var builder = try std.Build.create( &graph, - .{ .path = "test", .handle = std.fs.cwd() }, - .{ .path = "test", .handle = std.fs.cwd() }, + .{ .path = "test", .handle = Io.Dir.cwd() }, + .{ .path = "test", .handle = Io.Dir.cwd() }, &.{}, ); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 7c54c8048e..af6bc20438 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig 
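
The Build.Step, Cache, CheckFile, ConfigHeader, and Options hunks above are all the same mechanical migration: `std.fs.cwd()` becomes `Io.Dir.cwd()`, and directory/file operations such as `createFile`, `openFile`, and `close` now take the `Io` instance explicitly. As a minimal sketch of the resulting call pattern (obtaining `io` from `std.Io.Threaded` is only one way to get an `Io`, borrowed from elsewhere in this series; the file name is a placeholder):

    const std = @import("std");
    const Io = std.Io;

    pub fn main() !void {
        // One possible Io implementation; any Io instance is used the same way.
        var threaded: std.Io.Threaded = .init_single_threaded;
        const io = threaded.ioBasic();

        // `createFile` now takes `io` as its first argument, as does `close`.
        const new_file = try Io.Dir.cwd().createFile(io, "example.txt", .{ .read = true });
        defer new_file.close(io);

        // Opening an existing file follows the same pattern.
        const opened = try Io.Dir.cwd().openFile(io, "example.txt", .{});
        defer opened.close(io);
    }
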
@@ -1023,7 +1023,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, options, null); - const dep_file_dir = std.fs.cwd(); + const dep_file_dir = Io.Dir.cwd(); const dep_file_basename = dep_output_file.generated_file.getPath2(b, step); if (has_side_effects) try man.addDepFile(dep_file_dir, dep_file_basename) diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig index ff06ad3ff3..f7ac47961e 100644 --- a/lib/std/Build/Watch.zig +++ b/lib/std/Build/Watch.zig @@ -122,7 +122,7 @@ const Os = switch (builtin.os.tag) { }) catch return error.NameTooLong; const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer); stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle); - try posix.name_to_handle_at(path.root_dir.handle.fd, adjusted_path, stack_ptr, mount_id, std.os.linux.AT.HANDLE_FID); + try posix.name_to_handle_at(path.root_dir.handle.handle, adjusted_path, stack_ptr, mount_id, std.os.linux.AT.HANDLE_FID); const stack_lfh: FileHandle = .{ .handle = stack_ptr }; return stack_lfh.clone(gpa); } @@ -222,7 +222,7 @@ const Os = switch (builtin.os.tag) { posix.fanotify_mark(fan_fd, .{ .ADD = true, .ONLYDIR = true, - }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| { + }, fan_mask, path.root_dir.handle.handle, path.subPathOrDot()) catch |err| { fatal("unable to watch {f}: {s}", .{ path, @errorName(err) }); }; } @@ -275,7 +275,7 @@ const Os = switch (builtin.os.tag) { posix.fanotify_mark(fan_fd, .{ .REMOVE = true, .ONLYDIR = true, - }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| switch (err) { + }, fan_mask, path.root_dir.handle.handle, path.subPathOrDot()) catch |err| switch (err) { error.FileNotFound => {}, // Expected, harmless. else => |e| std.log.warn("unable to unwatch '{f}': {s}", .{ path, @errorName(e) }), }; @@ -353,7 +353,7 @@ const Os = switch (builtin.os.tag) { // The following code is a drawn out NtCreateFile call. (mostly adapted from Io.Dir.makeOpenDirAccessMaskW) // It's necessary in order to get the specific flags that are required when calling ReadDirectoryChangesW. var dir_handle: windows.HANDLE = undefined; - const root_fd = path.root_dir.handle.fd; + const root_fd = path.root_dir.handle.handle; const sub_path = path.subPathOrDot(); const sub_path_w = try windows.sliceToPrefixedFileW(root_fd, sub_path); const path_len_bytes = std.math.cast(u16, sub_path_w.len * 2) orelse return error.NameTooLong; @@ -681,9 +681,9 @@ const Os = switch (builtin.os.tag) { if (!gop.found_existing) { const skip_open_dir = path.sub_path.len == 0; const dir_fd = if (skip_open_dir) - path.root_dir.handle.fd + path.root_dir.handle.handle else - posix.openat(path.root_dir.handle.fd, path.sub_path, dir_open_flags, 0) catch |err| { + posix.openat(path.root_dir.handle.handle, path.sub_path, dir_open_flags, 0) catch |err| { fatal("failed to open directory {f}: {s}", .{ path, @errorName(err) }); }; // Empirically the dir has to stay open or else no events are triggered. @@ -750,7 +750,7 @@ const Os = switch (builtin.os.tag) { // to access that data via the dir_fd field. 
const path = w.dir_table.keys()[i]; const dir_fd = if (path.sub_path.len == 0) - path.root_dir.handle.fd + path.root_dir.handle.handle else handles.items(.dir_fd)[i]; assert(dir_fd != -1); @@ -761,7 +761,7 @@ const Os = switch (builtin.os.tag) { const last_dir_fd = fd: { const last_path = w.dir_table.keys()[handles.len - 1]; const last_dir_fd = if (last_path.sub_path.len == 0) - last_path.root_dir.handle.fd + last_path.root_dir.handle.handle else handles.items(.dir_fd)[handles.len - 1]; assert(last_dir_fd != -1); diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index 8e71f648e2..a9b4775772 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -527,6 +527,14 @@ pub fn writerStreaming(file: File, io: Io, buffer: []u8) Writer { return .initStreaming(file, io, buffer); } +/// Equivalent to creating a streaming writer, writing `bytes`, and then flushing. +pub fn writeStreamingAll(file: File, io: Io, bytes: []const u8) Writer.Error!void { + var index: usize = 0; + while (index < bytes.len) { + index += try io.vtable.fileWriteStreaming(io.userdata, file, &.{}, &.{bytes[index..]}, 1); + } +} + pub const LockError = error{ SystemResources, FileLocksUnsupported, diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index fb76002201..124f886515 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2361,7 +2361,7 @@ fn dirCreateFilePosix( .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, - .SRCH => return error.ProcessNotFound, + .SRCH => return error.FileNotFound, // Linux when accessing procfs. .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, @@ -2670,7 +2670,7 @@ fn dirOpenFilePosix( .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, - .SRCH => return error.ProcessNotFound, + .SRCH => return error.FileNotFound, // Linux when opening procfs files. .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, @@ -3287,7 +3287,7 @@ fn dirRealPathPosix(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, out_b .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, - .SRCH => return error.ProcessNotFound, + .SRCH => return error.FileNotFound, // Linux when accessing procfs. .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, @@ -5548,7 +5548,6 @@ fn fileReadStreamingPosix(userdata: ?*anyopaque, file: File, data: [][]u8) File. switch (e) { .INVAL => |err| return errnoBug(err), .FAULT => |err| return errnoBug(err), - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .BADF => |err| { if (native_os == .wasi) return error.NotOpenForReading; // File operation on directory. @@ -5672,7 +5671,6 @@ fn fileReadPositionalPosix(userdata: ?*anyopaque, file: File, data: [][]u8, offs switch (e) { .INVAL => |err| return errnoBug(err), .FAULT => |err| return errnoBug(err), - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .BADF => |err| { if (native_os == .wasi) return error.NotOpenForReading; // File operation on directory. 
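
For the `writeStreamingAll` helper added to `Io.File` above, a usage sketch following the signature introduced in that hunk (the `std.Io.Threaded` setup and the message are assumptions made only to keep the example self-contained); it is the one-call equivalent of creating a streaming writer, writing the bytes, and flushing:

    const std = @import("std");

    pub fn main() !void {
        var threaded: std.Io.Threaded = .init_single_threaded;
        const io = threaded.ioBasic();

        // Loops on short writes until the whole slice has gone through the
        // streaming write path, matching the loop in the new function.
        try std.Io.File.stderr().writeStreamingAll(io, "hello via writeStreamingAll\n");
    }
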
@@ -6312,7 +6310,6 @@ fn fileWriteStreaming( switch (e) { .INVAL => return error.InvalidArgument, .FAULT => |err| return errnoBug(err), - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => |err| return errnoBug(err), // `connect` was never called. diff --git a/lib/std/Io/Writer.zig b/lib/std/Io/Writer.zig index 3f25bc6a26..63ae6d93a0 100644 --- a/lib/std/Io/Writer.zig +++ b/lib/std/Io/Writer.zig @@ -2835,7 +2835,7 @@ test "discarding sendFile" { var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); var r_buffer: [256]u8 = undefined; var file_writer: File.Writer = .init(file, &r_buffer); @@ -2857,7 +2857,7 @@ test "allocating sendFile" { var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); var r_buffer: [2]u8 = undefined; var file_writer: File.Writer = .init(file, &r_buffer); @@ -2881,7 +2881,7 @@ test sendFileReading { var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); var r_buffer: [2]u8 = undefined; var file_writer: File.Writer = .init(file, &r_buffer); diff --git a/lib/std/Io/net/test.zig b/lib/std/Io/net/test.zig index 5818f6c3f7..c9ed0d3284 100644 --- a/lib/std/Io/net/test.zig +++ b/lib/std/Io/net/test.zig @@ -278,7 +278,7 @@ test "listen on a unix socket, send bytes, receive bytes" { defer testing.allocator.free(socket_path); const socket_addr = try net.UnixAddress.init(socket_path); - defer std.fs.cwd().deleteFile(socket_path) catch {}; + defer Io.Dir.cwd().deleteFile(socket_path) catch {}; var server = try socket_addr.listen(io, .{}); defer server.socket.close(io); diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index 9f21fe50e7..e731dc18d7 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -27,7 +27,7 @@ test "write a file, read it, then delete it" { random.bytes(data[0..]); const tmp_file_name = "temp_test_file.txt"; { - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); var file_writer = file.writer(&.{}); @@ -40,7 +40,7 @@ test "write a file, read it, then delete it" { { // Make sure the exclusive flag is honored. 
- try expectError(File.OpenError.PathAlreadyExists, tmp.dir.createFile(tmp_file_name, .{ .exclusive = true })); + try expectError(File.OpenError.PathAlreadyExists, tmp.dir.createFile(io, tmp_file_name, .{ .exclusive = true })); } { @@ -70,7 +70,7 @@ test "File seek ops" { const io = testing.io; const tmp_file_name = "temp_test_file.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); try file.writeAll(&([_]u8{0x55} ** 8192)); @@ -96,7 +96,7 @@ test "setEndPos" { defer tmp.cleanup(); const tmp_file_name = "temp_test_file.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); // Verify that the file size changes and the file offset is not moved @@ -121,7 +121,7 @@ test "updateTimes" { defer tmp.cleanup(); const tmp_file_name = "just_a_temporary_file.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{ .read = true }); + var file = try tmp.dir.createFile(io, tmp_file_name, .{ .read = true }); defer file.close(io); const stat_old = try file.stat(); diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 102bb59415..8453bc4c81 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -208,7 +208,7 @@ pub fn setName(self: Thread, io: Io, name: []const u8) SetNameError!void { var buf: [32]u8 = undefined; const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()}); - const file = try std.fs.cwd().openFile(io, path, .{ .mode = .write_only }); + const file = try Io.Dir.cwd().openFile(io, path, .{ .mode = .write_only }); defer file.close(io); try file.writeAll(name); @@ -325,7 +325,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co var threaded: std.Io.Threaded = .init_single_threaded; const io = threaded.ioBasic(); - const file = try std.fs.cwd().openFile(io, path, .{}); + const file = try Io.Dir.cwd().openFile(io, path, .{}); defer file.close(io); var file_reader = file.readerStreaming(io, &.{}); diff --git a/lib/std/crypto/Certificate/Bundle/macos.zig b/lib/std/crypto/Certificate/Bundle/macos.zig index 473505ac51..444d8da675 100644 --- a/lib/std/crypto/Certificate/Bundle/macos.zig +++ b/lib/std/crypto/Certificate/Bundle/macos.zig @@ -19,7 +19,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanM _ = io; // TODO migrate file system to use std.Io for (keychain_paths) |keychain_path| { - const bytes = std.fs.cwd().readFileAlloc(keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) { + const bytes = Io.Dir.cwd().readFileAlloc(keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) { error.StreamTooLong => return error.FileTooBig, else => |e| return e, }; diff --git a/lib/std/crypto/codecs/asn1/test.zig b/lib/std/crypto/codecs/asn1/test.zig index ff854fcbde..3dbedb9f80 100644 --- a/lib/std/crypto/codecs/asn1/test.zig +++ b/lib/std/crypto/codecs/asn1/test.zig @@ -73,8 +73,8 @@ test AllTypes { try std.testing.expectEqualSlices(u8, encoded, buf); // Use this to update test file. 
- // const dir = try std.fs.cwd().openDir("lib/std/crypto/asn1", .{}); - // var file = try dir.createFile(path, .{}); + // const dir = try Io.Dir.cwd().openDir("lib/std/crypto/asn1", .{}); + // var file = try dir.createFile(io, path, .{}); // defer file.close(io); // try file.writeAll(buf); } diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 97741ecb40..5df0eef2d5 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -60,7 +60,7 @@ pub const cpu_context = @import("debug/cpu_context.zig"); /// }; /// /// Only required if `can_unwind == true`. Unwinds a single stack frame, returning the frame's /// /// return address, or 0 if the end of the stack has been reached. -/// pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) SelfInfoError!usize; +/// pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, io: Io, context: *UnwindContext) SelfInfoError!usize; /// ``` pub const SelfInfo = if (@hasDecl(root, "debug") and @hasDecl(root.debug, "SelfInfo")) root.debug.SelfInfo @@ -558,9 +558,9 @@ pub fn defaultPanic( stderr.print("{s}\n", .{msg}) catch break :trace; if (@errorReturnTrace()) |t| if (t.index > 0) { - stderr.writeAll("error return context:\n") catch break :trace; + stderr.writeStreamingAll("error return context:\n") catch break :trace; writeStackTrace(t, stderr, tty_config) catch break :trace; - stderr.writeAll("\nstack trace:\n") catch break :trace; + stderr.writeStreamingAll("\nstack trace:\n") catch break :trace; }; writeCurrentStackTrace(.{ .first_address = first_trace_addr orelse @returnAddress(), @@ -575,7 +575,7 @@ pub fn defaultPanic( // A panic happened while trying to print a previous panic message. // We're still holding the mutex but that's fine as we're going to // call abort(). - File.stderr().writeAll("aborting due to recursive panic\n") catch {}; + File.stderr().writeStreamingAll("aborting due to recursive panic\n") catch {}; }, else => {}, // Panicked while printing the recursive panic message. } @@ -960,7 +960,7 @@ const StackIterator = union(enum) { }, }; - fn next(it: *StackIterator) Result { + fn next(it: *StackIterator, io: Io) Result { switch (it.*) { .ctx_first => |context_ptr| { // After the first frame, start actually unwinding. 
@@ -976,7 +976,7 @@ const StackIterator = union(enum) { .di => |*unwind_context| { const di = getSelfDebugInfo() catch unreachable; const di_gpa = getDebugInfoAllocator(); - const ret_addr = di.unwindFrame(di_gpa, unwind_context) catch |err| { + const ret_addr = di.unwindFrame(di_gpa, io, unwind_context) catch |err| { const pc = unwind_context.pc; const fp = unwind_context.getFp(); it.* = .{ .fp = fp }; @@ -1297,7 +1297,7 @@ test printLineFromFile { aw.clearRetainingCapacity(); } { - const file = try test_dir.dir.createFile("line_overlaps_page_boundary.zig", .{}); + const file = try test_dir.dir.createFile(io, "line_overlaps_page_boundary.zig", .{}); defer file.close(io); const path = try fs.path.join(gpa, &.{ test_dir_path, "line_overlaps_page_boundary.zig" }); defer gpa.free(path); @@ -1316,7 +1316,7 @@ test printLineFromFile { aw.clearRetainingCapacity(); } { - const file = try test_dir.dir.createFile("file_ends_on_page_boundary.zig", .{}); + const file = try test_dir.dir.createFile(io, "file_ends_on_page_boundary.zig", .{}); defer file.close(io); const path = try fs.path.join(gpa, &.{ test_dir_path, "file_ends_on_page_boundary.zig" }); defer gpa.free(path); @@ -1330,7 +1330,7 @@ test printLineFromFile { aw.clearRetainingCapacity(); } { - const file = try test_dir.dir.createFile("very_long_first_line_spanning_multiple_pages.zig", .{}); + const file = try test_dir.dir.createFile(io, "very_long_first_line_spanning_multiple_pages.zig", .{}); defer file.close(io); const path = try fs.path.join(gpa, &.{ test_dir_path, "very_long_first_line_spanning_multiple_pages.zig" }); defer gpa.free(path); @@ -1356,7 +1356,7 @@ test printLineFromFile { aw.clearRetainingCapacity(); } { - const file = try test_dir.dir.createFile("file_of_newlines.zig", .{}); + const file = try test_dir.dir.createFile(io, "file_of_newlines.zig", .{}); defer file.close(io); const path = try fs.path.join(gpa, &.{ test_dir_path, "file_of_newlines.zig" }); defer gpa.free(path); diff --git a/lib/std/debug/ElfFile.zig b/lib/std/debug/ElfFile.zig index a0f1188ade..203ee8effb 100644 --- a/lib/std/debug/ElfFile.zig +++ b/lib/std/debug/ElfFile.zig @@ -375,7 +375,7 @@ fn loadSeparateDebugFile( args: anytype, ) Allocator.Error!?[]align(std.heap.page_size_min) const u8 { const path = try std.fmt.allocPrint(arena, fmt, args); - const elf_file = std.fs.cwd().openFile(io, path, .{}) catch return null; + const elf_file = Io.Dir.cwd().openFile(io, path, .{}) catch return null; defer elf_file.close(io); const result = loadInner(arena, elf_file, opt_crc) catch |err| switch (err) { diff --git a/lib/std/debug/MachOFile.zig b/lib/std/debug/MachOFile.zig index ae904c0aec..18126a1c29 100644 --- a/lib/std/debug/MachOFile.zig +++ b/lib/std/debug/MachOFile.zig @@ -512,7 +512,7 @@ fn loadOFile(gpa: Allocator, io: Io, o_file_name: []const u8) !OFile { /// Uses `mmap` to map the file at `path` into memory. 
fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 { - const file = std.fs.cwd().openFile(io, path, .{}) catch |err| switch (err) { + const file = Io.Dir.cwd().openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return error.ReadFailed, }; diff --git a/lib/std/debug/SelfInfo/Elf.zig b/lib/std/debug/SelfInfo/Elf.zig index 213389bf04..6ed18bcb80 100644 --- a/lib/std/debug/SelfInfo/Elf.zig +++ b/lib/std/debug/SelfInfo/Elf.zig @@ -29,13 +29,12 @@ pub fn deinit(si: *SelfInfo, gpa: Allocator) void { } pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!std.debug.Symbol { - _ = io; const module = try si.findModule(gpa, address, .exclusive); defer si.rwlock.unlock(); const vaddr = address - module.load_offset; - const loaded_elf = try module.getLoadedElf(gpa); + const loaded_elf = try module.getLoadedElf(gpa, io); if (loaded_elf.file.dwarf) |*dwarf| { if (!loaded_elf.scanned_dwarf) { dwarf.open(gpa, native_endian) catch |err| switch (err) { @@ -180,7 +179,7 @@ comptime { } } pub const UnwindContext = Dwarf.SelfUnwinder; -pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error!usize { +pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, io: Io, context: *UnwindContext) Error!usize { comptime assert(can_unwind); { @@ -201,7 +200,7 @@ pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error @memset(si.unwind_cache.?, .empty); } - const unwind_sections = try module.getUnwindSections(gpa); + const unwind_sections = try module.getUnwindSections(gpa, io); for (unwind_sections) |*unwind| { if (context.computeRules(gpa, unwind, module.load_offset, null)) |entry| { entry.populate(si.unwind_cache.?); @@ -261,12 +260,12 @@ const Module = struct { }; /// Assumes we already hold an exclusive lock. - fn getUnwindSections(mod: *Module, gpa: Allocator) Error![]Dwarf.Unwind { - if (mod.unwind == null) mod.unwind = loadUnwindSections(mod, gpa); + fn getUnwindSections(mod: *Module, gpa: Allocator, io: Io) Error![]Dwarf.Unwind { + if (mod.unwind == null) mod.unwind = loadUnwindSections(mod, gpa, io); const us = &(mod.unwind.? catch |err| return err); return us.buf[0..us.len]; } - fn loadUnwindSections(mod: *Module, gpa: Allocator) Error!UnwindSections { + fn loadUnwindSections(mod: *Module, gpa: Allocator, io: Io) Error!UnwindSections { var us: UnwindSections = .{ .buf = undefined, .len = 0, @@ -284,7 +283,7 @@ const Module = struct { } else { // There is no `.eh_frame_hdr` section. There may still be an `.eh_frame` or `.debug_frame` // section, but we'll have to load the binary to get at it. - const loaded = try mod.getLoadedElf(gpa); + const loaded = try mod.getLoadedElf(gpa, io); // If both are present, we can't just pick one -- the info could be split between them. // `.debug_frame` is likely to be the more complete section, so we'll prioritize that one. 
if (loaded.file.debug_frame) |*debug_frame| { @@ -325,7 +324,7 @@ const Module = struct { } fn loadElf(mod: *Module, gpa: Allocator, io: Io) Error!LoadedElf { const load_result = if (mod.name.len > 0) res: { - var file = std.fs.cwd().openFile(io, mod.name, .{}) catch return error.MissingDebugInfo; + var file = Io.Dir.cwd().openFile(io, mod.name, .{}) catch return error.MissingDebugInfo; defer file.close(io); break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(mod.name)); } else res: { @@ -334,7 +333,7 @@ const Module = struct { else => return error.ReadFailed, }; defer gpa.free(path); - var file = std.fs.cwd().openFile(io, path, .{}) catch return error.MissingDebugInfo; + var file = Io.Dir.cwd().openFile(io, path, .{}) catch return error.MissingDebugInfo; defer file.close(io); break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(path)); }; diff --git a/lib/std/debug/SelfInfo/MachO.zig b/lib/std/debug/SelfInfo/MachO.zig index 15da616f3b..db8e5334e6 100644 --- a/lib/std/debug/SelfInfo/MachO.zig +++ b/lib/std/debug/SelfInfo/MachO.zig @@ -616,7 +616,7 @@ test { /// Uses `mmap` to map the file at `path` into memory. fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 { - const file = std.fs.cwd().openFile(io, path, .{}) catch |err| switch (err) { + const file = Io.Dir.cwd().openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return error.ReadFailed, }; diff --git a/lib/std/debug/SelfInfo/Windows.zig b/lib/std/debug/SelfInfo/Windows.zig index f0ac30cca2..9874efd497 100644 --- a/lib/std/debug/SelfInfo/Windows.zig +++ b/lib/std/debug/SelfInfo/Windows.zig @@ -432,7 +432,7 @@ const Module = struct { break :pdb null; }; const pdb_file_open_result = if (fs.path.isAbsolute(path)) res: { - break :res std.fs.cwd().openFile(io, path, .{}); + break :res Io.Dir.cwd().openFile(io, path, .{}); } else res: { const self_dir = std.process.executableDirPathAlloc(io, gpa) catch |err| switch (err) { error.OutOfMemory, error.Unexpected => |e| return e, @@ -441,7 +441,7 @@ const Module = struct { defer gpa.free(self_dir); const abs_path = try fs.path.join(gpa, &.{ self_dir, path }); defer gpa.free(abs_path); - break :res std.fs.cwd().openFile(io, abs_path, .{}); + break :res Io.Dir.cwd().openFile(io, abs_path, .{}); }; const pdb_file = pdb_file_open_result catch |err| switch (err) { error.FileNotFound, error.IsDir => break :pdb null, diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 7db177ad70..a1801d00d0 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -160,7 +160,7 @@ pub const ElfDynLib = struct { fn openPath(path: []const u8, io: Io) !Io.Dir { if (path.len == 0) return error.NotDir; var parts = std.mem.tokenizeScalar(u8, path, '/'); - var parent = if (path[0] == '/') try std.fs.cwd().openDir("/", .{}) else std.fs.cwd(); + var parent = if (path[0] == '/') try Io.Dir.cwd().openDir("/", .{}) else Io.Dir.cwd(); while (parts.next()) |part| { const child = try parent.openDir(part, .{}); parent.close(io); @@ -174,7 +174,7 @@ pub const ElfDynLib = struct { while (paths.next()) |p| { var dir = openPath(p) catch continue; defer dir.close(io); - const fd = posix.openat(dir.fd, file_name, .{ + const fd = posix.openat(dir.handle, file_name, .{ .ACCMODE = .RDONLY, .CLOEXEC = true, }, 0) catch continue; @@ -184,9 +184,9 @@ pub const ElfDynLib = struct { } fn resolveFromParent(io: Io, dir_path: []const u8, file_name: []const u8) ?posix.fd_t { - var 
dir = std.fs.cwd().openDir(dir_path, .{}) catch return null; + var dir = Io.Dir.cwd().openDir(dir_path, .{}) catch return null; defer dir.close(io); - return posix.openat(dir.fd, file_name, .{ + return posix.openat(dir.handle, file_name, .{ .ACCMODE = .RDONLY, .CLOEXEC = true, }, 0) catch null; diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index f4bdecf89d..aab86d40a6 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -46,7 +46,7 @@ const PathType = enum { // The final path may not actually exist which would cause realpath to fail. // So instead, we get the path of the dir and join it with the relative path. var fd_path_buf: [fs.max_path_bytes]u8 = undefined; - const dir_path = try std.os.getFdPath(dir.fd, &fd_path_buf); + const dir_path = try std.os.getFdPath(dir.handle, &fd_path_buf); return fs.path.joinZ(allocator, &.{ dir_path, relative_path }); } }.transform, @@ -55,7 +55,7 @@ const PathType = enum { // Any drive absolute path (C:\foo) can be converted into a UNC path by // using '127.0.0.1' as the server name and '$' as the share name. var fd_path_buf: [fs.max_path_bytes]u8 = undefined; - const dir_path = try std.os.getFdPath(dir.fd, &fd_path_buf); + const dir_path = try std.os.getFdPath(dir.handle, &fd_path_buf); const windows_path_type = windows.getWin32PathType(u8, dir_path); switch (windows_path_type) { .unc_absolute => return fs.path.joinZ(allocator, &.{ dir_path, relative_path }), @@ -256,7 +256,7 @@ fn testReadLinkW(allocator: mem.Allocator, dir: Dir, target_path: []const u8, sy const target_path_w = try std.unicode.wtf8ToWtf16LeAlloc(allocator, target_path); defer allocator.free(target_path_w); // Calling the W functions directly requires the path to be NT-prefixed - const symlink_path_w = try std.os.windows.sliceToPrefixedFileW(dir.fd, symlink_path); + const symlink_path_w = try std.os.windows.sliceToPrefixedFileW(dir.handle, symlink_path); const wtf16_buffer = try allocator.alloc(u16, target_path_w.len); defer allocator.free(wtf16_buffer); const actual = try dir.readLinkW(symlink_path_w.span(), wtf16_buffer); @@ -288,9 +288,11 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { var symlink: Dir = switch (builtin.target.os.tag) { .windows => windows_symlink: { - const sub_path_w = try windows.cStrToPrefixedFileW(ctx.dir.fd, "symlink"); + const sub_path_w = try windows.cStrToPrefixedFileW(ctx.dir.handle, "symlink"); - var handle: windows.HANDLE = undefined; + var result: Dir = .{ + .handle = undefined, + }; const path_len_bytes = @as(u16, @intCast(sub_path_w.span().len * 2)); var nt_name = windows.UNICODE_STRING{ @@ -300,26 +302,16 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { }; var attr: windows.OBJECT_ATTRIBUTES = .{ .Length = @sizeOf(windows.OBJECT_ATTRIBUTES), - .RootDirectory = if (fs.path.isAbsoluteWindowsW(sub_path_w.span())) null else ctx.dir.fd, - .Attributes = .{}, + .RootDirectory = if (fs.path.isAbsoluteWindowsW(sub_path_w.span())) null else ctx.dir.handle, + .Attributes = 0, .ObjectName = &nt_name, .SecurityDescriptor = null, .SecurityQualityOfService = null, }; var io_status_block: windows.IO_STATUS_BLOCK = undefined; const rc = windows.ntdll.NtCreateFile( - &handle, - .{ - .SPECIFIC = .{ .FILE_DIRECTORY = .{ - .READ_EA = true, - .TRAVERSE = true, - .READ_ATTRIBUTES = true, - } }, - .STANDARD = .{ - .RIGHTS = .READ, - .SYNCHRONIZE = true, - }, - }, + &result.handle, + windows.STANDARD_RIGHTS_READ | windows.FILE_READ_ATTRIBUTES | windows.FILE_READ_EA | windows.SYNCHRONIZE | windows.FILE_TRAVERSE, 
&attr, &io_status_block, null, @@ -337,7 +329,7 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { ); switch (rc) { - .SUCCESS => break :windows_symlink .{ .fd = handle }, + .SUCCESS => break :windows_symlink .{ .fd = result.handle }, else => return windows.unexpectedStatus(rc), } }, @@ -351,8 +343,8 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { .ACCMODE = .RDONLY, .CLOEXEC = true, }; - const fd = try posix.openatZ(ctx.dir.fd, &sub_path_c, flags, 0); - break :linux_symlink Dir{ .fd = fd }; + const fd = try posix.openatZ(ctx.dir.handle, &sub_path_c, flags, 0); + break :linux_symlink .{ .handle = fd }; }, else => unreachable, }; @@ -456,7 +448,7 @@ test "openDirAbsolute" { test "openDir cwd parent '..'" { const io = testing.io; - var dir = fs.cwd().openDir("..", .{}) catch |err| { + var dir = Io.Dir.cwd().openDir("..", .{}) catch |err| { if (native_os == .wasi and err == error.PermissionDenied) { return; // This is okay. WASI disallows escaping from the fs sandbox } @@ -534,7 +526,7 @@ test "Dir.Iterator" { defer tmp_dir.cleanup(); // First, create a couple of entries to iterate over. - const file = try tmp_dir.dir.createFile("some_file", .{}); + const file = try tmp_dir.dir.createFile(io, "some_file", .{}); file.close(io); try tmp_dir.dir.makeDir("some_dir"); @@ -570,7 +562,7 @@ test "Dir.Iterator many entries" { var buf: [4]u8 = undefined; // Enough to store "1024". while (i < num) : (i += 1) { const name = try std.fmt.bufPrint(&buf, "{}", .{i}); - const file = try tmp_dir.dir.createFile(name, .{}); + const file = try tmp_dir.dir.createFile(io, name, .{}); file.close(io); } @@ -603,7 +595,7 @@ test "Dir.Iterator twice" { defer tmp_dir.cleanup(); // First, create a couple of entries to iterate over. - const file = try tmp_dir.dir.createFile("some_file", .{}); + const file = try tmp_dir.dir.createFile(io, "some_file", .{}); file.close(io); try tmp_dir.dir.makeDir("some_dir"); @@ -638,7 +630,7 @@ test "Dir.Iterator reset" { defer tmp_dir.cleanup(); // First, create a couple of entries to iterate over. 
- const file = try tmp_dir.dir.createFile("some_file", .{}); + const file = try tmp_dir.dir.createFile(io, "some_file", .{}); file.close(io); try tmp_dir.dir.makeDir("some_dir"); @@ -769,7 +761,7 @@ test "readFileAlloc" { var tmp_dir = tmpDir(.{}); defer tmp_dir.cleanup(); - var file = try tmp_dir.dir.createFile("test_file", .{ .read = true }); + var file = try tmp_dir.dir.createFile(io, "test_file", .{ .read = true }); defer file.close(io); const buf1 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024)); @@ -843,7 +835,7 @@ test "directory operations on files" { const test_file_name = try ctx.transformPath("test_file"); - var file = try ctx.dir.createFile(test_file_name, .{ .read = true }); + var file = try ctx.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); try testing.expectError(error.PathAlreadyExists, ctx.dir.makeDir(test_file_name)); @@ -876,7 +868,7 @@ test "file operations on directories" { try ctx.dir.makeDir(test_dir_name); - try testing.expectError(error.IsDir, ctx.dir.createFile(test_dir_name, .{})); + try testing.expectError(error.IsDir, ctx.dir.createFile(io, test_dir_name, .{})); try testing.expectError(error.IsDir, ctx.dir.deleteFile(test_dir_name)); switch (native_os) { .dragonfly, .netbsd => { @@ -969,7 +961,7 @@ test "Dir.rename files" { // Renaming files const test_file_name = try ctx.transformPath("test_file"); const renamed_test_file_name = try ctx.transformPath("test_file_renamed"); - var file = try ctx.dir.createFile(test_file_name, .{ .read = true }); + var file = try ctx.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); try ctx.dir.rename(test_file_name, renamed_test_file_name); @@ -983,7 +975,7 @@ test "Dir.rename files" { // Rename to existing file succeeds const existing_file_path = try ctx.transformPath("existing_file"); - var existing_file = try ctx.dir.createFile(existing_file_path, .{ .read = true }); + var existing_file = try ctx.dir.createFile(io, existing_file_path, .{ .read = true }); existing_file.close(io); try ctx.dir.rename(renamed_test_file_name, existing_file_path); @@ -1017,7 +1009,7 @@ test "Dir.rename directories" { var dir = try ctx.dir.openDir(test_dir_renamed_path, .{}); // Put a file in the directory - var file = try dir.createFile("test_file", .{ .read = true }); + var file = try dir.createFile(io, "test_file", .{ .read = true }); file.close(io); dir.close(io); @@ -1070,7 +1062,7 @@ test "Dir.rename directory onto non-empty dir" { try ctx.dir.makeDir(test_dir_path); var target_dir = try ctx.dir.makeOpenPath(target_dir_path, .{}); - var file = try target_dir.createFile("test_file", .{ .read = true }); + var file = try target_dir.createFile(io, "test_file", .{ .read = true }); file.close(io); target_dir.close(io); @@ -1094,7 +1086,7 @@ test "Dir.rename file <-> dir" { const test_file_path = try ctx.transformPath("test_file"); const test_dir_path = try ctx.transformPath("test_dir"); - var file = try ctx.dir.createFile(test_file_path, .{ .read = true }); + var file = try ctx.dir.createFile(io, test_file_path, .{ .read = true }); file.close(io); try ctx.dir.makeDir(test_dir_path); try testing.expectError(error.IsDir, ctx.dir.rename(test_file_path, test_dir_path)); @@ -1115,7 +1107,7 @@ test "rename" { // Renaming files const test_file_name = "test_file"; const renamed_test_file_name = "test_file_renamed"; - var file = try tmp_dir1.dir.createFile(test_file_name, .{ .read = true }); + var file = try tmp_dir1.dir.createFile(io, test_file_name, .{ .read = true }); 
file.close(io); try fs.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name); @@ -1149,7 +1141,7 @@ test "renameAbsolute" { // Renaming files const test_file_name = "test_file"; const renamed_test_file_name = "test_file_renamed"; - var file = try tmp_dir.dir.createFile(test_file_name, .{ .read = true }); + var file = try tmp_dir.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); try fs.renameAbsolute( try fs.path.join(allocator, &.{ base_path, test_file_name }), @@ -1454,7 +1446,7 @@ test "writev, readv" { var write_vecs: [2][]const u8 = .{ line1, line2 }; var read_vecs: [2][]u8 = .{ &buf2, &buf1 }; - var src_file = try tmp.dir.createFile("test.txt", .{ .read = true }); + var src_file = try tmp.dir.createFile(io, "test.txt", .{ .read = true }); defer src_file.close(io); var writer = src_file.writerStreaming(&.{}); @@ -1484,7 +1476,7 @@ test "pwritev, preadv" { var buf2: [line2.len]u8 = undefined; var read_vecs: [2][]u8 = .{ &buf2, &buf1 }; - var src_file = try tmp.dir.createFile("test.txt", .{ .read = true }); + var src_file = try tmp.dir.createFile(io, "test.txt", .{ .read = true }); defer src_file.close(io); var writer = src_file.writer(&.{}); @@ -1584,14 +1576,14 @@ test "sendfile" { const line2 = "second line\n"; var vecs = [_][]const u8{ line1, line2 }; - var src_file = try dir.createFile("sendfile1.txt", .{ .read = true }); + var src_file = try dir.createFile(io, "sendfile1.txt", .{ .read = true }); defer src_file.close(io); { var fw = src_file.writer(&.{}); try fw.interface.writeVecAll(&vecs); } - var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true }); + var dest_file = try dir.createFile(io, "sendfile2.txt", .{ .read = true }); defer dest_file.close(io); const header1 = "header1\n"; @@ -1627,12 +1619,12 @@ test "sendfile with buffered data" { var dir = try tmp.dir.openDir("os_test_tmp", .{}); defer dir.close(io); - var src_file = try dir.createFile("sendfile1.txt", .{ .read = true }); + var src_file = try dir.createFile(io, "sendfile1.txt", .{ .read = true }); defer src_file.close(io); try src_file.writeAll("AAAABBBB"); - var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true }); + var dest_file = try dir.createFile(io, "sendfile2.txt", .{ .read = true }); defer dest_file.close(io); var src_buffer: [32]u8 = undefined; @@ -1718,10 +1710,10 @@ test "open file with exclusive nonblocking lock twice" { const io = ctx.io; const filename = try ctx.transformPath("file_nonblocking_lock_test.txt"); - const file1 = try ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true }); + const file1 = try ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true }); defer file1.close(io); - const file2 = ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true }); + const file2 = ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true }); try testing.expectError(error.WouldBlock, file2); } }.impl); @@ -1735,10 +1727,10 @@ test "open file with shared and exclusive nonblocking lock" { const io = ctx.io; const filename = try ctx.transformPath("file_nonblocking_lock_test.txt"); - const file1 = try ctx.dir.createFile(filename, .{ .lock = .shared, .lock_nonblocking = true }); + const file1 = try ctx.dir.createFile(io, filename, .{ .lock = .shared, .lock_nonblocking = true }); defer file1.close(io); - const file2 = ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true }); + const file2 = ctx.dir.createFile(io, filename, .{ .lock 
= .exclusive, .lock_nonblocking = true }); try testing.expectError(error.WouldBlock, file2); } }.impl); @@ -1752,10 +1744,10 @@ test "open file with exclusive and shared nonblocking lock" { const io = ctx.io; const filename = try ctx.transformPath("file_nonblocking_lock_test.txt"); - const file1 = try ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true }); + const file1 = try ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true }); defer file1.close(io); - const file2 = ctx.dir.createFile(filename, .{ .lock = .shared, .lock_nonblocking = true }); + const file2 = ctx.dir.createFile(io, filename, .{ .lock = .shared, .lock_nonblocking = true }); try testing.expectError(error.WouldBlock, file2); } }.impl); @@ -1769,13 +1761,13 @@ test "open file with exclusive lock twice, make sure second lock waits" { const io = ctx.io; const filename = try ctx.transformPath("file_lock_test.txt"); - const file = try ctx.dir.createFile(filename, .{ .lock = .exclusive }); + const file = try ctx.dir.createFile(io, filename, .{ .lock = .exclusive }); errdefer file.close(io); const S = struct { fn checkFn(dir: *Io.Dir, path: []const u8, started: *std.Thread.ResetEvent, locked: *std.Thread.ResetEvent) !void { started.set(); - const file1 = try dir.createFile(path, .{ .lock = .exclusive }); + const file1 = try dir.createFile(io, path, .{ .lock = .exclusive }); locked.set(); file1.close(io); @@ -1847,13 +1839,13 @@ test "read from locked file" { const filename = try ctx.transformPath("read_lock_file_test.txt"); { - const f = try ctx.dir.createFile(filename, .{ .read = true }); + const f = try ctx.dir.createFile(io, filename, .{ .read = true }); defer f.close(io); var buffer: [1]u8 = undefined; _ = try f.read(&buffer); } { - const f = try ctx.dir.createFile(filename, .{ + const f = try ctx.dir.createFile(io, filename, .{ .read = true, .lock = .exclusive, }); @@ -2037,7 +2029,7 @@ test "'.' and '..' 
in Io.Dir functions" { var created_subdir = try ctx.dir.openDir(subdir_path, .{}); created_subdir.close(io); - const created_file = try ctx.dir.createFile(file_path, .{}); + const created_file = try ctx.dir.createFile(io, file_path, .{}); created_file.close(io); try ctx.dir.access(file_path, .{}); @@ -2103,7 +2095,7 @@ test "chmod" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - const file = try tmp.dir.createFile("test_file", .{ .mode = 0o600 }); + const file = try tmp.dir.createFile(io, "test_file", .{ .mode = 0o600 }); defer file.close(io); try testing.expectEqual(@as(File.Mode, 0o600), (try file.stat()).mode & 0o7777); @@ -2127,7 +2119,7 @@ test "chown" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - const file = try tmp.dir.createFile("test_file", .{}); + const file = try tmp.dir.createFile(io, "test_file", .{}); defer file.close(io); try file.chown(null, null); @@ -2228,7 +2220,7 @@ test "read file non vectored" { const contents = "hello, world!\n"; - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); { var file_writer: File.Writer = .init(file, &.{}); @@ -2260,7 +2252,7 @@ test "seek keeping partial buffer" { const contents = "0123456789"; - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); { var file_writer: File.Writer = .init(file, &.{}); @@ -2321,7 +2313,7 @@ test "seekTo flushes buffered data" { const contents = "data"; - const file = try tmp.dir.createFile("seek.bin", .{ .read = true }); + const file = try tmp.dir.createFile(io, "seek.bin", .{ .read = true }); defer file.close(io); { var buf: [16]u8 = undefined; @@ -2350,7 +2342,7 @@ test "File.Writer sendfile with buffered contents" { try tmp_dir.dir.writeFile(.{ .sub_path = "a", .data = "bcd" }); const in = try tmp_dir.dir.openFile(io, "a", .{}); defer in.close(io); - const out = try tmp_dir.dir.createFile("b", .{}); + const out = try tmp_dir.dir.createFile(io, "b", .{}); defer out.close(io); var in_buf: [2]u8 = undefined; @@ -2397,7 +2389,7 @@ test "readlinkat" { // create a symbolic link if (native_os == .windows) { std.os.windows.CreateSymbolicLink( - tmp.dir.fd, + tmp.dir.handle, &[_]u16{ 'l', 'i', 'n', 'k' }, &[_:0]u16{ 'f', 'i', 'l', 'e', '.', 't', 'x', 't' }, false, @@ -2407,7 +2399,7 @@ test "readlinkat" { else => return err, }; } else { - try posix.symlinkat("file.txt", tmp.dir.fd, "link"); + try posix.symlinkat("file.txt", tmp.dir.handle, "link"); } // read the link diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig index c7d3f35d40..0972a302da 100644 --- a/lib/std/os/linux/IoUring.zig +++ b/lib/std/os/linux/IoUring.zig @@ -1991,7 +1991,7 @@ test "writev/fsync/readv" { defer tmp.cleanup(); const path = "test_io_uring_writev_fsync_readv"; - const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true }); + const file = try tmp.dir.createFile(io, path, .{ .read = true, .truncate = true }); defer file.close(io); const fd = file.handle; @@ -2062,7 +2062,7 @@ test "write/read" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); const path = "test_io_uring_write_read"; - const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true }); + const file = try tmp.dir.createFile(io, path, .{ .read = true, .truncate = true }); defer file.close(io); const fd = file.handle; @@ -2110,12 +2110,12 @@ test "splice/read" { var tmp = 
std.testing.tmpDir(.{}); const path_src = "test_io_uring_splice_src"; - const file_src = try tmp.dir.createFile(path_src, .{ .read = true, .truncate = true }); + const file_src = try tmp.dir.createFile(io, path_src, .{ .read = true, .truncate = true }); defer file_src.close(io); const fd_src = file_src.handle; const path_dst = "test_io_uring_splice_dst"; - const file_dst = try tmp.dir.createFile(path_dst, .{ .read = true, .truncate = true }); + const file_dst = try tmp.dir.createFile(io, path_dst, .{ .read = true, .truncate = true }); defer file_dst.close(io); const fd_dst = file_dst.handle; @@ -2185,7 +2185,7 @@ test "write_fixed/read_fixed" { defer tmp.cleanup(); const path = "test_io_uring_write_read_fixed"; - const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true }); + const file = try tmp.dir.createFile(io, path, .{ .read = true, .truncate = true }); defer file.close(io); const fd = file.handle; @@ -2306,7 +2306,7 @@ test "close" { defer tmp.cleanup(); const path = "test_io_uring_close"; - const file = try tmp.dir.createFile(path, .{}); + const file = try tmp.dir.createFile(io, path, .{}); errdefer file.close(io); const sqe_close = try ring.close(0x44444444, file.handle); @@ -2652,7 +2652,7 @@ test "fallocate" { defer tmp.cleanup(); const path = "test_io_uring_fallocate"; - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); try testing.expectEqual(@as(u64, 0), (try file.stat()).size); @@ -2699,7 +2699,7 @@ test "statx" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); const path = "test_io_uring_statx"; - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); try testing.expectEqual(@as(u64, 0), (try file.stat()).size); @@ -2969,7 +2969,7 @@ test "renameat" { // Write old file with data - const old_file = try tmp.dir.createFile(old_path, .{ .truncate = true, .mode = 0o666 }); + const old_file = try tmp.dir.createFile(io, old_path, .{ .truncate = true, .mode = 0o666 }); defer old_file.close(io); try old_file.writeAll("hello"); @@ -3028,7 +3028,7 @@ test "unlinkat" { // Write old file with data - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); // Submit unlinkat @@ -3125,7 +3125,7 @@ test "symlinkat" { const path = "test_io_uring_symlinkat"; const link_path = "test_io_uring_symlinkat_link"; - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); // Submit symlinkat @@ -3177,7 +3177,7 @@ test "linkat" { // Write file with data - const first_file = try tmp.dir.createFile(first_path, .{ .truncate = true, .mode = 0o666 }); + const first_file = try tmp.dir.createFile(io, first_path, .{ .truncate = true, .mode = 0o666 }); defer first_file.close(io); try first_file.writeAll("hello"); diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig index 39606ddfac..d7cfb4e138 100644 --- a/lib/std/os/linux/test.zig +++ b/lib/std/os/linux/test.zig @@ -18,7 +18,7 @@ test "fallocate" { defer tmp.cleanup(); const path = "test_fallocate"; - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const 
file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); try expect((try file.stat()).size == 0); @@ -85,7 +85,7 @@ test "statx" { defer tmp.cleanup(); const tmp_file_name = "just_a_temporary_file.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); var buf: linux.Statx = undefined; @@ -121,7 +121,7 @@ test "fadvise" { defer tmp.cleanup(); const tmp_file_name = "temp_posix_fadvise.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); var buf: [2048]u8 = undefined; diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig index 6b5b678b20..ba5282256f 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -4639,8 +4639,8 @@ pub fn wToPrefixedFileW(dir: ?HANDLE, path: [:0]const u16) Wtf16ToPrefixedFileWE break :path_to_get path; } // We can also skip GetFinalPathNameByHandle if the handle matches - // the handle returned by fs.cwd() - if (dir.? == std.fs.cwd().fd) { + // the handle returned by Io.Dir.cwd() + if (dir.? == Io.Dir.cwd().fd) { break :path_to_get path; } // At this point, we know we have a relative path that had too many diff --git a/lib/std/posix.zig b/lib/std/posix.zig index 392987ec50..f4aa970413 100644 --- a/lib/std/posix.zig +++ b/lib/std/posix.zig @@ -15,15 +15,16 @@ //! deal with the exception. const builtin = @import("builtin"); -const root = @import("root"); +const native_os = builtin.os.tag; + const std = @import("std.zig"); +const Io = std.Io; const mem = std.mem; const fs = std.fs; -const max_path_bytes = fs.max_path_bytes; +const max_path_bytes = std.fs.max_path_bytes; const maxInt = std.math.maxInt; const cast = std.math.cast; const assert = std.debug.assert; -const native_os = builtin.os.tag; const page_size_min = std.heap.page_size_min; test { @@ -797,7 +798,6 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize { .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .CANCELED => return error.Canceled, .BADF => return error.NotOpenForReading, // Can be a race condition. @@ -917,7 +917,6 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize { .INTR => continue, .INVAL => return error.InvalidArgument, .FAULT => unreachable, - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. @@ -985,7 +984,8 @@ pub fn openZ(file_path: [*:0]const u8, flags: O, perm: mode_t) OpenError!fd_t { .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, - .SRCH => return error.ProcessNotFound, + // Can happen on Linux when opening procfs files. 
+ .SRCH => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, @@ -1560,7 +1560,7 @@ pub fn mkdirZ(dir_path: [*:0]const u8, mode: mode_t) MakeDirError!void { pub fn mkdirW(dir_path_w: []const u16, mode: mode_t) MakeDirError!void { _ = mode; const sub_dir_handle = windows.OpenFile(dir_path_w, .{ - .dir = fs.cwd().fd, + .dir = Io.Dir.cwd().handle, .access_mask = .{ .STANDARD = .{ .SYNCHRONIZE = true }, .GENERIC = .{ .READ = true }, diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 0071a72a26..dc63be6e14 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -148,7 +148,7 @@ test "linkat with different directories" { try tmp.dir.writeFile(.{ .sub_path = target_name, .data = "example" }); // Test 1: link from file in subdir back up to target in parent directory - try posix.linkat(tmp.dir.fd, target_name, subdir.fd, link_name, 0); + try posix.linkat(tmp.dir.handle, target_name, subdir.handle, link_name, 0); const efd = try tmp.dir.openFile(io, target_name, .{}); defer efd.close(io); @@ -164,7 +164,7 @@ test "linkat with different directories" { } // Test 2: remove link - try posix.unlinkat(subdir.fd, link_name, 0); + try posix.unlinkat(subdir.handle, link_name, 0); _, const elink = try getLinkInfo(efd.handle); try testing.expectEqual(@as(posix.nlink_t, 1), elink); } @@ -373,7 +373,7 @@ test "mmap" { // Create a file used for testing mmap() calls with a file descriptor { - const file = try tmp.dir.createFile(test_out_file, .{}); + const file = try tmp.dir.createFile(io, test_out_file, .{}); defer file.close(io); var stream = file.writer(&.{}); @@ -444,7 +444,7 @@ test "fcntl" { const test_out_file = "os_tmp_test"; - const file = try tmp.dir.createFile(test_out_file, .{}); + const file = try tmp.dir.createFile(io, test_out_file, .{}); defer file.close(io); // Note: The test assumes createFile opens the file with CLOEXEC @@ -495,7 +495,7 @@ test "fsync" { defer tmp.cleanup(); const test_out_file = "os_tmp_test"; - const file = try tmp.dir.createFile(test_out_file, .{}); + const file = try tmp.dir.createFile(io, test_out_file, .{}); defer file.close(io); try posix.fsync(file.handle); @@ -617,7 +617,7 @@ test "dup & dup2" { defer tmp.cleanup(); { - var file = try tmp.dir.createFile("os_dup_test", .{}); + var file = try tmp.dir.createFile(io, "os_dup_test", .{}); defer file.close(io); var duped = Io.File{ .handle = try posix.dup(file.handle) }; @@ -659,7 +659,7 @@ test "writev longer than IOV_MAX" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("pwritev", .{}); + var file = try tmp.dir.createFile(io, "pwritev", .{}); defer file.close(io); const iovecs = [_]posix.iovec_const{.{ .base = "a", .len = 1 }} ** (posix.IOV_MAX + 1); @@ -684,7 +684,7 @@ test "POSIX file locking with fcntl" { defer tmp.cleanup(); // Create a temporary lock file - var file = try tmp.dir.createFile("lock", .{ .read = true }); + var file = try tmp.dir.createFile(io, "lock", .{ .read = true }); defer file.close(io); try file.setEndPos(2); const fd = file.handle; @@ -881,7 +881,7 @@ test "isatty" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("foo", .{}); + var file = try tmp.dir.createFile(io, "foo", .{}); defer file.close(io); try expectEqual(posix.isatty(file.handle), false); @@ -893,7 +893,7 @@ test "pread with empty buffer" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("pread_empty", .{ .read = true }); + var 
file = try tmp.dir.createFile(io, "pread_empty", .{ .read = true }); defer file.close(io); const bytes = try a.alloc(u8, 0); @@ -909,7 +909,7 @@ test "write with empty buffer" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("write_empty", .{}); + var file = try tmp.dir.createFile(io, "write_empty", .{}); defer file.close(io); const bytes = try a.alloc(u8, 0); @@ -925,7 +925,7 @@ test "pwrite with empty buffer" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("pwrite_empty", .{}); + var file = try tmp.dir.createFile(io, "pwrite_empty", .{}); defer file.close(io); const bytes = try a.alloc(u8, 0); @@ -965,35 +965,35 @@ test "fchmodat smoke test" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - try expectError(error.FileNotFound, posix.fchmodat(tmp.dir.fd, "regfile", 0o666, 0)); + try expectError(error.FileNotFound, posix.fchmodat(tmp.dir.handle, "regfile", 0o666, 0)); const fd = try posix.openat( - tmp.dir.fd, + tmp.dir.handle, "regfile", .{ .ACCMODE = .WRONLY, .CREAT = true, .EXCL = true, .TRUNC = true }, 0o644, ); posix.close(fd); - try posix.symlinkat("regfile", tmp.dir.fd, "symlink"); - const sym_mode = try getFileMode(tmp.dir.fd, "symlink"); + try posix.symlinkat("regfile", tmp.dir.handle, "symlink"); + const sym_mode = try getFileMode(tmp.dir.handle, "symlink"); - try posix.fchmodat(tmp.dir.fd, "regfile", 0o640, 0); - try expectMode(tmp.dir.fd, "regfile", 0o640); - try posix.fchmodat(tmp.dir.fd, "regfile", 0o600, posix.AT.SYMLINK_NOFOLLOW); - try expectMode(tmp.dir.fd, "regfile", 0o600); + try posix.fchmodat(tmp.dir.handle, "regfile", 0o640, 0); + try expectMode(tmp.dir.handle, "regfile", 0o640); + try posix.fchmodat(tmp.dir.handle, "regfile", 0o600, posix.AT.SYMLINK_NOFOLLOW); + try expectMode(tmp.dir.handle, "regfile", 0o600); - try posix.fchmodat(tmp.dir.fd, "symlink", 0o640, 0); - try expectMode(tmp.dir.fd, "regfile", 0o640); - try expectMode(tmp.dir.fd, "symlink", sym_mode); + try posix.fchmodat(tmp.dir.handle, "symlink", 0o640, 0); + try expectMode(tmp.dir.handle, "regfile", 0o640); + try expectMode(tmp.dir.handle, "symlink", sym_mode); var test_link = true; - posix.fchmodat(tmp.dir.fd, "symlink", 0o600, posix.AT.SYMLINK_NOFOLLOW) catch |err| switch (err) { + posix.fchmodat(tmp.dir.handle, "symlink", 0o600, posix.AT.SYMLINK_NOFOLLOW) catch |err| switch (err) { error.OperationNotSupported => test_link = false, else => |e| return e, }; if (test_link) - try expectMode(tmp.dir.fd, "symlink", 0o600); - try expectMode(tmp.dir.fd, "regfile", 0o640); + try expectMode(tmp.dir.handle, "symlink", 0o600); + try expectMode(tmp.dir.handle, "regfile", 0o640); } const CommonOpenFlags = packed struct { diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index b774303901..33faeef061 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -677,7 +677,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); if (self.cwd_dir) |cwd| { - posix.fchdir(cwd.fd) catch |err| forkChildErrReport(err_pipe[1], err); + posix.fchdir(cwd.handle) catch |err| forkChildErrReport(err_pipe[1], err); } else if (self.cwd) |cwd| { posix.chdir(cwd) catch |err| forkChildErrReport(err_pipe[1], err); } diff --git a/lib/std/std.zig b/lib/std/std.zig index 106811859b..1690c0575c 100644 --- a/lib/std/std.zig +++ b/lib/std/std.zig @@ -114,7 +114,7 @@ pub const options: Options = if (@hasDecl(root, 
"std_options")) root.std_options pub const Options = struct { enable_segfault_handler: bool = debug.default_enable_segfault_handler, - /// Function used to implement `std.fs.cwd` for WASI. + /// Function used to implement `std.Io.Dir.cwd` for WASI. wasiCwd: fn () os.wasi.fd_t = os.defaultWasiCwd, /// The current log level. diff --git a/lib/std/tar.zig b/lib/std/tar.zig index d861314fec..8a0bbb342f 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -610,7 +610,7 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp } }, .file => { - if (createDirAndFile(dir, file_name, fileMode(file.mode, options))) |fs_file| { + if (createDirAndFile(io, dir, file_name, fileMode(file.mode, options))) |fs_file| { defer fs_file.close(io); var file_writer = fs_file.writer(&file_contents_buffer); try it.streamRemaining(file, &file_writer.interface); @@ -638,12 +638,12 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp } } -fn createDirAndFile(dir: Io.Dir, file_name: []const u8, mode: Io.File.Mode) !Io.File { - const fs_file = dir.createFile(file_name, .{ .exclusive = true, .mode = mode }) catch |err| { +fn createDirAndFile(io: Io, dir: Io.Dir, file_name: []const u8, mode: Io.File.Mode) !Io.File { + const fs_file = dir.createFile(io, file_name, .{ .exclusive = true, .mode = mode }) catch |err| { if (err == error.FileNotFound) { if (std.fs.path.dirname(file_name)) |dir_name| { try dir.makePath(dir_name); - return try dir.createFile(file_name, .{ .exclusive = true, .mode = mode }); + return try dir.createFile(io, file_name, .{ .exclusive = true, .mode = mode }); } } return err; @@ -880,9 +880,9 @@ test "create file and symlink" { var root = testing.tmpDir(.{}); defer root.cleanup(); - var file = try createDirAndFile(root.dir, "file1", default_mode); + var file = try createDirAndFile(io, root.dir, "file1", default_mode); file.close(io); - file = try createDirAndFile(root.dir, "a/b/c/file2", default_mode); + file = try createDirAndFile(io, root.dir, "a/b/c/file2", default_mode); file.close(io); createDirAndSymlink(root.dir, "a/b/c/file2", "symlink1") catch |err| { @@ -894,7 +894,7 @@ test "create file and symlink" { // Danglink symlnik, file created later try createDirAndSymlink(root.dir, "../../../g/h/i/file4", "j/k/l/symlink3"); - file = try createDirAndFile(root.dir, "g/h/i/file4", default_mode); + file = try createDirAndFile(io, root.dir, "g/h/i/file4", default_mode); file.close(io); } diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 19038543a6..99d67ec132 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -628,7 +628,7 @@ pub fn tmpDir(opts: Io.Dir.OpenOptions) TmpDir { var sub_path: [TmpDir.sub_path_len]u8 = undefined; _ = std.fs.base64_encoder.encode(&sub_path, &random_bytes); - const cwd = std.fs.cwd(); + const cwd = Io.Dir.cwd(); var cache_dir = cwd.makeOpenPath(".zig-cache", .{}) catch @panic("unable to make tmp dir for testing: unable to make and open .zig-cache dir"); defer cache_dir.close(io); diff --git a/lib/std/zig/LibCInstallation.zig b/lib/std/zig/LibCInstallation.zig index c8bde2ab02..80317850df 100644 --- a/lib/std/zig/LibCInstallation.zig +++ b/lib/std/zig/LibCInstallation.zig @@ -57,7 +57,7 @@ pub fn parse( } } - const contents = try std.fs.cwd().readFileAlloc(libc_file, allocator, .limited(std.math.maxInt(usize))); + const contents = try Io.Dir.cwd().readFileAlloc(libc_file, allocator, .limited(std.math.maxInt(usize))); defer allocator.free(contents); var it = std.mem.tokenizeScalar(u8, contents, '\n'); @@ 
-337,7 +337,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F // search in reverse order const search_path_untrimmed = search_paths.items[search_paths.items.len - path_i - 1]; const search_path = std.mem.trimStart(u8, search_path_untrimmed, " "); - var search_dir = fs.cwd().openDir(search_path, .{}) catch |err| switch (err) { + var search_dir = Io.Dir.cwd().openDir(search_path, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, @@ -392,7 +392,7 @@ fn findNativeIncludeDirWindows( result_buf.shrinkAndFree(0); try result_buf.print("{s}\\Include\\{s}\\ucrt", .{ install.path, install.version }); - var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { + var dir = Io.Dir.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, @@ -440,7 +440,7 @@ fn findNativeCrtDirWindows( result_buf.shrinkAndFree(0); try result_buf.print("{s}\\Lib\\{s}\\ucrt\\{s}", .{ install.path, install.version, arch_sub_dir }); - var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { + var dir = Io.Dir.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, @@ -508,7 +508,7 @@ fn findNativeKernel32LibDir( result_buf.shrinkAndFree(0); try result_buf.print("{s}\\Lib\\{s}\\um\\{s}", .{ install.path, install.version, arch_sub_dir }); - var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { + var dir = Io.Dir.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, @@ -544,7 +544,7 @@ fn findNativeMsvcIncludeDir( const dir_path = try fs.path.join(allocator, &[_][]const u8{ up2, "include" }); errdefer allocator.free(dir_path); - var dir = fs.cwd().openDir(dir_path, .{}) catch |err| switch (err) { + var dir = Io.Dir.cwd().openDir(dir_path, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, diff --git a/lib/std/zig/WindowsSdk.zig b/lib/std/zig/WindowsSdk.zig index 6b6e4fa9f7..dca474020a 100644 --- a/lib/std/zig/WindowsSdk.zig +++ b/lib/std/zig/WindowsSdk.zig @@ -828,7 +828,7 @@ const MsvcLibDir = struct { try lib_dir_buf.appendSlice("VC\\Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"); var default_tools_version_buf: [512]u8 = undefined; - const default_tools_version_contents = std.fs.cwd().readFile(lib_dir_buf.items, &default_tools_version_buf) catch { + const default_tools_version_contents = Io.Dir.cwd().readFile(lib_dir_buf.items, &default_tools_version_buf) catch { return error.PathNotFound; }; var tokenizer = std.mem.tokenizeAny(u8, default_tools_version_contents, " \r\n"); diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index d3bafc16f2..9fa0546c3b 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -1,11 +1,12 @@ const builtin = @import("builtin"); +const native_endian = builtin.cpu.arch.endian(); + const std = @import("../std.zig"); const mem = std.mem; const elf = std.elf; const fs = std.fs; const assert = std.debug.assert; const Target = std.Target; -const native_endian = builtin.cpu.arch.endian(); const posix = std.posix; const Io = std.Io; @@ -69,7 +70,7 @@ pub fn getExternalExecutor( if (os_match and cpu_ok) native: { if (options.link_libc) { if (candidate.dynamic_linker.get()) |candidate_dl| { - fs.cwd().access(candidate_dl, .{}) catch { + Io.Dir.cwd().access(candidate_dl, .{}) catch { bad_result = .{ .bad_dl = candidate_dl }; break :native; }; @@ 
-710,6 +711,7 @@ fn abiAndDynamicLinkerFromFile( error.SystemResources, error.FileSystem, error.SymLinkLoop, + error.Canceled, error.Unexpected, => |e| return e, }; diff --git a/lib/std/zig/system/darwin/macos.zig b/lib/std/zig/system/darwin/macos.zig index 9bb4e34e3b..4ff6846a09 100644 --- a/lib/std/zig/system/darwin/macos.zig +++ b/lib/std/zig/system/darwin/macos.zig @@ -1,9 +1,10 @@ -const std = @import("std"); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const mem = std.mem; const testing = std.testing; - const Target = std.Target; /// Detect macOS version. @@ -54,7 +55,7 @@ pub fn detect(target_os: *Target.Os) !void { // approx. 4 times historical file size var buf: [2048]u8 = undefined; - if (std.fs.cwd().readFile(path, &buf)) |bytes| { + if (Io.Dir.cwd().readFile(path, &buf)) |bytes| { if (parseSystemVersion(bytes)) |ver| { // never return non-canonical `10.(16+)` if (!(ver.major == 10 and ver.minor >= 16)) { diff --git a/lib/std/zip.zig b/lib/std/zip.zig index 0ca77c98a1..9d08847092 100644 --- a/lib/std/zip.zig +++ b/lib/std/zip.zig @@ -564,9 +564,9 @@ pub const Iterator = struct { defer parent_dir.close(io); const basename = std.fs.path.basename(filename); - break :blk try parent_dir.createFile(basename, .{ .exclusive = true }); + break :blk try parent_dir.createFile(io, basename, .{ .exclusive = true }); } - break :blk try dest.createFile(filename, .{ .exclusive = true }); + break :blk try dest.createFile(io, filename, .{ .exclusive = true }); }; defer out_file.close(io); var out_file_buffer: [1024]u8 = undefined; diff --git a/src/Compilation.zig b/src/Compilation.zig index 24b994f608..5f15ef5f74 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -450,7 +450,7 @@ pub const Path = struct { const dir = switch (p.root) { .none => { const cwd_sub_path = absToCwdRelative(p.sub_path, dirs.cwd); - return .{ fs.cwd(), cwd_sub_path }; + return .{ Io.Dir.cwd(), cwd_sub_path }; }, .zig_lib => dirs.zig_lib.handle, .global_cache => dirs.global_cache.handle, @@ -723,7 +723,7 @@ pub const Directories = struct { pub fn deinit(dirs: *Directories, io: Io) void { // The local and global caches could be the same. - const close_local = dirs.local_cache.handle.fd != dirs.global_cache.handle.fd; + const close_local = dirs.local_cache.handle.handle != dirs.global_cache.handle.handle; dirs.global_cache.handle.close(io); if (close_local) dirs.local_cache.handle.close(io); @@ -814,7 +814,7 @@ pub const Directories = struct { return .{ .path = if (std.mem.eql(u8, name, ".")) null else name, .handle = .{ - .fd = preopens.find(name) orelse fatal("WASI preopen not found: '{s}'", .{name}), + .handle = preopens.find(name) orelse fatal("WASI preopen not found: '{s}'", .{name}), }, }; } @@ -824,8 +824,8 @@ pub const Directories = struct { }; const nonempty_path = if (path.len == 0) "." 
else path; const handle_or_err = switch (thing) { - .@"zig lib" => fs.cwd().openDir(nonempty_path, .{}), - .@"global cache", .@"local cache" => fs.cwd().makeOpenPath(nonempty_path, .{}), + .@"zig lib" => Io.Dir.cwd().openDir(nonempty_path, .{}), + .@"global cache", .@"local cache" => Io.Dir.cwd().makeOpenPath(nonempty_path, .{}), }; return .{ .path = if (path.len == 0) null else path, @@ -1104,7 +1104,7 @@ pub const CObject = struct { const source_line = source_line: { if (diag.src_loc.offset == 0 or diag.src_loc.column == 0) break :source_line 0; - const file = fs.cwd().openFile(io, file_name, .{}) catch break :source_line 0; + const file = Io.Dir.cwd().openFile(io, file_name, .{}) catch break :source_line 0; defer file.close(io); var buffer: [1024]u8 = undefined; var file_reader = file.reader(io, &buffer); @@ -1179,7 +1179,7 @@ pub const CObject = struct { }; var buffer: [1024]u8 = undefined; - const file = try fs.cwd().openFile(io, path, .{}); + const file = try Io.Dir.cwd().openFile(io, path, .{}); defer file.close(io); var file_reader = file.reader(io, &buffer); var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = &file_reader.interface }); @@ -2109,7 +2109,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, }, }; // These correspond to std.zig.Server.Message.PathPrefix. - cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(options.dirs.zig_lib); cache.addPrefix(options.dirs.local_cache); cache.addPrefix(options.dirs.global_cache); @@ -5220,7 +5220,7 @@ fn createDepFile( binfile: Cache.Path, ) anyerror!void { var buf: [4096]u8 = undefined; - var af = try std.fs.cwd().atomicFile(depfile, .{ .write_buffer = &buf }); + var af = try Io.Dir.cwd().atomicFile(depfile, .{ .write_buffer = &buf }); defer af.deinit(); comp.writeDepFile(binfile, &af.file_writer.interface) catch return af.file_writer.err.?; @@ -5284,7 +5284,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void { }; } - var tar_file = out_dir.createFile("sources.tar", .{}) catch |err| { + var tar_file = out_dir.createFile(io, "sources.tar", .{}) catch |err| { return comp.lockAndSetMiscFailure( .docs_copy, "unable to create '{f}/sources.tar': {s}", diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 3bd05120ff..8a30529bc5 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -383,14 +383,14 @@ pub fn run(f: *Fetch) RunError!void { }, .remote => |remote| remote, .path_or_url => |path_or_url| { - if (fs.cwd().openDir(path_or_url, .{ .iterate = true })) |dir| { + if (Io.Dir.cwd().openDir(path_or_url, .{ .iterate = true })) |dir| { var resource: Resource = .{ .dir = dir }; return f.runResource(path_or_url, &resource, null); } else |dir_err| { var server_header_buffer: [init_resource_buffer_size]u8 = undefined; const file_err = if (dir_err == error.NotDir) e: { - if (fs.cwd().openFile(io, path_or_url, .{})) |file| { + if (Io.Dir.cwd().openFile(io, path_or_url, .{})) |file| { var resource: Resource = .{ .file = file.reader(io, &server_header_buffer) }; return f.runResource(path_or_url, &resource, null); } else |err| break :e err; @@ -1303,7 +1303,7 @@ fn unzip( const random_integer = std.crypto.random.int(u64); zip_path[prefix.len..][0..random_len].* = std.fmt.hex(random_integer); - break cache_root.handle.createFile(&zip_path, .{ + break cache_root.handle.createFile(io, &zip_path, .{ .exclusive = true, .read = true, }) catch |err| switch (err) { @@ -1365,7 +1365,7 @@ fn unpackGitPack(f: 
*Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U { var pack_dir = try out_dir.makeOpenPath(".git", .{}); defer pack_dir.close(io); - var pack_file = try pack_dir.createFile("pkg.pack", .{ .read = true }); + var pack_file = try pack_dir.createFile(io, "pkg.pack", .{ .read = true }); defer pack_file.close(io); var pack_file_buffer: [4096]u8 = undefined; var pack_file_reader = b: { @@ -1376,7 +1376,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U break :b pack_file_writer.moveToReader(io); }; - var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true }); + var index_file = try pack_dir.createFile(io, "pkg.idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [2000]u8 = undefined; var index_file_writer = index_file.writer(&index_file_buffer); @@ -2235,7 +2235,7 @@ test "set executable bit based on file content" { fn saveEmbedFile(io: Io, comptime tarball_name: []const u8, dir: Io.Dir) !void { //const tarball_name = "duplicate_paths_excluded.tar.gz"; const tarball_content = @embedFile("Fetch/testdata/" ++ tarball_name); - var tmp_file = try dir.createFile(tarball_name, .{}); + var tmp_file = try dir.createFile(io, tarball_name, .{}); defer tmp_file.close(io); try tmp_file.writeAll(tarball_content); } diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index ccae9440e2..7b08a89cae 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -264,7 +264,7 @@ pub const Repository = struct { try repository.odb.seekOid(entry.oid); const file_object = try repository.odb.readObject(); if (file_object.type != .blob) return error.InvalidFile; - var file = dir.createFile(entry.name, .{ .exclusive = true }) catch |e| { + var file = dir.createFile(io, entry.name, .{ .exclusive = true }) catch |e| { const file_name = try std.fs.path.join(diagnostics.allocator, &.{ current_path, entry.name }); errdefer diagnostics.allocator.free(file_name); try diagnostics.errors.append(diagnostics.allocator, .{ .unable_to_create_file = .{ @@ -1584,14 +1584,14 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u var git_dir = testing.tmpDir(.{}); defer git_dir.cleanup(); - var pack_file = try git_dir.dir.createFile("testrepo.pack", .{ .read = true }); + var pack_file = try git_dir.dir.createFile(io, "testrepo.pack", .{ .read = true }); defer pack_file.close(io); try pack_file.writeAll(testrepo_pack); var pack_file_buffer: [2000]u8 = undefined; var pack_file_reader = pack_file.reader(io, &pack_file_buffer); - var index_file = try git_dir.dir.createFile("testrepo.idx", .{ .read = true }); + var index_file = try git_dir.dir.createFile(io, "testrepo.idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [2000]u8 = undefined; var index_file_writer = index_file.writer(&index_file_buffer); @@ -1714,20 +1714,20 @@ pub fn main() !void { const format = std.meta.stringToEnum(Oid.Format, args[1]) orelse return error.InvalidFormat; - var pack_file = try std.fs.cwd().openFile(io, args[2], .{}); + var pack_file = try Io.Dir.cwd().openFile(io, args[2], .{}); defer pack_file.close(io); var pack_file_buffer: [4096]u8 = undefined; var pack_file_reader = pack_file.reader(io, &pack_file_buffer); const commit = try Oid.parse(format, args[3]); - var worktree = try std.fs.cwd().makeOpenPath(args[4], .{}); + var worktree = try Io.Dir.cwd().makeOpenPath(args[4], .{}); defer worktree.close(io); var git_dir = try worktree.makeOpenPath(".git", .{}); defer git_dir.close(io); 
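// A minimal sketch of the createFile/writer pattern used in this function; `io`,
// `dir`, the file name, and the function name are stand-ins, not values from this
// change (assumes `const std = @import("std"); const Io = std.Io;` as in the files above).
fn writeExample(io: Io, dir: Io.Dir) !void {
    var file = try dir.createFile(io, "example.pack", .{ .read = true });
    defer file.close(io);
    var buffer: [4096]u8 = undefined;
    var file_writer = file.writer(&buffer);
    try file_writer.interface.writeAll("example bytes");
    try file_writer.interface.flush();
}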
std.debug.print("Starting index...\n", .{}); - var index_file = try git_dir.createFile("idx", .{ .read = true }); + var index_file = try git_dir.createFile(io, "idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [4096]u8 = undefined; var index_file_writer = index_file.writer(&index_file_buffer); diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 45b1302138..9a75b2096e 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -170,7 +170,7 @@ pub fn updateFile( // version. Likewise if we're working on AstGen and another process asks for // the cached file, they'll get it. const cache_file = while (true) { - break zir_dir.createFile(&hex_digest, .{ + break zir_dir.createFile(io, &hex_digest, .{ .read = true, .truncate = false, .lock = lock, @@ -196,7 +196,7 @@ pub fn updateFile( cache_directory, }); } - break zir_dir.createFile(&hex_digest, .{ + break zir_dir.createFile(io, &hex_digest, .{ .read = true, .truncate = false, .lock = lock, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index cb4fe0459f..4fc58c2c4b 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1,19 +1,22 @@ -const std = @import("std"); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const Allocator = std.mem.Allocator; const log = std.log.scoped(.codegen); const math = std.math; const DW = std.dwarf; - const Builder = std.zig.llvm.Builder; + +const build_options = @import("build_options"); const llvm = if (build_options.have_llvm) @import("llvm/bindings.zig") else @compileError("LLVM unavailable"); + const link = @import("../link.zig"); const Compilation = @import("../Compilation.zig"); -const build_options = @import("build_options"); const Zcu = @import("../Zcu.zig"); const InternPool = @import("../InternPool.zig"); const Package = @import("../Package.zig"); @@ -964,7 +967,7 @@ pub const Object = struct { if (std.mem.eql(u8, path, "-")) { o.builder.dump(); } else { - o.builder.printToFilePath(std.fs.cwd(), path) catch |err| { + o.builder.printToFilePath(Io.Dir.cwd(), path) catch |err| { log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) }); }; } @@ -978,7 +981,7 @@ pub const Object = struct { o.builder.clearAndFree(); if (options.pre_bc_path) |path| { - var file = std.fs.cwd().createFile(path, .{}) catch |err| + var file = Io.Dir.cwd().createFile(io, path, .{}) catch |err| return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) }); defer file.close(io); @@ -991,7 +994,7 @@ pub const Object = struct { options.post_ir_path == null and options.post_bc_path == null) return; if (options.post_bc_path) |path| { - var file = std.fs.cwd().createFile(path, .{}) catch |err| + var file = Io.Dir.cwd().createFile(io, path, .{}) catch |err| return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) }); defer file.close(io); @@ -2711,7 +2714,7 @@ pub const Object = struct { } fn allocTypeName(o: *Object, pt: Zcu.PerThread, ty: Type) Allocator.Error![:0]const u8 { - var aw: std.Io.Writer.Allocating = .init(o.gpa); + var aw: Io.Writer.Allocating = .init(o.gpa); defer aw.deinit(); ty.print(&aw.writer, pt, null) catch |err| switch (err) { error.WriteFailed => return error.OutOfMemory, diff --git a/src/fmt.zig b/src/fmt.zig index ce8a31fa4c..36a3833986 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -182,11 +182,11 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! 
// Mark any excluded files/directories as already seen, // so that they are skipped later during actual processing for (excluded_files.items) |file_path| { - const stat = fs.cwd().statFile(file_path) catch |err| switch (err) { + const stat = Io.Dir.cwd().statFile(file_path) catch |err| switch (err) { error.FileNotFound => continue, // On Windows, statFile does not work for directories error.IsDir => dir: { - var dir = try fs.cwd().openDir(file_path, .{}); + var dir = try Io.Dir.cwd().openDir(file_path, .{}); defer dir.close(io); break :dir try dir.stat(); }, @@ -196,7 +196,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! } for (input_files.items) |file_path| { - try fmtPath(&fmt, file_path, check_flag, fs.cwd(), file_path); + try fmtPath(&fmt, file_path, check_flag, Io.Dir.cwd(), file_path); } try fmt.stdout_writer.interface.flush(); if (fmt.any_error) { diff --git a/src/introspect.zig b/src/introspect.zig index d2faa9a55c..04ddf47e8a 100644 --- a/src/introspect.zig +++ b/src/introspect.zig @@ -82,7 +82,7 @@ pub fn findZigLibDirFromSelfExe( cwd_path: []const u8, self_exe_path: []const u8, ) error{ OutOfMemory, FileNotFound }!Cache.Directory { - const cwd = fs.cwd(); + const cwd = Io.Dir.cwd(); var cur_path: []const u8 = self_exe_path; while (fs.path.dirname(cur_path)) |dirname| : (cur_path = dirname) { var base_dir = cwd.openDir(dirname, .{}) catch continue; @@ -206,7 +206,7 @@ pub fn resolveSuitableLocalCacheDir(arena: Allocator, cwd: []const u8) Allocator var cur_dir = cwd; while (true) { const joined = try fs.path.join(arena, &.{ cur_dir, Package.build_zig_basename }); - if (fs.cwd().access(joined, .{})) |_| { + if (Io.Dir.cwd().access(joined, .{})) |_| { return try fs.path.join(arena, &.{ cur_dir, default_local_zig_cache_basename }); } else |err| switch (err) { error.FileNotFound => { diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig index 8c5e0afe4b..cfd8d5554c 100644 --- a/src/libs/freebsd.zig +++ b/src/libs/freebsd.zig @@ -1,9 +1,9 @@ const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const mem = std.mem; const log = std.log; -const fs = std.fs; -const path = fs.path; +const path = std.Io.Dir.path; const assert = std.debug.assert; const Version = std.SemanticVersion; const Path = std.Build.Cache.Path; @@ -446,7 +446,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .io = io, .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), }; - cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); cache.addPrefix(comp.dirs.global_cache); defer cache.manifest_dir.close(io); @@ -468,7 +468,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -986,7 +986,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -1014,7 +1014,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C const so_path: Path = .{ .root_dir = so_files.dir_path.root_dir, 
.sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{ - so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.getSoVersion(&target.os), + so_files.dir_path.sub_path, path.sep, lib.name, lib.getSoVersion(&target.os), }) catch return comp.setAllocFailure(), }; task_buffer[task_buffer_i] = .{ .load_dso = so_path }; diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig index bec20ff3d4..e3d8ce1f7f 100644 --- a/src/libs/glibc.zig +++ b/src/libs/glibc.zig @@ -1,9 +1,9 @@ const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const mem = std.mem; const log = std.log; -const fs = std.fs; -const path = fs.path; +const path = std.Io.Dir.path; const assert = std.debug.assert; const Version = std.SemanticVersion; const Path = std.Build.Cache.Path; @@ -681,7 +681,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .io = io, .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), }; - cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); cache.addPrefix(comp.dirs.global_cache); defer cache.manifest_dir.close(io); @@ -703,7 +703,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -775,7 +775,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye try stubs_asm.appendSlice(".text\n"); var sym_i: usize = 0; - var sym_name_buf: std.Io.Writer.Allocating = .init(arena); + var sym_name_buf: Io.Writer.Allocating = .init(arena); var opt_symbol_name: ?[]const u8 = null; var versions_buffer: [32]u8 = undefined; var versions_len: usize = undefined; @@ -796,7 +796,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye // twice, which causes a "duplicate symbol" assembler error. 
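// A minimal sketch of the fixed-buffer reader set up just below, with made-up
// bytes so the takeInt call has something concrete to decode; the function name
// is illustrative (assumes `const Io = std.Io;` as imported above).
fn decodeExample() !u16 {
    var r: Io.Reader = .fixed(&.{ 0x02, 0x00 });
    return r.takeInt(u16, .little); // yields 2: the bytes are little-endian
}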
var versions_written = std.AutoArrayHashMap(Version, void).init(arena); - var inc_reader: std.Io.Reader = .fixed(metadata.inclusions); + var inc_reader: Io.Reader = .fixed(metadata.inclusions); const fn_inclusions_len = try inc_reader.takeInt(u16, .little); @@ -1130,7 +1130,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -1156,7 +1156,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C const so_path: Path = .{ .root_dir = so_files.dir_path.root_dir, .sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{ - so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover, + so_files.dir_path.sub_path, path.sep, lib.name, lib.sover, }) catch return comp.setAllocFailure(), }; task_buffer[task_buffer_i] = .{ .load_dso = so_path }; diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig index 005696e1fc..b3ca51e833 100644 --- a/src/libs/mingw.zig +++ b/src/libs/mingw.zig @@ -1,7 +1,8 @@ const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const mem = std.mem; -const path = std.fs.path; +const path = std.Io.Dir.path; const assert = std.debug.assert; const log = std.log.scoped(.mingw); @@ -259,7 +260,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { .io = io, .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), }; - cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); cache.addPrefix(comp.dirs.global_cache); defer cache.manifest_dir.close(io); @@ -304,7 +305,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { .output = .{ .to_list = .{ .arena = .init(gpa) } }, }; defer diagnostics.deinit(); - var aro_comp = aro.Compilation.init(gpa, arena, io, &diagnostics, std.fs.cwd()); + var aro_comp = aro.Compilation.init(gpa, arena, io, &diagnostics, Io.Dir.cwd()); defer aro_comp.deinit(); aro_comp.target = .fromZigTarget(target.*); @@ -343,7 +344,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { } const members = members: { - var aw: std.Io.Writer.Allocating = .init(gpa); + var aw: Io.Writer.Allocating = .init(gpa); errdefer aw.deinit(); try pp.prettyPrintTokens(&aw.writer, .result_only); @@ -376,7 +377,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { errdefer gpa.free(lib_final_path); { - const lib_final_file = try o_dir.createFile(final_lib_basename, .{ .truncate = true }); + const lib_final_file = try o_dir.createFile(io, final_lib_basename, .{ .truncate = true }); defer lib_final_file.close(io); var buffer: [1024]u8 = undefined; var file_writer = lib_final_file.writer(&buffer); @@ -442,7 +443,7 @@ fn findDef( } else { try override_path.print(fmt_path, .{ lib_path, lib_name }); } - if (std.fs.cwd().access(override_path.items, .{})) |_| { + if (Io.Dir.cwd().access(override_path.items, .{})) |_| { return override_path.toOwnedSlice(); } else |err| switch (err) { error.FileNotFound => {}, @@ -459,7 +460,7 @@ fn findDef( } else { try override_path.print(fmt_path, .{lib_name}); } - if (std.fs.cwd().access(override_path.items, .{})) |_| { + if (Io.Dir.cwd().access(override_path.items, .{})) |_| { return override_path.toOwnedSlice(); } else |err| switch (err) { 
error.FileNotFound => {}, @@ -476,7 +477,7 @@ fn findDef( } else { try override_path.print(fmt_path, .{lib_name}); } - if (std.fs.cwd().access(override_path.items, .{})) |_| { + if (Io.Dir.cwd().access(override_path.items, .{})) |_| { return override_path.toOwnedSlice(); } else |err| switch (err) { error.FileNotFound => {}, diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig index 67e6a2f903..cb6a80d69d 100644 --- a/src/libs/netbsd.zig +++ b/src/libs/netbsd.zig @@ -1,9 +1,9 @@ const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const mem = std.mem; const log = std.log; -const fs = std.fs; -const path = fs.path; +const path = std.Io.Dir.path; const assert = std.debug.assert; const Version = std.SemanticVersion; const Path = std.Build.Cache.Path; @@ -387,7 +387,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .io = io, .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), }; - cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); cache.addPrefix(comp.dirs.global_cache); defer cache.manifest_dir.close(io); @@ -409,7 +409,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -640,7 +640,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -661,7 +661,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C const so_path: Path = .{ .root_dir = so_files.dir_path.root_dir, .sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{ - so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover, + so_files.dir_path.sub_path, path.sep, lib.name, lib.sover, }) catch return comp.setAllocFailure(), }; task_buffer[task_buffer_i] = .{ .load_dso = so_path }; diff --git a/src/link/C.zig b/src/link/C.zig index 04c92443e5..a001f8fdd9 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -136,7 +136,7 @@ pub fn createEmpty( assert(!use_lld); assert(!use_llvm); - const file = try emit.root_dir.handle.createFile(emit.sub_path, .{ + const file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ // Truncation is done on `flush`. .truncate = false, }); @@ -792,7 +792,7 @@ pub fn flushEmitH(zcu: *Zcu) !void { } const directory = emit_h.loc.directory orelse zcu.comp.local_cache_directory; - const file = try directory.handle.createFile(emit_h.loc.basename, .{ + const file = try directory.handle.createFile(io, emit_h.loc.basename, .{ // We set the end position explicitly below; by not truncating the file, we possibly // make it easier on the file system by doing 1 reallocation instead of two. 
.truncate = false, diff --git a/src/link/Coff.zig b/src/link/Coff.zig index e1d52fb7c4..009e59ed0d 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -631,12 +631,14 @@ fn create( else => return error.UnsupportedCOFFArchitecture, }; + const io = comp.io; + const coff = try arena.create(Coff); - const file = try path.root_dir.handle.createFile(comp.io, path.sub_path, .{ + const file = try path.root_dir.handle.createFile(io, path.sub_path, .{ .read = true, .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode), }); - errdefer file.close(comp.io); + errdefer file.close(io); coff.* = .{ .base = .{ .tag = .coff2, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 584a50c7f2..53812a37ec 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -313,9 +313,11 @@ pub fn createEmpty( const is_obj = output_mode == .Obj; const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .static); + const io = comp.io; + // What path should this ELF linker code output to? const sub_path = emit.sub_path; - self.base.file = try emit.root_dir.handle.createFile(sub_path, .{ + self.base.file = try emit.root_dir.handle.createFile(io, sub_path, .{ .truncate = true, .read = true, .mode = link.File.determineMode(output_mode, link_mode), diff --git a/src/link/Lld.zig b/src/link/Lld.zig index 49f6d3f7c7..b25b9da9d9 100644 --- a/src/link/Lld.zig +++ b/src/link/Lld.zig @@ -1572,7 +1572,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void { // report a nice error here with the file path if it fails instead of // just returning the error code. // chmod does not interact with umask, so we use a conservative -rwxr--r-- here. - std.posix.fchmodat(fs.cwd().fd, full_out_path, 0o744, 0) catch |err| switch (err) { + std.posix.fchmodat(Io.Dir.cwd().handle, full_out_path, 0o744, 0) catch |err| switch (err) { error.OperationNotSupported => unreachable, // Not a symlink. 
else => |e| return e, }; @@ -1624,7 +1624,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi const rand_int = std.crypto.random.int(u64); const rsp_path = "tmp" ++ s ++ std.fmt.hex(rand_int) ++ ".rsp"; - const rsp_file = try comp.dirs.local_cache.handle.createFile(rsp_path, .{}); + const rsp_file = try comp.dirs.local_cache.handle.createFile(io, rsp_path, .{}); defer comp.dirs.local_cache.handle.deleteFileZ(rsp_path) catch |err| log.warn("failed to delete response file {s}: {s}", .{ rsp_path, @errorName(err) }); { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index e837cc853a..0f6127e10e 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -219,7 +219,9 @@ pub fn createEmpty( }; errdefer self.base.destroy(); - self.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{ + const io = comp.io; + + self.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ .truncate = true, .read = true, .mode = link.File.determineMode(output_mode, link_mode), @@ -1082,7 +1084,7 @@ fn accessLibPath( test_path.clearRetainingCapacity(); try test_path.print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext }); try checked_paths.append(try arena.dupe(u8, test_path.items)); - fs.cwd().access(test_path.items, .{}) catch |err| switch (err) { + Io.Dir.cwd().access(test_path.items, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| return e, }; @@ -1110,7 +1112,7 @@ fn accessFrameworkPath( ext, }); try checked_paths.append(try arena.dupe(u8, test_path.items)); - fs.cwd().access(test_path.items, .{}) catch |err| switch (err) { + Io.Dir.cwd().access(test_path.items, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| return e, }; @@ -1191,7 +1193,7 @@ fn parseDependentDylibs(self: *MachO) !void { try test_path.print("{s}{s}", .{ path, ext }); } try checked_paths.append(try arena.dupe(u8, test_path.items)); - fs.cwd().access(test_path.items, .{}) catch |err| switch (err) { + Io.Dir.cwd().access(test_path.items, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| return e, }; @@ -3289,7 +3291,7 @@ pub fn reopenDebugInfo(self: *MachO) !void { var d_sym_bundle = try self.base.emit.root_dir.handle.makeOpenPath(d_sym_path, .{}); defer d_sym_bundle.close(io); - self.d_sym.?.file = try d_sym_bundle.createFile(fs.path.basename(self.base.emit.sub_path), .{ + self.d_sym.?.file = try d_sym_bundle.createFile(io, fs.path.basename(self.base.emit.sub_path), .{ .truncate = false, .read = true, }); @@ -4370,7 +4372,7 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi // The file/property is also available with vendored libc. 
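// A minimal sketch of the renamed cwd helper used in the function below; the
// path, size limit, and function name are illustrative, and `gpa` stands for any
// allocator (assumes `const Io = std.Io;`). Io.Dir.cwd() replaces the old
// std.fs.cwd(); the rest of the call is unchanged, and the caller frees the
// returned bytes with gpa.free.
fn readExample(gpa: Allocator) ![]u8 {
    return Io.Dir.cwd().readFileAlloc("example.json", gpa, .limited(64 * 1024));
}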
fn readSdkVersionFromSettings(arena: Allocator, dir: []const u8) ![]const u8 { const sdk_path = try fs.path.join(arena, &.{ dir, "SDKSettings.json" }); - const contents = try fs.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16))); + const contents = try Io.Dir.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16))); const parsed = try std.json.parseFromSlice(std.json.Value, arena, contents, .{}); if (parsed.value.object.get("MinimalDisplayName")) |ver| return ver.string; return error.SdkVersionFailure; diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 5f9a9ecac9..814faf234a 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -247,7 +247,7 @@ pub fn deinit(self: *CodeSignature, allocator: Allocator) void { } pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, path: []const u8) !void { - const inner = try fs.cwd().readFileAlloc(path, allocator, .limited(std.math.maxInt(u32))); + const inner = try Io.Dir.cwd().readFileAlloc(path, allocator, .limited(std.math.maxInt(u32))); self.entitlements = .{ .inner = inner }; } diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 7e28dc0a8b..d13caaa315 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -33,6 +33,7 @@ pub fn createEmpty( options: link.File.OpenOptions, ) !*Linker { const gpa = comp.gpa; + const io = comp.io; const target = &comp.root_mod.resolved_target.result; assert(!comp.config.use_lld); // Caught by Compilation.Config.resolve @@ -78,7 +79,7 @@ pub fn createEmpty( }; errdefer linker.deinit(); - linker.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{ + linker.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ .truncate = true, .read = true, }); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 7ab1e0eb4b..5f89625d56 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -2997,7 +2997,9 @@ pub fn createEmpty( .named => |name| (try wasm.internString(name)).toOptional(), }; - wasm.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{ + const io = comp.io; + + wasm.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ .truncate = true, .read = true, .mode = if (fs.has_executable_bit) diff --git a/src/main.zig b/src/main.zig index b040b6c8ef..67b7384b57 100644 --- a/src/main.zig +++ b/src/main.zig @@ -713,7 +713,7 @@ const Emit = union(enum) { } else e: { // If there's a dirname, check that dir exists. This will give a more descriptive error than `Compilation` otherwise would. if (fs.path.dirname(path)) |dir_path| { - var dir = fs.cwd().openDir(dir_path, .{}) catch |err| { + var dir = Io.Dir.cwd().openDir(dir_path, .{}) catch |err| { fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) }); }; dir.close(io); @@ -3304,7 +3304,7 @@ fn buildOutputType( } else emit: { // If there's a dirname, check that dir exists. This will give a more descriptive error than `Compilation` otherwise would. if (fs.path.dirname(path)) |dir_path| { - var dir = fs.cwd().openDir(dir_path, .{}) catch |err| { + var dir = Io.Dir.cwd().openDir(dir_path, .{}) catch |err| { fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) }); }; dir.close(io); @@ -3389,7 +3389,7 @@ fn buildOutputType( // file will not run and this temp file will be leaked. The filename // will be a hash of its contents — so multiple invocations of // `zig cc -` will result in the same temp file name. 
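// A minimal sketch of the cwd existence check used in the search loops above;
// the path argument and function name are illustrative (assumes `const Io = std.Io;`).
fn overrideExists(path_str: []const u8) !bool {
    Io.Dir.cwd().access(path_str, .{}) catch |err| switch (err) {
        error.FileNotFound => return false, // keep searching the next location
        else => |e| return e,
    };
    return true;
}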
- var f = try dirs.local_cache.handle.createFile(dump_path, .{}); + var f = try dirs.local_cache.handle.createFile(io, dump_path, .{}); defer f.close(io); // Re-using the hasher from Cache, since the functional requirements @@ -4773,7 +4773,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! var ok_count: usize = 0; for (template_paths) |template_path| { - if (templates.write(arena, fs.cwd(), sanitized_root_name, template_path, fingerprint)) |_| { + if (templates.write(arena, Io.Dir.cwd(), sanitized_root_name, template_path, fingerprint)) |_| { std.log.info("created {s}", .{template_path}); ok_count += 1; } else |err| switch (err) { @@ -5227,7 +5227,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) if (system_pkg_dir_path) |p| { job_queue.global_cache = .{ .path = p, - .handle = fs.cwd().openDir(p, .{}) catch |err| { + .handle = Io.Dir.cwd().openDir(p, .{}) catch |err| { fatal("unable to open system package directory '{s}': {s}", .{ p, @errorName(err), }); @@ -5823,7 +5823,7 @@ const ArgIteratorResponseFile = process.ArgIteratorGeneral(.{ .comments = true, /// Initialize the arguments from a Response File. "*.rsp" fn initArgIteratorResponseFile(allocator: Allocator, resp_file_path: []const u8) !ArgIteratorResponseFile { const max_bytes = 10 * 1024 * 1024; // 10 MiB of command line arguments is a reasonable limit - const cmd_line = try fs.cwd().readFileAlloc(resp_file_path, allocator, .limited(max_bytes)); + const cmd_line = try Io.Dir.cwd().readFileAlloc(resp_file_path, allocator, .limited(max_bytes)); errdefer allocator.free(cmd_line); return ArgIteratorResponseFile.initTakeOwnership(allocator, cmd_line); @@ -6187,7 +6187,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void { const display_path = zig_source_path orelse ""; const source: [:0]const u8 = s: { var f = if (zig_source_path) |p| file: { - break :file fs.cwd().openFile(io, p, .{}) catch |err| { + break :file Io.Dir.cwd().openFile(io, p, .{}) catch |err| { fatal("unable to open file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) }); }; } else Io.File.stdin(); @@ -6494,7 +6494,7 @@ fn cmdDumpZir(arena: Allocator, io: Io, args: []const []const u8) !void { const cache_file = args[0]; - var f = fs.cwd().openFile(io, cache_file, .{}) catch |err| { + var f = Io.Dir.cwd().openFile(io, cache_file, .{}) catch |err| { fatal("unable to open zir cache file for dumping '{s}': {s}", .{ cache_file, @errorName(err) }); }; defer f.close(io); @@ -6541,7 +6541,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { const new_source_path = args[1]; const old_source = source: { - var f = fs.cwd().openFile(io, old_source_path, .{}) catch |err| + var f = Io.Dir.cwd().openFile(io, old_source_path, .{}) catch |err| fatal("unable to open old source file '{s}': {s}", .{ old_source_path, @errorName(err) }); defer f.close(io); var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer); @@ -6549,7 +6549,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { fatal("unable to read old source file '{s}': {s}", .{ old_source_path, @errorName(err) }); }; const new_source = source: { - var f = fs.cwd().openFile(io, new_source_path, .{}) catch |err| + var f = Io.Dir.cwd().openFile(io, new_source_path, .{}) catch |err| fatal("unable to open new source file '{s}': {s}", .{ new_source_path, @errorName(err) }); defer f.close(io); var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer); @@ -6845,7 +6845,7 
@@ fn accessFrameworkPath( framework_dir_path, framework_name, framework_name, ext, }); try checked_paths.print("\n {s}", .{test_path.items}); - fs.cwd().access(test_path.items, .{}) catch |err| switch (err) { + Io.Dir.cwd().access(test_path.items, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| fatal("unable to search for {s} framework '{s}': {s}", .{ ext, test_path.items, @errorName(e), @@ -6957,7 +6957,7 @@ fn cmdFetch( var global_cache_directory: Directory = l: { const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena); break :l .{ - .handle = try fs.cwd().makeOpenPath(p, .{}), + .handle = try Io.Dir.cwd().makeOpenPath(p, .{}), .path = p, }; }; @@ -7260,7 +7260,7 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot { if (options.build_file) |bf| { if (fs.path.dirname(bf)) |dirname| { - const dir = fs.cwd().openDir(dirname, .{}) catch |err| { + const dir = Io.Dir.cwd().openDir(dirname, .{}) catch |err| { fatal("unable to open directory to build file from argument 'build-file', '{s}': {s}", .{ dirname, @errorName(err) }); }; return .{ @@ -7272,7 +7272,7 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot { return .{ .build_zig_basename = build_zig_basename, - .directory = .{ .path = null, .handle = fs.cwd() }, + .directory = .{ .path = null, .handle = Io.Dir.cwd() }, .cleanup_build_dir = null, }; } @@ -7280,8 +7280,8 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot { var dirname: []const u8 = cwd_path; while (true) { const joined_path = try fs.path.join(arena, &[_][]const u8{ dirname, build_zig_basename }); - if (fs.cwd().access(joined_path, .{})) |_| { - const dir = fs.cwd().openDir(dirname, .{}) catch |err| { + if (Io.Dir.cwd().access(joined_path, .{})) |_| { + const dir = Io.Dir.cwd().openDir(dirname, .{}) catch |err| { fatal("unable to open directory while searching for build.zig file, '{s}': {s}", .{ dirname, @errorName(err) }); }; return .{ @@ -7443,7 +7443,7 @@ const Templates = struct { } }; fn writeSimpleTemplateFile(io: Io, file_name: []const u8, comptime fmt: []const u8, args: anytype) !void { - const f = try fs.cwd().createFile(file_name, .{ .exclusive = true }); + const f = try Io.Dir.cwd().createFile(io, file_name, .{ .exclusive = true }); defer f.close(io); var buf: [4096]u8 = undefined; var fw = f.writer(&buf); @@ -7591,7 +7591,7 @@ fn addLibDirectoryWarn2( ignore_not_found: bool, ) void { lib_directories.appendAssumeCapacity(.{ - .handle = fs.cwd().openDir(path, .{}) catch |err| { + .handle = Io.Dir.cwd().openDir(path, .{}) catch |err| { if (err == error.FileNotFound and ignore_not_found) return; warn("unable to open library directory '{s}': {s}", .{ path, @errorName(err) }); return; -- cgit v1.2.3 From 314c906dba32e72317947a15254519b22745b13f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 16:41:24 -0800 Subject: std.debug: simplify printLineFromFile --- lib/compiler/build_runner.zig | 2 +- lib/std/Build/Fuzz.zig | 2 +- lib/std/Io/test.zig | 12 +++---- lib/std/debug.zig | 74 +++++++++++++-------------------------- lib/std/debug/ElfFile.zig | 18 +++++----- lib/std/debug/MachOFile.zig | 2 +- lib/std/debug/SelfInfo/Elf.zig | 4 +-- lib/std/debug/SelfInfo/MachO.zig | 2 +- lib/std/os/uefi/protocol/file.zig | 9 ----- 9 files changed, 46 insertions(+), 79 deletions(-) (limited to 'lib/std/debug/ElfFile.zig') diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 6e0e2d8eca..36c73e96eb 100644 
--- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -824,7 +824,7 @@ fn runStepNames( } if (@bitSizeOf(usize) != 64) { // Current implementation depends on posix.mmap()'s second parameter, `length: usize`, - // being compatible with `std.fs.getEndPos() u64`'s return value. This is not the case + // being compatible with file system's u64 return value. This is not the case // on 32-bit platforms. // Affects or affected by issues #5185, #22523, and #22464. fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)}); diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig index c95c9bd354..8d5bc27f3a 100644 --- a/lib/std/Build/Fuzz.zig +++ b/lib/std/Build/Fuzz.zig @@ -413,7 +413,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO }; defer coverage_file.close(io); - const file_size = coverage_file.getEndPos() catch |err| { + const file_size = coverage_file.length(io) catch |err| { log.err("unable to check len of coverage file '{f}': {t}", .{ coverage_file_path, err }); return error.AlreadyReported; }; diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index 8b9d714ee8..e911031c7f 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -47,7 +47,7 @@ test "write a file, read it, then delete it" { var file = try tmp.dir.openFile(io, tmp_file_name, .{}); defer file.close(io); - const file_size = try file.getEndPos(); + const file_size = try file.length(io); const expected_file_size: u64 = "begin".len + data.len + "end".len; try expectEqual(expected_file_size, file_size); @@ -77,7 +77,7 @@ test "File seek ops" { // Seek to the end try file.seekFromEnd(0); - try expect((try file.getPos()) == try file.getEndPos()); + try expect((try file.getPos()) == try file.length(io)); // Negative delta try file.seekBy(-4096); try expect((try file.getPos()) == 4096); @@ -100,17 +100,17 @@ test "setEndPos" { defer file.close(io); // Verify that the file size changes and the file offset is not moved - try expect((try file.getEndPos()) == 0); + try expect((try file.length(io)) == 0); try expect((try file.getPos()) == 0); try file.setEndPos(8192); - try expect((try file.getEndPos()) == 8192); + try expect((try file.length(io)) == 8192); try expect((try file.getPos()) == 0); try file.seekTo(100); try file.setEndPos(4096); - try expect((try file.getEndPos()) == 4096); + try expect((try file.length(io)) == 4096); try expect((try file.getPos()) == 100); try file.setEndPos(0); - try expect((try file.getEndPos()) == 0); + try expect((try file.length(io)) == 0); try expect((try file.getPos()) == 100); } diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 7ede172d86..34f740bfee 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -558,9 +558,9 @@ pub fn defaultPanic( stderr.print("{s}\n", .{msg}) catch break :trace; if (@errorReturnTrace()) |t| if (t.index > 0) { - stderr.writeStreamingAll("error return context:\n") catch break :trace; + stderr.writeAll("error return context:\n") catch break :trace; writeStackTrace(t, stderr, tty_config) catch break :trace; - stderr.writeStreamingAll("\nstack trace:\n") catch break :trace; + stderr.writeAll("\nstack trace:\n") catch break :trace; }; writeCurrentStackTrace(.{ .first_address = first_trace_addr orelse @returnAddress(), @@ -617,6 +617,8 @@ pub const StackUnwindOptions = struct { /// /// See `writeCurrentStackTrace` to immediately print the trace instead of capturing it. 
pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf: []usize) StackTrace { + var threaded: Io.Threaded = .init_single_threaded; + const io = threaded.ioBasic(); const empty_trace: StackTrace = .{ .index = 0, .instruction_addresses = &.{} }; if (!std.options.allow_stack_tracing) return empty_trace; var it: StackIterator = .init(options.context); @@ -628,7 +630,7 @@ pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf: // Ideally, we would iterate the whole stack so that the `index` in the returned trace was // indicative of how many frames were skipped. However, this has a significant runtime cost // in some cases, so at least for now, we don't do that. - while (index < addr_buf.len) switch (it.next()) { + while (index < addr_buf.len) switch (it.next(io)) { .switch_to_fp => if (!it.stratOk(options.allow_unsafe_unwind)) break, .end => break, .frame => |ret_addr| { @@ -684,7 +686,7 @@ pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Wri var total_frames: usize = 0; var wait_for = options.first_address; var printed_any_frame = false; - while (true) switch (it.next()) { + while (true) switch (it.next(io)) { .switch_to_fp => |unwind_error| { switch (StackIterator.fp_usability) { .useless, .unsafe => {}, @@ -1196,54 +1198,26 @@ fn printLineFromFile(io: Io, writer: *Writer, source_location: SourceLocation) ! // Need this to always block even in async I/O mode, because this could potentially // be called from e.g. the event loop code crashing. const cwd: Io.Dir = .cwd(); - var f = try cwd.openFile(io, source_location.file_name, .{}); - defer f.close(io); + var file = try cwd.openFile(io, source_location.file_name, .{}); + defer file.close(io); // TODO fstat and make sure that the file has the correct size - var buf: [4096]u8 = undefined; - var amt_read = try f.read(buf[0..]); - const line_start = seek: { - var current_line_start: usize = 0; - var next_line: usize = 1; - while (next_line != source_location.line) { - const slice = buf[current_line_start..amt_read]; - if (mem.findScalar(u8, slice, '\n')) |pos| { - next_line += 1; - if (pos == slice.len - 1) { - amt_read = try f.read(buf[0..]); - current_line_start = 0; - } else current_line_start += pos + 1; - } else if (amt_read < buf.len) { - return error.EndOfFile; - } else { - amt_read = try f.read(buf[0..]); - current_line_start = 0; - } - } - break :seek current_line_start; - }; - const slice = buf[line_start..amt_read]; - if (mem.findScalar(u8, slice, '\n')) |pos| { - const line = slice[0 .. pos + 1]; - mem.replaceScalar(u8, line, '\t', ' '); - return writer.writeAll(line); - } else { // Line is the last inside the buffer, and requires another read to find delimiter. Alternatively the file ends. - mem.replaceScalar(u8, slice, '\t', ' '); - try writer.writeAll(slice); - while (amt_read == buf.len) { - amt_read = try f.read(buf[0..]); - if (mem.findScalar(u8, buf[0..amt_read], '\n')) |pos| { - const line = buf[0 .. 
pos + 1]; - mem.replaceScalar(u8, line, '\t', ' '); - return writer.writeAll(line); - } else { - const line = buf[0..amt_read]; - mem.replaceScalar(u8, line, '\t', ' '); - try writer.writeAll(line); - } + var buffer: [4096]u8 = undefined; + var file_reader: File.Reader = .init(file, io, &buffer); + const r = &file_reader.interface; + var line_index: usize = 0; + while (r.takeDelimiterExclusive('\n')) |line| { + line_index += 1; + if (line_index == source_location.line) { + // TODO delete hard tabs from the language + mem.replaceScalar(u8, line, '\t', ' '); + try writer.writeAll(line); + // Make sure printing last line of file inserts extra newline. + try writer.writeByte('\n'); + return; } - // Make sure printing last line of file inserts extra newline - try writer.writeByte('\n'); + } else |err| { + return err; } } @@ -1598,7 +1572,7 @@ pub fn defaultHandleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?CpuContex // We're still holding the mutex but that's fine as we're going to // call abort(). const stderr, _ = lockStderrWriter(&.{}); - stderr().writeAll("aborting due to recursive panic\n") catch {}; + stderr.writeAll("aborting due to recursive panic\n") catch {}; }, else => {}, // Panicked while printing the recursive panic message. } diff --git a/lib/std/debug/ElfFile.zig b/lib/std/debug/ElfFile.zig index 203ee8effb..a101309d22 100644 --- a/lib/std/debug/ElfFile.zig +++ b/lib/std/debug/ElfFile.zig @@ -123,6 +123,7 @@ pub const LoadError = error{ pub fn load( gpa: Allocator, + io: Io, elf_file: Io.File, opt_build_id: ?[]const u8, di_search_paths: *const DebugInfoSearchPaths, @@ -131,7 +132,7 @@ pub fn load( errdefer arena_instance.deinit(); const arena = arena_instance.allocator(); - var result = loadInner(arena, elf_file, null) catch |err| switch (err) { + var result = loadInner(arena, io, elf_file, null) catch |err| switch (err) { error.CrcMismatch => unreachable, // we passed crc as null else => |e| return e, }; @@ -156,7 +157,7 @@ pub fn load( if (build_id.len < 3) break :build_id; for (di_search_paths.global_debug) |global_debug| { - if (try loadSeparateDebugFile(arena, &result, null, "{s}/.build-id/{x}/{x}.debug", .{ + if (try loadSeparateDebugFile(arena, io, &result, null, "{s}/.build-id/{x}/{x}.debug", .{ global_debug, build_id[0..1], build_id[1..], @@ -164,7 +165,7 @@ pub fn load( } if (di_search_paths.debuginfod_client) |components| { - if (try loadSeparateDebugFile(arena, &result, null, "{s}{s}/{x}/debuginfo", .{ + if (try loadSeparateDebugFile(arena, io, &result, null, "{s}{s}/{x}/debuginfo", .{ components[0], components[1], build_id, @@ -181,18 +182,18 @@ pub fn load( const exe_dir = di_search_paths.exe_dir orelse break :debug_link; - if (try loadSeparateDebugFile(arena, &result, debug_crc, "{s}/{s}", .{ + if (try loadSeparateDebugFile(arena, io, &result, debug_crc, "{s}/{s}", .{ exe_dir, debug_filename, })) |mapped| break :load_di mapped; - if (try loadSeparateDebugFile(arena, &result, debug_crc, "{s}/.debug/{s}", .{ + if (try loadSeparateDebugFile(arena, io, &result, debug_crc, "{s}/.debug/{s}", .{ exe_dir, debug_filename, })) |mapped| break :load_di mapped; for (di_search_paths.global_debug) |global_debug| { // This looks like a bug; it isn't. They really do embed the absolute path to the // exe's dirname, *under* the global debug path. 
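// A minimal sketch of the size query this commit switches to; `io`, `file`, and
// the function name are stand-ins for values already in scope
// (assumes `const Io = std.Io;`).
fn exampleSize(io: Io, file: Io.File) !u64 {
    // file.length(io) replaces the old file.getEndPos(); file.reader(io, &buf)
    // follows the same convention of passing the Io instance explicitly.
    return file.length(io);
}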
- if (try loadSeparateDebugFile(arena, &result, debug_crc, "{s}/{s}/{s}", .{ + if (try loadSeparateDebugFile(arena, io, &result, debug_crc, "{s}/{s}/{s}", .{ global_debug, exe_dir, debug_filename, @@ -378,7 +379,7 @@ fn loadSeparateDebugFile( const elf_file = Io.Dir.cwd().openFile(io, path, .{}) catch return null; defer elf_file.close(io); - const result = loadInner(arena, elf_file, opt_crc) catch |err| switch (err) { + const result = loadInner(arena, io, elf_file, opt_crc) catch |err| switch (err) { error.OutOfMemory => |e| return e, error.CrcMismatch => return null, else => return null, @@ -423,13 +424,14 @@ const LoadInnerResult = struct { }; fn loadInner( arena: Allocator, + io: Io, elf_file: Io.File, opt_crc: ?u32, ) (LoadError || error{ CrcMismatch, Streaming, Canceled })!LoadInnerResult { const mapped_mem: []align(std.heap.page_size_min) const u8 = mapped: { const file_len = std.math.cast( usize, - elf_file.getEndPos() catch |err| switch (err) { + elf_file.length(io) catch |err| switch (err) { error.PermissionDenied => unreachable, // not asking for PROT_EXEC else => |e| return e, }, diff --git a/lib/std/debug/MachOFile.zig b/lib/std/debug/MachOFile.zig index 18126a1c29..9e81fb8911 100644 --- a/lib/std/debug/MachOFile.zig +++ b/lib/std/debug/MachOFile.zig @@ -520,7 +520,7 @@ fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) c const file_len = std.math.cast( usize, - file.getEndPos() catch return error.ReadFailed, + file.length(io) catch return error.ReadFailed, ) orelse return error.ReadFailed; return posix.mmap( diff --git a/lib/std/debug/SelfInfo/Elf.zig b/lib/std/debug/SelfInfo/Elf.zig index 6ed18bcb80..be76f3a8c2 100644 --- a/lib/std/debug/SelfInfo/Elf.zig +++ b/lib/std/debug/SelfInfo/Elf.zig @@ -326,7 +326,7 @@ const Module = struct { const load_result = if (mod.name.len > 0) res: { var file = Io.Dir.cwd().openFile(io, mod.name, .{}) catch return error.MissingDebugInfo; defer file.close(io); - break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(mod.name)); + break :res std.debug.ElfFile.load(gpa, io, file, mod.build_id, &.native(mod.name)); } else res: { const path = std.process.executablePathAlloc(io, gpa) catch |err| switch (err) { error.OutOfMemory => |e| return e, @@ -335,7 +335,7 @@ const Module = struct { defer gpa.free(path); var file = Io.Dir.cwd().openFile(io, path, .{}) catch return error.MissingDebugInfo; defer file.close(io); - break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(path)); + break :res std.debug.ElfFile.load(gpa, io, file, mod.build_id, &.native(path)); }; var elf_file = load_result catch |err| switch (err) { diff --git a/lib/std/debug/SelfInfo/MachO.zig b/lib/std/debug/SelfInfo/MachO.zig index db8e5334e6..714ff539f1 100644 --- a/lib/std/debug/SelfInfo/MachO.zig +++ b/lib/std/debug/SelfInfo/MachO.zig @@ -622,7 +622,7 @@ fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) c }; defer file.close(io); - const file_end_pos = file.getEndPos() catch |err| switch (err) { + const file_end_pos = file.length(io) catch |err| switch (err) { error.Unexpected => |e| return e, else => return error.ReadFailed, }; diff --git a/lib/std/os/uefi/protocol/file.zig b/lib/std/os/uefi/protocol/file.zig index 9e3e7cc081..9b371916a7 100644 --- a/lib/std/os/uefi/protocol/file.zig +++ b/lib/std/os/uefi/protocol/file.zig @@ -163,15 +163,6 @@ pub const File = extern struct { } } - fn getEndPos(self: *File) SeekError!u64 { - const start_pos = try self.getPosition(); - // ignore error - defer 
self.setPosition(start_pos) catch {}; - - try self.setPosition(end_of_file); - return self.getPosition(); - } - pub fn setPosition(self: *File, position: u64) SeekError!void { switch (self._set_position(self, position)) { .success => {}, -- cgit v1.2.3
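
The migration pattern applied throughout this series, condensed into a free-standing sketch. This is not code from the patch: it assumes the post-patch API surface used in the hunks above (Io.Dir.cwd().openFile(io, ...), File.close(io), File.length(io) in place of getEndPos(), and File.Reader with takeDelimiterExclusive), and the helper name printFirstLine is hypothetical.

const std = @import("std");
const Io = std.Io;
const File = Io.File;

// Hypothetical helper: print the first line of a file with the new
// explicit-`io` API, mirroring the printLineFromFile rewrite above.
fn printFirstLine(io: Io, writer: *Io.Writer, path: []const u8) !void {
    // Filesystem calls now thread the `io` handle through explicitly.
    var file = try Io.Dir.cwd().openFile(io, path, .{});
    // close() became close(io).
    defer file.close(io);

    // getEndPos() is replaced by length(io).
    const size = try file.length(io);
    if (size == 0) return;

    // Buffered reading goes through File.Reader and its generic interface.
    var buffer: [4096]u8 = undefined;
    var file_reader: File.Reader = .init(file, io, &buffer);
    const r = &file_reader.interface;

    const line = try r.takeDelimiterExclusive('\n');
    try writer.writeAll(line);
    try writer.writeByte('\n');
}

Where no Io handle is in scope yet, the single-threaded shim used in the captureCurrentStackTrace hunk can supply one, as in: var threaded: Io.Threaded = .init_single_threaded; const io = threaded.ioBasic();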