From d1d2c37af26902f953b2b72335b326c4b01e3bb2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 Dec 2025 20:37:43 -0800 Subject: std: all Dir functions moved to std.Io --- lib/std/Build/Cache.zig | 83 +++++++++++++++++++++++++------------------------ 1 file changed, 43 insertions(+), 40 deletions(-) (limited to 'lib/std/Build/Cache.zig') diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 5e8412cfcf..b06547dc53 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -8,7 +8,6 @@ const builtin = @import("builtin"); const std = @import("std"); const Io = std.Io; const crypto = std.crypto; -const fs = std.fs; const assert = std.debug.assert; const testing = std.testing; const mem = std.mem; @@ -18,7 +17,7 @@ const log = std.log.scoped(.cache); gpa: Allocator, io: Io, -manifest_dir: fs.Dir, +manifest_dir: Io.Dir, hash: HashHelper = .{}, /// This value is accessed from multiple threads, protected by mutex. recent_problematic_timestamp: Io.Timestamp = .zero, @@ -71,7 +70,7 @@ const PrefixedPath = struct { fn findPrefix(cache: *const Cache, file_path: []const u8) !PrefixedPath { const gpa = cache.gpa; - const resolved_path = try fs.path.resolve(gpa, &.{file_path}); + const resolved_path = try std.fs.path.resolve(gpa, &.{file_path}); errdefer gpa.free(resolved_path); return findPrefixResolved(cache, resolved_path); } @@ -102,9 +101,9 @@ fn findPrefixResolved(cache: *const Cache, resolved_path: []u8) !PrefixedPath { } fn getPrefixSubpath(allocator: Allocator, prefix: []const u8, path: []u8) ![]u8 { - const relative = try fs.path.relative(allocator, prefix, path); + const relative = try std.fs.path.relative(allocator, prefix, path); errdefer allocator.free(relative); - var component_iterator = fs.path.NativeComponentIterator.init(relative); + var component_iterator = std.fs.path.NativeComponentIterator.init(relative); if (component_iterator.root() != null) { return error.NotASubPath; } @@ -145,17 +144,17 @@ pub const File = struct { max_file_size: ?usize, /// Populated if the user calls `addOpenedFile`. /// The handle is not owned here. - handle: ?fs.File, + handle: ?Io.File, stat: Stat, bin_digest: BinDigest, contents: ?[]const u8, pub const Stat = struct { - inode: fs.File.INode, + inode: Io.File.INode, size: u64, mtime: Io.Timestamp, - pub fn fromFs(fs_stat: fs.File.Stat) Stat { + pub fn fromFs(fs_stat: Io.File.Stat) Stat { return .{ .inode = fs_stat.inode, .size = fs_stat.size, @@ -178,7 +177,7 @@ pub const File = struct { file.max_file_size = if (file.max_file_size) |old| @max(old, new) else new; } - pub fn updateHandle(file: *File, new_handle: ?fs.File) void { + pub fn updateHandle(file: *File, new_handle: ?Io.File) void { const handle = new_handle orelse return; file.handle = handle; } @@ -293,16 +292,16 @@ pub fn binToHex(bin_digest: BinDigest) HexDigest { } pub const Lock = struct { - manifest_file: fs.File, + manifest_file: Io.File, - pub fn release(lock: *Lock) void { + pub fn release(lock: *Lock, io: Io) void { if (builtin.os.tag == .windows) { // Windows does not guarantee that locks are immediately unlocked when // the file handle is closed. See LockFileEx documentation. lock.manifest_file.unlock(); } - lock.manifest_file.close(); + lock.manifest_file.close(io); lock.* = undefined; } }; @@ -311,7 +310,7 @@ pub const Manifest = struct { cache: *Cache, /// Current state for incremental hashing. 
hash: HashHelper, - manifest_file: ?fs.File, + manifest_file: ?Io.File, manifest_dirty: bool, /// Set this flag to true before calling hit() in order to indicate that /// upon a cache hit, the code using the cache will not modify the files @@ -332,9 +331,9 @@ pub const Manifest = struct { pub const Diagnostic = union(enum) { none, - manifest_create: fs.File.OpenError, - manifest_read: fs.File.ReadError, - manifest_lock: fs.File.LockError, + manifest_create: Io.File.OpenError, + manifest_read: Io.File.Reader.Error, + manifest_lock: Io.File.LockError, file_open: FileOp, file_stat: FileOp, file_read: FileOp, @@ -393,10 +392,10 @@ pub const Manifest = struct { } /// Same as `addFilePath` except the file has already been opened. - pub fn addOpenedFile(m: *Manifest, path: Path, handle: ?fs.File, max_file_size: ?usize) !usize { + pub fn addOpenedFile(m: *Manifest, path: Path, handle: ?Io.File, max_file_size: ?usize) !usize { const gpa = m.cache.gpa; try m.files.ensureUnusedCapacity(gpa, 1); - const resolved_path = try fs.path.resolve(gpa, &.{ + const resolved_path = try std.fs.path.resolve(gpa, &.{ path.root_dir.path orelse ".", path.subPathOrDot(), }); @@ -417,7 +416,7 @@ pub const Manifest = struct { return addFileInner(self, prefixed_path, null, max_file_size); } - fn addFileInner(self: *Manifest, prefixed_path: PrefixedPath, handle: ?fs.File, max_file_size: ?usize) usize { + fn addFileInner(self: *Manifest, prefixed_path: PrefixedPath, handle: ?Io.File, max_file_size: ?usize) usize { const gop = self.files.getOrPutAssumeCapacityAdapted(prefixed_path, FilesAdapter{}); if (gop.found_existing) { self.cache.gpa.free(prefixed_path.sub_path); @@ -460,7 +459,7 @@ pub const Manifest = struct { } } - pub fn addDepFile(self: *Manifest, dir: fs.Dir, dep_file_sub_path: []const u8) !void { + pub fn addDepFile(self: *Manifest, dir: Io.Dir, dep_file_sub_path: []const u8) !void { assert(self.manifest_file == null); return self.addDepFileMaybePost(dir, dep_file_sub_path); } @@ -702,7 +701,7 @@ pub const Manifest = struct { const file_path = iter.rest(); const stat_size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat; - const stat_inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat; + const stat_inode = fmt.parseInt(Io.File.INode, inode, 10) catch return error.InvalidFormat; const stat_mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat; const file_bin_digest = b: { if (digest_str.len != hex_digest_len) return error.InvalidFormat; @@ -772,7 +771,7 @@ pub const Manifest = struct { return error.CacheCheckFailed; }, }; - defer this_file.close(); + defer this_file.close(io); const actual_stat = this_file.stat() catch |err| { self.diagnostic = .{ .file_stat = .{ @@ -879,7 +878,7 @@ pub const Manifest = struct { error.Canceled => return error.Canceled, else => return true, }; - defer file.close(); + defer file.close(io); // Save locally and also save globally (we still hold the global lock). 
const stat = file.stat() catch |err| switch (err) { @@ -894,18 +893,20 @@ pub const Manifest = struct { } fn populateFileHash(self: *Manifest, ch_file: *File) !void { + const io = self.cache.io; + if (ch_file.handle) |handle| { return populateFileHashHandle(self, ch_file, handle); } else { const pp = ch_file.prefixed_path; const dir = self.cache.prefixes()[pp.prefix].handle; const handle = try dir.openFile(pp.sub_path, .{}); - defer handle.close(); + defer handle.close(io); return populateFileHashHandle(self, ch_file, handle); } } - fn populateFileHashHandle(self: *Manifest, ch_file: *File, handle: fs.File) !void { + fn populateFileHashHandle(self: *Manifest, ch_file: *File, handle: Io.File) !void { const actual_stat = try handle.stat(); ch_file.stat = .{ .size = actual_stat.size, @@ -1064,12 +1065,12 @@ pub const Manifest = struct { self.hash.hasher.update(&new_file.bin_digest); } - pub fn addDepFilePost(self: *Manifest, dir: fs.Dir, dep_file_sub_path: []const u8) !void { + pub fn addDepFilePost(self: *Manifest, dir: Io.Dir, dep_file_sub_path: []const u8) !void { assert(self.manifest_file != null); return self.addDepFileMaybePost(dir, dep_file_sub_path); } - fn addDepFileMaybePost(self: *Manifest, dir: fs.Dir, dep_file_sub_path: []const u8) !void { + fn addDepFileMaybePost(self: *Manifest, dir: Io.Dir, dep_file_sub_path: []const u8) !void { const gpa = self.cache.gpa; const dep_file_contents = try dir.readFileAlloc(dep_file_sub_path, gpa, .limited(manifest_file_size_max)); defer gpa.free(dep_file_contents); @@ -1148,7 +1149,7 @@ pub const Manifest = struct { } } - fn writeDirtyManifestToStream(self: *Manifest, fw: *fs.File.Writer) !void { + fn writeDirtyManifestToStream(self: *Manifest, fw: *Io.File.Writer) !void { try fw.interface.writeAll(manifest_header ++ "\n"); for (self.files.keys()) |file| { try fw.interface.print("{d} {d} {d} {x} {d} {s}\n", .{ @@ -1214,13 +1215,15 @@ pub const Manifest = struct { /// `Manifest.hit` must be called first. /// Don't forget to call `writeManifest` before this! pub fn deinit(self: *Manifest) void { + const io = self.cache.io; + if (self.manifest_file) |file| { if (builtin.os.tag == .windows) { // See Lock.release for why this is required on Windows file.unlock(); } - file.close(); + file.close(io); } for (self.files.keys()) |*file| { file.deinit(self.cache.gpa); @@ -1281,7 +1284,7 @@ pub const Manifest = struct { /// On operating systems that support symlinks, does a readlink. On other operating systems, /// uses the file contents. Windows supports symlinks but only with elevated privileges, so /// it is treated as not supporting symlinks. -pub fn readSmallFile(dir: fs.Dir, sub_path: []const u8, buffer: []u8) ![]u8 { +pub fn readSmallFile(dir: Io.Dir, sub_path: []const u8, buffer: []u8) ![]u8 { if (builtin.os.tag == .windows) { return dir.readFile(sub_path, buffer); } else { @@ -1293,7 +1296,7 @@ pub fn readSmallFile(dir: fs.Dir, sub_path: []const u8, buffer: []u8) ![]u8 { /// uses the file contents. Windows supports symlinks but only with elevated privileges, so /// it is treated as not supporting symlinks. /// `data` must be a valid UTF-8 encoded file path and 255 bytes or fewer. 
-pub fn writeSmallFile(dir: fs.Dir, sub_path: []const u8, data: []const u8) !void { +pub fn writeSmallFile(dir: Io.Dir, sub_path: []const u8, data: []const u8) !void { assert(data.len <= 255); if (builtin.os.tag == .windows) { return dir.writeFile(.{ .sub_path = sub_path, .data = data }); @@ -1302,7 +1305,7 @@ pub fn writeSmallFile(dir: fs.Dir, sub_path: []const u8, data: []const u8) !void } } -fn hashFile(file: fs.File, bin_digest: *[Hasher.mac_length]u8) fs.File.PReadError!void { +fn hashFile(file: Io.File, bin_digest: *[Hasher.mac_length]u8) Io.File.PReadError!void { var buf: [1024]u8 = undefined; var hasher = hasher_init; var off: u64 = 0; @@ -1316,7 +1319,7 @@ fn hashFile(file: fs.File, bin_digest: *[Hasher.mac_length]u8) fs.File.PReadErro } // Create/Write a file, close it, then grab its stat.mtime timestamp. -fn testGetCurrentFileTimestamp(dir: fs.Dir) !Io.Timestamp { +fn testGetCurrentFileTimestamp(io: Io, dir: Io.Dir) !Io.Timestamp { const test_out_file = "test-filetimestamp.tmp"; var file = try dir.createFile(test_out_file, .{ @@ -1324,7 +1327,7 @@ fn testGetCurrentFileTimestamp(dir: fs.Dir) !Io.Timestamp { .truncate = true, }); defer { - file.close(); + file.close(io); dir.deleteFile(test_out_file) catch {}; } @@ -1343,8 +1346,8 @@ test "cache file and then recall it" { try tmp.dir.writeFile(.{ .sub_path = temp_file, .data = "Hello, world!\n" }); // Wait for file timestamps to tick - const initial_time = try testGetCurrentFileTimestamp(tmp.dir); - while ((try testGetCurrentFileTimestamp(tmp.dir)).nanoseconds == initial_time.nanoseconds) { + const initial_time = try testGetCurrentFileTimestamp(io, tmp.dir); + while ((try testGetCurrentFileTimestamp(io, tmp.dir)).nanoseconds == initial_time.nanoseconds) { try std.Io.Clock.Duration.sleep(.{ .clock = .boot, .raw = .fromNanoseconds(1) }, io); } @@ -1358,7 +1361,7 @@ test "cache file and then recall it" { .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}), }; cache.addPrefix(.{ .path = null, .handle = tmp.dir }); - defer cache.manifest_dir.close(); + defer cache.manifest_dir.close(io); { var ch = cache.obtain(); @@ -1424,7 +1427,7 @@ test "check that changing a file makes cache fail" { .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}), }; cache.addPrefix(.{ .path = null, .handle = tmp.dir }); - defer cache.manifest_dir.close(); + defer cache.manifest_dir.close(io); { var ch = cache.obtain(); @@ -1484,7 +1487,7 @@ test "no file inputs" { .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}), }; cache.addPrefix(.{ .path = null, .handle = tmp.dir }); - defer cache.manifest_dir.close(); + defer cache.manifest_dir.close(io); { var man = cache.obtain(); @@ -1543,7 +1546,7 @@ test "Manifest with files added after initial hash work" { .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}), }; cache.addPrefix(.{ .path = null, .handle = tmp.dir }); - defer cache.manifest_dir.close(); + defer cache.manifest_dir.close(io); { var ch = cache.obtain(); -- cgit v1.2.3 From 8328de24f13e21e325207b19288a143854df50df Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 6 Dec 2025 17:52:57 -0800 Subject: update all occurrences of openFile to receive an io instance --- lib/compiler/aro/aro/Compilation.zig | 4 +-- lib/compiler/aro/aro/Driver/Filesystem.zig | 2 +- lib/compiler/aro/aro/Toolchain.zig | 6 ++-- lib/compiler/objcopy.zig | 2 +- lib/compiler/std-docs.zig | 2 +- lib/std/Build/Cache.zig | 8 ++++-- lib/std/Build/Cache/Path.zig | 8 ++---- lib/std/Build/Fuzz.zig | 4 +-- lib/std/Build/Step/Run.zig | 6 
++-- lib/std/Build/Step/UpdateSourceFiles.zig | 2 +- lib/std/Build/Step/WriteFile.zig | 6 ++-- lib/std/Build/WebServer.zig | 4 +-- lib/std/Io/Dir.zig | 2 +- lib/std/Io/test.zig | 2 +- lib/std/Thread.zig | 4 +-- lib/std/crypto/Certificate/Bundle.zig | 2 +- lib/std/debug/ElfFile.zig | 2 +- lib/std/debug/Info.zig | 2 +- lib/std/debug/MachOFile.zig | 2 +- lib/std/debug/SelfInfo/Elf.zig | 4 +-- lib/std/debug/SelfInfo/MachO.zig | 2 +- lib/std/debug/SelfInfo/Windows.zig | 6 ++-- lib/std/fs.zig | 17 ----------- lib/std/fs/test.zig | 46 +++++++++++++++--------------- lib/std/os/linux/IoUring.zig | 6 ++-- lib/std/posix/test.zig | 8 +++--- lib/std/zig/system.zig | 6 ++-- src/Compilation.zig | 8 +++--- src/Package/Fetch.zig | 6 ++-- src/Package/Fetch/git.zig | 2 +- src/Zcu.zig | 2 +- src/Zcu/PerThread.zig | 4 +-- src/fmt.zig | 2 +- src/introspect.zig | 4 +-- src/link.zig | 24 ++++++++-------- src/link/Coff.zig | 4 +-- src/link/Elf2.zig | 14 +++++---- src/link/MachO.zig | 8 ++++-- src/link/MachO/relocatable.zig | 7 +++-- src/link/MappedFile.zig | 2 +- src/main.zig | 10 +++---- 41 files changed, 124 insertions(+), 138 deletions(-) (limited to 'lib/std/Build/Cache.zig') diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index c31caefb0f..09e4861d13 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -1641,7 +1641,7 @@ fn addSourceFromPathExtra(comp: *Compilation, path: []const u8, kind: Source.Kin const io = comp.io; - const file = try comp.cwd.openFile(path, .{}); + const file = try comp.cwd.openFile(io, path, .{}); defer file.close(io); return comp.addSourceFromFile(file, path, kind); } @@ -1975,7 +1975,7 @@ fn getPathContents(comp: *Compilation, path: []const u8, limit: Io.Limit) ![]u8 const io = comp.io; - const file = try comp.cwd.openFile(path, .{}); + const file = try comp.cwd.openFile(io, path, .{}); defer file.close(io); return comp.getFileContents(file, limit); } diff --git a/lib/compiler/aro/aro/Driver/Filesystem.zig b/lib/compiler/aro/aro/Driver/Filesystem.zig index 19ac9bfe41..b0bdbb7e21 100644 --- a/lib/compiler/aro/aro/Driver/Filesystem.zig +++ b/lib/compiler/aro/aro/Driver/Filesystem.zig @@ -213,7 +213,7 @@ pub const Filesystem = union(enum) { pub fn readFile(fs: Filesystem, io: Io, path: []const u8, buf: []u8) ?[]const u8 { return switch (fs) { .real => |cwd| { - const file = cwd.openFile(path, .{}) catch return null; + const file = cwd.openFile(io, path, .{}) catch return null; defer file.close(io); const bytes_read = file.readAll(buf) catch return null; diff --git a/lib/compiler/aro/aro/Toolchain.zig b/lib/compiler/aro/aro/Toolchain.zig index ae84369205..95a8baba77 100644 --- a/lib/compiler/aro/aro/Toolchain.zig +++ b/lib/compiler/aro/aro/Toolchain.zig @@ -524,12 +524,12 @@ pub fn addBuiltinIncludeDir(tc: *const Toolchain) !void { /// Otherwise returns a slice of `buf`. 
If the file is larger than `buf` partial contents are returned pub fn readFile(tc: *const Toolchain, path: []const u8, buf: []u8) ?[]const u8 { const comp = tc.driver.comp; - return comp.cwd.adaptToNewApi().readFile(comp.io, path, buf) catch null; + return comp.cwd.readFile(comp.io, path, buf) catch null; } pub fn exists(tc: *const Toolchain, path: []const u8) bool { const comp = tc.driver.comp; - comp.cwd.adaptToNewApi().access(comp.io, path, .{}) catch return false; + comp.cwd.access(comp.io, path, .{}) catch return false; return true; } @@ -547,7 +547,7 @@ pub fn canExecute(tc: *const Toolchain, path: []const u8) bool { } const comp = tc.driver.comp; - comp.cwd.adaptToNewApi().access(comp.io, path, .{ .execute = true }) catch return false; + comp.cwd.access(comp.io, path, .{ .execute = true }) catch return false; // Todo: ensure path is not a directory return true; } diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index c360ea8df0..485e644daa 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -157,7 +157,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void const stat = input_file.stat() catch |err| fatal("failed to stat {s}: {t}", .{ input, err }); - var in: File.Reader = .initSize(input_file.adaptToNewApi(), io, &input_buffer, stat.size); + var in: File.Reader = .initSize(input_file, io, &input_buffer, stat.size); const elf_hdr = std.elf.Header.read(&in.interface) catch |err| switch (err) { error.ReadFailed => fatal("unable to read {s}: {t}", .{ input, in.err.? }), diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index f1ca7fb5bb..e4efac28cd 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -225,7 +225,7 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void { }, else => continue, } - var file = try entry.dir.openFile(entry.basename, .{}); + var file = try entry.dir.openFile(io, entry.basename, .{}); defer file.close(io); const stat = try file.stat(); var file_reader: std.Io.File.Reader = .{ diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index b06547dc53..42459c033d 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -502,6 +502,8 @@ pub const Manifest = struct { @memcpy(manifest_file_path[0..self.hex_digest.len], &self.hex_digest); manifest_file_path[hex_digest_len..][0..ext.len].* = ext.*; + const io = self.cache.io; + // We'll try to open the cache with an exclusive lock, but if that would block // and `want_shared_lock` is set, a shared lock might be sufficient, so we'll // open with a shared lock instead. @@ -517,7 +519,7 @@ pub const Manifest = struct { break; } else |err| switch (err) { error.WouldBlock => { - self.manifest_file = self.cache.manifest_dir.openFile(&manifest_file_path, .{ + self.manifest_file = self.cache.manifest_dir.openFile(io, &manifest_file_path, .{ .mode = .read_write, .lock = .shared, }) catch |e| { @@ -757,7 +759,7 @@ pub const Manifest = struct { const pp = cache_hash_file.prefixed_path; const dir = self.cache.prefixes()[pp.prefix].handle; - const this_file = dir.openFile(pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) { + const this_file = dir.openFile(io, pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) { error.FileNotFound => { // Every digest before this one has been populated successfully. 
return .{ .miss = .{ .file_digests_populated = idx } }; @@ -900,7 +902,7 @@ pub const Manifest = struct { } else { const pp = ch_file.prefixed_path; const dir = self.cache.prefixes()[pp.prefix].handle; - const handle = try dir.openFile(pp.sub_path, .{}); + const handle = try dir.openFile(io, pp.sub_path, .{}); defer handle.close(io); return populateFileHashHandle(self, ch_file, handle); } diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index f6f76c1e8f..60211670de 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -59,18 +59,14 @@ pub fn joinStringZ(p: Path, gpa: Allocator, sub_path: []const u8) Allocator.Erro return p.root_dir.joinZ(gpa, parts); } -pub fn openFile( - p: Path, - sub_path: []const u8, - flags: Io.File.OpenFlags, -) !Io.File { +pub fn openFile(p: Path, io: Io, sub_path: []const u8, flags: Io.File.OpenFlags) !Io.File { var buf: [fs.max_path_bytes]u8 = undefined; const joined_path = if (p.sub_path.len == 0) sub_path else p: { break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{ p.sub_path, sub_path, }) catch return error.NameTooLong; }; - return p.root_dir.handle.openFile(joined_path, flags); + return p.root_dir.handle.openFile(io, joined_path, flags); } pub fn openDir( diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig index db83f393fd..c95c9bd354 100644 --- a/lib/std/Build/Fuzz.zig +++ b/lib/std/Build/Fuzz.zig @@ -405,7 +405,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO .root_dir = run_step.step.owner.cache_root, .sub_path = "v/" ++ std.fmt.hex(coverage_id), }; - var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| { + var coverage_file = coverage_file_path.root_dir.handle.openFile(io, coverage_file_path.sub_path, .{}) catch |err| { log.err("step '{s}': failed to load coverage file '{f}': {t}", .{ run_step.step.name, coverage_file_path, err, }); @@ -528,7 +528,7 @@ pub fn waitAndPrintReport(fuzz: *Fuzz) void { .root_dir = cov.run.step.owner.cache_root, .sub_path = "v/" ++ std.fmt.hex(cov.id), }; - var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| { + var coverage_file = coverage_file_path.root_dir.handle.openFile(io, coverage_file_path.sub_path, .{}) catch |err| { fatal("step '{s}': failed to load coverage file '{f}': {t}", .{ cov.run.step.name, coverage_file_path, err, }); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 1df6f42a35..7c54c8048e 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -846,7 +846,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { errdefer result.deinit(); result.writer.writeAll(file_plp.prefix) catch return error.OutOfMemory; - const file = file_path.root_dir.handle.openFile(file_path.subPathOrDot(), .{}) catch |err| { + const file = file_path.root_dir.handle.openFile(io, file_path.subPathOrDot(), .{}) catch |err| { return step.fail( "unable to open input file '{f}': {t}", .{ file_path, err }, @@ -1111,7 +1111,7 @@ pub fn rerunInFuzzMode( errdefer result.deinit(); result.writer.writeAll(file_plp.prefix) catch return error.OutOfMemory; - const file = try file_path.root_dir.handle.openFile(file_path.subPathOrDot(), .{}); + const file = try file_path.root_dir.handle.openFile(io, file_path.subPathOrDot(), .{}); defer file.close(io); var buf: [1024]u8 = undefined; @@ -2185,7 +2185,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult { 
}, .lazy_path => |lazy_path| { const path = lazy_path.getPath3(b, &run.step); - const file = path.root_dir.handle.openFile(path.subPathOrDot(), .{}) catch |err| { + const file = path.root_dir.handle.openFile(io, path.subPathOrDot(), .{}) catch |err| { return run.step.fail("unable to open stdin file: {s}", .{@errorName(err)}); }; defer file.close(io); diff --git a/lib/std/Build/Step/UpdateSourceFiles.zig b/lib/std/Build/Step/UpdateSourceFiles.zig index 7cdb521d21..f5d95182e9 100644 --- a/lib/std/Build/Step/UpdateSourceFiles.zig +++ b/lib/std/Build/Step/UpdateSourceFiles.zig @@ -99,7 +99,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .cwd(), io, source_path, - b.build_root.handle.adaptToNewApi(), + b.build_root.handle, output_source_file.sub_path, .{}, ) catch |err| { diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 201b132271..2834f18564 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -284,7 +284,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { }, .copy => |file_source| { const source_path = file_source.getPath2(b, step); - const prev_status = Io.Dir.updateFile(.cwd(), io, source_path, cache_dir.adaptToNewApi(), file.sub_path, .{}) catch |err| { + const prev_status = Io.Dir.updateFile(.cwd(), io, source_path, cache_dir, file.sub_path, .{}) catch |err| { return step.fail("unable to update file from '{s}' to '{f}{s}{c}{s}': {t}", .{ source_path, b.cache_root, cache_path, fs.path.sep, file.sub_path, err, }); @@ -321,10 +321,10 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .directory => try cache_dir.makePath(dest_path), .file => { const prev_status = Io.Dir.updateFile( - src_entry_path.root_dir.handle.adaptToNewApi(), + src_entry_path.root_dir.handle, io, src_entry_path.sub_path, - cache_dir.adaptToNewApi(), + cache_dir, dest_path, .{}, ) catch |err| { diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index f91075b444..9938d5e1b0 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -504,14 +504,14 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons var archiver: std.tar.Writer = .{ .underlying_writer = &response.writer }; for (paths) |path| { - var file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| { + var file = path.root_dir.handle.openFile(io, path.sub_path, .{}) catch |err| { log.err("failed to open '{f}': {s}", .{ path, @errorName(err) }); continue; }; defer file.close(io); const stat = try file.stat(); var read_buffer: [1024]u8 = undefined; - var file_reader: Io.File.Reader = .initSize(file.adaptToNewApi(), io, &read_buffer, stat.size); + var file_reader: Io.File.Reader = .initSize(file, io, &read_buffer, stat.size); // TODO: this logic is completely bogus -- obviously so, because `path.root_dir.path` can // be cwd-relative. This is also related to why linkification doesn't work in the fuzzer UI: diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig index 1c28c2f9b3..9ae636d4a6 100644 --- a/lib/std/Io/Dir.zig +++ b/lib/std/Io/Dir.zig @@ -481,7 +481,7 @@ pub fn updateFile( } var buffer: [1000]u8 = undefined; // Used only when direct fd-to-fd is not available. 
- var atomic_file = try Dir.atomicFile(.adaptFromNewApi(dest_dir), dest_path, .{ + var atomic_file = try Dir.atomicFile(dest_dir, dest_path, .{ .permissions = actual_permissions, .write_buffer = &buffer, }); diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index b922acc333..9f21fe50e7 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -44,7 +44,7 @@ test "write a file, read it, then delete it" { } { - var file = try tmp.dir.openFile(tmp_file_name, .{}); + var file = try tmp.dir.openFile(io, tmp_file_name, .{}); defer file.close(io); const file_size = try file.getEndPos(); diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 8d8e5979df..102bb59415 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -208,7 +208,7 @@ pub fn setName(self: Thread, io: Io, name: []const u8) SetNameError!void { var buf: [32]u8 = undefined; const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()}); - const file = try std.fs.cwd().openFile(path, .{ .mode = .write_only }); + const file = try std.fs.cwd().openFile(io, path, .{ .mode = .write_only }); defer file.close(io); try file.writeAll(name); @@ -325,7 +325,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co var threaded: std.Io.Threaded = .init_single_threaded; const io = threaded.ioBasic(); - const file = try std.fs.cwd().openFile(path, .{}); + const file = try std.fs.cwd().openFile(io, path, .{}); defer file.close(io); var file_reader = file.readerStreaming(io, &.{}); diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig index df671e6f63..eb60ad37a8 100644 --- a/lib/std/crypto/Certificate/Bundle.zig +++ b/lib/std/crypto/Certificate/Bundle.zig @@ -208,7 +208,7 @@ pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp, i else => continue, } - try addCertsFromFilePath(cb, gpa, io, now, iterable_dir.adaptToNewApi(), entry.name); + try addCertsFromFilePath(cb, gpa, io, now, iterable_dir, entry.name); } } diff --git a/lib/std/debug/ElfFile.zig b/lib/std/debug/ElfFile.zig index 5dbae18130..a0f1188ade 100644 --- a/lib/std/debug/ElfFile.zig +++ b/lib/std/debug/ElfFile.zig @@ -375,7 +375,7 @@ fn loadSeparateDebugFile( args: anytype, ) Allocator.Error!?[]align(std.heap.page_size_min) const u8 { const path = try std.fmt.allocPrint(arena, fmt, args); - const elf_file = std.fs.cwd().openFile(path, .{}) catch return null; + const elf_file = std.fs.cwd().openFile(io, path, .{}) catch return null; defer elf_file.close(io); const result = loadInner(arena, elf_file, opt_crc) catch |err| switch (err) { diff --git a/lib/std/debug/Info.zig b/lib/std/debug/Info.zig index da7656e626..6b31f03f72 100644 --- a/lib/std/debug/Info.zig +++ b/lib/std/debug/Info.zig @@ -39,7 +39,7 @@ pub fn load( ) LoadError!Info { switch (format) { .elf => { - var file = try path.root_dir.handle.openFile(path.sub_path, .{}); + var file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); defer file.close(io); var elf_file: ElfFile = try .load(gpa, file, null, &.none); diff --git a/lib/std/debug/MachOFile.zig b/lib/std/debug/MachOFile.zig index 3f0f620a90..ae904c0aec 100644 --- a/lib/std/debug/MachOFile.zig +++ b/lib/std/debug/MachOFile.zig @@ -512,7 +512,7 @@ fn loadOFile(gpa: Allocator, io: Io, o_file_name: []const u8) !OFile { /// Uses `mmap` to map the file at `path` into memory. 
fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 { - const file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) { + const file = std.fs.cwd().openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return error.ReadFailed, }; diff --git a/lib/std/debug/SelfInfo/Elf.zig b/lib/std/debug/SelfInfo/Elf.zig index 155dac6fb8..124768687c 100644 --- a/lib/std/debug/SelfInfo/Elf.zig +++ b/lib/std/debug/SelfInfo/Elf.zig @@ -325,7 +325,7 @@ const Module = struct { } fn loadElf(mod: *Module, gpa: Allocator, io: Io) Error!LoadedElf { const load_result = if (mod.name.len > 0) res: { - var file = std.fs.cwd().openFile(mod.name, .{}) catch return error.MissingDebugInfo; + var file = std.fs.cwd().openFile(io, mod.name, .{}) catch return error.MissingDebugInfo; defer file.close(io); break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(mod.name)); } else res: { @@ -334,7 +334,7 @@ const Module = struct { else => return error.ReadFailed, }; defer gpa.free(path); - var file = std.fs.cwd().openFile(path, .{}) catch return error.MissingDebugInfo; + var file = std.fs.cwd().openFile(io, path, .{}) catch return error.MissingDebugInfo; defer file.close(io); break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(path)); }; diff --git a/lib/std/debug/SelfInfo/MachO.zig b/lib/std/debug/SelfInfo/MachO.zig index 2491cf416c..15da616f3b 100644 --- a/lib/std/debug/SelfInfo/MachO.zig +++ b/lib/std/debug/SelfInfo/MachO.zig @@ -616,7 +616,7 @@ test { /// Uses `mmap` to map the file at `path` into memory. fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 { - const file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) { + const file = std.fs.cwd().openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return error.ReadFailed, }; diff --git a/lib/std/debug/SelfInfo/Windows.zig b/lib/std/debug/SelfInfo/Windows.zig index 3af7223293..c7f9d8c352 100644 --- a/lib/std/debug/SelfInfo/Windows.zig +++ b/lib/std/debug/SelfInfo/Windows.zig @@ -387,7 +387,7 @@ const Module = struct { const section_view = section_view_ptr.?[0..coff_len]; coff_obj = coff.Coff.init(section_view, false) catch return error.InvalidDebugInfo; break :mapped .{ - .file = .adaptFromNewApi(coff_file), + .file = coff_file, .section_handle = section_handle, .section_view = section_view, }; @@ -432,7 +432,7 @@ const Module = struct { break :pdb null; }; const pdb_file_open_result = if (fs.path.isAbsolute(path)) res: { - break :res std.fs.cwd().openFile(path, .{}); + break :res std.fs.cwd().openFile(io, path, .{}); } else res: { const self_dir = fs.selfExeDirPathAlloc(gpa) catch |err| switch (err) { error.OutOfMemory, error.Unexpected => |e| return e, @@ -441,7 +441,7 @@ const Module = struct { defer gpa.free(self_dir); const abs_path = try fs.path.join(gpa, &.{ self_dir, path }); defer gpa.free(abs_path); - break :res std.fs.cwd().openFile(abs_path, .{}); + break :res std.fs.cwd().openFile(io, abs_path, .{}); }; const pdb_file = pdb_file_open_result catch |err| switch (err) { error.FileNotFound, error.IsDir => break :pdb null, diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 9472e5d2a5..cb4daf7c50 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -287,23 +287,6 @@ pub fn symLinkAbsoluteW( return windows.CreateSymbolicLink(null, mem.span(sym_link_path_w), mem.span(target_path_w), flags.is_directory); } -pub const OpenSelfExeError = 
Io.File.OpenSelfExeError; - -/// Deprecated in favor of `Io.File.openSelfExe`. -pub fn openSelfExe(flags: File.OpenFlags) OpenSelfExeError!File { - if (native_os == .linux or native_os == .serenity or native_os == .windows) { - var threaded: Io.Threaded = .init_single_threaded; - const io = threaded.ioBasic(); - return .adaptFromNewApi(try Io.File.openSelfExe(io, flags)); - } - // Use of max_path_bytes here is valid as the resulting path is immediately - // opened with no modification. - var buf: [max_path_bytes]u8 = undefined; - const self_exe_path = try selfExePath(&buf); - buf[self_exe_path.len] = 0; - return openFileAbsolute(buf[0..self_exe_path.len :0], flags); -} - // This is `posix.ReadLinkError || posix.RealPathError` with impossible errors excluded pub const SelfExePathError = error{ FileNotFound, diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 36ccd3a6be..7d566da0e9 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -855,7 +855,7 @@ test "directory operations on files" { } // ensure the file still exists and is a file as a sanity check - file = try ctx.dir.openFile(test_file_name, .{}); + file = try ctx.dir.openFile(io, test_file_name, .{}); const stat = try file.stat(); try testing.expectEqual(File.Kind.file, stat.kind); file.close(io); @@ -895,12 +895,12 @@ test "file operations on directories" { if (native_os == .wasi and builtin.link_libc) { // wasmtime unexpectedly succeeds here, see https://github.com/ziglang/zig/issues/20747 - const handle = try ctx.dir.openFile(test_dir_name, .{ .mode = .read_write }); + const handle = try ctx.dir.openFile(io, test_dir_name, .{ .mode = .read_write }); handle.close(io); } else { // Note: The `.mode = .read_write` is necessary to ensure the error occurs on all platforms. // TODO: Add a read-only test as well, see https://github.com/ziglang/zig/issues/5732 - try testing.expectError(error.IsDir, ctx.dir.openFile(test_dir_name, .{ .mode = .read_write })); + try testing.expectError(error.IsDir, ctx.dir.openFile(io, test_dir_name, .{ .mode = .read_write })); } if (ctx.path_type == .absolute and comptime PathType.absolute.isSupported(builtin.os)) { @@ -973,8 +973,8 @@ test "Dir.rename files" { try ctx.dir.rename(test_file_name, renamed_test_file_name); // Ensure the file was renamed - try testing.expectError(error.FileNotFound, ctx.dir.openFile(test_file_name, .{})); - file = try ctx.dir.openFile(renamed_test_file_name, .{}); + try testing.expectError(error.FileNotFound, ctx.dir.openFile(io, test_file_name, .{})); + file = try ctx.dir.openFile(io, renamed_test_file_name, .{}); file.close(io); // Rename to self succeeds @@ -986,8 +986,8 @@ test "Dir.rename files" { existing_file.close(io); try ctx.dir.rename(renamed_test_file_name, existing_file_path); - try testing.expectError(error.FileNotFound, ctx.dir.openFile(renamed_test_file_name, .{})); - file = try ctx.dir.openFile(existing_file_path, .{}); + try testing.expectError(error.FileNotFound, ctx.dir.openFile(io, renamed_test_file_name, .{})); + file = try ctx.dir.openFile(io, existing_file_path, .{}); file.close(io); } }.impl); @@ -1026,7 +1026,7 @@ test "Dir.rename directories" { // Ensure the directory was renamed and the file still exists in it try testing.expectError(error.FileNotFound, ctx.dir.openDir(test_dir_renamed_path, .{})); dir = try ctx.dir.openDir(test_dir_renamed_again_path, .{}); - file = try dir.openFile("test_file", .{}); + file = try dir.openFile(io, "test_file", .{}); file.close(io); dir.close(io); } @@ -1119,8 +1119,8 @@ test "rename" { try 
fs.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name); // ensure the file was renamed - try testing.expectError(error.FileNotFound, tmp_dir1.dir.openFile(test_file_name, .{})); - file = try tmp_dir2.dir.openFile(renamed_test_file_name, .{}); + try testing.expectError(error.FileNotFound, tmp_dir1.dir.openFile(io, test_file_name, .{})); + file = try tmp_dir2.dir.openFile(io, renamed_test_file_name, .{}); file.close(io); } @@ -1156,8 +1156,8 @@ test "renameAbsolute" { ); // ensure the file was renamed - try testing.expectError(error.FileNotFound, tmp_dir.dir.openFile(test_file_name, .{})); - file = try tmp_dir.dir.openFile(renamed_test_file_name, .{}); + try testing.expectError(error.FileNotFound, tmp_dir.dir.openFile(io, test_file_name, .{})); + file = try tmp_dir.dir.openFile(io, renamed_test_file_name, .{}); const stat = try file.stat(); try testing.expectEqual(File.Kind.file, stat.kind); file.close(io); @@ -1512,7 +1512,7 @@ test "setEndPos" { const file_name = "afile.txt"; try tmp.dir.writeFile(.{ .sub_path = file_name, .data = "ninebytes" }); - const f = try tmp.dir.openFile(file_name, .{ .mode = .read_write }); + const f = try tmp.dir.openFile(io, file_name, .{ .mode = .read_write }); defer f.close(io); const initial_size = try f.getEndPos(); @@ -1856,7 +1856,7 @@ test "read from locked file" { .lock = .exclusive, }); defer f.close(io); - const f2 = try ctx.dir.openFile(filename, .{}); + const f2 = try ctx.dir.openFile(io, filename, .{}); defer f2.close(io); var buffer: [1]u8 = undefined; if (builtin.os.tag == .windows) { @@ -2041,12 +2041,12 @@ test "'.' and '..' in Io.Dir functions" { try ctx.dir.copyFile(file_path, ctx.dir, copy_path, .{}); try ctx.dir.rename(copy_path, rename_path); - const renamed_file = try ctx.dir.openFile(rename_path, .{}); + const renamed_file = try ctx.dir.openFile(io, rename_path, .{}); renamed_file.close(io); try ctx.dir.deleteFile(rename_path); try ctx.dir.writeFile(.{ .sub_path = update_path, .data = "something" }); - var dir = ctx.dir.adaptToNewApi(); + var dir = ctx.dir; const prev_status = try dir.updateFile(io, file_path, dir, update_path, .{}); try testing.expectEqual(Io.Dir.PrevStatus.stale, prev_status); @@ -2186,7 +2186,7 @@ test "invalid UTF-8/WTF-8 paths" { try testing.expectError(expected_err, ctx.dir.access(invalid_path, .{})); - var dir = ctx.dir.adaptToNewApi(); + var dir = ctx.dir; try testing.expectError(expected_err, dir.updateFile(io, invalid_path, dir, invalid_path, .{})); try testing.expectError(expected_err, ctx.dir.copyFile(invalid_path, ctx.dir, invalid_path, .{})); @@ -2235,7 +2235,7 @@ test "read file non vectored" { try file_writer.interface.flush(); } - var file_reader: std.Io.File.Reader = .initAdapted(file, io, &.{}); + var file_reader: std.Io.File.Reader = .init(file, io, &.{}); var write_buffer: [100]u8 = undefined; var w: std.Io.Writer = .fixed(&write_buffer); @@ -2268,7 +2268,7 @@ test "seek keeping partial buffer" { } var read_buffer: [3]u8 = undefined; - var file_reader: Io.File.Reader = .initAdapted(file, io, &read_buffer); + var file_reader: Io.File.Reader = .init(file, io, &read_buffer); try testing.expectEqual(0, file_reader.logicalPos()); @@ -2301,7 +2301,7 @@ test "seekBy" { defer tmp_dir.cleanup(); try tmp_dir.dir.writeFile(.{ .sub_path = "blah.txt", .data = "let's test seekBy" }); - const f = try tmp_dir.dir.openFile("blah.txt", .{ .mode = .read_only }); + const f = try tmp_dir.dir.openFile(io, "blah.txt", .{ .mode = .read_only }); defer f.close(io); var reader = f.readerStreaming(io, 
&.{}); try reader.seekBy(2); @@ -2332,7 +2332,7 @@ test "seekTo flushes buffered data" { } var read_buffer: [16]u8 = undefined; - var file_reader: std.Io.File.Reader = .initAdapted(file, io, &read_buffer); + var file_reader: std.Io.File.Reader = .init(file, io, &read_buffer); var buf: [4]u8 = undefined; try file_reader.interface.readSliceAll(&buf); @@ -2347,7 +2347,7 @@ test "File.Writer sendfile with buffered contents" { { try tmp_dir.dir.writeFile(.{ .sub_path = "a", .data = "bcd" }); - const in = try tmp_dir.dir.openFile("a", .{}); + const in = try tmp_dir.dir.openFile(io, "a", .{}); defer in.close(io); const out = try tmp_dir.dir.createFile("b", .{}); defer out.close(io); @@ -2364,7 +2364,7 @@ test "File.Writer sendfile with buffered contents" { try out_w.interface.flush(); } - var check = try tmp_dir.dir.openFile("b", .{}); + var check = try tmp_dir.dir.openFile(io, "b", .{}); defer check.close(io); var check_buf: [4]u8 = undefined; var check_r = check.reader(io, &check_buf); diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig index e4a5bd3738..c7d3f35d40 100644 --- a/lib/std/os/linux/IoUring.zig +++ b/lib/std/os/linux/IoUring.zig @@ -3002,7 +3002,7 @@ test "renameat" { }, cqe); // Validate that the old file doesn't exist anymore - try testing.expectError(error.FileNotFound, tmp.dir.openFile(old_path, .{})); + try testing.expectError(error.FileNotFound, tmp.dir.openFile(io, old_path, .{})); // Validate that the new file exists with the proper content var new_file_data: [16]u8 = undefined; @@ -3057,7 +3057,7 @@ test "unlinkat" { }, cqe); // Validate that the file doesn't exist anymore - _ = tmp.dir.openFile(path, .{}) catch |err| switch (err) { + _ = tmp.dir.openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => {}, else => std.debug.panic("unexpected error: {}", .{err}), }; @@ -3154,7 +3154,7 @@ test "symlinkat" { }, cqe); // Validate that the symlink exist - _ = try tmp.dir.openFile(link_path, .{}); + _ = try tmp.dir.openFile(io, link_path, .{}); } test "linkat" { diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 82fa2c41d1..19313e3ff7 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -164,10 +164,10 @@ test "linkat with different directories" { // Test 1: link from file in subdir back up to target in parent directory try posix.linkat(tmp.dir.fd, target_name, subdir.fd, link_name, 0); - const efd = try tmp.dir.openFile(target_name, .{}); + const efd = try tmp.dir.openFile(io, target_name, .{}); defer efd.close(io); - const nfd = try subdir.openFile(link_name, .{}); + const nfd = try subdir.openFile(io, link_name, .{}); defer nfd.close(io); { @@ -429,7 +429,7 @@ test "mmap" { // Map the whole file { - const file = try tmp.dir.openFile(test_out_file, .{}); + const file = try tmp.dir.openFile(io, test_out_file, .{}); defer file.close(io); const data = try posix.mmap( @@ -454,7 +454,7 @@ test "mmap" { // Map the upper half of the file { - const file = try tmp.dir.openFile(test_out_file, .{}); + const file = try tmp.dir.openFile(io, test_out_file, .{}); defer file.close(io); const data = try posix.mmap( diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index 4f0c11797d..cc74da956a 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -817,7 +817,7 @@ fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion { // .dynstr section, and finding the max version number of symbols // that start with "GLIBC_2.". 
const glibc_so_basename = "libc.so.6"; - var file = dir.openFile(glibc_so_basename, .{}) catch |err| switch (err) { + var file = dir.openFile(io, glibc_so_basename, .{}) catch |err| switch (err) { error.NameTooLong => return error.Unexpected, error.BadPathName => return error.Unexpected, error.PipeBusy => return error.Unexpected, // Windows-only @@ -851,7 +851,7 @@ fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion { // Empirically, glibc 2.34 libc.so .dynstr section is 32441 bytes on my system. var buffer: [8000]u8 = undefined; - var file_reader: Io.File.Reader = .initAdapted(file, io, &buffer); + var file_reader: Io.File.Reader = .init(file, io, &buffer); return glibcVerFromSoFile(&file_reader) catch |err| switch (err) { error.InvalidElfMagic, @@ -1053,7 +1053,7 @@ fn detectAbiAndDynamicLinker(io: Io, cpu: Target.Cpu, os: Target.Os, query: Targ var is_elf_file = false; defer if (!is_elf_file) file.close(io); - file_reader = .initAdapted(file, io, &file_reader_buffer); + file_reader = .init(file, io, &file_reader_buffer); file_name = undefined; // it aliases file_reader_buffer const header = elf.Header.read(&file_reader.interface) catch |hdr_err| switch (hdr_err) { diff --git a/src/Compilation.zig b/src/Compilation.zig index d75cba5a11..24b994f608 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1104,7 +1104,7 @@ pub const CObject = struct { const source_line = source_line: { if (diag.src_loc.offset == 0 or diag.src_loc.column == 0) break :source_line 0; - const file = fs.cwd().openFile(file_name, .{}) catch break :source_line 0; + const file = fs.cwd().openFile(io, file_name, .{}) catch break :source_line 0; defer file.close(io); var buffer: [1024]u8 = undefined; var file_reader = file.reader(io, &buffer); @@ -1179,7 +1179,7 @@ pub const CObject = struct { }; var buffer: [1024]u8 = undefined; - const file = try fs.cwd().openFile(path, .{}); + const file = try fs.cwd().openFile(io, path, .{}); defer file.close(io); var file_reader = file.reader(io, &buffer); var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = &file_reader.interface }); @@ -5354,14 +5354,14 @@ fn docsCopyModule( }, else => continue, } - var file = mod_dir.openFile(entry.path, .{}) catch |err| { + var file = mod_dir.openFile(io, entry.path, .{}) catch |err| { return comp.lockAndSetMiscFailure(.docs_copy, "unable to open {f}{s}: {t}", .{ root.fmt(comp), entry.path, err, }); }; defer file.close(io); const stat = try file.stat(); - var file_reader: Io.File.Reader = .initSize(file.adaptToNewApi(), io, &buffer, stat.size); + var file_reader: Io.File.Reader = .initSize(file, io, &buffer, stat.size); archiver.writeFileTimestamp(entry.path, &file_reader, stat.mtime) catch |err| { return comp.lockAndSetMiscFailure(.docs_copy, "unable to archive {f}{s}: {t}", .{ diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 988282097b..3bd05120ff 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -390,7 +390,7 @@ pub fn run(f: *Fetch) RunError!void { var server_header_buffer: [init_resource_buffer_size]u8 = undefined; const file_err = if (dir_err == error.NotDir) e: { - if (fs.cwd().openFile(path_or_url, .{})) |file| { + if (fs.cwd().openFile(io, path_or_url, .{})) |file| { var resource: Resource = .{ .file = file.reader(io, &server_header_buffer) }; return f.runResource(path_or_url, &resource, null); } else |err| break :e err; @@ -995,7 +995,7 @@ fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u if (ascii.eqlIgnoreCase(uri.scheme, "file")) { const 
path = try uri.path.toRawMaybeAlloc(arena); - const file = f.parent_package_root.openFile(path, .{}) catch |err| { + const file = f.parent_package_root.openFile(io, path, .{}) catch |err| { return f.fail(f.location_tok, try eb.printString("unable to open '{f}{s}': {t}", .{ f.parent_package_root, path, err, })); @@ -1677,7 +1677,7 @@ fn hashFileFallible(io: Io, dir: Io.Dir, hashed_file: *HashedFile) HashedFile.Er switch (hashed_file.kind) { .file => { - var file = try dir.openFile(hashed_file.fs_path, .{}); + var file = try dir.openFile(io, hashed_file.fs_path, .{}); defer file.close(io); // Hard-coded false executable bit: https://github.com/ziglang/zig/issues/17463 hasher.update(&.{ 0, 0 }); diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index abaa8fef73..ccae9440e2 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -1714,7 +1714,7 @@ pub fn main() !void { const format = std.meta.stringToEnum(Oid.Format, args[1]) orelse return error.InvalidFormat; - var pack_file = try std.fs.cwd().openFile(args[2], .{}); + var pack_file = try std.fs.cwd().openFile(io, args[2], .{}); defer pack_file.close(io); var pack_file_buffer: [4096]u8 = undefined; var pack_file_reader = pack_file.reader(io, &pack_file_buffer); diff --git a/src/Zcu.zig b/src/Zcu.zig index cd4a8c7783..d2634a8962 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -1076,7 +1076,7 @@ pub const File = struct { var f = f: { const dir, const sub_path = file.path.openInfo(zcu.comp.dirs); - break :f try dir.openFile(sub_path, .{}); + break :f try dir.openFile(io, sub_path, .{}); }; defer f.close(io); diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 55d6a3861f..45b1302138 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -94,7 +94,7 @@ pub fn updateFile( // In any case we need to examine the stat of the file to determine the course of action. 
var source_file = f: { const dir, const sub_path = file.path.openInfo(comp.dirs); - break :f try dir.openFile(sub_path, .{}); + break :f try dir.openFile(io, sub_path, .{}); }; defer source_file.close(io); @@ -2466,7 +2466,7 @@ fn updateEmbedFileInner( var file = f: { const dir, const sub_path = ef.path.openInfo(zcu.comp.dirs); - break :f try dir.openFile(sub_path, .{}); + break :f try dir.openFile(io, sub_path, .{}); }; defer file.close(io); diff --git a/src/fmt.zig b/src/fmt.zig index 663d09e9cb..ce8a31fa4c 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -262,7 +262,7 @@ fn fmtPathFile( ) !void { const io = fmt.io; - const source_file = try dir.openFile(sub_path, .{}); + const source_file = try dir.openFile(io, sub_path, .{}); var file_closed = false; errdefer if (!file_closed) source_file.close(io); diff --git a/src/introspect.zig b/src/introspect.zig index 9b6797e7d8..d2faa9a55c 100644 --- a/src/introspect.zig +++ b/src/introspect.zig @@ -22,7 +22,7 @@ fn testZigInstallPrefix(io: Io, base_dir: Io.Dir) ?Cache.Directory { // Try lib/zig/std/std.zig const lib_zig = "lib" ++ fs.path.sep_str ++ "zig"; var test_zig_dir = base_dir.openDir(lib_zig, .{}) catch break :zig_dir; - const file = test_zig_dir.openFile(test_index_file, .{}) catch { + const file = test_zig_dir.openFile(io, test_index_file, .{}) catch { test_zig_dir.close(io); break :zig_dir; }; @@ -32,7 +32,7 @@ fn testZigInstallPrefix(io: Io, base_dir: Io.Dir) ?Cache.Directory { // Try lib/std/std.zig var test_zig_dir = base_dir.openDir("lib", .{}) catch return null; - const file = test_zig_dir.openFile(test_index_file, .{}) catch { + const file = test_zig_dir.openFile(io, test_index_file, .{}) catch { test_zig_dir.close(io); return null; }; diff --git a/src/link.zig b/src/link.zig index d5daf6fca7..073ec632c6 100644 --- a/src/link.zig +++ b/src/link.zig @@ -637,7 +637,7 @@ pub const File = struct { } } } - base.file = try emit.root_dir.handle.openFile(emit.sub_path, .{ .mode = .read_write }); + base.file = try emit.root_dir.handle.openFile(io, emit.sub_path, .{ .mode = .read_write }); }, .elf2, .coff2 => if (base.file == null) { const mf = if (base.cast(.elf2)) |elf| @@ -646,10 +646,10 @@ pub const File = struct { &coff.mf else unreachable; - mf.file = try base.emit.root_dir.handle.adaptToNewApi().openFile(io, base.emit.sub_path, .{ + mf.file = try base.emit.root_dir.handle.openFile(io, base.emit.sub_path, .{ .mode = .read_write, }); - base.file = .adaptFromNewApi(mf.file); + base.file = mf.file; try mf.ensureTotalCapacity(@intCast(mf.nodes.items[0].location().resolve(mf)[1])); }, .c, .spirv => dev.checkAny(&.{ .c_linker, .spirv_linker }), @@ -2007,7 +2007,7 @@ fn resolveLibInput( .sub_path = try std.fmt.allocPrint(arena, "lib{s}.tbd", .{lib_name}), }; try checked_paths.print(gpa, "\n {f}", .{test_path}); - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => break :tbd, else => |e| fatal("unable to search for tbd library '{f}': {s}", .{ test_path, @errorName(e) }), }; @@ -2043,7 +2043,7 @@ fn resolveLibInput( .sub_path = try std.fmt.allocPrint(arena, "lib{s}.so", .{lib_name}), }; try checked_paths.print(gpa, "\n {f}", .{test_path}); - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => break :so, 
else => |e| fatal("unable to search for so library '{f}': {s}", .{ test_path, @errorName(e), @@ -2061,7 +2061,7 @@ fn resolveLibInput( .sub_path = try std.fmt.allocPrint(arena, "lib{s}.a", .{lib_name}), }; try checked_paths.print(gpa, "\n {f}", .{test_path}); - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => break :mingw, else => |e| fatal("unable to search for static library '{f}': {s}", .{ test_path, @errorName(e) }), }; @@ -2115,7 +2115,7 @@ fn resolvePathInput( .static_library => return try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .static, color), .shared_library => return try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .dynamic, color), .object => { - var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err| + var file = pq.path.root_dir.handle.openFile(io, pq.path.sub_path, .{}) catch |err| fatal("failed to open object {f}: {s}", .{ pq.path, @errorName(err) }); errdefer file.close(io); try resolved_inputs.append(gpa, .{ .object = .{ @@ -2127,7 +2127,7 @@ fn resolvePathInput( return null; }, .res => { - var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err| + var file = pq.path.root_dir.handle.openFile(io, pq.path.sub_path, .{}) catch |err| fatal("failed to open windows resource {f}: {s}", .{ pq.path, @errorName(err) }); errdefer file.close(io); try resolved_inputs.append(gpa, .{ .res = .{ @@ -2164,7 +2164,7 @@ fn resolvePathInputLib( .static_library, .shared_library => true, else => false, }) { - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => return .no_match, else => |e| fatal("unable to search for {s} library '{f}': {s}", .{ @tagName(link_mode), std.fmt.alt(test_path, .formatEscapeChar), @errorName(e), @@ -2242,7 +2242,7 @@ fn resolvePathInputLib( return .ok; } - var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) { + var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => return .no_match, else => |e| fatal("unable to search for {s} library {f}: {s}", .{ @tagName(link_mode), test_path, @errorName(e), @@ -2253,7 +2253,7 @@ fn resolvePathInputLib( } pub fn openObject(io: Io, path: Path, must_link: bool, hidden: bool) !Input.Object { - var file = try path.root_dir.handle.openFile(path.sub_path, .{}); + var file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); errdefer file.close(io); return .{ .path = path, @@ -2264,7 +2264,7 @@ pub fn openObject(io: Io, path: Path, must_link: bool, hidden: bool) !Input.Obje } pub fn openDso(io: Io, path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso { - var file = try path.root_dir.handle.openFile(path.sub_path, .{}); + var file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); errdefer file.close(io); return .{ .path = path, diff --git a/src/link/Coff.zig b/src/link/Coff.zig index f33e0ccdea..e1d52fb7c4 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -632,7 +632,7 @@ fn create( }; const coff = try arena.create(Coff); - const file = try 
path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{ + const file = try path.root_dir.handle.createFile(comp.io, path.sub_path, .{ .read = true, .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode), }); @@ -644,7 +644,7 @@ fn create( .comp = comp, .emit = path, - .file = .adaptFromNewApi(file), + .file = file, .gc_sections = false, .print_gc_sections = false, .build_id = .none, diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig index e35444bc02..72fdb244a4 100644 --- a/src/link/Elf2.zig +++ b/src/link/Elf2.zig @@ -928,6 +928,7 @@ fn create( path: std.Build.Cache.Path, options: link.File.OpenOptions, ) !*Elf { + const io = comp.io; const target = &comp.root_mod.resolved_target.result; assert(target.ofmt == .elf); const class: std.elf.CLASS = switch (target.ptrBitWidth()) { @@ -973,11 +974,11 @@ fn create( }; const elf = try arena.create(Elf); - const file = try path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{ + const file = try path.root_dir.handle.createFile(io, path.sub_path, .{ .read = true, .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode), }); - errdefer file.close(comp.io); + errdefer file.close(io); elf.* = .{ .base = .{ .tag = .elf2, @@ -985,7 +986,7 @@ fn create( .comp = comp, .emit = path, - .file = .adaptFromNewApi(file), + .file = file, .gc_sections = false, .print_gc_sections = false, .build_id = .none, @@ -3325,12 +3326,13 @@ fn flushInputSection(elf: *Elf, isi: Node.InputSectionIndex) !void { const file_loc = isi.fileLocation(elf); if (file_loc.size == 0) return; const comp = elf.base.comp; + const io = comp.io; const gpa = comp.gpa; const ii = isi.input(elf); const path = ii.path(elf); - const file = try path.root_dir.handle.adaptToNewApi().openFile(comp.io, path.sub_path, .{}); - defer file.close(comp.io); - var fr = file.reader(comp.io, &.{}); + const file = try path.root_dir.handle.openFile(io, path.sub_path, .{}); + defer file.close(io); + var fr = file.reader(io, &.{}); try fr.seekTo(file_loc.offset); var nw: MappedFile.Node.Writer = undefined; const si = isi.symbol(elf); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 93adc633ce..e837cc853a 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1126,7 +1126,9 @@ fn parseDependentDylibs(self: *MachO) !void { if (self.dylibs.items.len == 0) return; - const gpa = self.base.comp.gpa; + const comp = self.base.comp; + const gpa = comp.gpa; + const io = comp.io; const framework_dirs = self.framework_dirs; // TODO delete this, directories must instead be resolved by the frontend @@ -1183,7 +1185,7 @@ fn parseDependentDylibs(self: *MachO) !void { const path = if (existing_ext.len > 0) id.name[0 .. 
id.name.len - existing_ext.len] else id.name; for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| { test_path.clearRetainingCapacity(); - if (self.base.comp.sysroot) |root| { + if (comp.sysroot) |root| { try test_path.print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext }); } else { try test_path.print("{s}{s}", .{ path, ext }); @@ -1235,7 +1237,7 @@ fn parseDependentDylibs(self: *MachO) !void { .path = Path.initCwd(full_path), .weak = is_weak, }; - const file = try lib.path.root_dir.handle.openFile(lib.path.sub_path, .{}); + const file = try lib.path.root_dir.handle.openFile(io, lib.path.sub_path, .{}); const fh = try self.addFileHandle(file); const fat_arch = try self.parseFatFile(file, lib.path); const offset = if (fat_arch) |fa| fa.offset else 0; diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig index d2a6c2a3ab..0f42442640 100644 --- a/src/link/MachO/relocatable.zig +++ b/src/link/MachO/relocatable.zig @@ -1,6 +1,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void { - const gpa = macho_file.base.comp.gpa; - const diags = &macho_file.base.comp.link_diags; + const gpa = comp.gpa; + const io = comp.io; + const diags = &comp.link_diags; // TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list. var positionals = std.array_list.Managed(link.Input).init(gpa); @@ -19,7 +20,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat // debug info segments/sections (this is apparently by design by Apple), we copy // the *only* input file over. const path = positionals.items[0].path().?; - const in_file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| + const in_file = path.root_dir.handle.openFile(io, path.sub_path, .{}) catch |err| return diags.fail("failed to open {f}: {s}", .{ path, @errorName(err) }); const stat = in_file.stat() catch |err| return diags.fail("failed to stat {f}: {s}", .{ path, @errorName(err) }); diff --git a/src/link/MappedFile.zig b/src/link/MappedFile.zig index 7d4134ccaf..a61c6e764c 100644 --- a/src/link/MappedFile.zig +++ b/src/link/MappedFile.zig @@ -630,7 +630,7 @@ fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested // Resize the entire file if (ni == Node.Index.root) { try mf.ensureCapacityForSetLocation(gpa); - try Io.File.adaptFromNewApi(mf.file).setEndPos(new_size); + try mf.file.setEndPos(new_size); try mf.ensureTotalCapacity(@intCast(new_size)); ni.setLocationAssumeCapacity(mf, old_offset, new_size); return; diff --git a/src/main.zig b/src/main.zig index 3ca64881f8..b040b6c8ef 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4681,7 +4681,7 @@ fn cmdTranslateC( } else { const hex_digest = Cache.binToHex(result.digest); const out_zig_path = try fs.path.join(arena, &.{ "o", &hex_digest, translated_basename }); - const zig_file = comp.dirs.local_cache.handle.openFile(out_zig_path, .{}) catch |err| { + const zig_file = comp.dirs.local_cache.handle.openFile(io, out_zig_path, .{}) catch |err| { const path = comp.dirs.local_cache.path orelse "."; fatal("unable to open cached translated zig file '{s}{s}{s}': {s}", .{ path, @@ -6187,7 +6187,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void { const display_path = zig_source_path orelse ""; const source: [:0]const u8 = s: { var f = if (zig_source_path) |p| file: { - break :file fs.cwd().openFile(p, .{}) catch |err| { + break :file fs.cwd().openFile(io, p, .{}) catch |err| { 
fatal("unable to open file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) }); }; } else Io.File.stdin(); @@ -6494,7 +6494,7 @@ fn cmdDumpZir(arena: Allocator, io: Io, args: []const []const u8) !void { const cache_file = args[0]; - var f = fs.cwd().openFile(cache_file, .{}) catch |err| { + var f = fs.cwd().openFile(io, cache_file, .{}) catch |err| { fatal("unable to open zir cache file for dumping '{s}': {s}", .{ cache_file, @errorName(err) }); }; defer f.close(io); @@ -6541,7 +6541,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { const new_source_path = args[1]; const old_source = source: { - var f = fs.cwd().openFile(old_source_path, .{}) catch |err| + var f = fs.cwd().openFile(io, old_source_path, .{}) catch |err| fatal("unable to open old source file '{s}': {s}", .{ old_source_path, @errorName(err) }); defer f.close(io); var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer); @@ -6549,7 +6549,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { fatal("unable to read old source file '{s}': {s}", .{ old_source_path, @errorName(err) }); }; const new_source = source: { - var f = fs.cwd().openFile(new_source_path, .{}) catch |err| + var f = fs.cwd().openFile(io, new_source_path, .{}) catch |err| fatal("unable to open new source file '{s}': {s}", .{ new_source_path, @errorName(err) }); defer f.close(io); var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer); -- cgit v1.2.3 From f53248a40936ebc9aaf75ddbd16e67ebec05ab84 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 13:39:09 -0800 Subject: update all std.fs.cwd() to std.Io.Dir.cwd() --- lib/compiler/aro/aro/Compilation.zig | 8 +- lib/compiler/aro/aro/Driver.zig | 10 +-- lib/compiler/aro/aro/Parser.zig | 27 +++---- lib/compiler/aro/aro/Preprocessor.zig | 6 +- lib/compiler/aro/aro/Tokenizer.zig | 5 +- lib/compiler/aro/aro/Value.zig | 11 +-- lib/compiler/aro/main.zig | 2 +- lib/compiler/objcopy.zig | 4 +- lib/compiler/reduce.zig | 6 +- lib/compiler/resinator/cli.zig | 2 +- lib/compiler/resinator/compile.zig | 4 +- lib/compiler/resinator/main.zig | 22 +++--- lib/compiler/std-docs.zig | 2 +- lib/compiler/translate-c/main.zig | 8 +- lib/std/Build.zig | 10 +-- lib/std/Build/Cache.zig | 8 +- lib/std/Build/Step.zig | 17 ++-- lib/std/Build/Step/CheckFile.zig | 4 +- lib/std/Build/Step/ConfigHeader.zig | 8 +- lib/std/Build/Step/Options.zig | 15 ++-- lib/std/Build/Step/Run.zig | 2 +- lib/std/Build/Watch.zig | 16 ++-- lib/std/Io/File.zig | 8 ++ lib/std/Io/Threaded.zig | 9 +-- lib/std/Io/Writer.zig | 6 +- lib/std/Io/net/test.zig | 2 +- lib/std/Io/test.zig | 10 +-- lib/std/Thread.zig | 4 +- lib/std/crypto/Certificate/Bundle/macos.zig | 2 +- lib/std/crypto/codecs/asn1/test.zig | 4 +- lib/std/debug.zig | 20 ++--- lib/std/debug/ElfFile.zig | 2 +- lib/std/debug/MachOFile.zig | 2 +- lib/std/debug/SelfInfo/Elf.zig | 19 +++-- lib/std/debug/SelfInfo/MachO.zig | 2 +- lib/std/debug/SelfInfo/Windows.zig | 4 +- lib/std/dynamic_library.zig | 8 +- lib/std/fs/test.zig | 116 +++++++++++++--------------- lib/std/os/linux/IoUring.zig | 24 +++--- lib/std/os/linux/test.zig | 6 +- lib/std/os/windows.zig | 4 +- lib/std/posix.zig | 14 ++-- lib/std/posix/test.zig | 52 ++++++------- lib/std/process/Child.zig | 2 +- lib/std/std.zig | 2 +- lib/std/tar.zig | 14 ++-- lib/std/testing.zig | 2 +- lib/std/zig/LibCInstallation.zig | 12 +-- lib/std/zig/WindowsSdk.zig | 2 +- lib/std/zig/system.zig | 6 +- lib/std/zig/system/darwin/macos.zig | 7 +- lib/std/zip.zig | 4 +- 
src/Compilation.zig | 20 ++--- src/Package/Fetch.zig | 12 +-- src/Package/Fetch/git.zig | 12 +-- src/Zcu/PerThread.zig | 4 +- src/codegen/llvm.zig | 17 ++-- src/fmt.zig | 6 +- src/introspect.zig | 4 +- src/libs/freebsd.zig | 12 +-- src/libs/glibc.zig | 16 ++-- src/libs/mingw.zig | 17 ++-- src/libs/netbsd.zig | 12 +-- src/link/C.zig | 4 +- src/link/Coff.zig | 6 +- src/link/Elf.zig | 4 +- src/link/Lld.zig | 4 +- src/link/MachO.zig | 14 ++-- src/link/MachO/CodeSignature.zig | 2 +- src/link/SpirV.zig | 3 +- src/link/Wasm.zig | 4 +- src/main.zig | 36 ++++----- 72 files changed, 398 insertions(+), 377 deletions(-) (limited to 'lib/std/Build/Cache.zig') diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index 09e4861d13..b3e4d5544d 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -2253,7 +2253,7 @@ test "addSourceFromBuffer" { var arena: std.heap.ArenaAllocator = .init(std.testing.allocator); defer arena.deinit(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); const source = try comp.addSourceFromBuffer("path", str); @@ -2267,7 +2267,7 @@ test "addSourceFromBuffer" { var arena: std.heap.ArenaAllocator = .init(allocator); defer arena.deinit(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(allocator, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(allocator, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); _ = try comp.addSourceFromBuffer("path", "spliced\\\nbuffer\n"); @@ -2313,7 +2313,7 @@ test "addSourceFromBuffer - exhaustive check for carriage return elimination" { var buf: [alphabet.len]u8 = @splat(alphabet[0]); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); var source_count: u32 = 0; @@ -2341,7 +2341,7 @@ test "ignore BOM at beginning of file" { const Test = struct { fn run(arena: Allocator, buf: []const u8) !void { var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); const source = try comp.addSourceFromBuffer("file.c", buf); diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig index f933e3ce52..fec3cea0f8 100644 --- a/lib/compiler/aro/aro/Driver.zig +++ b/lib/compiler/aro/aro/Driver.zig @@ -1327,7 +1327,7 @@ fn processSource( const dep_file_name = try d.getDepFileName(source, writer_buf[0..std.fs.max_name_bytes]); const file = if (dep_file_name) |path| - d.comp.cwd.createFile(path, .{}) catch |er| + d.comp.cwd.createFile(io, path, .{}) catch |er| return d.fatal("unable to create dependency file '{s}': {s}", .{ path, errorDescription(er) }) else Io.File.stdout(); @@ -1352,7 +1352,7 @@ fn processSource( } const file = if (d.output_name) |some| - d.comp.cwd.createFile(some, .{}) catch |er| + d.comp.cwd.createFile(io, 
some, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ some, errorDescription(er) }) else Io.File.stdout(); @@ -1405,7 +1405,7 @@ fn processSource( defer assembly.deinit(gpa); if (d.only_preprocess_and_compile) { - const out_file = d.comp.cwd.createFile(out_file_name, .{}) catch |er| + const out_file = d.comp.cwd.createFile(io, out_file_name, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ out_file_name, errorDescription(er) }); defer out_file.close(io); @@ -1419,7 +1419,7 @@ fn processSource( // then assemble to out_file_name var assembly_name_buf: [std.fs.max_name_bytes]u8 = undefined; const assembly_out_file_name = try d.getRandomFilename(&assembly_name_buf, ".s"); - const out_file = d.comp.cwd.createFile(assembly_out_file_name, .{}) catch |er| + const out_file = d.comp.cwd.createFile(io, assembly_out_file_name, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ assembly_out_file_name, errorDescription(er) }); defer out_file.close(io); assembly.writeToFile(out_file) catch |er| @@ -1455,7 +1455,7 @@ fn processSource( }; defer obj.deinit(); - const out_file = d.comp.cwd.createFile(out_file_name, .{}) catch |er| + const out_file = d.comp.cwd.createFile(io, out_file_name, .{}) catch |er| return d.fatal("unable to create output file '{s}': {s}", .{ out_file_name, errorDescription(er) }); defer out_file.close(io); diff --git a/lib/compiler/aro/aro/Parser.zig b/lib/compiler/aro/aro/Parser.zig index 4a89e0d460..fc21ee4d0b 100644 --- a/lib/compiler/aro/aro/Parser.zig +++ b/lib/compiler/aro/aro/Parser.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const mem = std.mem; const Allocator = mem.Allocator; const assert = std.debug.assert; @@ -211,7 +212,7 @@ fn checkIdentifierCodepointWarnings(p: *Parser, codepoint: u21, loc: Source.Loca const prev_total = p.diagnostics.total; var sf = std.heap.stackFallback(1024, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var allocating: Io.Writer.Allocating = .init(sf.get()); defer allocating.deinit(); if (!char_info.isC99IdChar(codepoint)) { @@ -425,7 +426,7 @@ pub fn err(p: *Parser, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) if (p.diagnostics.effectiveKind(diagnostic) == .off) return; var sf = std.heap.stackFallback(1024, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var allocating: Io.Writer.Allocating = .init(sf.get()); defer allocating.deinit(); p.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; @@ -447,7 +448,7 @@ pub fn err(p: *Parser, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) }, p.pp.expansionSlice(tok_i), true); } -fn formatArgs(p: *Parser, w: *std.Io.Writer, fmt: []const u8, args: anytype) !void { +fn formatArgs(p: *Parser, w: *Io.Writer, fmt: []const u8, args: anytype) !void { var i: usize = 0; inline for (std.meta.fields(@TypeOf(args))) |arg_info| { const arg = @field(args, arg_info.name); @@ -476,13 +477,13 @@ fn formatArgs(p: *Parser, w: *std.Io.Writer, fmt: []const u8, args: anytype) !vo try w.writeAll(fmt[i..]); } -fn formatTokenId(w: *std.Io.Writer, fmt: []const u8, tok_id: Tree.Token.Id) !usize { +fn formatTokenId(w: *Io.Writer, fmt: []const u8, tok_id: Tree.Token.Id) !usize { const i = Diagnostics.templateIndex(w, fmt, "{tok_id}"); try w.writeAll(tok_id.symbol()); return i; } -fn formatQualType(p: *Parser, w: *std.Io.Writer, fmt: []const u8, qt: QualType) !usize { +fn formatQualType(p: *Parser, w: *Io.Writer, fmt: 
[]const u8, qt: QualType) !usize { const i = Diagnostics.templateIndex(w, fmt, "{qt}"); try w.writeByte('\''); try qt.print(p.comp, w); @@ -501,7 +502,7 @@ fn formatQualType(p: *Parser, w: *std.Io.Writer, fmt: []const u8, qt: QualType) return i; } -fn formatResult(p: *Parser, w: *std.Io.Writer, fmt: []const u8, res: Result) !usize { +fn formatResult(p: *Parser, w: *Io.Writer, fmt: []const u8, res: Result) !usize { const i = Diagnostics.templateIndex(w, fmt, "{value}"); switch (res.val.opt_ref) { .none => try w.writeAll("(none)"), @@ -524,7 +525,7 @@ const Normalized = struct { return .{ .str = str }; } - pub fn format(ctx: Normalized, w: *std.Io.Writer, fmt: []const u8) !usize { + pub fn format(ctx: Normalized, w: *Io.Writer, fmt: []const u8) !usize { const i = Diagnostics.templateIndex(w, fmt, "{normalized}"); var it: std.unicode.Utf8Iterator = .{ .bytes = ctx.str, @@ -558,7 +559,7 @@ const Codepoint = struct { return .{ .codepoint = codepoint }; } - pub fn format(ctx: Codepoint, w: *std.Io.Writer, fmt: []const u8) !usize { + pub fn format(ctx: Codepoint, w: *Io.Writer, fmt: []const u8) !usize { const i = Diagnostics.templateIndex(w, fmt, "{codepoint}"); try w.print("{X:0>4}", .{ctx.codepoint}); return i; @@ -572,7 +573,7 @@ const Escaped = struct { return .{ .str = str }; } - pub fn format(ctx: Escaped, w: *std.Io.Writer, fmt: []const u8) !usize { + pub fn format(ctx: Escaped, w: *Io.Writer, fmt: []const u8) !usize { const i = Diagnostics.templateIndex(w, fmt, "{s}"); try std.zig.stringEscape(ctx.str, w); return i; @@ -1453,7 +1454,7 @@ fn decl(p: *Parser) Error!bool { return true; } -fn staticAssertMessage(p: *Parser, cond_node: Node.Index, maybe_message: ?Result, allocating: *std.Io.Writer.Allocating) !?[]const u8 { +fn staticAssertMessage(p: *Parser, cond_node: Node.Index, maybe_message: ?Result, allocating: *Io.Writer.Allocating) !?[]const u8 { const w = &allocating.writer; const cond = cond_node.get(&p.tree); @@ -1526,7 +1527,7 @@ fn staticAssert(p: *Parser) Error!bool { } else { if (!res.val.toBool(p.comp)) { var sf = std.heap.stackFallback(1024, gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var allocating: Io.Writer.Allocating = .init(sf.get()); defer allocating.deinit(); if (p.staticAssertMessage(res_node, str, &allocating) catch return error.OutOfMemory) |message| { @@ -9719,7 +9720,7 @@ fn primaryExpr(p: *Parser) Error!?Result { qt = some.qt; } else if (p.func.qt) |func_qt| { var sf = std.heap.stackFallback(1024, gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var allocating: Io.Writer.Allocating = .init(sf.get()); defer allocating.deinit(); func_qt.printNamed(p.tokSlice(p.func.name), p.comp, &allocating.writer) catch return error.OutOfMemory; @@ -10608,7 +10609,7 @@ test "Node locations" { const arena = arena_state.allocator(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); const file = try comp.addSourceFromBuffer("file.c", diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig index d47727cbf0..e8343dc83a 100644 --- a/lib/compiler/aro/aro/Preprocessor.zig +++ b/lib/compiler/aro/aro/Preprocessor.zig @@ -3900,7 +3900,7 @@ test "Preserve pragma tokens sometimes" { defer arena.deinit(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = 
Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); try comp.addDefaultPragmaHandlers(); @@ -3967,7 +3967,7 @@ test "destringify" { var arena: std.heap.ArenaAllocator = .init(gpa); defer arena.deinit(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(gpa, arena.allocator(), std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); var pp = Preprocessor.init(&comp, .default); defer pp.deinit(); @@ -4030,7 +4030,7 @@ test "Include guards" { const arena = arena_state.allocator(); var diagnostics: Diagnostics = .{ .output = .ignore }; - var comp = Compilation.init(gpa, arena, std.testing.io, &diagnostics, std.fs.cwd()); + var comp = Compilation.init(gpa, arena, std.testing.io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); var pp = Preprocessor.init(&comp, .default); defer pp.deinit(); diff --git a/lib/compiler/aro/aro/Tokenizer.zig b/lib/compiler/aro/aro/Tokenizer.zig index c497c5ce82..198d49364a 100644 --- a/lib/compiler/aro/aro/Tokenizer.zig +++ b/lib/compiler/aro/aro/Tokenizer.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const Compilation = @import("Compilation.zig"); @@ -2326,7 +2327,7 @@ test "Tokenizer fuzz test" { fn testOne(_: @This(), input_bytes: []const u8) anyerror!void { var arena: std.heap.ArenaAllocator = .init(std.testing.allocator); defer arena.deinit(); - var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, Io.Dir.cwd()); defer comp.deinit(); const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes); @@ -2351,7 +2352,7 @@ test "Tokenizer fuzz test" { fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, langopts: ?LangOpts) !void { var arena: std.heap.ArenaAllocator = .init(std.testing.allocator); defer arena.deinit(); - var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, Io.Dir.cwd()); defer comp.deinit(); if (langopts) |provided| { comp.langopts = provided; diff --git a/lib/compiler/aro/aro/Value.zig b/lib/compiler/aro/aro/Value.zig index 25a2d1824f..14949ce03b 100644 --- a/lib/compiler/aro/aro/Value.zig +++ b/lib/compiler/aro/aro/Value.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; @@ -80,7 +81,7 @@ test "minUnsignedBits" { defer arena_state.deinit(); const arena = arena_state.allocator(); - var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, Io.Dir.cwd()); defer comp.deinit(); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); comp.target = .fromZigTarget(try std.zig.system.resolveTargetQuery(std.testing.io, target_query)); @@ -119,7 +120,7 @@ test "minSignedBits" { defer arena_state.deinit(); const arena = arena_state.allocator(); - var comp = 
Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, std.fs.cwd()); + var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, Io.Dir.cwd()); defer comp.deinit(); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); comp.target = .fromZigTarget(try std.zig.system.resolveTargetQuery(std.testing.io, target_query)); @@ -1080,7 +1081,7 @@ const NestedPrint = union(enum) { }, }; -pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void { +pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w: *Io.Writer) Io.Writer.Error!void { try w.writeByte('&'); try w.writeAll(base); if (!offset.isZero(comp)) { @@ -1089,7 +1090,7 @@ pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w } } -pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!?NestedPrint { +pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *Io.Writer) Io.Writer.Error!?NestedPrint { if (qt.is(comp, .bool)) { try w.writeAll(if (v.isZero(comp)) "false" else "true"); return null; @@ -1116,7 +1117,7 @@ pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *std.Io.Writer return null; } -pub fn printString(bytes: []const u8, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void { +pub fn printString(bytes: []const u8, qt: QualType, comp: *const Compilation, w: *Io.Writer) Io.Writer.Error!void { const size: Compilation.CharUnitSize = @enumFromInt(qt.childType(comp).sizeof(comp)); const without_null = bytes[0 .. bytes.len - @intFromEnum(size)]; try w.writeByte('"'); diff --git a/lib/compiler/aro/main.zig b/lib/compiler/aro/main.zig index 66c8add4a3..d1be1dbb21 100644 --- a/lib/compiler/aro/main.zig +++ b/lib/compiler/aro/main.zig @@ -59,7 +59,7 @@ pub fn main() u8 { } }, }; - var comp = Compilation.initDefault(gpa, arena, io, &diagnostics, std.fs.cwd()) catch |er| switch (er) { + var comp = Compilation.initDefault(gpa, arena, io, &diagnostics, Io.Dir.cwd()) catch |er| switch (er) { error.OutOfMemory => { std.debug.print("out of memory\n", .{}); if (fast_exit) process.exit(1); diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index 485e644daa..e48f76a6a6 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -152,7 +152,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void defer threaded.deinit(); const io = threaded.io(); - const input_file = fs.cwd().openFile(input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err }); + const input_file = Io.Dir.cwd().openFile(input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err }); defer input_file.close(io); const stat = input_file.stat() catch |err| fatal("failed to stat {s}: {t}", .{ input, err }); @@ -180,7 +180,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void const mode = if (out_fmt != .elf or only_keep_debug) Io.File.default_mode else stat.mode; - var output_file = try fs.cwd().createFile(output, .{ .mode = mode }); + var output_file = try Io.Dir.cwd().createFile(io, output, .{ .mode = mode }); defer output_file.close(io); var out = output_file.writer(&output_buffer); diff --git a/lib/compiler/reduce.zig b/lib/compiler/reduce.zig index bbd3d172b4..d3f33ad81a 100644 --- a/lib/compiler/reduce.zig +++ b/lib/compiler/reduce.zig @@ -233,7 +233,7 @@ pub fn main() !void { } } - 
try std.fs.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); + try Io.Dir.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); // std.debug.print("trying this code:\n{s}\n", .{rendered.items}); const interestingness = try runCheck(arena, interestingness_argv.items); @@ -274,7 +274,7 @@ pub fn main() !void { fixups.clearRetainingCapacity(); rendered.clearRetainingCapacity(); try tree.render(gpa, &rendered.writer, fixups); - try std.fs.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); + try Io.Dir.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); return std.process.cleanExit(); } @@ -398,7 +398,7 @@ fn transformationsToFixups( } fn parse(gpa: Allocator, file_path: []const u8) !Ast { - const source_code = std.fs.cwd().readFileAllocOptions( + const source_code = Io.Dir.cwd().readFileAllocOptions( file_path, gpa, .limited(std.math.maxInt(u32)), diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig index bb54289e3e..ae4ece2968 100644 --- a/lib/compiler/resinator/cli.zig +++ b/lib/compiler/resinator/cli.zig @@ -2003,7 +2003,7 @@ test "maybeAppendRC" { // Create the file so that it's found. In this scenario, .rc should not get // appended. - var file = try tmp.dir.createFile("foo", .{}); + var file = try tmp.dir.createFile(io, "foo", .{}); file.close(io); try options.maybeAppendRC(tmp.dir); try std.testing.expectEqualStrings("foo", options.input_source.filename); diff --git a/lib/compiler/resinator/compile.zig b/lib/compiler/resinator/compile.zig index 7dc77e5ee1..3e046a10c1 100644 --- a/lib/compiler/resinator/compile.zig +++ b/lib/compiler/resinator/compile.zig @@ -111,7 +111,7 @@ pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io try search_dirs.append(allocator, .{ .dir = root_dir, .path = try allocator.dupe(u8, root_dir_path) }); } } - // Re-open the passed in cwd since we want to be able to close it (std.fs.cwd() shouldn't be closed) + // Re-open the passed in cwd since we want to be able to close it (Io.Dir.cwd() shouldn't be closed) const cwd_dir = options.cwd.openDir(".", .{}) catch |err| { try options.diagnostics.append(.{ .err = .failed_to_open_cwd, @@ -406,7 +406,7 @@ pub const Compiler = struct { // `/test.bin` relative to include paths and instead only treats it as // an absolute path. 
if (std.fs.path.isAbsolute(path)) { - const file = try utils.openFileNotDir(std.fs.cwd(), path, .{}); + const file = try utils.openFileNotDir(Io.Dir.cwd(), path, .{}); errdefer file.close(io); if (self.dependencies) |dependencies| { diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index c726a06cf4..416abc2ab7 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -67,7 +67,7 @@ pub fn main() !void { }, else => |e| return e, }; - try options.maybeAppendRC(std.fs.cwd()); + try options.maybeAppendRC(Io.Dir.cwd()); if (!zig_integration) { // print any warnings/notes @@ -141,7 +141,7 @@ pub fn main() !void { if (!zig_integration) std.debug.unlockStderrWriter(); } - var comp = aro.Compilation.init(aro_arena, aro_arena, io, &diagnostics, std.fs.cwd()); + var comp = aro.Compilation.init(aro_arena, aro_arena, io, &diagnostics, Io.Dir.cwd()); defer comp.deinit(); var argv: std.ArrayList([]const u8) = .empty; @@ -196,7 +196,7 @@ pub fn main() !void { }; }, .filename => |input_filename| { - break :full_input std.fs.cwd().readFileAlloc(input_filename, gpa, .unlimited) catch |err| { + break :full_input Io.Dir.cwd().readFileAlloc(input_filename, gpa, .unlimited) catch |err| { try error_handler.emitMessage(gpa, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) }); std.process.exit(1); }; @@ -212,7 +212,7 @@ pub fn main() !void { try output_file.writeAll(full_input); }, .filename => |output_filename| { - try std.fs.cwd().writeFile(.{ .sub_path = output_filename, .data = full_input }); + try Io.Dir.cwd().writeFile(.{ .sub_path = output_filename, .data = full_input }); }, } return; @@ -277,7 +277,7 @@ pub fn main() !void { const output_buffered_stream = res_stream_writer.interface(); compile(gpa, io, final_input, output_buffered_stream, .{ - .cwd = std.fs.cwd(), + .cwd = Io.Dir.cwd(), .diagnostics = &diagnostics, .source_mappings = &mapping_results.mappings, .dependencies = maybe_dependencies, @@ -294,7 +294,7 @@ pub fn main() !void { .warn_instead_of_error_on_invalid_code_page = options.warn_instead_of_error_on_invalid_code_page, }) catch |err| switch (err) { error.ParseError, error.CompileError => { - try error_handler.emitDiagnostics(gpa, std.fs.cwd(), final_input, &diagnostics, mapping_results.mappings); + try error_handler.emitDiagnostics(gpa, Io.Dir.cwd(), final_input, &diagnostics, mapping_results.mappings); // Delete the output file on error res_stream.cleanupAfterError(io); std.process.exit(1); @@ -306,12 +306,12 @@ pub fn main() !void { // print any warnings/notes if (!zig_integration) { - diagnostics.renderToStdErr(std.fs.cwd(), final_input, mapping_results.mappings); + diagnostics.renderToStdErr(Io.Dir.cwd(), final_input, mapping_results.mappings); } // write the depfile if (options.depfile_path) |depfile_path| { - var depfile = std.fs.cwd().createFile(depfile_path, .{}) catch |err| { + var depfile = Io.Dir.cwd().createFile(io, depfile_path, .{}) catch |err| { try error_handler.emitMessage(gpa, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) }); std.process.exit(1); }; @@ -440,7 +440,7 @@ const IoStream = struct { // Delete the output file on error file.close(io); // Failing to delete is not really a big deal, so swallow any errors - std.fs.cwd().deleteFile(self.name) catch {}; + Io.Dir.cwd().deleteFile(self.name) catch {}; }, .stdio, .memory, .closed => return, } @@ -457,8 +457,8 @@ const IoStream = struct { switch (source) { .filename => |filename| return .{ .file = 
switch (io) { - .input => try openFileNotDir(std.fs.cwd(), filename, .{}), - .output => try std.fs.cwd().createFile(filename, .{}), + .input => try openFileNotDir(Io.Dir.cwd(), filename, .{}), + .output => try Io.Dir.cwd().createFile(io, filename, .{}), }, }, .stdio => |file| return .{ .stdio = file }, diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index e4efac28cd..87c4da9faa 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -40,7 +40,7 @@ pub fn main() !void { const zig_exe_path = argv.next().?; const global_cache_path = argv.next().?; - var lib_dir = try std.fs.cwd().openDir(zig_lib_directory, .{}); + var lib_dir = try Io.Dir.cwd().openDir(zig_lib_directory, .{}); defer lib_dir.close(io); var listen_port: u16 = 0; diff --git a/lib/compiler/translate-c/main.zig b/lib/compiler/translate-c/main.zig index 830c70e424..d0a873fd78 100644 --- a/lib/compiler/translate-c/main.zig +++ b/lib/compiler/translate-c/main.zig @@ -47,7 +47,7 @@ pub fn main() u8 { }; defer diagnostics.deinit(); - var comp = aro.Compilation.initDefault(gpa, arena, io, &diagnostics, std.fs.cwd()) catch |err| switch (err) { + var comp = aro.Compilation.initDefault(gpa, arena, io, &diagnostics, Io.Dir.cwd()) catch |err| switch (err) { error.OutOfMemory => { std.debug.print("ran out of memory initializing C compilation\n", .{}); if (fast_exit) process.exit(1); @@ -226,7 +226,7 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration const dep_file_name = try d.getDepFileName(source, out_buf[0..std.fs.max_name_bytes]); const file = if (dep_file_name) |path| - d.comp.cwd.createFile(path, .{}) catch |er| + d.comp.cwd.createFile(io, path, .{}) catch |er| return d.fatal("unable to create dependency file '{s}': {s}", .{ path, aro.Driver.errorDescription(er) }) else Io.File.stdout(); @@ -253,10 +253,10 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration if (d.output_name) |path| blk: { if (std.mem.eql(u8, path, "-")) break :blk; if (std.fs.path.dirname(path)) |dirname| { - std.fs.cwd().makePath(dirname) catch |err| + Io.Dir.cwd().makePath(dirname) catch |err| return d.fatal("failed to create path to '{s}': {s}", .{ path, aro.Driver.errorDescription(err) }); } - out_file = std.fs.cwd().createFile(path, .{}) catch |err| { + out_file = Io.Dir.cwd().createFile(io, path, .{}) catch |err| { return d.fatal("failed to create output file '{s}': {s}", .{ path, aro.Driver.errorDescription(err) }); }; close_out_file = true; diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 7bfdbb6449..cc2f70fd2f 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1702,13 +1702,13 @@ pub fn addCheckFile( pub fn truncateFile(b: *Build, dest_path: []const u8) (Io.Dir.MakeError || Io.Dir.StatFileError)!void { const io = b.graph.io; if (b.verbose) log.info("truncate {s}", .{dest_path}); - const cwd = fs.cwd(); - var src_file = cwd.createFile(dest_path, .{}) catch |err| switch (err) { + const cwd = Io.Dir.cwd(); + var src_file = cwd.createFile(io, dest_path, .{}) catch |err| switch (err) { error.FileNotFound => blk: { if (fs.path.dirname(dest_path)) |dirname| { try cwd.makePath(dirname); } - break :blk try cwd.createFile(dest_path, .{}); + break :blk try cwd.createFile(io, dest_path, .{}); }, else => |e| return e, }; @@ -1846,7 +1846,7 @@ pub fn runAllowFail( }; errdefer b.allocator.free(stdout); - const term = try child.wait(); + const term = try child.wait(io); switch (term) { .Exited => |code| { if (code != 0) { @@ -2193,7 +2193,7 @@ fn 
dependencyInner( const build_root: std.Build.Cache.Directory = .{ .path = build_root_string, - .handle = fs.cwd().openDir(build_root_string, .{}) catch |err| { + .handle = Io.Dir.cwd().openDir(build_root_string, .{}) catch |err| { std.debug.print("unable to open '{s}': {s}\n", .{ build_root_string, @errorName(err), }); diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 42459c033d..fdcb2ab714 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -508,7 +508,7 @@ pub const Manifest = struct { // and `want_shared_lock` is set, a shared lock might be sufficient, so we'll // open with a shared lock instead. while (true) { - if (self.cache.manifest_dir.createFile(&manifest_file_path, .{ + if (self.cache.manifest_dir.createFile(io, &manifest_file_path, .{ .read = true, .truncate = false, .lock = .exclusive, @@ -543,7 +543,7 @@ pub const Manifest = struct { return error.CacheCheckFailed; } - if (self.cache.manifest_dir.createFile(&manifest_file_path, .{ + if (self.cache.manifest_dir.createFile(io, &manifest_file_path, .{ .read = true, .truncate = false, .lock = .exclusive, @@ -873,7 +873,7 @@ pub const Manifest = struct { if (man.want_refresh_timestamp) { man.want_refresh_timestamp = false; - var file = man.cache.manifest_dir.createFile("timestamp", .{ + var file = man.cache.manifest_dir.createFile(io, "timestamp", .{ .read = true, .truncate = true, }) catch |err| switch (err) { @@ -1324,7 +1324,7 @@ fn hashFile(file: Io.File, bin_digest: *[Hasher.mac_length]u8) Io.File.PReadErro fn testGetCurrentFileTimestamp(io: Io, dir: Io.Dir) !Io.Timestamp { const test_out_file = "test-filetimestamp.tmp"; - var file = try dir.createFile(test_out_file, .{ + var file = try dir.createFile(io, test_out_file, .{ .read = true, .truncate = true, }); diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 56ef32e8d8..2ec1c0ef31 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -401,6 +401,9 @@ pub fn evalZigProcess( web_server: ?*Build.WebServer, gpa: Allocator, ) !?Path { + const b = s.owner; + const io = b.graph.io; + // If an error occurs, it's happened in this command: assert(s.result_failed_command == null); s.result_failed_command = try allocPrintCmd(gpa, null, argv); @@ -411,7 +414,7 @@ pub fn evalZigProcess( const result = zigProcessUpdate(s, zp, watch, web_server, gpa) catch |err| switch (err) { error.BrokenPipe => { // Process restart required. - const term = zp.child.wait() catch |e| { + const term = zp.child.wait(io) catch |e| { return s.fail("unable to wait for {s}: {t}", .{ argv[0], e }); }; _ = term; @@ -427,7 +430,7 @@ pub fn evalZigProcess( if (s.result_error_msgs.items.len > 0 and result == null) { // Crash detected. 
- const term = zp.child.wait() catch |e| { + const term = zp.child.wait(io) catch |e| { return s.fail("unable to wait for {s}: {t}", .{ argv[0], e }); }; s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0; @@ -439,9 +442,7 @@ pub fn evalZigProcess( return result; } assert(argv.len != 0); - const b = s.owner; const arena = b.allocator; - const io = b.graph.io; try handleChildProcUnsupported(s); try handleVerbose(s.owner, null, argv); @@ -478,7 +479,7 @@ pub fn evalZigProcess( zp.child.stdin.?.close(io); zp.child.stdin = null; - const term = zp.child.wait() catch |err| { + const term = zp.child.wait(io) catch |err| { return s.fail("unable to wait for {s}: {t}", .{ argv[0], err }); }; s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0; @@ -519,7 +520,7 @@ pub fn installFile(s: *Step, src_lazy_path: Build.LazyPath, dest_path: []const u pub fn installDir(s: *Step, dest_path: []const u8) !Io.Dir.MakePathStatus { const b = s.owner; try handleVerbose(b, null, &.{ "install", "-d", dest_path }); - return std.fs.cwd().makePathStatus(dest_path) catch |err| + return Io.Dir.cwd().makePathStatus(dest_path) catch |err| return s.fail("unable to create dir '{s}': {t}", .{ dest_path, err }); } @@ -895,7 +896,7 @@ pub fn addWatchInput(step: *Step, lazy_file: Build.LazyPath) Allocator.Error!voi try addWatchInputFromPath(step, .{ .root_dir = .{ .path = null, - .handle = std.fs.cwd(), + .handle = Io.Dir.cwd(), }, .sub_path = std.fs.path.dirname(path_string) orelse "", }, std.fs.path.basename(path_string)); @@ -920,7 +921,7 @@ pub fn addDirectoryWatchInput(step: *Step, lazy_directory: Build.LazyPath) Alloc try addDirectoryWatchInputFromPath(step, .{ .root_dir = .{ .path = null, - .handle = std.fs.cwd(), + .handle = Io.Dir.cwd(), }, .sub_path = path_string, }); diff --git a/lib/std/Build/Step/CheckFile.zig b/lib/std/Build/Step/CheckFile.zig index efeedc8b80..560b6ad050 100644 --- a/lib/std/Build/Step/CheckFile.zig +++ b/lib/std/Build/Step/CheckFile.zig @@ -3,7 +3,9 @@ //! TODO: generalize the code in std.testing.expectEqualStrings and make this //! CheckFile step produce those helpful diagnostics when there is not a match. 
const CheckFile = @This(); + const std = @import("std"); +const Io = std.Io; const Step = std.Build.Step; const fs = std.fs; const mem = std.mem; @@ -53,7 +55,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { try step.singleUnchangingWatchInput(check_file.source); const src_path = check_file.source.getPath2(b, step); - const contents = fs.cwd().readFileAlloc(src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| { return step.fail("unable to read '{s}': {s}", .{ src_path, @errorName(err), }); diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index df2419764d..ea7d9d99ff 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -1,5 +1,7 @@ -const std = @import("std"); const ConfigHeader = @This(); + +const std = @import("std"); +const Io = std.Io; const Step = std.Build.Step; const Allocator = std.mem.Allocator; const Writer = std.Io.Writer; @@ -205,7 +207,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .autoconf_undef, .autoconf_at => |file_source| { try bw.writeAll(c_generated_line); const src_path = file_source.getPath2(b, step); - const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { return step.fail("unable to read autoconf input file '{s}': {s}", .{ src_path, @errorName(err), }); @@ -219,7 +221,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .cmake => |file_source| { try bw.writeAll(c_generated_line); const src_path = file_source.getPath2(b, step); - const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { return step.fail("unable to read cmake input file '{s}': {s}", .{ src_path, @errorName(err), }); diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index 9f5665e93a..1416e0e916 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -1,12 +1,13 @@ -const std = @import("std"); +const Options = @This(); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const fs = std.fs; const Step = std.Build.Step; const GeneratedFile = std.Build.GeneratedFile; const LazyPath = std.Build.LazyPath; -const Options = @This(); - pub const base_id: Step.Id = .options; step: Step, @@ -542,11 +543,11 @@ test Options { .cache = .{ .io = io, .gpa = arena.allocator(), - .manifest_dir = std.fs.cwd(), + .manifest_dir = Io.Dir.cwd(), }, .zig_exe = "test", .env_map = std.process.EnvMap.init(arena.allocator()), - .global_cache_root = .{ .path = "test", .handle = std.fs.cwd() }, + .global_cache_root = .{ .path = "test", .handle = Io.Dir.cwd() }, .host = .{ .query = .{}, .result = try std.zig.system.resolveTargetQuery(io, .{}), @@ -557,8 +558,8 @@ test Options { var builder = try std.Build.create( &graph, - .{ .path = "test", .handle = std.fs.cwd() }, - .{ .path = "test", .handle = std.fs.cwd() }, + .{ .path = "test", .handle = Io.Dir.cwd() }, + .{ .path = "test", .handle = Io.Dir.cwd() }, &.{}, ); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 7c54c8048e..af6bc20438 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig 
@@ -1023,7 +1023,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, options, null); - const dep_file_dir = std.fs.cwd(); + const dep_file_dir = Io.Dir.cwd(); const dep_file_basename = dep_output_file.generated_file.getPath2(b, step); if (has_side_effects) try man.addDepFile(dep_file_dir, dep_file_basename) diff --git a/lib/std/Build/Watch.zig b/lib/std/Build/Watch.zig index ff06ad3ff3..f7ac47961e 100644 --- a/lib/std/Build/Watch.zig +++ b/lib/std/Build/Watch.zig @@ -122,7 +122,7 @@ const Os = switch (builtin.os.tag) { }) catch return error.NameTooLong; const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer); stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle); - try posix.name_to_handle_at(path.root_dir.handle.fd, adjusted_path, stack_ptr, mount_id, std.os.linux.AT.HANDLE_FID); + try posix.name_to_handle_at(path.root_dir.handle.handle, adjusted_path, stack_ptr, mount_id, std.os.linux.AT.HANDLE_FID); const stack_lfh: FileHandle = .{ .handle = stack_ptr }; return stack_lfh.clone(gpa); } @@ -222,7 +222,7 @@ const Os = switch (builtin.os.tag) { posix.fanotify_mark(fan_fd, .{ .ADD = true, .ONLYDIR = true, - }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| { + }, fan_mask, path.root_dir.handle.handle, path.subPathOrDot()) catch |err| { fatal("unable to watch {f}: {s}", .{ path, @errorName(err) }); }; } @@ -275,7 +275,7 @@ const Os = switch (builtin.os.tag) { posix.fanotify_mark(fan_fd, .{ .REMOVE = true, .ONLYDIR = true, - }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| switch (err) { + }, fan_mask, path.root_dir.handle.handle, path.subPathOrDot()) catch |err| switch (err) { error.FileNotFound => {}, // Expected, harmless. else => |e| std.log.warn("unable to unwatch '{f}': {s}", .{ path, @errorName(e) }), }; @@ -353,7 +353,7 @@ const Os = switch (builtin.os.tag) { // The following code is a drawn out NtCreateFile call. (mostly adapted from Io.Dir.makeOpenDirAccessMaskW) // It's necessary in order to get the specific flags that are required when calling ReadDirectoryChangesW. var dir_handle: windows.HANDLE = undefined; - const root_fd = path.root_dir.handle.fd; + const root_fd = path.root_dir.handle.handle; const sub_path = path.subPathOrDot(); const sub_path_w = try windows.sliceToPrefixedFileW(root_fd, sub_path); const path_len_bytes = std.math.cast(u16, sub_path_w.len * 2) orelse return error.NameTooLong; @@ -681,9 +681,9 @@ const Os = switch (builtin.os.tag) { if (!gop.found_existing) { const skip_open_dir = path.sub_path.len == 0; const dir_fd = if (skip_open_dir) - path.root_dir.handle.fd + path.root_dir.handle.handle else - posix.openat(path.root_dir.handle.fd, path.sub_path, dir_open_flags, 0) catch |err| { + posix.openat(path.root_dir.handle.handle, path.sub_path, dir_open_flags, 0) catch |err| { fatal("failed to open directory {f}: {s}", .{ path, @errorName(err) }); }; // Empirically the dir has to stay open or else no events are triggered. @@ -750,7 +750,7 @@ const Os = switch (builtin.os.tag) { // to access that data via the dir_fd field. 
const path = w.dir_table.keys()[i]; const dir_fd = if (path.sub_path.len == 0) - path.root_dir.handle.fd + path.root_dir.handle.handle else handles.items(.dir_fd)[i]; assert(dir_fd != -1); @@ -761,7 +761,7 @@ const Os = switch (builtin.os.tag) { const last_dir_fd = fd: { const last_path = w.dir_table.keys()[handles.len - 1]; const last_dir_fd = if (last_path.sub_path.len == 0) - last_path.root_dir.handle.fd + last_path.root_dir.handle.handle else handles.items(.dir_fd)[handles.len - 1]; assert(last_dir_fd != -1); diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index 8e71f648e2..a9b4775772 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -527,6 +527,14 @@ pub fn writerStreaming(file: File, io: Io, buffer: []u8) Writer { return .initStreaming(file, io, buffer); } +/// Equivalent to creating a streaming writer, writing `bytes`, and then flushing. +pub fn writeStreamingAll(file: File, io: Io, bytes: []const u8) Writer.Error!void { + var index: usize = 0; + while (index < bytes.len) { + index += try io.vtable.fileWriteStreaming(io.userdata, file, &.{}, &.{bytes[index..]}, 1); + } +} + pub const LockError = error{ SystemResources, FileLocksUnsupported, diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index fb76002201..124f886515 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2361,7 +2361,7 @@ fn dirCreateFilePosix( .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, - .SRCH => return error.ProcessNotFound, + .SRCH => return error.FileNotFound, // Linux when accessing procfs. .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, @@ -2670,7 +2670,7 @@ fn dirOpenFilePosix( .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, - .SRCH => return error.ProcessNotFound, + .SRCH => return error.FileNotFound, // Linux when opening procfs files. .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, @@ -3287,7 +3287,7 @@ fn dirRealPathPosix(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, out_b .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, - .SRCH => return error.ProcessNotFound, + .SRCH => return error.FileNotFound, // Linux when accessing procfs. .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, @@ -5548,7 +5548,6 @@ fn fileReadStreamingPosix(userdata: ?*anyopaque, file: File, data: [][]u8) File. switch (e) { .INVAL => |err| return errnoBug(err), .FAULT => |err| return errnoBug(err), - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .BADF => |err| { if (native_os == .wasi) return error.NotOpenForReading; // File operation on directory. @@ -5672,7 +5671,6 @@ fn fileReadPositionalPosix(userdata: ?*anyopaque, file: File, data: [][]u8, offs switch (e) { .INVAL => |err| return errnoBug(err), .FAULT => |err| return errnoBug(err), - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .BADF => |err| { if (native_os == .wasi) return error.NotOpenForReading; // File operation on directory. 
@@ -6312,7 +6310,6 @@ fn fileWriteStreaming( switch (e) { .INVAL => return error.InvalidArgument, .FAULT => |err| return errnoBug(err), - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => |err| return errnoBug(err), // `connect` was never called. diff --git a/lib/std/Io/Writer.zig b/lib/std/Io/Writer.zig index 3f25bc6a26..63ae6d93a0 100644 --- a/lib/std/Io/Writer.zig +++ b/lib/std/Io/Writer.zig @@ -2835,7 +2835,7 @@ test "discarding sendFile" { var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); var r_buffer: [256]u8 = undefined; var file_writer: File.Writer = .init(file, &r_buffer); @@ -2857,7 +2857,7 @@ test "allocating sendFile" { var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); var r_buffer: [2]u8 = undefined; var file_writer: File.Writer = .init(file, &r_buffer); @@ -2881,7 +2881,7 @@ test sendFileReading { var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); var r_buffer: [2]u8 = undefined; var file_writer: File.Writer = .init(file, &r_buffer); diff --git a/lib/std/Io/net/test.zig b/lib/std/Io/net/test.zig index 5818f6c3f7..c9ed0d3284 100644 --- a/lib/std/Io/net/test.zig +++ b/lib/std/Io/net/test.zig @@ -278,7 +278,7 @@ test "listen on a unix socket, send bytes, receive bytes" { defer testing.allocator.free(socket_path); const socket_addr = try net.UnixAddress.init(socket_path); - defer std.fs.cwd().deleteFile(socket_path) catch {}; + defer Io.Dir.cwd().deleteFile(socket_path) catch {}; var server = try socket_addr.listen(io, .{}); defer server.socket.close(io); diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index 9f21fe50e7..e731dc18d7 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -27,7 +27,7 @@ test "write a file, read it, then delete it" { random.bytes(data[0..]); const tmp_file_name = "temp_test_file.txt"; { - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); var file_writer = file.writer(&.{}); @@ -40,7 +40,7 @@ test "write a file, read it, then delete it" { { // Make sure the exclusive flag is honored. 
- try expectError(File.OpenError.PathAlreadyExists, tmp.dir.createFile(tmp_file_name, .{ .exclusive = true })); + try expectError(File.OpenError.PathAlreadyExists, tmp.dir.createFile(io, tmp_file_name, .{ .exclusive = true })); } { @@ -70,7 +70,7 @@ test "File seek ops" { const io = testing.io; const tmp_file_name = "temp_test_file.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); try file.writeAll(&([_]u8{0x55} ** 8192)); @@ -96,7 +96,7 @@ test "setEndPos" { defer tmp.cleanup(); const tmp_file_name = "temp_test_file.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); // Verify that the file size changes and the file offset is not moved @@ -121,7 +121,7 @@ test "updateTimes" { defer tmp.cleanup(); const tmp_file_name = "just_a_temporary_file.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{ .read = true }); + var file = try tmp.dir.createFile(io, tmp_file_name, .{ .read = true }); defer file.close(io); const stat_old = try file.stat(); diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 102bb59415..8453bc4c81 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -208,7 +208,7 @@ pub fn setName(self: Thread, io: Io, name: []const u8) SetNameError!void { var buf: [32]u8 = undefined; const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()}); - const file = try std.fs.cwd().openFile(io, path, .{ .mode = .write_only }); + const file = try Io.Dir.cwd().openFile(io, path, .{ .mode = .write_only }); defer file.close(io); try file.writeAll(name); @@ -325,7 +325,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co var threaded: std.Io.Threaded = .init_single_threaded; const io = threaded.ioBasic(); - const file = try std.fs.cwd().openFile(io, path, .{}); + const file = try Io.Dir.cwd().openFile(io, path, .{}); defer file.close(io); var file_reader = file.readerStreaming(io, &.{}); diff --git a/lib/std/crypto/Certificate/Bundle/macos.zig b/lib/std/crypto/Certificate/Bundle/macos.zig index 473505ac51..444d8da675 100644 --- a/lib/std/crypto/Certificate/Bundle/macos.zig +++ b/lib/std/crypto/Certificate/Bundle/macos.zig @@ -19,7 +19,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanM _ = io; // TODO migrate file system to use std.Io for (keychain_paths) |keychain_path| { - const bytes = std.fs.cwd().readFileAlloc(keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) { + const bytes = Io.Dir.cwd().readFileAlloc(keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) { error.StreamTooLong => return error.FileTooBig, else => |e| return e, }; diff --git a/lib/std/crypto/codecs/asn1/test.zig b/lib/std/crypto/codecs/asn1/test.zig index ff854fcbde..3dbedb9f80 100644 --- a/lib/std/crypto/codecs/asn1/test.zig +++ b/lib/std/crypto/codecs/asn1/test.zig @@ -73,8 +73,8 @@ test AllTypes { try std.testing.expectEqualSlices(u8, encoded, buf); // Use this to update test file. 
- // const dir = try std.fs.cwd().openDir("lib/std/crypto/asn1", .{}); - // var file = try dir.createFile(path, .{}); + // const dir = try Io.Dir.cwd().openDir("lib/std/crypto/asn1", .{}); + // var file = try dir.createFile(io, path, .{}); // defer file.close(io); // try file.writeAll(buf); } diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 97741ecb40..5df0eef2d5 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -60,7 +60,7 @@ pub const cpu_context = @import("debug/cpu_context.zig"); /// }; /// /// Only required if `can_unwind == true`. Unwinds a single stack frame, returning the frame's /// /// return address, or 0 if the end of the stack has been reached. -/// pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) SelfInfoError!usize; +/// pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, io: Io, context: *UnwindContext) SelfInfoError!usize; /// ``` pub const SelfInfo = if (@hasDecl(root, "debug") and @hasDecl(root.debug, "SelfInfo")) root.debug.SelfInfo @@ -558,9 +558,9 @@ pub fn defaultPanic( stderr.print("{s}\n", .{msg}) catch break :trace; if (@errorReturnTrace()) |t| if (t.index > 0) { - stderr.writeAll("error return context:\n") catch break :trace; + stderr.writeStreamingAll("error return context:\n") catch break :trace; writeStackTrace(t, stderr, tty_config) catch break :trace; - stderr.writeAll("\nstack trace:\n") catch break :trace; + stderr.writeStreamingAll("\nstack trace:\n") catch break :trace; }; writeCurrentStackTrace(.{ .first_address = first_trace_addr orelse @returnAddress(), @@ -575,7 +575,7 @@ pub fn defaultPanic( // A panic happened while trying to print a previous panic message. // We're still holding the mutex but that's fine as we're going to // call abort(). - File.stderr().writeAll("aborting due to recursive panic\n") catch {}; + File.stderr().writeStreamingAll("aborting due to recursive panic\n") catch {}; }, else => {}, // Panicked while printing the recursive panic message. } @@ -960,7 +960,7 @@ const StackIterator = union(enum) { }, }; - fn next(it: *StackIterator) Result { + fn next(it: *StackIterator, io: Io) Result { switch (it.*) { .ctx_first => |context_ptr| { // After the first frame, start actually unwinding. 
@@ -976,7 +976,7 @@ const StackIterator = union(enum) { .di => |*unwind_context| { const di = getSelfDebugInfo() catch unreachable; const di_gpa = getDebugInfoAllocator(); - const ret_addr = di.unwindFrame(di_gpa, unwind_context) catch |err| { + const ret_addr = di.unwindFrame(di_gpa, io, unwind_context) catch |err| { const pc = unwind_context.pc; const fp = unwind_context.getFp(); it.* = .{ .fp = fp }; @@ -1297,7 +1297,7 @@ test printLineFromFile { aw.clearRetainingCapacity(); } { - const file = try test_dir.dir.createFile("line_overlaps_page_boundary.zig", .{}); + const file = try test_dir.dir.createFile(io, "line_overlaps_page_boundary.zig", .{}); defer file.close(io); const path = try fs.path.join(gpa, &.{ test_dir_path, "line_overlaps_page_boundary.zig" }); defer gpa.free(path); @@ -1316,7 +1316,7 @@ test printLineFromFile { aw.clearRetainingCapacity(); } { - const file = try test_dir.dir.createFile("file_ends_on_page_boundary.zig", .{}); + const file = try test_dir.dir.createFile(io, "file_ends_on_page_boundary.zig", .{}); defer file.close(io); const path = try fs.path.join(gpa, &.{ test_dir_path, "file_ends_on_page_boundary.zig" }); defer gpa.free(path); @@ -1330,7 +1330,7 @@ test printLineFromFile { aw.clearRetainingCapacity(); } { - const file = try test_dir.dir.createFile("very_long_first_line_spanning_multiple_pages.zig", .{}); + const file = try test_dir.dir.createFile(io, "very_long_first_line_spanning_multiple_pages.zig", .{}); defer file.close(io); const path = try fs.path.join(gpa, &.{ test_dir_path, "very_long_first_line_spanning_multiple_pages.zig" }); defer gpa.free(path); @@ -1356,7 +1356,7 @@ test printLineFromFile { aw.clearRetainingCapacity(); } { - const file = try test_dir.dir.createFile("file_of_newlines.zig", .{}); + const file = try test_dir.dir.createFile(io, "file_of_newlines.zig", .{}); defer file.close(io); const path = try fs.path.join(gpa, &.{ test_dir_path, "file_of_newlines.zig" }); defer gpa.free(path); diff --git a/lib/std/debug/ElfFile.zig b/lib/std/debug/ElfFile.zig index a0f1188ade..203ee8effb 100644 --- a/lib/std/debug/ElfFile.zig +++ b/lib/std/debug/ElfFile.zig @@ -375,7 +375,7 @@ fn loadSeparateDebugFile( args: anytype, ) Allocator.Error!?[]align(std.heap.page_size_min) const u8 { const path = try std.fmt.allocPrint(arena, fmt, args); - const elf_file = std.fs.cwd().openFile(io, path, .{}) catch return null; + const elf_file = Io.Dir.cwd().openFile(io, path, .{}) catch return null; defer elf_file.close(io); const result = loadInner(arena, elf_file, opt_crc) catch |err| switch (err) { diff --git a/lib/std/debug/MachOFile.zig b/lib/std/debug/MachOFile.zig index ae904c0aec..18126a1c29 100644 --- a/lib/std/debug/MachOFile.zig +++ b/lib/std/debug/MachOFile.zig @@ -512,7 +512,7 @@ fn loadOFile(gpa: Allocator, io: Io, o_file_name: []const u8) !OFile { /// Uses `mmap` to map the file at `path` into memory. 
fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 { - const file = std.fs.cwd().openFile(io, path, .{}) catch |err| switch (err) { + const file = Io.Dir.cwd().openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return error.ReadFailed, }; diff --git a/lib/std/debug/SelfInfo/Elf.zig b/lib/std/debug/SelfInfo/Elf.zig index 213389bf04..6ed18bcb80 100644 --- a/lib/std/debug/SelfInfo/Elf.zig +++ b/lib/std/debug/SelfInfo/Elf.zig @@ -29,13 +29,12 @@ pub fn deinit(si: *SelfInfo, gpa: Allocator) void { } pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!std.debug.Symbol { - _ = io; const module = try si.findModule(gpa, address, .exclusive); defer si.rwlock.unlock(); const vaddr = address - module.load_offset; - const loaded_elf = try module.getLoadedElf(gpa); + const loaded_elf = try module.getLoadedElf(gpa, io); if (loaded_elf.file.dwarf) |*dwarf| { if (!loaded_elf.scanned_dwarf) { dwarf.open(gpa, native_endian) catch |err| switch (err) { @@ -180,7 +179,7 @@ comptime { } } pub const UnwindContext = Dwarf.SelfUnwinder; -pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error!usize { +pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, io: Io, context: *UnwindContext) Error!usize { comptime assert(can_unwind); { @@ -201,7 +200,7 @@ pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error @memset(si.unwind_cache.?, .empty); } - const unwind_sections = try module.getUnwindSections(gpa); + const unwind_sections = try module.getUnwindSections(gpa, io); for (unwind_sections) |*unwind| { if (context.computeRules(gpa, unwind, module.load_offset, null)) |entry| { entry.populate(si.unwind_cache.?); @@ -261,12 +260,12 @@ const Module = struct { }; /// Assumes we already hold an exclusive lock. - fn getUnwindSections(mod: *Module, gpa: Allocator) Error![]Dwarf.Unwind { - if (mod.unwind == null) mod.unwind = loadUnwindSections(mod, gpa); + fn getUnwindSections(mod: *Module, gpa: Allocator, io: Io) Error![]Dwarf.Unwind { + if (mod.unwind == null) mod.unwind = loadUnwindSections(mod, gpa, io); const us = &(mod.unwind.? catch |err| return err); return us.buf[0..us.len]; } - fn loadUnwindSections(mod: *Module, gpa: Allocator) Error!UnwindSections { + fn loadUnwindSections(mod: *Module, gpa: Allocator, io: Io) Error!UnwindSections { var us: UnwindSections = .{ .buf = undefined, .len = 0, @@ -284,7 +283,7 @@ const Module = struct { } else { // There is no `.eh_frame_hdr` section. There may still be an `.eh_frame` or `.debug_frame` // section, but we'll have to load the binary to get at it. - const loaded = try mod.getLoadedElf(gpa); + const loaded = try mod.getLoadedElf(gpa, io); // If both are present, we can't just pick one -- the info could be split between them. // `.debug_frame` is likely to be the more complete section, so we'll prioritize that one. 
if (loaded.file.debug_frame) |*debug_frame| { @@ -325,7 +324,7 @@ const Module = struct { } fn loadElf(mod: *Module, gpa: Allocator, io: Io) Error!LoadedElf { const load_result = if (mod.name.len > 0) res: { - var file = std.fs.cwd().openFile(io, mod.name, .{}) catch return error.MissingDebugInfo; + var file = Io.Dir.cwd().openFile(io, mod.name, .{}) catch return error.MissingDebugInfo; defer file.close(io); break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(mod.name)); } else res: { @@ -334,7 +333,7 @@ const Module = struct { else => return error.ReadFailed, }; defer gpa.free(path); - var file = std.fs.cwd().openFile(io, path, .{}) catch return error.MissingDebugInfo; + var file = Io.Dir.cwd().openFile(io, path, .{}) catch return error.MissingDebugInfo; defer file.close(io); break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(path)); }; diff --git a/lib/std/debug/SelfInfo/MachO.zig b/lib/std/debug/SelfInfo/MachO.zig index 15da616f3b..db8e5334e6 100644 --- a/lib/std/debug/SelfInfo/MachO.zig +++ b/lib/std/debug/SelfInfo/MachO.zig @@ -616,7 +616,7 @@ test { /// Uses `mmap` to map the file at `path` into memory. fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 { - const file = std.fs.cwd().openFile(io, path, .{}) catch |err| switch (err) { + const file = Io.Dir.cwd().openFile(io, path, .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return error.ReadFailed, }; diff --git a/lib/std/debug/SelfInfo/Windows.zig b/lib/std/debug/SelfInfo/Windows.zig index f0ac30cca2..9874efd497 100644 --- a/lib/std/debug/SelfInfo/Windows.zig +++ b/lib/std/debug/SelfInfo/Windows.zig @@ -432,7 +432,7 @@ const Module = struct { break :pdb null; }; const pdb_file_open_result = if (fs.path.isAbsolute(path)) res: { - break :res std.fs.cwd().openFile(io, path, .{}); + break :res Io.Dir.cwd().openFile(io, path, .{}); } else res: { const self_dir = std.process.executableDirPathAlloc(io, gpa) catch |err| switch (err) { error.OutOfMemory, error.Unexpected => |e| return e, @@ -441,7 +441,7 @@ const Module = struct { defer gpa.free(self_dir); const abs_path = try fs.path.join(gpa, &.{ self_dir, path }); defer gpa.free(abs_path); - break :res std.fs.cwd().openFile(io, abs_path, .{}); + break :res Io.Dir.cwd().openFile(io, abs_path, .{}); }; const pdb_file = pdb_file_open_result catch |err| switch (err) { error.FileNotFound, error.IsDir => break :pdb null, diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 7db177ad70..a1801d00d0 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -160,7 +160,7 @@ pub const ElfDynLib = struct { fn openPath(path: []const u8, io: Io) !Io.Dir { if (path.len == 0) return error.NotDir; var parts = std.mem.tokenizeScalar(u8, path, '/'); - var parent = if (path[0] == '/') try std.fs.cwd().openDir("/", .{}) else std.fs.cwd(); + var parent = if (path[0] == '/') try Io.Dir.cwd().openDir("/", .{}) else Io.Dir.cwd(); while (parts.next()) |part| { const child = try parent.openDir(part, .{}); parent.close(io); @@ -174,7 +174,7 @@ pub const ElfDynLib = struct { while (paths.next()) |p| { var dir = openPath(p) catch continue; defer dir.close(io); - const fd = posix.openat(dir.fd, file_name, .{ + const fd = posix.openat(dir.handle, file_name, .{ .ACCMODE = .RDONLY, .CLOEXEC = true, }, 0) catch continue; @@ -184,9 +184,9 @@ pub const ElfDynLib = struct { } fn resolveFromParent(io: Io, dir_path: []const u8, file_name: []const u8) ?posix.fd_t { - var 
dir = std.fs.cwd().openDir(dir_path, .{}) catch return null; + var dir = Io.Dir.cwd().openDir(dir_path, .{}) catch return null; defer dir.close(io); - return posix.openat(dir.fd, file_name, .{ + return posix.openat(dir.handle, file_name, .{ .ACCMODE = .RDONLY, .CLOEXEC = true, }, 0) catch null; diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index f4bdecf89d..aab86d40a6 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -46,7 +46,7 @@ const PathType = enum { // The final path may not actually exist which would cause realpath to fail. // So instead, we get the path of the dir and join it with the relative path. var fd_path_buf: [fs.max_path_bytes]u8 = undefined; - const dir_path = try std.os.getFdPath(dir.fd, &fd_path_buf); + const dir_path = try std.os.getFdPath(dir.handle, &fd_path_buf); return fs.path.joinZ(allocator, &.{ dir_path, relative_path }); } }.transform, @@ -55,7 +55,7 @@ const PathType = enum { // Any drive absolute path (C:\foo) can be converted into a UNC path by // using '127.0.0.1' as the server name and '$' as the share name. var fd_path_buf: [fs.max_path_bytes]u8 = undefined; - const dir_path = try std.os.getFdPath(dir.fd, &fd_path_buf); + const dir_path = try std.os.getFdPath(dir.handle, &fd_path_buf); const windows_path_type = windows.getWin32PathType(u8, dir_path); switch (windows_path_type) { .unc_absolute => return fs.path.joinZ(allocator, &.{ dir_path, relative_path }), @@ -256,7 +256,7 @@ fn testReadLinkW(allocator: mem.Allocator, dir: Dir, target_path: []const u8, sy const target_path_w = try std.unicode.wtf8ToWtf16LeAlloc(allocator, target_path); defer allocator.free(target_path_w); // Calling the W functions directly requires the path to be NT-prefixed - const symlink_path_w = try std.os.windows.sliceToPrefixedFileW(dir.fd, symlink_path); + const symlink_path_w = try std.os.windows.sliceToPrefixedFileW(dir.handle, symlink_path); const wtf16_buffer = try allocator.alloc(u16, target_path_w.len); defer allocator.free(wtf16_buffer); const actual = try dir.readLinkW(symlink_path_w.span(), wtf16_buffer); @@ -288,9 +288,11 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { var symlink: Dir = switch (builtin.target.os.tag) { .windows => windows_symlink: { - const sub_path_w = try windows.cStrToPrefixedFileW(ctx.dir.fd, "symlink"); + const sub_path_w = try windows.cStrToPrefixedFileW(ctx.dir.handle, "symlink"); - var handle: windows.HANDLE = undefined; + var result: Dir = .{ + .handle = undefined, + }; const path_len_bytes = @as(u16, @intCast(sub_path_w.span().len * 2)); var nt_name = windows.UNICODE_STRING{ @@ -300,26 +302,16 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { }; var attr: windows.OBJECT_ATTRIBUTES = .{ .Length = @sizeOf(windows.OBJECT_ATTRIBUTES), - .RootDirectory = if (fs.path.isAbsoluteWindowsW(sub_path_w.span())) null else ctx.dir.fd, - .Attributes = .{}, + .RootDirectory = if (fs.path.isAbsoluteWindowsW(sub_path_w.span())) null else ctx.dir.handle, + .Attributes = 0, .ObjectName = &nt_name, .SecurityDescriptor = null, .SecurityQualityOfService = null, }; var io_status_block: windows.IO_STATUS_BLOCK = undefined; const rc = windows.ntdll.NtCreateFile( - &handle, - .{ - .SPECIFIC = .{ .FILE_DIRECTORY = .{ - .READ_EA = true, - .TRAVERSE = true, - .READ_ATTRIBUTES = true, - } }, - .STANDARD = .{ - .RIGHTS = .READ, - .SYNCHRONIZE = true, - }, - }, + &result.handle, + windows.STANDARD_RIGHTS_READ | windows.FILE_READ_ATTRIBUTES | windows.FILE_READ_EA | windows.SYNCHRONIZE | windows.FILE_TRAVERSE, 
&attr, &io_status_block, null, @@ -337,7 +329,7 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { ); switch (rc) { - .SUCCESS => break :windows_symlink .{ .fd = handle }, + .SUCCESS => break :windows_symlink .{ .handle = result.handle }, else => return windows.unexpectedStatus(rc), } }, @@ -351,8 +343,8 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { .ACCMODE = .RDONLY, .CLOEXEC = true, }; - const fd = try posix.openatZ(ctx.dir.fd, &sub_path_c, flags, 0); - break :linux_symlink Dir{ .fd = fd }; + const fd = try posix.openatZ(ctx.dir.handle, &sub_path_c, flags, 0); + break :linux_symlink .{ .handle = fd }; }, else => unreachable, }; @@ -456,7 +448,7 @@ test "openDirAbsolute" { test "openDir cwd parent '..'" { const io = testing.io; - var dir = fs.cwd().openDir("..", .{}) catch |err| { + var dir = Io.Dir.cwd().openDir("..", .{}) catch |err| { if (native_os == .wasi and err == error.PermissionDenied) { return; // This is okay. WASI disallows escaping from the fs sandbox } @@ -534,7 +526,7 @@ test "Dir.Iterator" { defer tmp_dir.cleanup(); // First, create a couple of entries to iterate over. - const file = try tmp_dir.dir.createFile("some_file", .{}); + const file = try tmp_dir.dir.createFile(io, "some_file", .{}); file.close(io); try tmp_dir.dir.makeDir("some_dir"); @@ -570,7 +562,7 @@ test "Dir.Iterator many entries" { var buf: [4]u8 = undefined; // Enough to store "1024". while (i < num) : (i += 1) { const name = try std.fmt.bufPrint(&buf, "{}", .{i}); - const file = try tmp_dir.dir.createFile(name, .{}); + const file = try tmp_dir.dir.createFile(io, name, .{}); file.close(io); } @@ -603,7 +595,7 @@ test "Dir.Iterator twice" { defer tmp_dir.cleanup(); // First, create a couple of entries to iterate over. - const file = try tmp_dir.dir.createFile("some_file", .{}); + const file = try tmp_dir.dir.createFile(io, "some_file", .{}); file.close(io); try tmp_dir.dir.makeDir("some_dir"); @@ -638,7 +630,7 @@ test "Dir.Iterator reset" { defer tmp_dir.cleanup(); // First, create a couple of entries to iterate over.
- const file = try tmp_dir.dir.createFile("some_file", .{}); + const file = try tmp_dir.dir.createFile(io, "some_file", .{}); file.close(io); try tmp_dir.dir.makeDir("some_dir"); @@ -769,7 +761,7 @@ test "readFileAlloc" { var tmp_dir = tmpDir(.{}); defer tmp_dir.cleanup(); - var file = try tmp_dir.dir.createFile("test_file", .{ .read = true }); + var file = try tmp_dir.dir.createFile(io, "test_file", .{ .read = true }); defer file.close(io); const buf1 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024)); @@ -843,7 +835,7 @@ test "directory operations on files" { const test_file_name = try ctx.transformPath("test_file"); - var file = try ctx.dir.createFile(test_file_name, .{ .read = true }); + var file = try ctx.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); try testing.expectError(error.PathAlreadyExists, ctx.dir.makeDir(test_file_name)); @@ -876,7 +868,7 @@ test "file operations on directories" { try ctx.dir.makeDir(test_dir_name); - try testing.expectError(error.IsDir, ctx.dir.createFile(test_dir_name, .{})); + try testing.expectError(error.IsDir, ctx.dir.createFile(io, test_dir_name, .{})); try testing.expectError(error.IsDir, ctx.dir.deleteFile(test_dir_name)); switch (native_os) { .dragonfly, .netbsd => { @@ -969,7 +961,7 @@ test "Dir.rename files" { // Renaming files const test_file_name = try ctx.transformPath("test_file"); const renamed_test_file_name = try ctx.transformPath("test_file_renamed"); - var file = try ctx.dir.createFile(test_file_name, .{ .read = true }); + var file = try ctx.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); try ctx.dir.rename(test_file_name, renamed_test_file_name); @@ -983,7 +975,7 @@ test "Dir.rename files" { // Rename to existing file succeeds const existing_file_path = try ctx.transformPath("existing_file"); - var existing_file = try ctx.dir.createFile(existing_file_path, .{ .read = true }); + var existing_file = try ctx.dir.createFile(io, existing_file_path, .{ .read = true }); existing_file.close(io); try ctx.dir.rename(renamed_test_file_name, existing_file_path); @@ -1017,7 +1009,7 @@ test "Dir.rename directories" { var dir = try ctx.dir.openDir(test_dir_renamed_path, .{}); // Put a file in the directory - var file = try dir.createFile("test_file", .{ .read = true }); + var file = try dir.createFile(io, "test_file", .{ .read = true }); file.close(io); dir.close(io); @@ -1070,7 +1062,7 @@ test "Dir.rename directory onto non-empty dir" { try ctx.dir.makeDir(test_dir_path); var target_dir = try ctx.dir.makeOpenPath(target_dir_path, .{}); - var file = try target_dir.createFile("test_file", .{ .read = true }); + var file = try target_dir.createFile(io, "test_file", .{ .read = true }); file.close(io); target_dir.close(io); @@ -1094,7 +1086,7 @@ test "Dir.rename file <-> dir" { const test_file_path = try ctx.transformPath("test_file"); const test_dir_path = try ctx.transformPath("test_dir"); - var file = try ctx.dir.createFile(test_file_path, .{ .read = true }); + var file = try ctx.dir.createFile(io, test_file_path, .{ .read = true }); file.close(io); try ctx.dir.makeDir(test_dir_path); try testing.expectError(error.IsDir, ctx.dir.rename(test_file_path, test_dir_path)); @@ -1115,7 +1107,7 @@ test "rename" { // Renaming files const test_file_name = "test_file"; const renamed_test_file_name = "test_file_renamed"; - var file = try tmp_dir1.dir.createFile(test_file_name, .{ .read = true }); + var file = try tmp_dir1.dir.createFile(io, test_file_name, .{ .read = true }); 
file.close(io); try fs.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name); @@ -1149,7 +1141,7 @@ test "renameAbsolute" { // Renaming files const test_file_name = "test_file"; const renamed_test_file_name = "test_file_renamed"; - var file = try tmp_dir.dir.createFile(test_file_name, .{ .read = true }); + var file = try tmp_dir.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); try fs.renameAbsolute( try fs.path.join(allocator, &.{ base_path, test_file_name }), @@ -1454,7 +1446,7 @@ test "writev, readv" { var write_vecs: [2][]const u8 = .{ line1, line2 }; var read_vecs: [2][]u8 = .{ &buf2, &buf1 }; - var src_file = try tmp.dir.createFile("test.txt", .{ .read = true }); + var src_file = try tmp.dir.createFile(io, "test.txt", .{ .read = true }); defer src_file.close(io); var writer = src_file.writerStreaming(&.{}); @@ -1484,7 +1476,7 @@ test "pwritev, preadv" { var buf2: [line2.len]u8 = undefined; var read_vecs: [2][]u8 = .{ &buf2, &buf1 }; - var src_file = try tmp.dir.createFile("test.txt", .{ .read = true }); + var src_file = try tmp.dir.createFile(io, "test.txt", .{ .read = true }); defer src_file.close(io); var writer = src_file.writer(&.{}); @@ -1584,14 +1576,14 @@ test "sendfile" { const line2 = "second line\n"; var vecs = [_][]const u8{ line1, line2 }; - var src_file = try dir.createFile("sendfile1.txt", .{ .read = true }); + var src_file = try dir.createFile(io, "sendfile1.txt", .{ .read = true }); defer src_file.close(io); { var fw = src_file.writer(&.{}); try fw.interface.writeVecAll(&vecs); } - var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true }); + var dest_file = try dir.createFile(io, "sendfile2.txt", .{ .read = true }); defer dest_file.close(io); const header1 = "header1\n"; @@ -1627,12 +1619,12 @@ test "sendfile with buffered data" { var dir = try tmp.dir.openDir("os_test_tmp", .{}); defer dir.close(io); - var src_file = try dir.createFile("sendfile1.txt", .{ .read = true }); + var src_file = try dir.createFile(io, "sendfile1.txt", .{ .read = true }); defer src_file.close(io); try src_file.writeAll("AAAABBBB"); - var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true }); + var dest_file = try dir.createFile(io, "sendfile2.txt", .{ .read = true }); defer dest_file.close(io); var src_buffer: [32]u8 = undefined; @@ -1718,10 +1710,10 @@ test "open file with exclusive nonblocking lock twice" { const io = ctx.io; const filename = try ctx.transformPath("file_nonblocking_lock_test.txt"); - const file1 = try ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true }); + const file1 = try ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true }); defer file1.close(io); - const file2 = ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true }); + const file2 = ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true }); try testing.expectError(error.WouldBlock, file2); } }.impl); @@ -1735,10 +1727,10 @@ test "open file with shared and exclusive nonblocking lock" { const io = ctx.io; const filename = try ctx.transformPath("file_nonblocking_lock_test.txt"); - const file1 = try ctx.dir.createFile(filename, .{ .lock = .shared, .lock_nonblocking = true }); + const file1 = try ctx.dir.createFile(io, filename, .{ .lock = .shared, .lock_nonblocking = true }); defer file1.close(io); - const file2 = ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true }); + const file2 = ctx.dir.createFile(io, filename, .{ .lock 
= .exclusive, .lock_nonblocking = true }); try testing.expectError(error.WouldBlock, file2); } }.impl); @@ -1752,10 +1744,10 @@ test "open file with exclusive and shared nonblocking lock" { const io = ctx.io; const filename = try ctx.transformPath("file_nonblocking_lock_test.txt"); - const file1 = try ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true }); + const file1 = try ctx.dir.createFile(io, filename, .{ .lock = .exclusive, .lock_nonblocking = true }); defer file1.close(io); - const file2 = ctx.dir.createFile(filename, .{ .lock = .shared, .lock_nonblocking = true }); + const file2 = ctx.dir.createFile(io, filename, .{ .lock = .shared, .lock_nonblocking = true }); try testing.expectError(error.WouldBlock, file2); } }.impl); @@ -1769,13 +1761,13 @@ test "open file with exclusive lock twice, make sure second lock waits" { const io = ctx.io; const filename = try ctx.transformPath("file_lock_test.txt"); - const file = try ctx.dir.createFile(filename, .{ .lock = .exclusive }); + const file = try ctx.dir.createFile(io, filename, .{ .lock = .exclusive }); errdefer file.close(io); const S = struct { fn checkFn(dir: *Io.Dir, path: []const u8, started: *std.Thread.ResetEvent, locked: *std.Thread.ResetEvent) !void { started.set(); - const file1 = try dir.createFile(path, .{ .lock = .exclusive }); + const file1 = try dir.createFile(io, path, .{ .lock = .exclusive }); locked.set(); file1.close(io); @@ -1847,13 +1839,13 @@ test "read from locked file" { const filename = try ctx.transformPath("read_lock_file_test.txt"); { - const f = try ctx.dir.createFile(filename, .{ .read = true }); + const f = try ctx.dir.createFile(io, filename, .{ .read = true }); defer f.close(io); var buffer: [1]u8 = undefined; _ = try f.read(&buffer); } { - const f = try ctx.dir.createFile(filename, .{ + const f = try ctx.dir.createFile(io, filename, .{ .read = true, .lock = .exclusive, }); @@ -2037,7 +2029,7 @@ test "'.' and '..' 
in Io.Dir functions" { var created_subdir = try ctx.dir.openDir(subdir_path, .{}); created_subdir.close(io); - const created_file = try ctx.dir.createFile(file_path, .{}); + const created_file = try ctx.dir.createFile(io, file_path, .{}); created_file.close(io); try ctx.dir.access(file_path, .{}); @@ -2103,7 +2095,7 @@ test "chmod" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - const file = try tmp.dir.createFile("test_file", .{ .mode = 0o600 }); + const file = try tmp.dir.createFile(io, "test_file", .{ .mode = 0o600 }); defer file.close(io); try testing.expectEqual(@as(File.Mode, 0o600), (try file.stat()).mode & 0o7777); @@ -2127,7 +2119,7 @@ test "chown" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - const file = try tmp.dir.createFile("test_file", .{}); + const file = try tmp.dir.createFile(io, "test_file", .{}); defer file.close(io); try file.chown(null, null); @@ -2228,7 +2220,7 @@ test "read file non vectored" { const contents = "hello, world!\n"; - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); { var file_writer: File.Writer = .init(file, &.{}); @@ -2260,7 +2252,7 @@ test "seek keeping partial buffer" { const contents = "0123456789"; - const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true }); + const file = try tmp_dir.dir.createFile(io, "input.txt", .{ .read = true }); defer file.close(io); { var file_writer: File.Writer = .init(file, &.{}); @@ -2321,7 +2313,7 @@ test "seekTo flushes buffered data" { const contents = "data"; - const file = try tmp.dir.createFile("seek.bin", .{ .read = true }); + const file = try tmp.dir.createFile(io, "seek.bin", .{ .read = true }); defer file.close(io); { var buf: [16]u8 = undefined; @@ -2350,7 +2342,7 @@ test "File.Writer sendfile with buffered contents" { try tmp_dir.dir.writeFile(.{ .sub_path = "a", .data = "bcd" }); const in = try tmp_dir.dir.openFile(io, "a", .{}); defer in.close(io); - const out = try tmp_dir.dir.createFile("b", .{}); + const out = try tmp_dir.dir.createFile(io, "b", .{}); defer out.close(io); var in_buf: [2]u8 = undefined; @@ -2397,7 +2389,7 @@ test "readlinkat" { // create a symbolic link if (native_os == .windows) { std.os.windows.CreateSymbolicLink( - tmp.dir.fd, + tmp.dir.handle, &[_]u16{ 'l', 'i', 'n', 'k' }, &[_:0]u16{ 'f', 'i', 'l', 'e', '.', 't', 'x', 't' }, false, @@ -2407,7 +2399,7 @@ test "readlinkat" { else => return err, }; } else { - try posix.symlinkat("file.txt", tmp.dir.fd, "link"); + try posix.symlinkat("file.txt", tmp.dir.handle, "link"); } // read the link diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig index c7d3f35d40..0972a302da 100644 --- a/lib/std/os/linux/IoUring.zig +++ b/lib/std/os/linux/IoUring.zig @@ -1991,7 +1991,7 @@ test "writev/fsync/readv" { defer tmp.cleanup(); const path = "test_io_uring_writev_fsync_readv"; - const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true }); + const file = try tmp.dir.createFile(io, path, .{ .read = true, .truncate = true }); defer file.close(io); const fd = file.handle; @@ -2062,7 +2062,7 @@ test "write/read" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); const path = "test_io_uring_write_read"; - const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true }); + const file = try tmp.dir.createFile(io, path, .{ .read = true, .truncate = true }); defer file.close(io); const fd = file.handle; @@ -2110,12 +2110,12 @@ test "splice/read" { var tmp = 
std.testing.tmpDir(.{}); const path_src = "test_io_uring_splice_src"; - const file_src = try tmp.dir.createFile(path_src, .{ .read = true, .truncate = true }); + const file_src = try tmp.dir.createFile(io, path_src, .{ .read = true, .truncate = true }); defer file_src.close(io); const fd_src = file_src.handle; const path_dst = "test_io_uring_splice_dst"; - const file_dst = try tmp.dir.createFile(path_dst, .{ .read = true, .truncate = true }); + const file_dst = try tmp.dir.createFile(io, path_dst, .{ .read = true, .truncate = true }); defer file_dst.close(io); const fd_dst = file_dst.handle; @@ -2185,7 +2185,7 @@ test "write_fixed/read_fixed" { defer tmp.cleanup(); const path = "test_io_uring_write_read_fixed"; - const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true }); + const file = try tmp.dir.createFile(io, path, .{ .read = true, .truncate = true }); defer file.close(io); const fd = file.handle; @@ -2306,7 +2306,7 @@ test "close" { defer tmp.cleanup(); const path = "test_io_uring_close"; - const file = try tmp.dir.createFile(path, .{}); + const file = try tmp.dir.createFile(io, path, .{}); errdefer file.close(io); const sqe_close = try ring.close(0x44444444, file.handle); @@ -2652,7 +2652,7 @@ test "fallocate" { defer tmp.cleanup(); const path = "test_io_uring_fallocate"; - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); try testing.expectEqual(@as(u64, 0), (try file.stat()).size); @@ -2699,7 +2699,7 @@ test "statx" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); const path = "test_io_uring_statx"; - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); try testing.expectEqual(@as(u64, 0), (try file.stat()).size); @@ -2969,7 +2969,7 @@ test "renameat" { // Write old file with data - const old_file = try tmp.dir.createFile(old_path, .{ .truncate = true, .mode = 0o666 }); + const old_file = try tmp.dir.createFile(io, old_path, .{ .truncate = true, .mode = 0o666 }); defer old_file.close(io); try old_file.writeAll("hello"); @@ -3028,7 +3028,7 @@ test "unlinkat" { // Write old file with data - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); // Submit unlinkat @@ -3125,7 +3125,7 @@ test "symlinkat" { const path = "test_io_uring_symlinkat"; const link_path = "test_io_uring_symlinkat_link"; - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); // Submit symlinkat @@ -3177,7 +3177,7 @@ test "linkat" { // Write file with data - const first_file = try tmp.dir.createFile(first_path, .{ .truncate = true, .mode = 0o666 }); + const first_file = try tmp.dir.createFile(io, first_path, .{ .truncate = true, .mode = 0o666 }); defer first_file.close(io); try first_file.writeAll("hello"); diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig index 39606ddfac..d7cfb4e138 100644 --- a/lib/std/os/linux/test.zig +++ b/lib/std/os/linux/test.zig @@ -18,7 +18,7 @@ test "fallocate" { defer tmp.cleanup(); const path = "test_fallocate"; - const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 }); + const 
file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); try expect((try file.stat()).size == 0); @@ -85,7 +85,7 @@ test "statx" { defer tmp.cleanup(); const tmp_file_name = "just_a_temporary_file.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); var buf: linux.Statx = undefined; @@ -121,7 +121,7 @@ test "fadvise" { defer tmp.cleanup(); const tmp_file_name = "temp_posix_fadvise.txt"; - var file = try tmp.dir.createFile(tmp_file_name, .{}); + var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); var buf: [2048]u8 = undefined; diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig index 6b5b678b20..ba5282256f 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -4639,8 +4639,8 @@ pub fn wToPrefixedFileW(dir: ?HANDLE, path: [:0]const u16) Wtf16ToPrefixedFileWE break :path_to_get path; } // We can also skip GetFinalPathNameByHandle if the handle matches - // the handle returned by fs.cwd() - if (dir.? == std.fs.cwd().fd) { + // the handle returned by Io.Dir.cwd() + if (dir.? == Io.Dir.cwd().handle) { break :path_to_get path; } // At this point, we know we have a relative path that had too many diff --git a/lib/std/posix.zig b/lib/std/posix.zig index 392987ec50..f4aa970413 100644 --- a/lib/std/posix.zig +++ b/lib/std/posix.zig @@ -15,15 +15,16 @@ //! deal with the exception. const builtin = @import("builtin"); -const root = @import("root"); +const native_os = builtin.os.tag; + const std = @import("std.zig"); +const Io = std.Io; const mem = std.mem; const fs = std.fs; -const max_path_bytes = fs.max_path_bytes; +const max_path_bytes = std.fs.max_path_bytes; const maxInt = std.math.maxInt; const cast = std.math.cast; const assert = std.debug.assert; -const native_os = builtin.os.tag; const page_size_min = std.heap.page_size_min; test { @@ -797,7 +798,6 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize { .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .CANCELED => return error.Canceled, .BADF => return error.NotOpenForReading, // Can be a race condition. @@ -917,7 +917,6 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize { .INTR => continue, .INVAL => return error.InvalidArgument, .FAULT => unreachable, - .SRCH => return error.ProcessNotFound, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. @@ -985,7 +984,8 @@ pub fn openZ(file_path: [*:0]const u8, flags: O, perm: mode_t) OpenError!fd_t { .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, - .SRCH => return error.ProcessNotFound, + // Can happen on Linux when opening procfs files.
+ .SRCH => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, @@ -1560,7 +1560,7 @@ pub fn mkdirZ(dir_path: [*:0]const u8, mode: mode_t) MakeDirError!void { pub fn mkdirW(dir_path_w: []const u16, mode: mode_t) MakeDirError!void { _ = mode; const sub_dir_handle = windows.OpenFile(dir_path_w, .{ - .dir = fs.cwd().fd, + .dir = Io.Dir.cwd().handle, .access_mask = .{ .STANDARD = .{ .SYNCHRONIZE = true }, .GENERIC = .{ .READ = true }, diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 0071a72a26..dc63be6e14 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -148,7 +148,7 @@ test "linkat with different directories" { try tmp.dir.writeFile(.{ .sub_path = target_name, .data = "example" }); // Test 1: link from file in subdir back up to target in parent directory - try posix.linkat(tmp.dir.fd, target_name, subdir.fd, link_name, 0); + try posix.linkat(tmp.dir.handle, target_name, subdir.handle, link_name, 0); const efd = try tmp.dir.openFile(io, target_name, .{}); defer efd.close(io); @@ -164,7 +164,7 @@ test "linkat with different directories" { } // Test 2: remove link - try posix.unlinkat(subdir.fd, link_name, 0); + try posix.unlinkat(subdir.handle, link_name, 0); _, const elink = try getLinkInfo(efd.handle); try testing.expectEqual(@as(posix.nlink_t, 1), elink); } @@ -373,7 +373,7 @@ test "mmap" { // Create a file used for testing mmap() calls with a file descriptor { - const file = try tmp.dir.createFile(test_out_file, .{}); + const file = try tmp.dir.createFile(io, test_out_file, .{}); defer file.close(io); var stream = file.writer(&.{}); @@ -444,7 +444,7 @@ test "fcntl" { const test_out_file = "os_tmp_test"; - const file = try tmp.dir.createFile(test_out_file, .{}); + const file = try tmp.dir.createFile(io, test_out_file, .{}); defer file.close(io); // Note: The test assumes createFile opens the file with CLOEXEC @@ -495,7 +495,7 @@ test "fsync" { defer tmp.cleanup(); const test_out_file = "os_tmp_test"; - const file = try tmp.dir.createFile(test_out_file, .{}); + const file = try tmp.dir.createFile(io, test_out_file, .{}); defer file.close(io); try posix.fsync(file.handle); @@ -617,7 +617,7 @@ test "dup & dup2" { defer tmp.cleanup(); { - var file = try tmp.dir.createFile("os_dup_test", .{}); + var file = try tmp.dir.createFile(io, "os_dup_test", .{}); defer file.close(io); var duped = Io.File{ .handle = try posix.dup(file.handle) }; @@ -659,7 +659,7 @@ test "writev longer than IOV_MAX" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("pwritev", .{}); + var file = try tmp.dir.createFile(io, "pwritev", .{}); defer file.close(io); const iovecs = [_]posix.iovec_const{.{ .base = "a", .len = 1 }} ** (posix.IOV_MAX + 1); @@ -684,7 +684,7 @@ test "POSIX file locking with fcntl" { defer tmp.cleanup(); // Create a temporary lock file - var file = try tmp.dir.createFile("lock", .{ .read = true }); + var file = try tmp.dir.createFile(io, "lock", .{ .read = true }); defer file.close(io); try file.setEndPos(2); const fd = file.handle; @@ -881,7 +881,7 @@ test "isatty" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("foo", .{}); + var file = try tmp.dir.createFile(io, "foo", .{}); defer file.close(io); try expectEqual(posix.isatty(file.handle), false); @@ -893,7 +893,7 @@ test "pread with empty buffer" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("pread_empty", .{ .read = true }); + var 
file = try tmp.dir.createFile(io, "pread_empty", .{ .read = true }); defer file.close(io); const bytes = try a.alloc(u8, 0); @@ -909,7 +909,7 @@ test "write with empty buffer" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("write_empty", .{}); + var file = try tmp.dir.createFile(io, "write_empty", .{}); defer file.close(io); const bytes = try a.alloc(u8, 0); @@ -925,7 +925,7 @@ test "pwrite with empty buffer" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var file = try tmp.dir.createFile("pwrite_empty", .{}); + var file = try tmp.dir.createFile(io, "pwrite_empty", .{}); defer file.close(io); const bytes = try a.alloc(u8, 0); @@ -965,35 +965,35 @@ test "fchmodat smoke test" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - try expectError(error.FileNotFound, posix.fchmodat(tmp.dir.fd, "regfile", 0o666, 0)); + try expectError(error.FileNotFound, posix.fchmodat(tmp.dir.handle, "regfile", 0o666, 0)); const fd = try posix.openat( - tmp.dir.fd, + tmp.dir.handle, "regfile", .{ .ACCMODE = .WRONLY, .CREAT = true, .EXCL = true, .TRUNC = true }, 0o644, ); posix.close(fd); - try posix.symlinkat("regfile", tmp.dir.fd, "symlink"); - const sym_mode = try getFileMode(tmp.dir.fd, "symlink"); + try posix.symlinkat("regfile", tmp.dir.handle, "symlink"); + const sym_mode = try getFileMode(tmp.dir.handle, "symlink"); - try posix.fchmodat(tmp.dir.fd, "regfile", 0o640, 0); - try expectMode(tmp.dir.fd, "regfile", 0o640); - try posix.fchmodat(tmp.dir.fd, "regfile", 0o600, posix.AT.SYMLINK_NOFOLLOW); - try expectMode(tmp.dir.fd, "regfile", 0o600); + try posix.fchmodat(tmp.dir.handle, "regfile", 0o640, 0); + try expectMode(tmp.dir.handle, "regfile", 0o640); + try posix.fchmodat(tmp.dir.handle, "regfile", 0o600, posix.AT.SYMLINK_NOFOLLOW); + try expectMode(tmp.dir.handle, "regfile", 0o600); - try posix.fchmodat(tmp.dir.fd, "symlink", 0o640, 0); - try expectMode(tmp.dir.fd, "regfile", 0o640); - try expectMode(tmp.dir.fd, "symlink", sym_mode); + try posix.fchmodat(tmp.dir.handle, "symlink", 0o640, 0); + try expectMode(tmp.dir.handle, "regfile", 0o640); + try expectMode(tmp.dir.handle, "symlink", sym_mode); var test_link = true; - posix.fchmodat(tmp.dir.fd, "symlink", 0o600, posix.AT.SYMLINK_NOFOLLOW) catch |err| switch (err) { + posix.fchmodat(tmp.dir.handle, "symlink", 0o600, posix.AT.SYMLINK_NOFOLLOW) catch |err| switch (err) { error.OperationNotSupported => test_link = false, else => |e| return e, }; if (test_link) - try expectMode(tmp.dir.fd, "symlink", 0o600); - try expectMode(tmp.dir.fd, "regfile", 0o640); + try expectMode(tmp.dir.handle, "symlink", 0o600); + try expectMode(tmp.dir.handle, "regfile", 0o640); } const CommonOpenFlags = packed struct { diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index b774303901..33faeef061 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -677,7 +677,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); if (self.cwd_dir) |cwd| { - posix.fchdir(cwd.fd) catch |err| forkChildErrReport(err_pipe[1], err); + posix.fchdir(cwd.handle) catch |err| forkChildErrReport(err_pipe[1], err); } else if (self.cwd) |cwd| { posix.chdir(cwd) catch |err| forkChildErrReport(err_pipe[1], err); } diff --git a/lib/std/std.zig b/lib/std/std.zig index 106811859b..1690c0575c 100644 --- a/lib/std/std.zig +++ b/lib/std/std.zig @@ -114,7 +114,7 @@ pub const options: Options = if (@hasDecl(root, 
"std_options")) root.std_options pub const Options = struct { enable_segfault_handler: bool = debug.default_enable_segfault_handler, - /// Function used to implement `std.fs.cwd` for WASI. + /// Function used to implement `std.Io.Dir.cwd` for WASI. wasiCwd: fn () os.wasi.fd_t = os.defaultWasiCwd, /// The current log level. diff --git a/lib/std/tar.zig b/lib/std/tar.zig index d861314fec..8a0bbb342f 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -610,7 +610,7 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp } }, .file => { - if (createDirAndFile(dir, file_name, fileMode(file.mode, options))) |fs_file| { + if (createDirAndFile(io, dir, file_name, fileMode(file.mode, options))) |fs_file| { defer fs_file.close(io); var file_writer = fs_file.writer(&file_contents_buffer); try it.streamRemaining(file, &file_writer.interface); @@ -638,12 +638,12 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp } } -fn createDirAndFile(dir: Io.Dir, file_name: []const u8, mode: Io.File.Mode) !Io.File { - const fs_file = dir.createFile(file_name, .{ .exclusive = true, .mode = mode }) catch |err| { +fn createDirAndFile(io: Io, dir: Io.Dir, file_name: []const u8, mode: Io.File.Mode) !Io.File { + const fs_file = dir.createFile(io, file_name, .{ .exclusive = true, .mode = mode }) catch |err| { if (err == error.FileNotFound) { if (std.fs.path.dirname(file_name)) |dir_name| { try dir.makePath(dir_name); - return try dir.createFile(file_name, .{ .exclusive = true, .mode = mode }); + return try dir.createFile(io, file_name, .{ .exclusive = true, .mode = mode }); } } return err; @@ -880,9 +880,9 @@ test "create file and symlink" { var root = testing.tmpDir(.{}); defer root.cleanup(); - var file = try createDirAndFile(root.dir, "file1", default_mode); + var file = try createDirAndFile(io, root.dir, "file1", default_mode); file.close(io); - file = try createDirAndFile(root.dir, "a/b/c/file2", default_mode); + file = try createDirAndFile(io, root.dir, "a/b/c/file2", default_mode); file.close(io); createDirAndSymlink(root.dir, "a/b/c/file2", "symlink1") catch |err| { @@ -894,7 +894,7 @@ test "create file and symlink" { // Danglink symlnik, file created later try createDirAndSymlink(root.dir, "../../../g/h/i/file4", "j/k/l/symlink3"); - file = try createDirAndFile(root.dir, "g/h/i/file4", default_mode); + file = try createDirAndFile(io, root.dir, "g/h/i/file4", default_mode); file.close(io); } diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 19038543a6..99d67ec132 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -628,7 +628,7 @@ pub fn tmpDir(opts: Io.Dir.OpenOptions) TmpDir { var sub_path: [TmpDir.sub_path_len]u8 = undefined; _ = std.fs.base64_encoder.encode(&sub_path, &random_bytes); - const cwd = std.fs.cwd(); + const cwd = Io.Dir.cwd(); var cache_dir = cwd.makeOpenPath(".zig-cache", .{}) catch @panic("unable to make tmp dir for testing: unable to make and open .zig-cache dir"); defer cache_dir.close(io); diff --git a/lib/std/zig/LibCInstallation.zig b/lib/std/zig/LibCInstallation.zig index c8bde2ab02..80317850df 100644 --- a/lib/std/zig/LibCInstallation.zig +++ b/lib/std/zig/LibCInstallation.zig @@ -57,7 +57,7 @@ pub fn parse( } } - const contents = try std.fs.cwd().readFileAlloc(libc_file, allocator, .limited(std.math.maxInt(usize))); + const contents = try Io.Dir.cwd().readFileAlloc(libc_file, allocator, .limited(std.math.maxInt(usize))); defer allocator.free(contents); var it = std.mem.tokenizeScalar(u8, contents, '\n'); @@ 
-337,7 +337,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F // search in reverse order const search_path_untrimmed = search_paths.items[search_paths.items.len - path_i - 1]; const search_path = std.mem.trimStart(u8, search_path_untrimmed, " "); - var search_dir = fs.cwd().openDir(search_path, .{}) catch |err| switch (err) { + var search_dir = Io.Dir.cwd().openDir(search_path, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, @@ -392,7 +392,7 @@ fn findNativeIncludeDirWindows( result_buf.shrinkAndFree(0); try result_buf.print("{s}\\Include\\{s}\\ucrt", .{ install.path, install.version }); - var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { + var dir = Io.Dir.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, @@ -440,7 +440,7 @@ fn findNativeCrtDirWindows( result_buf.shrinkAndFree(0); try result_buf.print("{s}\\Lib\\{s}\\ucrt\\{s}", .{ install.path, install.version, arch_sub_dir }); - var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { + var dir = Io.Dir.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, @@ -508,7 +508,7 @@ fn findNativeKernel32LibDir( result_buf.shrinkAndFree(0); try result_buf.print("{s}\\Lib\\{s}\\um\\{s}", .{ install.path, install.version, arch_sub_dir }); - var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { + var dir = Io.Dir.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, @@ -544,7 +544,7 @@ fn findNativeMsvcIncludeDir( const dir_path = try fs.path.join(allocator, &[_][]const u8{ up2, "include" }); errdefer allocator.free(dir_path); - var dir = fs.cwd().openDir(dir_path, .{}) catch |err| switch (err) { + var dir = Io.Dir.cwd().openDir(dir_path, .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.NoDevice, diff --git a/lib/std/zig/WindowsSdk.zig b/lib/std/zig/WindowsSdk.zig index 6b6e4fa9f7..dca474020a 100644 --- a/lib/std/zig/WindowsSdk.zig +++ b/lib/std/zig/WindowsSdk.zig @@ -828,7 +828,7 @@ const MsvcLibDir = struct { try lib_dir_buf.appendSlice("VC\\Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"); var default_tools_version_buf: [512]u8 = undefined; - const default_tools_version_contents = std.fs.cwd().readFile(lib_dir_buf.items, &default_tools_version_buf) catch { + const default_tools_version_contents = Io.Dir.cwd().readFile(lib_dir_buf.items, &default_tools_version_buf) catch { return error.PathNotFound; }; var tokenizer = std.mem.tokenizeAny(u8, default_tools_version_contents, " \r\n"); diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index d3bafc16f2..9fa0546c3b 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -1,11 +1,12 @@ const builtin = @import("builtin"); +const native_endian = builtin.cpu.arch.endian(); + const std = @import("../std.zig"); const mem = std.mem; const elf = std.elf; const fs = std.fs; const assert = std.debug.assert; const Target = std.Target; -const native_endian = builtin.cpu.arch.endian(); const posix = std.posix; const Io = std.Io; @@ -69,7 +70,7 @@ pub fn getExternalExecutor( if (os_match and cpu_ok) native: { if (options.link_libc) { if (candidate.dynamic_linker.get()) |candidate_dl| { - fs.cwd().access(candidate_dl, .{}) catch { + Io.Dir.cwd().access(candidate_dl, .{}) catch { bad_result = .{ .bad_dl = candidate_dl }; break :native; }; @@ 
-710,6 +711,7 @@ fn abiAndDynamicLinkerFromFile( error.SystemResources, error.FileSystem, error.SymLinkLoop, + error.Canceled, error.Unexpected, => |e| return e, }; diff --git a/lib/std/zig/system/darwin/macos.zig b/lib/std/zig/system/darwin/macos.zig index 9bb4e34e3b..4ff6846a09 100644 --- a/lib/std/zig/system/darwin/macos.zig +++ b/lib/std/zig/system/darwin/macos.zig @@ -1,9 +1,10 @@ -const std = @import("std"); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const mem = std.mem; const testing = std.testing; - const Target = std.Target; /// Detect macOS version. @@ -54,7 +55,7 @@ pub fn detect(target_os: *Target.Os) !void { // approx. 4 times historical file size var buf: [2048]u8 = undefined; - if (std.fs.cwd().readFile(path, &buf)) |bytes| { + if (Io.Dir.cwd().readFile(path, &buf)) |bytes| { if (parseSystemVersion(bytes)) |ver| { // never return non-canonical `10.(16+)` if (!(ver.major == 10 and ver.minor >= 16)) { diff --git a/lib/std/zip.zig b/lib/std/zip.zig index 0ca77c98a1..9d08847092 100644 --- a/lib/std/zip.zig +++ b/lib/std/zip.zig @@ -564,9 +564,9 @@ pub const Iterator = struct { defer parent_dir.close(io); const basename = std.fs.path.basename(filename); - break :blk try parent_dir.createFile(basename, .{ .exclusive = true }); + break :blk try parent_dir.createFile(io, basename, .{ .exclusive = true }); } - break :blk try dest.createFile(filename, .{ .exclusive = true }); + break :blk try dest.createFile(io, filename, .{ .exclusive = true }); }; defer out_file.close(io); var out_file_buffer: [1024]u8 = undefined; diff --git a/src/Compilation.zig b/src/Compilation.zig index 24b994f608..5f15ef5f74 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -450,7 +450,7 @@ pub const Path = struct { const dir = switch (p.root) { .none => { const cwd_sub_path = absToCwdRelative(p.sub_path, dirs.cwd); - return .{ fs.cwd(), cwd_sub_path }; + return .{ Io.Dir.cwd(), cwd_sub_path }; }, .zig_lib => dirs.zig_lib.handle, .global_cache => dirs.global_cache.handle, @@ -723,7 +723,7 @@ pub const Directories = struct { pub fn deinit(dirs: *Directories, io: Io) void { // The local and global caches could be the same. - const close_local = dirs.local_cache.handle.fd != dirs.global_cache.handle.fd; + const close_local = dirs.local_cache.handle.handle != dirs.global_cache.handle.handle; dirs.global_cache.handle.close(io); if (close_local) dirs.local_cache.handle.close(io); @@ -814,7 +814,7 @@ pub const Directories = struct { return .{ .path = if (std.mem.eql(u8, name, ".")) null else name, .handle = .{ - .fd = preopens.find(name) orelse fatal("WASI preopen not found: '{s}'", .{name}), + .handle = preopens.find(name) orelse fatal("WASI preopen not found: '{s}'", .{name}), }, }; } @@ -824,8 +824,8 @@ pub const Directories = struct { }; const nonempty_path = if (path.len == 0) "." 
else path; const handle_or_err = switch (thing) { - .@"zig lib" => fs.cwd().openDir(nonempty_path, .{}), - .@"global cache", .@"local cache" => fs.cwd().makeOpenPath(nonempty_path, .{}), + .@"zig lib" => Io.Dir.cwd().openDir(nonempty_path, .{}), + .@"global cache", .@"local cache" => Io.Dir.cwd().makeOpenPath(nonempty_path, .{}), }; return .{ .path = if (path.len == 0) null else path, @@ -1104,7 +1104,7 @@ pub const CObject = struct { const source_line = source_line: { if (diag.src_loc.offset == 0 or diag.src_loc.column == 0) break :source_line 0; - const file = fs.cwd().openFile(io, file_name, .{}) catch break :source_line 0; + const file = Io.Dir.cwd().openFile(io, file_name, .{}) catch break :source_line 0; defer file.close(io); var buffer: [1024]u8 = undefined; var file_reader = file.reader(io, &buffer); @@ -1179,7 +1179,7 @@ pub const CObject = struct { }; var buffer: [1024]u8 = undefined; - const file = try fs.cwd().openFile(io, path, .{}); + const file = try Io.Dir.cwd().openFile(io, path, .{}); defer file.close(io); var file_reader = file.reader(io, &buffer); var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = &file_reader.interface }); @@ -2109,7 +2109,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, }, }; // These correspond to std.zig.Server.Message.PathPrefix. - cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(options.dirs.zig_lib); cache.addPrefix(options.dirs.local_cache); cache.addPrefix(options.dirs.global_cache); @@ -5220,7 +5220,7 @@ fn createDepFile( binfile: Cache.Path, ) anyerror!void { var buf: [4096]u8 = undefined; - var af = try std.fs.cwd().atomicFile(depfile, .{ .write_buffer = &buf }); + var af = try Io.Dir.cwd().atomicFile(depfile, .{ .write_buffer = &buf }); defer af.deinit(); comp.writeDepFile(binfile, &af.file_writer.interface) catch return af.file_writer.err.?; @@ -5284,7 +5284,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void { }; } - var tar_file = out_dir.createFile("sources.tar", .{}) catch |err| { + var tar_file = out_dir.createFile(io, "sources.tar", .{}) catch |err| { return comp.lockAndSetMiscFailure( .docs_copy, "unable to create '{f}/sources.tar': {s}", diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 3bd05120ff..8a30529bc5 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -383,14 +383,14 @@ pub fn run(f: *Fetch) RunError!void { }, .remote => |remote| remote, .path_or_url => |path_or_url| { - if (fs.cwd().openDir(path_or_url, .{ .iterate = true })) |dir| { + if (Io.Dir.cwd().openDir(path_or_url, .{ .iterate = true })) |dir| { var resource: Resource = .{ .dir = dir }; return f.runResource(path_or_url, &resource, null); } else |dir_err| { var server_header_buffer: [init_resource_buffer_size]u8 = undefined; const file_err = if (dir_err == error.NotDir) e: { - if (fs.cwd().openFile(io, path_or_url, .{})) |file| { + if (Io.Dir.cwd().openFile(io, path_or_url, .{})) |file| { var resource: Resource = .{ .file = file.reader(io, &server_header_buffer) }; return f.runResource(path_or_url, &resource, null); } else |err| break :e err; @@ -1303,7 +1303,7 @@ fn unzip( const random_integer = std.crypto.random.int(u64); zip_path[prefix.len..][0..random_len].* = std.fmt.hex(random_integer); - break cache_root.handle.createFile(&zip_path, .{ + break cache_root.handle.createFile(io, &zip_path, .{ .exclusive = true, .read = true, }) catch |err| switch (err) { @@ -1365,7 +1365,7 @@ fn unpackGitPack(f: 
*Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U { var pack_dir = try out_dir.makeOpenPath(".git", .{}); defer pack_dir.close(io); - var pack_file = try pack_dir.createFile("pkg.pack", .{ .read = true }); + var pack_file = try pack_dir.createFile(io, "pkg.pack", .{ .read = true }); defer pack_file.close(io); var pack_file_buffer: [4096]u8 = undefined; var pack_file_reader = b: { @@ -1376,7 +1376,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U break :b pack_file_writer.moveToReader(io); }; - var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true }); + var index_file = try pack_dir.createFile(io, "pkg.idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [2000]u8 = undefined; var index_file_writer = index_file.writer(&index_file_buffer); @@ -2235,7 +2235,7 @@ test "set executable bit based on file content" { fn saveEmbedFile(io: Io, comptime tarball_name: []const u8, dir: Io.Dir) !void { //const tarball_name = "duplicate_paths_excluded.tar.gz"; const tarball_content = @embedFile("Fetch/testdata/" ++ tarball_name); - var tmp_file = try dir.createFile(tarball_name, .{}); + var tmp_file = try dir.createFile(io, tarball_name, .{}); defer tmp_file.close(io); try tmp_file.writeAll(tarball_content); } diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index ccae9440e2..7b08a89cae 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -264,7 +264,7 @@ pub const Repository = struct { try repository.odb.seekOid(entry.oid); const file_object = try repository.odb.readObject(); if (file_object.type != .blob) return error.InvalidFile; - var file = dir.createFile(entry.name, .{ .exclusive = true }) catch |e| { + var file = dir.createFile(io, entry.name, .{ .exclusive = true }) catch |e| { const file_name = try std.fs.path.join(diagnostics.allocator, &.{ current_path, entry.name }); errdefer diagnostics.allocator.free(file_name); try diagnostics.errors.append(diagnostics.allocator, .{ .unable_to_create_file = .{ @@ -1584,14 +1584,14 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u var git_dir = testing.tmpDir(.{}); defer git_dir.cleanup(); - var pack_file = try git_dir.dir.createFile("testrepo.pack", .{ .read = true }); + var pack_file = try git_dir.dir.createFile(io, "testrepo.pack", .{ .read = true }); defer pack_file.close(io); try pack_file.writeAll(testrepo_pack); var pack_file_buffer: [2000]u8 = undefined; var pack_file_reader = pack_file.reader(io, &pack_file_buffer); - var index_file = try git_dir.dir.createFile("testrepo.idx", .{ .read = true }); + var index_file = try git_dir.dir.createFile(io, "testrepo.idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [2000]u8 = undefined; var index_file_writer = index_file.writer(&index_file_buffer); @@ -1714,20 +1714,20 @@ pub fn main() !void { const format = std.meta.stringToEnum(Oid.Format, args[1]) orelse return error.InvalidFormat; - var pack_file = try std.fs.cwd().openFile(io, args[2], .{}); + var pack_file = try Io.Dir.cwd().openFile(io, args[2], .{}); defer pack_file.close(io); var pack_file_buffer: [4096]u8 = undefined; var pack_file_reader = pack_file.reader(io, &pack_file_buffer); const commit = try Oid.parse(format, args[3]); - var worktree = try std.fs.cwd().makeOpenPath(args[4], .{}); + var worktree = try Io.Dir.cwd().makeOpenPath(args[4], .{}); defer worktree.close(io); var git_dir = try worktree.makeOpenPath(".git", .{}); defer git_dir.close(io); 
std.debug.print("Starting index...\n", .{}); - var index_file = try git_dir.createFile("idx", .{ .read = true }); + var index_file = try git_dir.createFile(io, "idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [4096]u8 = undefined; var index_file_writer = index_file.writer(&index_file_buffer); diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 45b1302138..9a75b2096e 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -170,7 +170,7 @@ pub fn updateFile( // version. Likewise if we're working on AstGen and another process asks for // the cached file, they'll get it. const cache_file = while (true) { - break zir_dir.createFile(&hex_digest, .{ + break zir_dir.createFile(io, &hex_digest, .{ .read = true, .truncate = false, .lock = lock, @@ -196,7 +196,7 @@ pub fn updateFile( cache_directory, }); } - break zir_dir.createFile(&hex_digest, .{ + break zir_dir.createFile(io, &hex_digest, .{ .read = true, .truncate = false, .lock = lock, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index cb4fe0459f..4fc58c2c4b 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1,19 +1,22 @@ -const std = @import("std"); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; const Allocator = std.mem.Allocator; const log = std.log.scoped(.codegen); const math = std.math; const DW = std.dwarf; - const Builder = std.zig.llvm.Builder; + +const build_options = @import("build_options"); const llvm = if (build_options.have_llvm) @import("llvm/bindings.zig") else @compileError("LLVM unavailable"); + const link = @import("../link.zig"); const Compilation = @import("../Compilation.zig"); -const build_options = @import("build_options"); const Zcu = @import("../Zcu.zig"); const InternPool = @import("../InternPool.zig"); const Package = @import("../Package.zig"); @@ -964,7 +967,7 @@ pub const Object = struct { if (std.mem.eql(u8, path, "-")) { o.builder.dump(); } else { - o.builder.printToFilePath(std.fs.cwd(), path) catch |err| { + o.builder.printToFilePath(Io.Dir.cwd(), path) catch |err| { log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) }); }; } @@ -978,7 +981,7 @@ pub const Object = struct { o.builder.clearAndFree(); if (options.pre_bc_path) |path| { - var file = std.fs.cwd().createFile(path, .{}) catch |err| + var file = Io.Dir.cwd().createFile(io, path, .{}) catch |err| return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) }); defer file.close(io); @@ -991,7 +994,7 @@ pub const Object = struct { options.post_ir_path == null and options.post_bc_path == null) return; if (options.post_bc_path) |path| { - var file = std.fs.cwd().createFile(path, .{}) catch |err| + var file = Io.Dir.cwd().createFile(io, path, .{}) catch |err| return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) }); defer file.close(io); @@ -2711,7 +2714,7 @@ pub const Object = struct { } fn allocTypeName(o: *Object, pt: Zcu.PerThread, ty: Type) Allocator.Error![:0]const u8 { - var aw: std.Io.Writer.Allocating = .init(o.gpa); + var aw: Io.Writer.Allocating = .init(o.gpa); defer aw.deinit(); ty.print(&aw.writer, pt, null) catch |err| switch (err) { error.WriteFailed => return error.OutOfMemory, diff --git a/src/fmt.zig b/src/fmt.zig index ce8a31fa4c..36a3833986 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -182,11 +182,11 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! 
// Mark any excluded files/directories as already seen, // so that they are skipped later during actual processing for (excluded_files.items) |file_path| { - const stat = fs.cwd().statFile(file_path) catch |err| switch (err) { + const stat = Io.Dir.cwd().statFile(file_path) catch |err| switch (err) { error.FileNotFound => continue, // On Windows, statFile does not work for directories error.IsDir => dir: { - var dir = try fs.cwd().openDir(file_path, .{}); + var dir = try Io.Dir.cwd().openDir(file_path, .{}); defer dir.close(io); break :dir try dir.stat(); }, @@ -196,7 +196,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! } for (input_files.items) |file_path| { - try fmtPath(&fmt, file_path, check_flag, fs.cwd(), file_path); + try fmtPath(&fmt, file_path, check_flag, Io.Dir.cwd(), file_path); } try fmt.stdout_writer.interface.flush(); if (fmt.any_error) { diff --git a/src/introspect.zig b/src/introspect.zig index d2faa9a55c..04ddf47e8a 100644 --- a/src/introspect.zig +++ b/src/introspect.zig @@ -82,7 +82,7 @@ pub fn findZigLibDirFromSelfExe( cwd_path: []const u8, self_exe_path: []const u8, ) error{ OutOfMemory, FileNotFound }!Cache.Directory { - const cwd = fs.cwd(); + const cwd = Io.Dir.cwd(); var cur_path: []const u8 = self_exe_path; while (fs.path.dirname(cur_path)) |dirname| : (cur_path = dirname) { var base_dir = cwd.openDir(dirname, .{}) catch continue; @@ -206,7 +206,7 @@ pub fn resolveSuitableLocalCacheDir(arena: Allocator, cwd: []const u8) Allocator var cur_dir = cwd; while (true) { const joined = try fs.path.join(arena, &.{ cur_dir, Package.build_zig_basename }); - if (fs.cwd().access(joined, .{})) |_| { + if (Io.Dir.cwd().access(joined, .{})) |_| { return try fs.path.join(arena, &.{ cur_dir, default_local_zig_cache_basename }); } else |err| switch (err) { error.FileNotFound => { diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig index 8c5e0afe4b..cfd8d5554c 100644 --- a/src/libs/freebsd.zig +++ b/src/libs/freebsd.zig @@ -1,9 +1,9 @@ const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const mem = std.mem; const log = std.log; -const fs = std.fs; -const path = fs.path; +const path = std.Io.Dir.path; const assert = std.debug.assert; const Version = std.SemanticVersion; const Path = std.Build.Cache.Path; @@ -446,7 +446,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .io = io, .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), }; - cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); cache.addPrefix(comp.dirs.global_cache); defer cache.manifest_dir.close(io); @@ -468,7 +468,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -986,7 +986,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -1014,7 +1014,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C const so_path: Path = .{ .root_dir = so_files.dir_path.root_dir, 
.sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{ - so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.getSoVersion(&target.os), + so_files.dir_path.sub_path, path.sep, lib.name, lib.getSoVersion(&target.os), }) catch return comp.setAllocFailure(), }; task_buffer[task_buffer_i] = .{ .load_dso = so_path }; diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig index bec20ff3d4..e3d8ce1f7f 100644 --- a/src/libs/glibc.zig +++ b/src/libs/glibc.zig @@ -1,9 +1,9 @@ const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const mem = std.mem; const log = std.log; -const fs = std.fs; -const path = fs.path; +const path = std.Io.Dir.path; const assert = std.debug.assert; const Version = std.SemanticVersion; const Path = std.Build.Cache.Path; @@ -681,7 +681,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .io = io, .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), }; - cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); cache.addPrefix(comp.dirs.global_cache); defer cache.manifest_dir.close(io); @@ -703,7 +703,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -775,7 +775,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye try stubs_asm.appendSlice(".text\n"); var sym_i: usize = 0; - var sym_name_buf: std.Io.Writer.Allocating = .init(arena); + var sym_name_buf: Io.Writer.Allocating = .init(arena); var opt_symbol_name: ?[]const u8 = null; var versions_buffer: [32]u8 = undefined; var versions_len: usize = undefined; @@ -796,7 +796,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye // twice, which causes a "duplicate symbol" assembler error. 
var versions_written = std.AutoArrayHashMap(Version, void).init(arena); - var inc_reader: std.Io.Reader = .fixed(metadata.inclusions); + var inc_reader: Io.Reader = .fixed(metadata.inclusions); const fn_inclusions_len = try inc_reader.takeInt(u16, .little); @@ -1130,7 +1130,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -1156,7 +1156,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C const so_path: Path = .{ .root_dir = so_files.dir_path.root_dir, .sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{ - so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover, + so_files.dir_path.sub_path, path.sep, lib.name, lib.sover, }) catch return comp.setAllocFailure(), }; task_buffer[task_buffer_i] = .{ .load_dso = so_path }; diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig index 005696e1fc..b3ca51e833 100644 --- a/src/libs/mingw.zig +++ b/src/libs/mingw.zig @@ -1,7 +1,8 @@ const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const mem = std.mem; -const path = std.fs.path; +const path = std.Io.Dir.path; const assert = std.debug.assert; const log = std.log.scoped(.mingw); @@ -259,7 +260,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { .io = io, .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), }; - cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); cache.addPrefix(comp.dirs.global_cache); defer cache.manifest_dir.close(io); @@ -304,7 +305,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { .output = .{ .to_list = .{ .arena = .init(gpa) } }, }; defer diagnostics.deinit(); - var aro_comp = aro.Compilation.init(gpa, arena, io, &diagnostics, std.fs.cwd()); + var aro_comp = aro.Compilation.init(gpa, arena, io, &diagnostics, Io.Dir.cwd()); defer aro_comp.deinit(); aro_comp.target = .fromZigTarget(target.*); @@ -343,7 +344,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { } const members = members: { - var aw: std.Io.Writer.Allocating = .init(gpa); + var aw: Io.Writer.Allocating = .init(gpa); errdefer aw.deinit(); try pp.prettyPrintTokens(&aw.writer, .result_only); @@ -376,7 +377,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { errdefer gpa.free(lib_final_path); { - const lib_final_file = try o_dir.createFile(final_lib_basename, .{ .truncate = true }); + const lib_final_file = try o_dir.createFile(io, final_lib_basename, .{ .truncate = true }); defer lib_final_file.close(io); var buffer: [1024]u8 = undefined; var file_writer = lib_final_file.writer(&buffer); @@ -442,7 +443,7 @@ fn findDef( } else { try override_path.print(fmt_path, .{ lib_path, lib_name }); } - if (std.fs.cwd().access(override_path.items, .{})) |_| { + if (Io.Dir.cwd().access(override_path.items, .{})) |_| { return override_path.toOwnedSlice(); } else |err| switch (err) { error.FileNotFound => {}, @@ -459,7 +460,7 @@ fn findDef( } else { try override_path.print(fmt_path, .{lib_name}); } - if (std.fs.cwd().access(override_path.items, .{})) |_| { + if (Io.Dir.cwd().access(override_path.items, .{})) |_| { return override_path.toOwnedSlice(); } else |err| switch (err) { 
error.FileNotFound => {}, @@ -476,7 +477,7 @@ fn findDef( } else { try override_path.print(fmt_path, .{lib_name}); } - if (std.fs.cwd().access(override_path.items, .{})) |_| { + if (Io.Dir.cwd().access(override_path.items, .{})) |_| { return override_path.toOwnedSlice(); } else |err| switch (err) { error.FileNotFound => {}, diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig index 67e6a2f903..cb6a80d69d 100644 --- a/src/libs/netbsd.zig +++ b/src/libs/netbsd.zig @@ -1,9 +1,9 @@ const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const mem = std.mem; const log = std.log; -const fs = std.fs; -const path = fs.path; +const path = std.Io.Dir.path; const assert = std.debug.assert; const Version = std.SemanticVersion; const Path = std.Build.Cache.Path; @@ -387,7 +387,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .io = io, .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), }; - cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); cache.addPrefix(comp.dirs.global_cache); defer cache.manifest_dir.close(io); @@ -409,7 +409,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -640,7 +640,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye .lock = man.toOwnedLock(), .dir_path = .{ .root_dir = comp.dirs.global_cache, - .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest), + .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest), }, }); } @@ -661,7 +661,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C const so_path: Path = .{ .root_dir = so_files.dir_path.root_dir, .sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{ - so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover, + so_files.dir_path.sub_path, path.sep, lib.name, lib.sover, }) catch return comp.setAllocFailure(), }; task_buffer[task_buffer_i] = .{ .load_dso = so_path }; diff --git a/src/link/C.zig b/src/link/C.zig index 04c92443e5..a001f8fdd9 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -136,7 +136,7 @@ pub fn createEmpty( assert(!use_lld); assert(!use_llvm); - const file = try emit.root_dir.handle.createFile(emit.sub_path, .{ + const file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ // Truncation is done on `flush`. .truncate = false, }); @@ -792,7 +792,7 @@ pub fn flushEmitH(zcu: *Zcu) !void { } const directory = emit_h.loc.directory orelse zcu.comp.local_cache_directory; - const file = try directory.handle.createFile(emit_h.loc.basename, .{ + const file = try directory.handle.createFile(io, emit_h.loc.basename, .{ // We set the end position explicitly below; by not truncating the file, we possibly // make it easier on the file system by doing 1 reallocation instead of two. 
.truncate = false, diff --git a/src/link/Coff.zig b/src/link/Coff.zig index e1d52fb7c4..009e59ed0d 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -631,12 +631,14 @@ fn create( else => return error.UnsupportedCOFFArchitecture, }; + const io = comp.io; + const coff = try arena.create(Coff); - const file = try path.root_dir.handle.createFile(comp.io, path.sub_path, .{ + const file = try path.root_dir.handle.createFile(io, path.sub_path, .{ .read = true, .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode), }); - errdefer file.close(comp.io); + errdefer file.close(io); coff.* = .{ .base = .{ .tag = .coff2, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 584a50c7f2..53812a37ec 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -313,9 +313,11 @@ pub fn createEmpty( const is_obj = output_mode == .Obj; const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .static); + const io = comp.io; + // What path should this ELF linker code output to? const sub_path = emit.sub_path; - self.base.file = try emit.root_dir.handle.createFile(sub_path, .{ + self.base.file = try emit.root_dir.handle.createFile(io, sub_path, .{ .truncate = true, .read = true, .mode = link.File.determineMode(output_mode, link_mode), diff --git a/src/link/Lld.zig b/src/link/Lld.zig index 49f6d3f7c7..b25b9da9d9 100644 --- a/src/link/Lld.zig +++ b/src/link/Lld.zig @@ -1572,7 +1572,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void { // report a nice error here with the file path if it fails instead of // just returning the error code. // chmod does not interact with umask, so we use a conservative -rwxr--r-- here. - std.posix.fchmodat(fs.cwd().fd, full_out_path, 0o744, 0) catch |err| switch (err) { + std.posix.fchmodat(Io.Dir.cwd().handle, full_out_path, 0o744, 0) catch |err| switch (err) { error.OperationNotSupported => unreachable, // Not a symlink. 
else => |e| return e, }; @@ -1624,7 +1624,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi const rand_int = std.crypto.random.int(u64); const rsp_path = "tmp" ++ s ++ std.fmt.hex(rand_int) ++ ".rsp"; - const rsp_file = try comp.dirs.local_cache.handle.createFile(rsp_path, .{}); + const rsp_file = try comp.dirs.local_cache.handle.createFile(io, rsp_path, .{}); defer comp.dirs.local_cache.handle.deleteFileZ(rsp_path) catch |err| log.warn("failed to delete response file {s}: {s}", .{ rsp_path, @errorName(err) }); { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index e837cc853a..0f6127e10e 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -219,7 +219,9 @@ pub fn createEmpty( }; errdefer self.base.destroy(); - self.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{ + const io = comp.io; + + self.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ .truncate = true, .read = true, .mode = link.File.determineMode(output_mode, link_mode), @@ -1082,7 +1084,7 @@ fn accessLibPath( test_path.clearRetainingCapacity(); try test_path.print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext }); try checked_paths.append(try arena.dupe(u8, test_path.items)); - fs.cwd().access(test_path.items, .{}) catch |err| switch (err) { + Io.Dir.cwd().access(test_path.items, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| return e, }; @@ -1110,7 +1112,7 @@ fn accessFrameworkPath( ext, }); try checked_paths.append(try arena.dupe(u8, test_path.items)); - fs.cwd().access(test_path.items, .{}) catch |err| switch (err) { + Io.Dir.cwd().access(test_path.items, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| return e, }; @@ -1191,7 +1193,7 @@ fn parseDependentDylibs(self: *MachO) !void { try test_path.print("{s}{s}", .{ path, ext }); } try checked_paths.append(try arena.dupe(u8, test_path.items)); - fs.cwd().access(test_path.items, .{}) catch |err| switch (err) { + Io.Dir.cwd().access(test_path.items, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| return e, }; @@ -3289,7 +3291,7 @@ pub fn reopenDebugInfo(self: *MachO) !void { var d_sym_bundle = try self.base.emit.root_dir.handle.makeOpenPath(d_sym_path, .{}); defer d_sym_bundle.close(io); - self.d_sym.?.file = try d_sym_bundle.createFile(fs.path.basename(self.base.emit.sub_path), .{ + self.d_sym.?.file = try d_sym_bundle.createFile(io, fs.path.basename(self.base.emit.sub_path), .{ .truncate = false, .read = true, }); @@ -4370,7 +4372,7 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi // The file/property is also available with vendored libc. 
fn readSdkVersionFromSettings(arena: Allocator, dir: []const u8) ![]const u8 { const sdk_path = try fs.path.join(arena, &.{ dir, "SDKSettings.json" }); - const contents = try fs.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16))); + const contents = try Io.Dir.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16))); const parsed = try std.json.parseFromSlice(std.json.Value, arena, contents, .{}); if (parsed.value.object.get("MinimalDisplayName")) |ver| return ver.string; return error.SdkVersionFailure; diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 5f9a9ecac9..814faf234a 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -247,7 +247,7 @@ pub fn deinit(self: *CodeSignature, allocator: Allocator) void { } pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, path: []const u8) !void { - const inner = try fs.cwd().readFileAlloc(path, allocator, .limited(std.math.maxInt(u32))); + const inner = try Io.Dir.cwd().readFileAlloc(path, allocator, .limited(std.math.maxInt(u32))); self.entitlements = .{ .inner = inner }; } diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 7e28dc0a8b..d13caaa315 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -33,6 +33,7 @@ pub fn createEmpty( options: link.File.OpenOptions, ) !*Linker { const gpa = comp.gpa; + const io = comp.io; const target = &comp.root_mod.resolved_target.result; assert(!comp.config.use_lld); // Caught by Compilation.Config.resolve @@ -78,7 +79,7 @@ pub fn createEmpty( }; errdefer linker.deinit(); - linker.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{ + linker.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ .truncate = true, .read = true, }); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 7ab1e0eb4b..5f89625d56 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -2997,7 +2997,9 @@ pub fn createEmpty( .named => |name| (try wasm.internString(name)).toOptional(), }; - wasm.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{ + const io = comp.io; + + wasm.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ .truncate = true, .read = true, .mode = if (fs.has_executable_bit) diff --git a/src/main.zig b/src/main.zig index b040b6c8ef..67b7384b57 100644 --- a/src/main.zig +++ b/src/main.zig @@ -713,7 +713,7 @@ const Emit = union(enum) { } else e: { // If there's a dirname, check that dir exists. This will give a more descriptive error than `Compilation` otherwise would. if (fs.path.dirname(path)) |dir_path| { - var dir = fs.cwd().openDir(dir_path, .{}) catch |err| { + var dir = Io.Dir.cwd().openDir(dir_path, .{}) catch |err| { fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) }); }; dir.close(io); @@ -3304,7 +3304,7 @@ fn buildOutputType( } else emit: { // If there's a dirname, check that dir exists. This will give a more descriptive error than `Compilation` otherwise would. if (fs.path.dirname(path)) |dir_path| { - var dir = fs.cwd().openDir(dir_path, .{}) catch |err| { + var dir = Io.Dir.cwd().openDir(dir_path, .{}) catch |err| { fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) }); }; dir.close(io); @@ -3389,7 +3389,7 @@ fn buildOutputType( // file will not run and this temp file will be leaked. The filename // will be a hash of its contents — so multiple invocations of // `zig cc -` will result in the same temp file name. 
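// ---------------------------------------------------------------------------
// Illustrative aside, not part of the patch: a minimal sketch of the calling
// convention these hunks migrate to, built only from signatures visible in the
// series (`Io.Dir.cwd()` in place of `fs.cwd()`, with `createFile`/`openFile`/
// `close` taking an explicit `Io`). The file name is a placeholder, and how the
// caller obtains `io` is assumed to come from the surrounding application.
const std = @import("std");
const Io = std.Io;

fn writeGreeting(io: Io) !void {
    // `createFile` now threads `io` through explicitly, as in the hunks above.
    const file = try Io.Dir.cwd().createFile(io, "greeting.txt", .{ .truncate = true });
    // `close` takes `io` as well.
    defer file.close(io);
    try file.writeAll("hello from std.Io.Dir\n");
}
// ---------------------------------------------------------------------------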
- var f = try dirs.local_cache.handle.createFile(dump_path, .{}); + var f = try dirs.local_cache.handle.createFile(io, dump_path, .{}); defer f.close(io); // Re-using the hasher from Cache, since the functional requirements @@ -4773,7 +4773,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! var ok_count: usize = 0; for (template_paths) |template_path| { - if (templates.write(arena, fs.cwd(), sanitized_root_name, template_path, fingerprint)) |_| { + if (templates.write(arena, Io.Dir.cwd(), sanitized_root_name, template_path, fingerprint)) |_| { std.log.info("created {s}", .{template_path}); ok_count += 1; } else |err| switch (err) { @@ -5227,7 +5227,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) if (system_pkg_dir_path) |p| { job_queue.global_cache = .{ .path = p, - .handle = fs.cwd().openDir(p, .{}) catch |err| { + .handle = Io.Dir.cwd().openDir(p, .{}) catch |err| { fatal("unable to open system package directory '{s}': {s}", .{ p, @errorName(err), }); @@ -5823,7 +5823,7 @@ const ArgIteratorResponseFile = process.ArgIteratorGeneral(.{ .comments = true, /// Initialize the arguments from a Response File. "*.rsp" fn initArgIteratorResponseFile(allocator: Allocator, resp_file_path: []const u8) !ArgIteratorResponseFile { const max_bytes = 10 * 1024 * 1024; // 10 MiB of command line arguments is a reasonable limit - const cmd_line = try fs.cwd().readFileAlloc(resp_file_path, allocator, .limited(max_bytes)); + const cmd_line = try Io.Dir.cwd().readFileAlloc(resp_file_path, allocator, .limited(max_bytes)); errdefer allocator.free(cmd_line); return ArgIteratorResponseFile.initTakeOwnership(allocator, cmd_line); @@ -6187,7 +6187,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void { const display_path = zig_source_path orelse ""; const source: [:0]const u8 = s: { var f = if (zig_source_path) |p| file: { - break :file fs.cwd().openFile(io, p, .{}) catch |err| { + break :file Io.Dir.cwd().openFile(io, p, .{}) catch |err| { fatal("unable to open file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) }); }; } else Io.File.stdin(); @@ -6494,7 +6494,7 @@ fn cmdDumpZir(arena: Allocator, io: Io, args: []const []const u8) !void { const cache_file = args[0]; - var f = fs.cwd().openFile(io, cache_file, .{}) catch |err| { + var f = Io.Dir.cwd().openFile(io, cache_file, .{}) catch |err| { fatal("unable to open zir cache file for dumping '{s}': {s}", .{ cache_file, @errorName(err) }); }; defer f.close(io); @@ -6541,7 +6541,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { const new_source_path = args[1]; const old_source = source: { - var f = fs.cwd().openFile(io, old_source_path, .{}) catch |err| + var f = Io.Dir.cwd().openFile(io, old_source_path, .{}) catch |err| fatal("unable to open old source file '{s}': {s}", .{ old_source_path, @errorName(err) }); defer f.close(io); var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer); @@ -6549,7 +6549,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { fatal("unable to read old source file '{s}': {s}", .{ old_source_path, @errorName(err) }); }; const new_source = source: { - var f = fs.cwd().openFile(io, new_source_path, .{}) catch |err| + var f = Io.Dir.cwd().openFile(io, new_source_path, .{}) catch |err| fatal("unable to open new source file '{s}': {s}", .{ new_source_path, @errorName(err) }); defer f.close(io); var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer); @@ -6845,7 +6845,7 
@@ fn accessFrameworkPath( framework_dir_path, framework_name, framework_name, ext, }); try checked_paths.print("\n {s}", .{test_path.items}); - fs.cwd().access(test_path.items, .{}) catch |err| switch (err) { + Io.Dir.cwd().access(test_path.items, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| fatal("unable to search for {s} framework '{s}': {s}", .{ ext, test_path.items, @errorName(e), @@ -6957,7 +6957,7 @@ fn cmdFetch( var global_cache_directory: Directory = l: { const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena); break :l .{ - .handle = try fs.cwd().makeOpenPath(p, .{}), + .handle = try Io.Dir.cwd().makeOpenPath(p, .{}), .path = p, }; }; @@ -7260,7 +7260,7 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot { if (options.build_file) |bf| { if (fs.path.dirname(bf)) |dirname| { - const dir = fs.cwd().openDir(dirname, .{}) catch |err| { + const dir = Io.Dir.cwd().openDir(dirname, .{}) catch |err| { fatal("unable to open directory to build file from argument 'build-file', '{s}': {s}", .{ dirname, @errorName(err) }); }; return .{ @@ -7272,7 +7272,7 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot { return .{ .build_zig_basename = build_zig_basename, - .directory = .{ .path = null, .handle = fs.cwd() }, + .directory = .{ .path = null, .handle = Io.Dir.cwd() }, .cleanup_build_dir = null, }; } @@ -7280,8 +7280,8 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot { var dirname: []const u8 = cwd_path; while (true) { const joined_path = try fs.path.join(arena, &[_][]const u8{ dirname, build_zig_basename }); - if (fs.cwd().access(joined_path, .{})) |_| { - const dir = fs.cwd().openDir(dirname, .{}) catch |err| { + if (Io.Dir.cwd().access(joined_path, .{})) |_| { + const dir = Io.Dir.cwd().openDir(dirname, .{}) catch |err| { fatal("unable to open directory while searching for build.zig file, '{s}': {s}", .{ dirname, @errorName(err) }); }; return .{ @@ -7443,7 +7443,7 @@ const Templates = struct { } }; fn writeSimpleTemplateFile(io: Io, file_name: []const u8, comptime fmt: []const u8, args: anytype) !void { - const f = try fs.cwd().createFile(file_name, .{ .exclusive = true }); + const f = try Io.Dir.cwd().createFile(io, file_name, .{ .exclusive = true }); defer f.close(io); var buf: [4096]u8 = undefined; var fw = f.writer(&buf); @@ -7591,7 +7591,7 @@ fn addLibDirectoryWarn2( ignore_not_found: bool, ) void { lib_directories.appendAssumeCapacity(.{ - .handle = fs.cwd().openDir(path, .{}) catch |err| { + .handle = Io.Dir.cwd().openDir(path, .{}) catch |err| { if (err == error.FileNotFound and ignore_not_found) return; warn("unable to open library directory '{s}': {s}", .{ path, @errorName(err) }); return; -- cgit v1.2.3 From 9f4d40b1f9bffc4137055b8a07f042ecfa398124 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 14:17:52 -0800 Subject: update all stat() to stat(io) --- lib/compiler/objcopy.zig | 2 +- lib/compiler/resinator/utils.zig | 2 +- lib/compiler/std-docs.zig | 2 +- lib/std/Build/Cache.zig | 9 ++++---- lib/std/Build/WebServer.zig | 2 +- lib/std/Io/test.zig | 4 ++-- lib/std/dynamic_library.zig | 2 +- lib/std/fs/test.zig | 22 ++++++++++---------- lib/std/os/linux/IoUring.zig | 6 +++--- lib/std/os/linux/test.zig | 4 ++-- src/Compilation.zig | 2 +- src/Zcu.zig | 2 +- src/Zcu/PerThread.zig | 4 ++-- src/fmt.zig | 6 +++--- src/link.zig | 6 +++--- src/link/Elf.zig | 5 +++-- src/link/Elf/Archive.zig | 45 
+++++++++++++++++++++------------------- src/link/Elf/Object.zig | 7 +++++-- src/link/MachO.zig | 3 ++- src/link/MachO/Archive.zig | 3 ++- src/link/MachO/Object.zig | 4 +++- src/link/MachO/relocatable.zig | 2 +- src/link/tapi.zig | 4 ++-- 23 files changed, 80 insertions(+), 68 deletions(-) (limited to 'lib/std/Build/Cache.zig') diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index e48f76a6a6..6473a10dd0 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -155,7 +155,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void const input_file = Io.Dir.cwd().openFile(input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err }); defer input_file.close(io); - const stat = input_file.stat() catch |err| fatal("failed to stat {s}: {t}", .{ input, err }); + const stat = input_file.stat(io) catch |err| fatal("failed to stat {s}: {t}", .{ input, err }); var in: File.Reader = .initSize(input_file, io, &input_buffer, stat.size); diff --git a/lib/compiler/resinator/utils.zig b/lib/compiler/resinator/utils.zig index 3f03b73c54..285342ad74 100644 --- a/lib/compiler/resinator/utils.zig +++ b/lib/compiler/resinator/utils.zig @@ -38,7 +38,7 @@ pub fn openFileNotDir( errdefer file.close(io); // https://github.com/ziglang/zig/issues/5732 if (builtin.os.tag != .windows) { - const stat = try file.stat(); + const stat = try file.stat(io); if (stat.kind == .directory) return error.IsDir; diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index 37e72d49e5..d5beab5f17 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -227,7 +227,7 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void { } var file = try entry.dir.openFile(io, entry.basename, .{}); defer file.close(io); - const stat = try file.stat(); + const stat = try file.stat(io); var file_reader: std.Io.File.Reader = .{ .file = file, .interface = std.Io.File.Reader.initInterface(&.{}), diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index fdcb2ab714..0176d71a1c 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -775,7 +775,7 @@ pub const Manifest = struct { }; defer this_file.close(io); - const actual_stat = this_file.stat() catch |err| { + const actual_stat = this_file.stat(io) catch |err| { self.diagnostic = .{ .file_stat = .{ .file_index = idx, .err = err, @@ -883,7 +883,7 @@ pub const Manifest = struct { defer file.close(io); // Save locally and also save globally (we still hold the global lock). 
- const stat = file.stat() catch |err| switch (err) { + const stat = file.stat(io) catch |err| switch (err) { error.Canceled => return error.Canceled, else => return true, }; @@ -909,7 +909,8 @@ pub const Manifest = struct { } fn populateFileHashHandle(self: *Manifest, ch_file: *File, handle: Io.File) !void { - const actual_stat = try handle.stat(); + const io = self.cache.io; + const actual_stat = try handle.stat(io); ch_file.stat = .{ .size = actual_stat.size, .mtime = actual_stat.mtime, @@ -1333,7 +1334,7 @@ fn testGetCurrentFileTimestamp(io: Io, dir: Io.Dir) !Io.Timestamp { dir.deleteFile(test_out_file) catch {}; } - return (try file.stat()).mtime; + return (try file.stat(io)).mtime; } test "cache file and then recall it" { diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 9938d5e1b0..472e87b05a 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -509,7 +509,7 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons continue; }; defer file.close(io); - const stat = try file.stat(); + const stat = try file.stat(io); var read_buffer: [1024]u8 = undefined; var file_reader: Io.File.Reader = .initSize(file, io, &read_buffer, stat.size); diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index e731dc18d7..8b9d714ee8 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -124,13 +124,13 @@ test "updateTimes" { var file = try tmp.dir.createFile(io, tmp_file_name, .{ .read = true }); defer file.close(io); - const stat_old = try file.stat(); + const stat_old = try file.stat(io); // Set atime and mtime to 5s before try file.updateTimes( stat_old.atime.subDuration(.fromSeconds(5)), stat_old.mtime.subDuration(.fromSeconds(5)), ); - const stat_new = try file.stat(); + const stat_new = try file.stat(io); try expect(stat_new.atime.nanoseconds < stat_old.atime.nanoseconds); try expect(stat_new.mtime.nanoseconds < stat_old.mtime.nanoseconds); } diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index b1965b60ee..f02caa1f5b 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -226,7 +226,7 @@ pub const ElfDynLib = struct { defer posix.close(fd); const file: Io.File = .{ .handle = fd }; - const stat = try file.stat(); + const stat = try file.stat(io); const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig; const page_size = std.heap.pageSize(); diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 9924cafaf8..7b12ba4271 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -350,7 +350,7 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" { }; defer symlink.close(io); - const stat = try symlink.stat(); + const stat = try symlink.stat(io); try testing.expectEqual(File.Kind.sym_link, stat.kind); } }.impl); @@ -396,7 +396,7 @@ test "openDirAbsolute" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - const tmp_ino = (try tmp.dir.stat()).inode; + const tmp_ino = (try tmp.dir.stat(io)).inode; try tmp.dir.makeDir("subdir"); const sub_path = try tmp.dir.realpathAlloc(testing.allocator, "subdir"); @@ -406,7 +406,7 @@ test "openDirAbsolute" { var tmp_sub = try fs.openDirAbsolute(sub_path, .{}); defer tmp_sub.close(io); - const sub_ino = (try tmp_sub.stat()).inode; + const sub_ino = (try tmp_sub.stat(io)).inode; { // Can open sub_path + ".." 
@@ -416,7 +416,7 @@ test "openDirAbsolute" { var dir = try fs.openDirAbsolute(dir_path, .{}); defer dir.close(io); - const ino = (try dir.stat()).inode; + const ino = (try dir.stat(io)).inode; try testing.expectEqual(tmp_ino, ino); } @@ -428,7 +428,7 @@ test "openDirAbsolute" { var dir = try fs.openDirAbsolute(dir_path, .{}); defer dir.close(io); - const ino = (try dir.stat()).inode; + const ino = (try dir.stat(io)).inode; try testing.expectEqual(sub_ino, ino); } @@ -440,7 +440,7 @@ test "openDirAbsolute" { var dir = try fs.openDirAbsolute(dir_path, .{}); defer dir.close(io); - const ino = (try dir.stat()).inode; + const ino = (try dir.stat(io)).inode; try testing.expectEqual(tmp_ino, ino); } } @@ -849,7 +849,7 @@ test "directory operations on files" { // ensure the file still exists and is a file as a sanity check file = try ctx.dir.openFile(io, test_file_name, .{}); - const stat = try file.stat(); + const stat = try file.stat(io); try testing.expectEqual(File.Kind.file, stat.kind); file.close(io); } @@ -1151,7 +1151,7 @@ test "renameAbsolute" { // ensure the file was renamed try testing.expectError(error.FileNotFound, tmp_dir.dir.openFile(io, test_file_name, .{})); file = try tmp_dir.dir.openFile(io, renamed_test_file_name, .{}); - const stat = try file.stat(); + const stat = try file.stat(io); try testing.expectEqual(File.Kind.file, stat.kind); file.close(io); @@ -2099,17 +2099,17 @@ test "chmod" { const file = try tmp.dir.createFile(io, "test_file", .{ .mode = 0o600 }); defer file.close(io); - try testing.expectEqual(@as(File.Mode, 0o600), (try file.stat()).mode & 0o7777); + try testing.expectEqual(@as(File.Mode, 0o600), (try file.stat(io)).mode & 0o7777); try file.chmod(0o644); - try testing.expectEqual(@as(File.Mode, 0o644), (try file.stat()).mode & 0o7777); + try testing.expectEqual(@as(File.Mode, 0o644), (try file.stat(io)).mode & 0o7777); try tmp.dir.makeDir("test_dir"); var dir = try tmp.dir.openDir(io, "test_dir", .{ .iterate = true }); defer dir.close(io); try dir.chmod(0o700); - try testing.expectEqual(@as(File.Mode, 0o700), (try dir.stat()).mode & 0o7777); + try testing.expectEqual(@as(File.Mode, 0o700), (try dir.stat(io)).mode & 0o7777); } test "chown" { diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig index 167c6ef398..3c9a313a4a 100644 --- a/lib/std/os/linux/IoUring.zig +++ b/lib/std/os/linux/IoUring.zig @@ -2655,7 +2655,7 @@ test "fallocate" { const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); - try testing.expectEqual(@as(u64, 0), (try file.stat()).size); + try testing.expectEqual(@as(u64, 0), (try file.stat(io)).size); const len: u64 = 65536; const sqe = try ring.fallocate(0xaaaaaaaa, file.handle, 0, 0, len); @@ -2681,7 +2681,7 @@ test "fallocate" { .flags = 0, }, cqe); - try testing.expectEqual(len, (try file.stat()).size); + try testing.expectEqual(len, (try file.stat(io)).size); } test "statx" { @@ -2702,7 +2702,7 @@ test "statx" { const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer file.close(io); - try testing.expectEqual(@as(u64, 0), (try file.stat()).size); + try testing.expectEqual(@as(u64, 0), (try file.stat(io)).size); try file.writeAll("foobar"); diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig index d7cfb4e138..cfafebe826 100644 --- a/lib/std/os/linux/test.zig +++ b/lib/std/os/linux/test.zig @@ -21,7 +21,7 @@ test "fallocate" { const file = try tmp.dir.createFile(io, path, .{ .truncate = true, .mode = 0o666 }); defer 
file.close(io); - try expect((try file.stat()).size == 0); + try expect((try file.stat(io)).size == 0); const len: i64 = 65536; switch (linux.errno(linux.fallocate(file.handle, 0, 0, len))) { @@ -31,7 +31,7 @@ test "fallocate" { else => |errno| std.debug.panic("unhandled errno: {}", .{errno}), } - try expect((try file.stat()).size == len); + try expect((try file.stat(io)).size == len); } test "getpid" { diff --git a/src/Compilation.zig b/src/Compilation.zig index ef01fbb07c..3a48705880 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5367,7 +5367,7 @@ fn docsCopyModule( }); }; defer file.close(io); - const stat = try file.stat(); + const stat = try file.stat(io); var file_reader: Io.File.Reader = .initSize(file, io, &buffer, stat.size); archiver.writeFileTimestamp(entry.path, &file_reader, stat.mtime) catch |err| { diff --git a/src/Zcu.zig b/src/Zcu.zig index d2634a8962..1f2fe89236 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -1080,7 +1080,7 @@ pub const File = struct { }; defer f.close(io); - const stat = f.stat() catch |err| switch (err) { + const stat = f.stat(io) catch |err| switch (err) { error.Streaming => { // Since `file.stat` is populated, this was previously a file stream; since it is // now not a file stream, it must have changed. diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 9a75b2096e..74196705c3 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -98,7 +98,7 @@ pub fn updateFile( }; defer source_file.close(io); - const stat = try source_file.stat(); + const stat = try source_file.stat(io); const want_local_cache = switch (file.path.root) { .none, .local_cache => true, @@ -2470,7 +2470,7 @@ fn updateEmbedFileInner( }; defer file.close(io); - const stat: Cache.File.Stat = .fromFs(try file.stat()); + const stat: Cache.File.Stat = .fromFs(try file.stat(io)); if (ef.val != .none) { const old_stat = ef.stat; diff --git a/src/fmt.zig b/src/fmt.zig index dc67619d54..1a1e5298e2 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -188,7 +188,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! error.IsDir => dir: { var dir = try Io.Dir.cwd().openDir(io, file_path, .{}); defer dir.close(io); - break :dir try dir.stat(); + break :dir try dir.stat(io); }, else => |e| return e, }; @@ -227,7 +227,7 @@ fn fmtPathDir( var dir = try parent_dir.openDir(io, parent_sub_path, .{ .iterate = true }); defer dir.close(io); - const stat = try dir.stat(); + const stat = try dir.stat(io); if (try fmt.seen.fetchPut(stat.inode, {})) |_| return; var dir_it = dir.iterate(); @@ -266,7 +266,7 @@ fn fmtPathFile( var file_closed = false; errdefer if (!file_closed) source_file.close(io); - const stat = try source_file.stat(); + const stat = try source_file.stat(io); if (stat.kind == .directory) return error.IsDir; diff --git a/src/link.zig b/src/link.zig index 073ec632c6..06d18ec2d5 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1112,10 +1112,10 @@ pub const File = struct { fn loadGnuLdScript(base: *File, path: Path, parent_query: UnresolvedInput.Query, file: Io.File) anyerror!void { const comp = base.comp; + const io = comp.io; const diags = &comp.link_diags; const gpa = comp.gpa; - const io = comp.io; - const stat = try file.stat(); + const stat = try file.stat(io); const size = std.math.cast(u32, stat.size) orelse return error.FileTooBig; const buf = try gpa.alloc(u8, size); defer gpa.free(buf); @@ -2180,7 +2180,7 @@ fn resolvePathInputLib( // Appears to be an ELF or archive file. 
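// ---------------------------------------------------------------------------
// Illustrative aside, not part of the patch: after this change, `File.stat`
// takes the `Io` instance explicitly, as the hunks around this point show.
// Sketch only; the sub-path is a placeholder and `io` is assumed to be
// provided by the caller.
const std = @import("std");
const Io = std.Io;

fn fileSize(io: Io, sub_path: []const u8) !u64 {
    const file = try Io.Dir.cwd().openFile(io, sub_path, .{});
    defer file.close(io);
    const st = try file.stat(io); // was `file.stat()` before this patch
    return st.size;
}
// ---------------------------------------------------------------------------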
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, pq.query); } - const stat = file.stat() catch |err| + const stat = file.stat(io) catch |err| fatal("failed to stat {f}: {s}", .{ test_path, @errorName(err) }); const size = std.math.cast(u32, stat.size) orelse fatal("{f}: linker script too big", .{test_path}); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 53812a37ec..13a624a295 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -742,7 +742,7 @@ pub fn loadInput(self: *Elf, input: link.Input) !void { .dso_exact => @panic("TODO"), .object => |obj| try parseObject(self, obj), .archive => |obj| try parseArchive(gpa, diags, &self.file_handles, &self.files, target, debug_fmt_strip, default_sym_version, &self.objects, obj, is_static_lib), - .dso => |dso| try parseDso(gpa, diags, dso, &self.shared_objects, &self.files, target), + .dso => |dso| try parseDso(gpa, io, diags, dso, &self.shared_objects, &self.files, target), } } @@ -1136,6 +1136,7 @@ fn parseArchive( fn parseDso( gpa: Allocator, + io: Io, diags: *Diags, dso: link.Input.Dso, shared_objects: *std.StringArrayHashMapUnmanaged(File.Index), @@ -1147,7 +1148,7 @@ fn parseDso( const handle = dso.file; - const stat = Stat.fromFs(try handle.stat()); + const stat = Stat.fromFs(try handle.stat(io)); var header = try SharedObject.parseHeader(gpa, diags, dso.path, handle, stat, target); defer header.deinit(gpa); diff --git a/src/link/Elf/Archive.zig b/src/link/Elf/Archive.zig index a9961bf8f9..0f786c1a47 100644 --- a/src/link/Elf/Archive.zig +++ b/src/link/Elf/Archive.zig @@ -1,3 +1,21 @@ +const Archive = @This(); + +const std = @import("std"); +const Io = std.Io; +const assert = std.debug.assert; +const elf = std.elf; +const fs = std.fs; +const log = std.log.scoped(.link); +const mem = std.mem; +const Path = std.Build.Cache.Path; +const Allocator = std.mem.Allocator; + +const Diags = @import("../../link.zig").Diags; +const Elf = @import("../Elf.zig"); +const File = @import("file.zig").File; +const Object = @import("Object.zig"); +const StringTable = @import("../StringTable.zig"); + objects: []const Object, /// '\n'-delimited strtab: []const u8, @@ -10,6 +28,7 @@ pub fn deinit(a: *Archive, gpa: Allocator) void { pub fn parse( gpa: Allocator, + io: Io, diags: *Diags, file_handles: *const std.ArrayList(File.Handle), path: Path, @@ -25,7 +44,7 @@ pub fn parse( pos += magic_buffer.len; } - const size = (try handle.stat()).size; + const size = (try handle.stat(io)).size; var objects: std.ArrayList(Object) = .empty; defer objects.deinit(gpa); @@ -120,7 +139,7 @@ pub fn setArHdr(opts: struct { @memset(mem.asBytes(&hdr), 0x20); { - var writer: std.Io.Writer = .fixed(&hdr.ar_name); + var writer: Io.Writer = .fixed(&hdr.ar_name); switch (opts.name) { .symtab => writer.print("{s}", .{elf.SYM64NAME}) catch unreachable, .strtab => writer.print("//", .{}) catch unreachable, @@ -133,7 +152,7 @@ pub fn setArHdr(opts: struct { hdr.ar_gid[0] = '0'; hdr.ar_mode[0] = '0'; { - var writer: std.Io.Writer = .fixed(&hdr.ar_size); + var writer: Io.Writer = .fixed(&hdr.ar_size); writer.print("{d}", .{opts.size}) catch unreachable; } hdr.ar_fmag = elf.ARFMAG.*; @@ -206,7 +225,7 @@ pub const ArSymtab = struct { ar: ArSymtab, elf_file: *Elf, - fn default(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void { + fn default(f: Format, writer: *Io.Writer) Io.Writer.Error!void { const ar = f.ar; const elf_file = f.elf_file; for (ar.symtab.items, 0..) 
|entry, i| { @@ -261,7 +280,7 @@ pub const ArStrtab = struct { try writer.writeAll(ar.buffer.items); } - pub fn format(ar: ArStrtab, writer: *std.Io.Writer) std.Io.Writer.Error!void { + pub fn format(ar: ArStrtab, writer: *Io.Writer) Io.Writer.Error!void { try writer.print("{f}", .{std.ascii.hexEscape(ar.buffer.items, .lower)}); } }; @@ -277,19 +296,3 @@ pub const ArState = struct { /// Total size of the contributing object (excludes ar_hdr). size: u64 = 0, }; - -const std = @import("std"); -const assert = std.debug.assert; -const elf = std.elf; -const fs = std.fs; -const log = std.log.scoped(.link); -const mem = std.mem; -const Path = std.Build.Cache.Path; -const Allocator = std.mem.Allocator; - -const Diags = @import("../../link.zig").Diags; -const Archive = @This(); -const Elf = @import("../Elf.zig"); -const File = @import("file.zig").File; -const Object = @import("Object.zig"); -const StringTable = @import("../StringTable.zig"); diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index c0dde4176a..7dacfb3a62 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -122,13 +122,14 @@ pub fn parse( pub fn parseCommon( self: *Object, gpa: Allocator, + io: Io, diags: *Diags, path: Path, handle: Io.File, target: *const std.Target, ) !void { const offset = if (self.archive) |ar| ar.offset else 0; - const file_size = (try handle.stat()).size; + const file_size = (try handle.stat(io)).size; const header_buffer = try Elf.preadAllAlloc(gpa, handle, offset, @sizeOf(elf.Elf64_Ehdr)); defer gpa.free(header_buffer); @@ -1122,9 +1123,11 @@ pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, elf_file: *Elf } pub fn updateArSize(self: *Object, elf_file: *Elf) !void { + const comp = elf_file.base.comp; + const io = comp.io; self.output_ar_state.size = if (self.archive) |ar| ar.size else size: { const handle = elf_file.fileHandle(self.file_handle); - break :size (try handle.stat()).size; + break :size (try handle.stat(io)).size; }; } diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 0f6127e10e..78e035e2ad 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -925,6 +925,7 @@ fn addObject(self: *MachO, path: Path, handle_index: File.HandleIndex, offset: u const comp = self.base.comp; const gpa = comp.gpa; + const io = comp.io; const abs_path = try std.fs.path.resolvePosix(gpa, &.{ comp.dirs.cwd, @@ -934,7 +935,7 @@ fn addObject(self: *MachO, path: Path, handle_index: File.HandleIndex, offset: u errdefer gpa.free(abs_path); const file = self.getFileHandle(handle_index); - const stat = try file.stat(); + const stat = try file.stat(io); const mtime = stat.mtime.toSeconds(); const index: File.Index = @intCast(try self.files.addOne(gpa)); self.files.set(index, .{ .object = .{ diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig index d1962412c4..122a408533 100644 --- a/src/link/MachO/Archive.zig +++ b/src/link/MachO/Archive.zig @@ -6,6 +6,7 @@ pub fn deinit(self: *Archive, allocator: Allocator) void { pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File.HandleIndex, fat_arch: ?fat.Arch) !void { const comp = macho_file.base.comp; + const io = comp.io; const gpa = comp.gpa; const diags = &comp.link_diags; @@ -14,7 +15,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File const handle = macho_file.getFileHandle(handle_index); const offset = if (fat_arch) |ar| ar.offset else 0; - const end_pos = if (fat_arch) |ar| offset + ar.size else (try handle.stat()).size; + const end_pos = if 
(fat_arch) |ar| offset + ar.size else (try handle.stat(io)).size; var pos: usize = offset + SARMAG; while (true) { diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig index 5fc77fe763..1a1799f551 100644 --- a/src/link/MachO/Object.zig +++ b/src/link/MachO/Object.zig @@ -1689,9 +1689,11 @@ pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, macho_file: *M } pub fn updateArSize(self: *Object, macho_file: *MachO) !void { + const comp = macho_file.base.comp; + const io = comp.io; self.output_ar_state.size = if (self.in_archive) |ar| ar.size else size: { const file = macho_file.getFileHandle(self.file_handle); - break :size (try file.stat()).size; + break :size (try file.stat(io)).size; }; } diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig index 0f42442640..e9f78a8ef2 100644 --- a/src/link/MachO/relocatable.zig +++ b/src/link/MachO/relocatable.zig @@ -22,7 +22,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat const path = positionals.items[0].path().?; const in_file = path.root_dir.handle.openFile(io, path.sub_path, .{}) catch |err| return diags.fail("failed to open {f}: {s}", .{ path, @errorName(err) }); - const stat = in_file.stat() catch |err| + const stat = in_file.stat(io) catch |err| return diags.fail("failed to stat {f}: {s}", .{ path, @errorName(err) }); const amt = in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size) catch |err| return diags.fail("failed to copy range of file {f}: {s}", .{ path, @errorName(err) }); diff --git a/src/link/tapi.zig b/src/link/tapi.zig index fff25b7544..046f43eae2 100644 --- a/src/link/tapi.zig +++ b/src/link/tapi.zig @@ -139,9 +139,9 @@ pub const LibStub = struct { /// Typed contents of the tbd file. inner: []Tbd, - pub fn loadFromFile(allocator: Allocator, file: Io.File) TapiError!LibStub { + pub fn loadFromFile(allocator: Allocator, io: Io, file: Io.File) TapiError!LibStub { const filesize = blk: { - const stat = file.stat() catch break :blk std.math.maxInt(u32); + const stat = file.stat(io) catch break :blk std.math.maxInt(u32); break :blk @min(stat.size, std.math.maxInt(u32)); }; const source = try allocator.alloc(u8, filesize); -- cgit v1.2.3 From 6f46570958af8ae27308eb4a9470e05f33aaa522 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 15:23:18 -0800 Subject: link.MachO: update parallel hasher to std.Io --- lib/std/Build/Cache.zig | 66 ++++++++++++++++++---------------------- lib/std/Build/WebServer.zig | 6 ++-- lib/std/Io.zig | 4 +-- lib/std/Io/File.zig | 34 +++++++++++++++++++-- lib/std/fs/test.zig | 14 ++++----- src/link/MachO/CodeSignature.zig | 17 ++++++----- src/link/MachO/hasher.zig | 24 ++++++--------- src/link/MachO/uuid.zig | 16 +++++----- 8 files changed, 100 insertions(+), 81 deletions(-) (limited to 'lib/std/Build/Cache.zig') diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 0176d71a1c..dab1926f53 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -800,7 +800,7 @@ pub const Manifest = struct { } var actual_digest: BinDigest = undefined; - hashFile(this_file, &actual_digest) catch |err| { + hashFile(io, this_file, &actual_digest) catch |err| { self.diagnostic = .{ .file_read = .{ .file_index = idx, .err = err, @@ -908,9 +908,11 @@ pub const Manifest = struct { } } - fn populateFileHashHandle(self: *Manifest, ch_file: *File, handle: Io.File) !void { + fn populateFileHashHandle(self: *Manifest, ch_file: *File, io_file: Io.File) !void { const io = self.cache.io; - const actual_stat = 
try handle.stat(io); + const gpa = self.cache.gpa; + + const actual_stat = try io_file.stat(io); ch_file.stat = .{ .size = actual_stat.size, .mtime = actual_stat.mtime, @@ -924,19 +926,17 @@ pub const Manifest = struct { } if (ch_file.max_file_size) |max_file_size| { - if (ch_file.stat.size > max_file_size) { - return error.FileTooBig; - } + if (ch_file.stat.size > max_file_size) return error.FileTooBig; - const contents = try self.cache.gpa.alloc(u8, @as(usize, @intCast(ch_file.stat.size))); - errdefer self.cache.gpa.free(contents); + // Hash while reading from disk, to keep the contents in the cpu + // cache while doing hashing. + const contents = try gpa.alloc(u8, @intCast(ch_file.stat.size)); + errdefer gpa.free(contents); - // Hash while reading from disk, to keep the contents in the cpu cache while - // doing hashing. var hasher = hasher_init; var off: usize = 0; while (true) { - const bytes_read = try handle.pread(contents[off..], off); + const bytes_read = try io_file.readPositional(io, &.{contents[off..]}, off); if (bytes_read == 0) break; hasher.update(contents[off..][0..bytes_read]); off += bytes_read; @@ -945,7 +945,7 @@ pub const Manifest = struct { ch_file.contents = contents; } else { - try hashFile(handle, &ch_file.bin_digest); + try hashFile(io, io_file, &ch_file.bin_digest); } self.hash.hasher.update(&ch_file.bin_digest); @@ -1169,13 +1169,11 @@ pub const Manifest = struct { fn downgradeToSharedLock(self: *Manifest) !void { if (!self.have_exclusive_lock) return; + const io = self.cache.io; - // WASI does not currently support flock, so we bypass it here. - // TODO: If/when flock is supported on WASI, this check should be removed. - // See https://github.com/WebAssembly/wasi-filesystem/issues/2 - if (builtin.os.tag != .wasi or std.process.can_spawn or !builtin.single_threaded) { + if (std.process.can_spawn or !builtin.single_threaded) { const manifest_file = self.manifest_file.?; - try manifest_file.downgradeLock(); + try manifest_file.downgradeLock(io); } self.have_exclusive_lock = false; @@ -1184,16 +1182,14 @@ pub const Manifest = struct { fn upgradeToExclusiveLock(self: *Manifest) error{CacheCheckFailed}!bool { if (self.have_exclusive_lock) return false; assert(self.manifest_file != null); + const io = self.cache.io; - // WASI does not currently support flock, so we bypass it here. - // TODO: If/when flock is supported on WASI, this check should be removed. - // See https://github.com/WebAssembly/wasi-filesystem/issues/2 - if (builtin.os.tag != .wasi or std.process.can_spawn or !builtin.single_threaded) { + if (std.process.can_spawn or !builtin.single_threaded) { const manifest_file = self.manifest_file.?; // Here we intentionally have a period where the lock is released, in case there are // other processes holding a shared lock. - manifest_file.unlock(); - manifest_file.lock(.exclusive) catch |err| { + manifest_file.unlock(io); + manifest_file.lock(io, .exclusive) catch |err| { self.diagnostic = .{ .manifest_lock = err }; return error.CacheCheckFailed; }; @@ -1206,12 +1202,8 @@ pub const Manifest = struct { /// The `Manifest` remains safe to deinit. /// Don't forget to call `writeManifest` before this! pub fn toOwnedLock(self: *Manifest) Lock { - const lock: Lock = .{ - .manifest_file = self.manifest_file.?, - }; - - self.manifest_file = null; - return lock; + defer self.manifest_file = null; + return .{ .manifest_file = self.manifest_file.? }; } /// Releases the manifest file and frees any memory the Manifest was using. 
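// ---------------------------------------------------------------------------
// Illustrative aside, not part of the patch: the positional-read loop that the
// cache hashing code above and below switches to. `readPositional` takes the
// `Io` instance, a slice of output buffers, and an absolute offset, returning
// 0 at end of stream. Sketch only; `io` and the open `Io.File` are assumed to
// come from the caller.
const std = @import("std");
const Io = std.Io;

fn sumBytes(io: Io, file: Io.File) !u64 {
    var buffer: [2048]u8 = undefined;
    var offset: u64 = 0;
    var sum: u64 = 0;
    while (true) {
        const n = try file.readPositional(io, &.{&buffer}, offset);
        if (n == 0) break; // end of stream
        // Process the bytes while they are still hot in cache, mirroring the
        // hash-while-reading approach in `populateFileHashHandle` above.
        for (buffer[0..n]) |byte| sum +%= byte;
        offset += n;
    }
    return sum;
}
// ---------------------------------------------------------------------------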
@@ -1223,7 +1215,7 @@ pub const Manifest = struct { if (self.manifest_file) |file| { if (builtin.os.tag == .windows) { // See Lock.release for why this is required on Windows - file.unlock(); + file.unlock(io); } file.close(io); @@ -1308,15 +1300,15 @@ pub fn writeSmallFile(dir: Io.Dir, sub_path: []const u8, data: []const u8) !void } } -fn hashFile(file: Io.File, bin_digest: *[Hasher.mac_length]u8) Io.File.PReadError!void { - var buf: [1024]u8 = undefined; +fn hashFile(io: Io, file: Io.File, bin_digest: *[Hasher.mac_length]u8) Io.File.ReadPositionalError!void { + var buffer: [2048]u8 = undefined; var hasher = hasher_init; - var off: u64 = 0; + var offset: u64 = 0; while (true) { - const bytes_read = try file.pread(&buf, off); - if (bytes_read == 0) break; - hasher.update(buf[0..bytes_read]); - off += bytes_read; + const n = try file.readPositional(io, &.{&buffer}, offset); + if (n == 0) break; + hasher.update(buffer[0..n]); + offset += n; } hasher.final(bin_digest); } diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 472e87b05a..162d17f070 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -218,9 +218,9 @@ pub fn finishBuild(ws: *WebServer, opts: struct { else => {}, } if (@bitSizeOf(usize) != 64) { - // Current implementation depends on posix.mmap()'s second parameter, `length: usize`, - // being compatible with `std.fs.getEndPos() u64`'s return value. This is not the case - // on 32-bit platforms. + // Current implementation depends on posix.mmap()'s second + // parameter, `length: usize`, being compatible with file system's + // u64 return value. This is not the case on 32-bit platforms. // Affects or affected by issues #5185, #22523, and #22464. std.process.fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)}); } diff --git a/lib/std/Io.zig b/lib/std/Io.zig index 17fb75fe54..9d6dcef615 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -692,9 +692,9 @@ pub const VTable = struct { fileWriteFileStreaming: *const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit) File.Writer.WriteFileError!usize, fileWriteFilePositional: *const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit, offset: u64) File.WriteFilePositionalError!usize, /// Returns 0 on end of stream. - fileReadStreaming: *const fn (?*anyopaque, File, data: [][]u8) File.Reader.Error!usize, + fileReadStreaming: *const fn (?*anyopaque, File, data: []const []u8) File.Reader.Error!usize, /// Returns 0 on end of stream. - fileReadPositional: *const fn (?*anyopaque, File, data: [][]u8, offset: u64) File.ReadPositionalError!usize, + fileReadPositional: *const fn (?*anyopaque, File, data: []const []u8, offset: u64) File.ReadPositionalError!usize, fileSeekBy: *const fn (?*anyopaque, File, relative_offset: i64) File.SeekError!void, fileSeekTo: *const fn (?*anyopaque, File, absolute_offset: u64) File.SeekError!void, fileSync: *const fn (?*anyopaque, File) File.SyncError!void, diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index a9b4775772..5e89025478 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -466,13 +466,21 @@ pub fn setTimestampsNow(file: File, io: Io) SetTimestampsError!void { pub const ReadPositionalError = Reader.Error || error{Unseekable}; -pub fn readPositional(file: File, io: Io, buffer: [][]u8, offset: u64) ReadPositionalError!usize { +/// Returns 0 on end of stream. 
+/// +/// See also: +/// * `reader` +pub fn readPositional(file: File, io: Io, buffer: []const []u8, offset: u64) ReadPositionalError!usize { return io.vtable.fileReadPositional(io.userdata, file, buffer, offset); } pub const WritePositionalError = Writer.Error || error{Unseekable}; -pub fn writePositional(file: File, io: Io, buffer: [][]const u8, offset: u64) WritePositionalError!usize { +/// Returns 0 on end of stream. +/// +/// See also: +/// * `writer` +pub fn writePositional(file: File, io: Io, buffer: []const []const u8, offset: u64) WritePositionalError!usize { return io.vtable.fileWritePositional(io.userdata, file, buffer, offset); } @@ -501,13 +509,35 @@ pub const WriteFilePositionalError = Writer.WriteFileError || error{Unseekable}; /// /// Positional is more threadsafe, since the global seek position is not /// affected. +/// +/// See also: +/// * `readerStreaming` pub fn reader(file: File, io: Io, buffer: []u8) Reader { return .init(file, io, buffer); } +/// Equivalent to creating a positional reader and reading multiple times to fill `buffer`. +/// +/// Returns number of bytes read into `buffer`. If less than `buffer.len`, end of file occurred. +/// +/// See also: +/// * `reader` +pub fn readPositionalAll(file: File, io: Io, buffer: []u8, offset: u64) ReadPositionalError!usize { + var index: usize = 0; + while (index != buffer.len) { + const amt = try file.readPositional(io, &.{buffer[index..]}, offset + index); + if (amt == 0) break; + index += amt; + } + return index; +} + /// Positional is more threadsafe, since the global seek position is not /// affected, but when such syscalls are not available, preemptively /// initializing in streaming mode skips a failed syscall. +/// +/// See also: +/// * `reader` pub fn readerStreaming(file: File, io: Io, buffer: []u8) Reader { return .initStreaming(file, io, buffer); } diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 7b12ba4271..e044a97620 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -1455,7 +1455,7 @@ test "writev, readv" { try writer.interface.writeVecAll(&write_vecs); try writer.interface.flush(); - try testing.expectEqual(@as(u64, line1.len + line2.len), try src_file.getEndPos()); + try testing.expectEqual(@as(u64, line1.len + line2.len), try src_file.length(io)); var reader = writer.moveToReader(io); try reader.seekTo(0); @@ -1486,7 +1486,7 @@ test "pwritev, preadv" { try writer.seekTo(16); try writer.interface.writeVecAll(&lines); try writer.interface.flush(); - try testing.expectEqual(@as(u64, 16 + line1.len + line2.len), try src_file.getEndPos()); + try testing.expectEqual(@as(u64, 16 + line1.len + line2.len), try src_file.length(io)); var reader = writer.moveToReader(io); try reader.seekTo(16); @@ -1511,13 +1511,13 @@ test "setEndPos" { const f = try tmp.dir.openFile(io, file_name, .{ .mode = .read_write }); defer f.close(io); - const initial_size = try f.getEndPos(); + const initial_size = try f.length(io); var buffer: [32]u8 = undefined; var reader = f.reader(io, &.{}); { try f.setEndPos(initial_size); - try testing.expectEqual(initial_size, try f.getEndPos()); + try testing.expectEqual(initial_size, try f.length(io)); try reader.seekTo(0); try testing.expectEqual(initial_size, try reader.interface.readSliceShort(&buffer)); try testing.expectEqualStrings("ninebytes", buffer[0..@intCast(initial_size)]); @@ -1526,7 +1526,7 @@ test "setEndPos" { { const larger = initial_size + 4; try f.setEndPos(larger); - try testing.expectEqual(larger, try f.getEndPos()); + try testing.expectEqual(larger, try 
f.length(io)); try reader.seekTo(0); try testing.expectEqual(larger, try reader.interface.readSliceShort(&buffer)); try testing.expectEqualStrings("ninebytes\x00\x00\x00\x00", buffer[0..@intCast(larger)]); @@ -1535,14 +1535,14 @@ test "setEndPos" { { const smaller = initial_size - 5; try f.setEndPos(smaller); - try testing.expectEqual(smaller, try f.getEndPos()); + try testing.expectEqual(smaller, try f.length(io)); try reader.seekTo(0); try testing.expectEqual(smaller, try reader.interface.readSliceShort(&buffer)); try testing.expectEqualStrings("nine", buffer[0..@intCast(smaller)]); } try f.setEndPos(0); - try testing.expectEqual(0, try f.getEndPos()); + try testing.expectEqual(0, try f.length(io)); try reader.seekTo(0); try testing.expectEqual(0, try reader.interface.readSliceShort(&buffer)); } diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 814faf234a..ec516d4af0 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -12,7 +12,7 @@ const Sha256 = std.crypto.hash.sha2.Sha256; const Allocator = std.mem.Allocator; const trace = @import("../../tracy.zig").trace; -const Hasher = @import("hasher.zig").ParallelHasher; +const ParallelHasher = @import("hasher.zig").ParallelHasher; const MachO = @import("../MachO.zig"); const hash_size = Sha256.digest_length; @@ -268,7 +268,9 @@ pub fn writeAdhocSignature( const tracy = trace(@src()); defer tracy.end(); - const allocator = macho_file.base.comp.gpa; + const comp = macho_file.base.comp; + const gpa = comp.gpa; + const io = comp.io; var header: macho.SuperBlob = .{ .magic = macho.CSMAGIC_EMBEDDED_SIGNATURE, @@ -276,7 +278,7 @@ pub fn writeAdhocSignature( .count = 0, }; - var blobs = std.array_list.Managed(Blob).init(allocator); + var blobs = std.array_list.Managed(Blob).init(gpa); defer blobs.deinit(); self.code_directory.inner.execSegBase = opts.exec_seg_base; @@ -286,13 +288,12 @@ pub fn writeAdhocSignature( const total_pages = @as(u32, @intCast(mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size)); - try self.code_directory.code_slots.ensureTotalCapacityPrecise(allocator, total_pages); + try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages); self.code_directory.code_slots.items.len = total_pages; self.code_directory.inner.nCodeSlots = total_pages; // Calculate hash for each page (in file) and write it to the buffer - var hasher = Hasher(Sha256){ .allocator = allocator, .io = macho_file.base.comp.io }; - try hasher.hash(opts.file, self.code_directory.code_slots.items, .{ + try ParallelHasher(Sha256).hash(gpa, io, opts.file, self.code_directory.code_slots.items, .{ .chunk_size = self.page_size, .max_file_size = opts.file_size, }); @@ -304,7 +305,7 @@ pub fn writeAdhocSignature( var hash: [hash_size]u8 = undefined; if (self.requirements) |*req| { - var a: std.Io.Writer.Allocating = .init(allocator); + var a: std.Io.Writer.Allocating = .init(gpa); defer a.deinit(); try req.write(&a.writer); Sha256.hash(a.written(), &hash, .{}); @@ -316,7 +317,7 @@ pub fn writeAdhocSignature( } if (self.entitlements) |*ents| { - var a: std.Io.Writer.Allocating = .init(allocator); + var a: std.Io.Writer.Allocating = .init(gpa); defer a.deinit(); try ents.write(&a.writer); Sha256.hash(a.written(), &hash, .{}); diff --git a/src/link/MachO/hasher.zig b/src/link/MachO/hasher.zig index 8cf53071c8..2e0129d240 100644 --- a/src/link/MachO/hasher.zig +++ b/src/link/MachO/hasher.zig @@ -1,5 +1,6 @@ const std = @import("std"); const Io = std.Io; +const assert = 
std.debug.assert; const Allocator = std.mem.Allocator; const trace = @import("../../tracy.zig").trace; @@ -8,20 +9,15 @@ pub fn ParallelHasher(comptime Hasher: type) type { const hash_size = Hasher.digest_length; return struct { - allocator: Allocator, - io: std.Io, - - pub fn hash(self: Self, file: Io.File, out: [][hash_size]u8, opts: struct { + pub fn hash(self: Self, io: Io, file: Io.File, out: [][hash_size]u8, opts: struct { chunk_size: u64 = 0x4000, max_file_size: ?u64 = null, }) !void { const tracy = trace(@src()); defer tracy.end(); - const io = self.io; - const file_size = blk: { - const file_size = opts.max_file_size orelse try file.getEndPos(); + const file_size = opts.max_file_size orelse try file.length(io); break :blk std.math.cast(usize, file_size) orelse return error.Overflow; }; const chunk_size = std.math.cast(usize, opts.chunk_size) orelse return error.Overflow; @@ -29,12 +25,12 @@ pub fn ParallelHasher(comptime Hasher: type) type { const buffer = try self.allocator.alloc(u8, chunk_size * out.len); defer self.allocator.free(buffer); - const results = try self.allocator.alloc(Io.File.PReadError!usize, out.len); + const results = try self.allocator.alloc(Io.File.ReadPositionalError!usize, out.len); defer self.allocator.free(results); { - var group: std.Io.Group = .init; - errdefer group.cancel(io); + var group: Io.Group = .init; + defer group.cancel(io); for (out, results, 0..) |*out_buf, *result, i| { const fstart = i * chunk_size; @@ -42,7 +38,7 @@ pub fn ParallelHasher(comptime Hasher: type) type { file_size - fstart else chunk_size; - group.async(io, worker, .{ + group.async(worker, .{ file, fstart, buffer[fstart..][0..fsize], @@ -61,11 +57,9 @@ pub fn ParallelHasher(comptime Hasher: type) type { fstart: usize, buffer: []u8, out: *[hash_size]u8, - err: *Io.File.PReadError!usize, + err: *Io.File.ReadPositionalError!usize, ) void { - const tracy = trace(@src()); - defer tracy.end(); - err.* = file.preadAll(buffer, fstart); + err.* = file.readPositionalAll(buffer, fstart); Hasher.hash(buffer, out, .{}); } diff --git a/src/link/MachO/uuid.zig b/src/link/MachO/uuid.zig index 4d8eac7523..a75799d01e 100644 --- a/src/link/MachO/uuid.zig +++ b/src/link/MachO/uuid.zig @@ -4,7 +4,7 @@ const Md5 = std.crypto.hash.Md5; const trace = @import("../../tracy.zig").trace; const Compilation = @import("../../Compilation.zig"); -const Hasher = @import("hasher.zig").ParallelHasher; +const ParallelHasher = @import("hasher.zig").ParallelHasher; /// Calculates Md5 hash of each chunk in parallel and then hashes all Md5 hashes to produce /// the final digest. 
@@ -16,21 +16,23 @@ pub fn calcUuid(comp: *const Compilation, file: Io.File, file_size: u64, out: *[ const tracy = trace(@src()); defer tracy.end(); + const gpa = comp.gpa; + const io = comp.io; + const chunk_size: usize = 1024 * 1024; const num_chunks: usize = std.math.cast(usize, @divTrunc(file_size, chunk_size)) orelse return error.Overflow; const actual_num_chunks = if (@rem(file_size, chunk_size) > 0) num_chunks + 1 else num_chunks; - const hashes = try comp.gpa.alloc([Md5.digest_length]u8, actual_num_chunks); - defer comp.gpa.free(hashes); + const hashes = try gpa.alloc([Md5.digest_length]u8, actual_num_chunks); + defer gpa.free(hashes); - var hasher = Hasher(Md5){ .allocator = comp.gpa, .io = comp.io }; - try hasher.hash(file, hashes, .{ + try ParallelHasher(Md5).hash(gpa, io, file, hashes, .{ .chunk_size = chunk_size, .max_file_size = file_size, }); - const final_buffer = try comp.gpa.alloc(u8, actual_num_chunks * Md5.digest_length); - defer comp.gpa.free(final_buffer); + const final_buffer = try gpa.alloc(u8, actual_num_chunks * Md5.digest_length); + defer gpa.free(final_buffer); for (hashes, 0..) |hash, i| { @memcpy(final_buffer[i * Md5.digest_length ..][0..Md5.digest_length], &hash); -- cgit v1.2.3 From 4218344dd3178f2fd3d9d00e9ff6895ee344df6d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 17:21:52 -0800 Subject: std.Build.Cache: remove readSmallFile and writeSmallFile These were to support optimizations involving detecting when to avoid calling into LLD, which are no longer implemented. --- lib/compiler/build_runner.zig | 2 +- lib/compiler/reduce.zig | 12 +++++--- lib/compiler/resinator/main.zig | 2 +- lib/compiler/std-docs.zig | 2 +- lib/std/Build/Cache.zig | 36 ++++-------------------- lib/std/Build/Step/ConfigHeader.zig | 2 +- lib/std/Build/Step/Run.zig | 2 +- lib/std/Build/Step/UpdateSourceFiles.zig | 2 +- lib/std/Build/Step/WriteFile.zig | 2 +- lib/std/Build/WebServer.zig | 2 +- lib/std/debug.zig | 4 +-- lib/std/fs/test.zig | 48 +++++++++++++++++++------------- lib/std/posix/test.zig | 2 +- src/Compilation.zig | 4 +-- src/libs/freebsd.zig | 4 +-- src/libs/glibc.zig | 4 +-- src/libs/netbsd.zig | 2 +- src/main.zig | 6 ++-- 18 files changed, 63 insertions(+), 75 deletions(-) (limited to 'lib/std/Build/Cache.zig') diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 36c73e96eb..9150d84470 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -459,7 +459,7 @@ pub fn main() !void { } const s = std.fs.path.sep_str; const tmp_sub_path = "tmp" ++ s ++ (output_tmp_nonce orelse fatal("missing -Z arg", .{})); - local_cache_directory.handle.writeFile(.{ + local_cache_directory.handle.writeFile(io, .{ .sub_path = tmp_sub_path, .data = buffer.items, .flags = .{ .exclusive = true }, diff --git a/lib/compiler/reduce.zig b/lib/compiler/reduce.zig index d3f33ad81a..0bfa1902ab 100644 --- a/lib/compiler/reduce.zig +++ b/lib/compiler/reduce.zig @@ -55,6 +55,10 @@ pub fn main() !void { var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init; const gpa = general_purpose_allocator.allocator(); + var threaded: std.Io.Threaded = .init(gpa); + defer threaded.deinit(); + const io = threaded.io(); + const args = try std.process.argsAlloc(arena); var opt_checker_path: ?[]const u8 = null; @@ -233,12 +237,12 @@ pub fn main() !void { } } - try Io.Dir.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); + try Io.Dir.cwd().writeFile(io, .{ .sub_path = root_source_file_path, .data = 
rendered.written() }); // std.debug.print("trying this code:\n{s}\n", .{rendered.items}); const interestingness = try runCheck(arena, interestingness_argv.items); - std.debug.print("{d} random transformations: {s}. {d}/{d}\n", .{ - subset_size, @tagName(interestingness), start_index, transformations.items.len, + std.debug.print("{d} random transformations: {t}. {d}/{d}\n", .{ + subset_size, interestingness, start_index, transformations.items.len, }); switch (interestingness) { .interesting => { @@ -274,7 +278,7 @@ pub fn main() !void { fixups.clearRetainingCapacity(); rendered.clearRetainingCapacity(); try tree.render(gpa, &rendered.writer, fixups); - try Io.Dir.cwd().writeFile(.{ .sub_path = root_source_file_path, .data = rendered.written() }); + try Io.Dir.cwd().writeFile(io, .{ .sub_path = root_source_file_path, .data = rendered.written() }); return std.process.cleanExit(); } diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index 1a6ef5eea3..afe1dcbe91 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -212,7 +212,7 @@ pub fn main() !void { try output_file.writeAll(full_input); }, .filename => |output_filename| { - try Io.Dir.cwd().writeFile(.{ .sub_path = output_filename, .data = full_input }); + try Io.Dir.cwd().writeFile(io, .{ .sub_path = output_filename, .data = full_input }); }, } return; diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index f1382e6eae..8f02f05958 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -233,7 +233,7 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void { .interface = std.Io.File.Reader.initInterface(&.{}), .size = stat.size, }; - try archiver.writeFile(entry.path, &file_reader, stat.mtime); + try archiver.writeFile(io, entry.path, &file_reader, stat.mtime); } { diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index dab1926f53..f7c4d729bc 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1276,30 +1276,6 @@ pub const Manifest = struct { } }; -/// On operating systems that support symlinks, does a readlink. On other operating systems, -/// uses the file contents. Windows supports symlinks but only with elevated privileges, so -/// it is treated as not supporting symlinks. -pub fn readSmallFile(dir: Io.Dir, sub_path: []const u8, buffer: []u8) ![]u8 { - if (builtin.os.tag == .windows) { - return dir.readFile(sub_path, buffer); - } else { - return dir.readLink(sub_path, buffer); - } -} - -/// On operating systems that support symlinks, does a symlink. On other operating systems, -/// uses the file contents. Windows supports symlinks but only with elevated privileges, so -/// it is treated as not supporting symlinks. -/// `data` must be a valid UTF-8 encoded file path and 255 bytes or fewer. 
-pub fn writeSmallFile(dir: Io.Dir, sub_path: []const u8, data: []const u8) !void { - assert(data.len <= 255); - if (builtin.os.tag == .windows) { - return dir.writeFile(.{ .sub_path = sub_path, .data = data }); - } else { - return dir.symLink(data, sub_path, .{}); - } -} - fn hashFile(io: Io, file: Io.File, bin_digest: *[Hasher.mac_length]u8) Io.File.ReadPositionalError!void { var buffer: [2048]u8 = undefined; var hasher = hasher_init; @@ -1338,7 +1314,7 @@ test "cache file and then recall it" { const temp_file = "test.txt"; const temp_manifest_dir = "temp_manifest_dir"; - try tmp.dir.writeFile(.{ .sub_path = temp_file, .data = "Hello, world!\n" }); + try tmp.dir.writeFile(io, .{ .sub_path = temp_file, .data = "Hello, world!\n" }); // Wait for file timestamps to tick const initial_time = try testGetCurrentFileTimestamp(io, tmp.dir); @@ -1404,7 +1380,7 @@ test "check that changing a file makes cache fail" { const original_temp_file_contents = "Hello, world!\n"; const updated_temp_file_contents = "Hello, world; but updated!\n"; - try tmp.dir.writeFile(.{ .sub_path = temp_file, .data = original_temp_file_contents }); + try tmp.dir.writeFile(io, .{ .sub_path = temp_file, .data = original_temp_file_contents }); // Wait for file timestamps to tick const initial_time = try testGetCurrentFileTimestamp(tmp.dir); @@ -1441,7 +1417,7 @@ test "check that changing a file makes cache fail" { try ch.writeManifest(); } - try tmp.dir.writeFile(.{ .sub_path = temp_file, .data = updated_temp_file_contents }); + try tmp.dir.writeFile(io, .{ .sub_path = temp_file, .data = updated_temp_file_contents }); { var ch = cache.obtain(); @@ -1521,8 +1497,8 @@ test "Manifest with files added after initial hash work" { const temp_file2 = "cache_hash_post_file_test2.txt"; const temp_manifest_dir = "cache_hash_post_file_manifest_dir"; - try tmp.dir.writeFile(.{ .sub_path = temp_file1, .data = "Hello, world!\n" }); - try tmp.dir.writeFile(.{ .sub_path = temp_file2, .data = "Hello world the second!\n" }); + try tmp.dir.writeFile(io, .{ .sub_path = temp_file1, .data = "Hello, world!\n" }); + try tmp.dir.writeFile(io, .{ .sub_path = temp_file2, .data = "Hello world the second!\n" }); // Wait for file timestamps to tick const initial_time = try testGetCurrentFileTimestamp(tmp.dir); @@ -1573,7 +1549,7 @@ test "Manifest with files added after initial hash work" { try testing.expect(mem.eql(u8, &digest1, &digest2)); // Modify the file added after initial hash - try tmp.dir.writeFile(.{ .sub_path = temp_file2, .data = "Hello world the second, updated\n" }); + try tmp.dir.writeFile(io, .{ .sub_path = temp_file2, .data = "Hello world the second, updated\n" }); // Wait for file timestamps to tick const initial_time2 = try testGetCurrentFileTimestamp(tmp.dir); diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index f377959610..589110d4c4 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -264,7 +264,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { }); }; - b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = output }) catch |err| { + b.cache_root.handle.writeFile(io, .{ .sub_path = sub_path, .data = output }) catch |err| { return step.fail("unable to write file '{f}{s}': {s}", .{ b.cache_root, sub_path, @errorName(err), }); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index f6b29635c1..a1618beb02 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1482,7 +1482,7 @@ fn runCommand( 
.leading => mem.trimStart(u8, stream.bytes.?, &std.ascii.whitespace), .trailing => mem.trimEnd(u8, stream.bytes.?, &std.ascii.whitespace), }; - b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = data }) catch |err| { + b.cache_root.handle.writeFile(io, .{ .sub_path = sub_path, .data = data }) catch |err| { return step.fail("unable to write file '{f}{s}': {s}", .{ b.cache_root, sub_path, @errorName(err), }); diff --git a/lib/std/Build/Step/UpdateSourceFiles.zig b/lib/std/Build/Step/UpdateSourceFiles.zig index 44c6ae1ed4..eb8a6a85dd 100644 --- a/lib/std/Build/Step/UpdateSourceFiles.zig +++ b/lib/std/Build/Step/UpdateSourceFiles.zig @@ -84,7 +84,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { } switch (output_source_file.contents) { .bytes => |bytes| { - b.build_root.handle.writeFile(.{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| { + b.build_root.handle.writeFile(io, .{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| { return step.fail("unable to write file '{f}{s}': {t}", .{ b.build_root, output_source_file.sub_path, err, }); diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 94b04b4212..3d712fa1d4 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -273,7 +273,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { } switch (file.contents) { .bytes => |bytes| { - cache_dir.writeFile(.{ .sub_path = file.sub_path, .data = bytes }) catch |err| { + cache_dir.writeFile(io, .{ .sub_path = file.sub_path, .data = bytes }) catch |err| { return step.fail("unable to write file '{f}{s}{c}{s}': {t}", .{ b.cache_root, cache_path, fs.path.sep, file.sub_path, err, }); diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 162d17f070..ed07d04d57 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -523,7 +523,7 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons if (cached_cwd_path == null) cached_cwd_path = try std.process.getCwdAlloc(gpa); break :cwd cached_cwd_path.?; }; - try archiver.writeFile(path.sub_path, &file_reader, @intCast(stat.mtime.toSeconds())); + try archiver.writeFile(io, path.sub_path, &file_reader, @intCast(stat.mtime.toSeconds())); } // intentionally not calling `archiver.finishPedantically` diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 39207f938d..cb79bb7855 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -1243,7 +1243,7 @@ test printLineFromFile { { const path = try join(gpa, &.{ test_dir_path, "one_line.zig" }); defer gpa.free(path); - try test_dir.dir.writeFile(.{ .sub_path = "one_line.zig", .data = "no new lines in this file, but one is printed anyway" }); + try test_dir.dir.writeFile(io, .{ .sub_path = "one_line.zig", .data = "no new lines in this file, but one is printed anyway" }); try expectError(error.EndOfFile, printLineFromFile(io, output_stream, .{ .file_name = path, .line = 2, .column = 0 })); @@ -1254,7 +1254,7 @@ test printLineFromFile { { const path = try fs.path.join(gpa, &.{ test_dir_path, "three_lines.zig" }); defer gpa.free(path); - try test_dir.dir.writeFile(.{ + try test_dir.dir.writeFile(io, .{ .sub_path = "three_lines.zig", .data = \\1 diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 59bacff2d0..59e0990eb0 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -184,7 +184,7 @@ test "Dir.readLink" { fn impl(ctx: *TestContext) !void { // Create some targets const file_target_path = try 
ctx.transformPath("file.txt"); - try ctx.dir.writeFile(.{ .sub_path = file_target_path, .data = "nonsense" }); + try ctx.dir.writeFile(io, .{ .sub_path = file_target_path, .data = "nonsense" }); const dir_target_path = try ctx.transformPath("subdir"); try ctx.dir.makeDir(dir_target_path); @@ -487,11 +487,13 @@ test "readLinkAbsolute" { if (native_os == .wasi) return error.SkipZigTest; if (native_os == .openbsd) return error.SkipZigTest; + const io = testing.io; + var tmp = tmpDir(.{}); defer tmp.cleanup(); // Create some targets - try tmp.dir.writeFile(.{ .sub_path = "file.txt", .data = "nonsense" }); + try tmp.dir.writeFile(io, .{ .sub_path = "file.txt", .data = "nonsense" }); try tmp.dir.makeDir("subdir"); // Get base abs path @@ -708,6 +710,7 @@ test "Dir.realpath smoke test" { try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { + const io = ctx.io; const allocator = ctx.arena.allocator(); const test_file_path = try ctx.transformPath("test_file"); const test_dir_path = try ctx.transformPath("test_dir"); @@ -720,7 +723,7 @@ test "Dir.realpath smoke test" { try testing.expectError(error.FileNotFound, ctx.dir.realpath(test_dir_path, &buf)); // Now create the file and dir - try ctx.dir.writeFile(.{ .sub_path = test_file_path, .data = "" }); + try ctx.dir.writeFile(io, .{ .sub_path = test_file_path, .data = "" }); try ctx.dir.makeDir(test_dir_path); const base_path = try ctx.transformPath("."); @@ -803,11 +806,12 @@ test "readFileAlloc" { test "Dir.statFile" { try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { + const io = ctx.io; const test_file_name = try ctx.transformPath("test_file"); try testing.expectError(error.FileNotFound, ctx.dir.statFile(test_file_name)); - try ctx.dir.writeFile(.{ .sub_path = test_file_name, .data = "" }); + try ctx.dir.writeFile(io, .{ .sub_path = test_file_name, .data = "" }); const stat = try ctx.dir.statFile(test_file_name); try testing.expectEqual(File.Kind.file, stat.kind); @@ -925,6 +929,7 @@ test "makeOpenPath parent dirs do not exist" { test "deleteDir" { try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { + const io = ctx.io; const test_dir_path = try ctx.transformPath("test_dir"); const test_file_path = try ctx.transformPath("test_dir" ++ fs.path.sep_str ++ "test_file"); @@ -933,7 +938,7 @@ test "deleteDir" { // deleting a non-empty directory try ctx.dir.makeDir(test_dir_path); - try ctx.dir.writeFile(.{ .sub_path = test_file_path, .data = "" }); + try ctx.dir.writeFile(io, .{ .sub_path = test_file_path, .data = "" }); try testing.expectError(error.DirNotEmpty, ctx.dir.deleteDir(test_dir_path)); // deleting an empty directory @@ -1217,7 +1222,7 @@ test "deleteTree on a symlink" { defer tmp.cleanup(); // Symlink to a file - try tmp.dir.writeFile(.{ .sub_path = "file", .data = "" }); + try tmp.dir.writeFile(io, .{ .sub_path = "file", .data = "" }); try setupSymlink(tmp.dir, "file", "filelink", .{}); try tmp.dir.deleteTree("filelink"); @@ -1241,11 +1246,11 @@ test "makePath, put some files in it, deleteTree" { const dir_path = try ctx.transformPath("os_test_tmp"); try ctx.dir.makePath(io, try fs.path.join(allocator, &.{ "os_test_tmp", "b", "c" })); - try ctx.dir.writeFile(.{ + try ctx.dir.writeFile(io, .{ .sub_path = try fs.path.join(allocator, &.{ "os_test_tmp", "b", "c", "file.txt" }), .data = "nonsense", }); - try ctx.dir.writeFile(.{ + try ctx.dir.writeFile(io, .{ .sub_path = try fs.path.join(allocator, &.{ "os_test_tmp", "b", "file2.txt" }), .data = "blah", }); @@ -1264,11 
+1269,11 @@ test "makePath, put some files in it, deleteTreeMinStackSize" { const dir_path = try ctx.transformPath("os_test_tmp"); try ctx.dir.makePath(io, try fs.path.join(allocator, &.{ "os_test_tmp", "b", "c" })); - try ctx.dir.writeFile(.{ + try ctx.dir.writeFile(io, .{ .sub_path = try fs.path.join(allocator, &.{ "os_test_tmp", "b", "c", "file.txt" }), .data = "nonsense", }); - try ctx.dir.writeFile(.{ + try ctx.dir.writeFile(io, .{ .sub_path = try fs.path.join(allocator, &.{ "os_test_tmp", "b", "file2.txt" }), .data = "blah", }); @@ -1298,7 +1303,7 @@ test "makePath but sub_path contains pre-existing file" { defer tmp.cleanup(); try tmp.dir.makeDir("foo"); - try tmp.dir.writeFile(.{ .sub_path = "foo/bar", .data = "" }); + try tmp.dir.writeFile(io, .{ .sub_path = "foo/bar", .data = "" }); try testing.expectError(error.NotDir, tmp.dir.makePath(io, "foo/bar/baz")); } @@ -1400,7 +1405,7 @@ fn testFilenameLimits(io: Io, iterable_dir: Dir, maxed_filename: []const u8) !vo var maxed_dir = try iterable_dir.makeOpenPath(maxed_filename, .{}); defer maxed_dir.close(io); - try maxed_dir.writeFile(.{ .sub_path = maxed_filename, .data = "" }); + try maxed_dir.writeFile(io, .{ .sub_path = maxed_filename, .data = "" }); var walker = try iterable_dir.walk(testing.allocator); defer walker.deinit(); @@ -1513,7 +1518,7 @@ test "setEndPos" { defer tmp.cleanup(); const file_name = "afile.txt"; - try tmp.dir.writeFile(.{ .sub_path = file_name, .data = "ninebytes" }); + try tmp.dir.writeFile(io, .{ .sub_path = file_name, .data = "ninebytes" }); const f = try tmp.dir.openFile(io, file_name, .{ .mode = .read_write }); defer f.close(io); @@ -1563,7 +1568,7 @@ test "access file" { try ctx.dir.makePath(io, dir_path); try testing.expectError(error.FileNotFound, ctx.dir.access(io, file_path, .{})); - try ctx.dir.writeFile(.{ .sub_path = file_path, .data = "" }); + try ctx.dir.writeFile(io, .{ .sub_path = file_path, .data = "" }); try ctx.dir.access(io, file_path, .{}); try ctx.dir.deleteTree(dir_path); } @@ -1659,12 +1664,13 @@ test "sendfile with buffered data" { test "copyFile" { try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { + const io = ctx.io; const data = "u6wj+JmdF3qHsFPE BUlH2g4gJCmEz0PP"; const src_file = try ctx.transformPath("tmp_test_copy_file.txt"); const dest_file = try ctx.transformPath("tmp_test_copy_file2.txt"); const dest_file2 = try ctx.transformPath("tmp_test_copy_file3.txt"); - try ctx.dir.writeFile(.{ .sub_path = src_file, .data = data }); + try ctx.dir.writeFile(io, .{ .sub_path = src_file, .data = data }); defer ctx.dir.deleteFile(src_file) catch {}; try ctx.dir.copyFile(src_file, ctx.dir, dest_file, .{}); @@ -2050,7 +2056,7 @@ test "'.' and '..' 
in Io.Dir functions" { renamed_file.close(io); try ctx.dir.deleteFile(rename_path); - try ctx.dir.writeFile(.{ .sub_path = update_path, .data = "something" }); + try ctx.dir.writeFile(io, .{ .sub_path = update_path, .data = "something" }); var dir = ctx.dir; const prev_status = try dir.updateFile(io, file_path, dir, update_path, .{}); try testing.expectEqual(Io.Dir.PrevStatus.stale, prev_status); @@ -2187,7 +2193,7 @@ test "invalid UTF-8/WTF-8 paths" { try testing.expectError(expected_err, ctx.dir.deleteTree(invalid_path)); try testing.expectError(expected_err, ctx.dir.deleteTreeMinStackSize(invalid_path)); - try testing.expectError(expected_err, ctx.dir.writeFile(.{ .sub_path = invalid_path, .data = "" })); + try testing.expectError(expected_err, ctx.dir.writeFile(io, .{ .sub_path = invalid_path, .data = "" })); try testing.expectError(expected_err, ctx.dir.access(invalid_path, .{})); @@ -2304,7 +2310,7 @@ test "seekBy" { var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); - try tmp_dir.dir.writeFile(.{ .sub_path = "blah.txt", .data = "let's test seekBy" }); + try tmp_dir.dir.writeFile(io, .{ .sub_path = "blah.txt", .data = "let's test seekBy" }); const f = try tmp_dir.dir.openFile(io, "blah.txt", .{ .mode = .read_only }); defer f.close(io); var reader = f.readerStreaming(io, &.{}); @@ -2350,7 +2356,7 @@ test "File.Writer sendfile with buffered contents" { defer tmp_dir.cleanup(); { - try tmp_dir.dir.writeFile(.{ .sub_path = "a", .data = "bcd" }); + try tmp_dir.dir.writeFile(io, .{ .sub_path = "a", .data = "bcd" }); const in = try tmp_dir.dir.openFile(io, "a", .{}); defer in.close(io); const out = try tmp_dir.dir.createFile(io, "b", .{}); @@ -2391,11 +2397,13 @@ fn testReadlink(target_path: []const u8, symlink_path: []const u8) !void { } test "readlinkat" { + const io = testing.io; + var tmp = tmpDir(.{}); defer tmp.cleanup(); // create file - try tmp.dir.writeFile(.{ .sub_path = "file.txt", .data = "nonsense" }); + try tmp.dir.writeFile(io, .{ .sub_path = "file.txt", .data = "nonsense" }); // create a symbolic link if (native_os == .windows) { diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index af17f1f9ff..bacfdacf83 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -145,7 +145,7 @@ test "linkat with different directories" { const subdir = try tmp.dir.makeOpenPath("subdir", .{}); defer tmp.dir.deleteFile(target_name) catch {}; - try tmp.dir.writeFile(.{ .sub_path = target_name, .data = "example" }); + try tmp.dir.writeFile(io, .{ .sub_path = target_name, .data = "example" }); // Test 1: link from file in subdir back up to target in parent directory try posix.linkat(tmp.dir.handle, target_name, subdir.handle, link_name, 0); diff --git a/src/Compilation.zig b/src/Compilation.zig index 36429a42f8..7fe217b385 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5715,7 +5715,7 @@ pub fn translateC( const out_h_sub_path = tmp_sub_path ++ fs.path.sep_str ++ cimport_basename; const out_h_path = try comp.dirs.local_cache.join(arena, &.{out_h_sub_path}); if (comp.verbose_cimport) log.info("writing C import source to {s}", .{out_h_path}); - try cache_dir.writeFile(.{ .sub_path = out_h_sub_path, .data = c_src }); + try cache_dir.writeFile(io, .{ .sub_path = out_h_sub_path, .data = c_src }); break :path out_h_path; }, .path => |p| p, @@ -6572,7 +6572,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 resource_id, resource_type, fmtRcEscape(src_path), }); - try o_dir.writeFile(.{ .sub_path = rc_basename, .data = 
input }); + try o_dir.writeFile(io, .{ .sub_path = rc_basename, .data = input }); var argv = std.array_list.Managed([]const u8).init(comp.gpa); defer argv.deinit(); diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig index cfd8d5554c..77bd4372d0 100644 --- a/src/libs/freebsd.zig +++ b/src/libs/freebsd.zig @@ -520,7 +520,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye for (metadata.all_versions[0 .. target_ver_index + 1]) |ver| { try map_contents.print("FBSD_{d}.{d} {{ }};\n", .{ ver.major, ver.minor }); } - try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items }); + try o_directory.handle.writeFile(io, .{ .sub_path = all_map_basename, .data = map_contents.items }); map_contents.deinit(); } @@ -974,7 +974,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "stdthreads", etc. const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable; - try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items }); + try o_directory.handle.writeFile(io, .{ .sub_path = asm_file_basename, .data = stubs_asm.items }); try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node); } diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig index e3d8ce1f7f..a60dc921be 100644 --- a/src/libs/glibc.zig +++ b/src/libs/glibc.zig @@ -759,7 +759,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye try map_contents.print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch }); } } - try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items }); + try o_directory.handle.writeFile(io, .{ .sub_path = all_map_basename, .data = map_contents.items }); map_contents.deinit(); // The most recent allocation of an arena can be freed :) } @@ -1118,7 +1118,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc. const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable; - try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items }); + try o_directory.handle.writeFile(io, .{ .sub_path = asm_file_basename, .data = stubs_asm.items }); try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node); } diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig index cb6a80d69d..fd80616e9d 100644 --- a/src/libs/netbsd.zig +++ b/src/libs/netbsd.zig @@ -628,7 +628,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc. 
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable; - try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items }); + try o_directory.handle.writeFile(io, .{ .sub_path = asm_file_basename, .data = stubs_asm.items }); try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node); } diff --git a/src/main.zig b/src/main.zig index 99850f5ffe..bef3a3efb5 100644 --- a/src/main.zig +++ b/src/main.zig @@ -7164,7 +7164,7 @@ fn cmdFetch( try ast.render(gpa, &aw.writer, fixups); const rendered = aw.written(); - build_root.directory.handle.writeFile(.{ .sub_path = Package.Manifest.basename, .data = rendered }) catch |err| { + build_root.directory.handle.writeFile(io, .{ .sub_path = Package.Manifest.basename, .data = rendered }) catch |err| { fatal("unable to write {s} file: {t}", .{ Package.Manifest.basename, err }); }; @@ -7207,7 +7207,7 @@ fn createDependenciesModule( { var tmp_dir = try dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{}); defer tmp_dir.close(io); - try tmp_dir.writeFile(.{ .sub_path = basename, .data = source }); + try tmp_dir.writeFile(io, .{ .sub_path = basename, .data = source }); } var hh: Cache.HashHelper = .{}; @@ -7438,7 +7438,7 @@ const Templates = struct { i += 1; } - return out_dir.writeFile(.{ + return out_dir.writeFile(io, .{ .sub_path = template_path, .data = templates.buffer.items, .flags = .{ .exclusive = true }, -- cgit v1.2.3 From 4be8be1d2bd6959efae7df95e3f5713adf953a42 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 17:45:17 -0800 Subject: update all rename() to rename(io) --- lib/compiler/build_runner.zig | 8 ++++---- lib/compiler/objcopy.zig | 2 +- lib/std/Build/Cache.zig | 4 ++-- lib/std/Build/Fuzz.zig | 1 + lib/std/Build/Step/Compile.zig | 6 +++--- lib/std/Build/Step/Options.zig | 2 +- lib/std/Build/Step/Run.zig | 22 ++++++++-------------- lib/std/Build/WebServer.zig | 6 +++--- lib/std/Io.zig | 2 +- lib/std/Io/Dir.zig | 11 +++++++++-- lib/std/fs/test.zig | 38 +++++++++++++++++++------------------- lib/std/posix/test.zig | 4 ++-- lib/std/tar.zig | 2 +- lib/std/zip.zig | 2 +- src/Compilation.zig | 6 +++--- src/Package/Fetch.zig | 12 ++++++------ src/Package/Fetch/git.zig | 4 ++-- src/Zcu.zig | 14 ++++++++++---- src/Zcu/PerThread.zig | 12 ++++++------ src/libs/mingw.zig | 2 +- src/link.zig | 2 +- src/link/Lld.zig | 2 +- src/main.zig | 8 ++------ 23 files changed, 88 insertions(+), 84 deletions(-) (limited to 'lib/std/Build/Cache.zig') diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 9150d84470..677158645d 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -478,14 +478,14 @@ pub fn main() !void { validateSystemLibraryOptions(builder); if (help_menu) { - var w = initStdoutWriter(); + var w = initStdoutWriter(io); printUsage(builder, w) catch return stdout_writer_allocation.err.?; w.flush() catch return stdout_writer_allocation.err.?; return; } if (steps_menu) { - var w = initStdoutWriter(); + var w = initStdoutWriter(io); printSteps(builder, w) catch return stdout_writer_allocation.err.?; w.flush() catch return stdout_writer_allocation.err.?; return; @@ -1847,7 +1847,7 @@ fn createModuleDependenciesForStep(step: *Step) Allocator.Error!void { var stdio_buffer_allocation: [256]u8 = undefined; var stdout_writer_allocation: Io.File.Writer = undefined; -fn initStdoutWriter() *Writer { - stdout_writer_allocation = Io.File.stdout().writerStreaming(&stdio_buffer_allocation); +fn 
initStdoutWriter(io: Io) *Writer { + stdout_writer_allocation = Io.File.stdout().writerStreaming(io, &stdio_buffer_allocation); return &stdout_writer_allocation.interface; } diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index 6473a10dd0..d9b9b33186 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -183,7 +183,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void var output_file = try Io.Dir.cwd().createFile(io, output, .{ .mode = mode }); defer output_file.close(io); - var out = output_file.writer(&output_buffer); + var out = output_file.writer(io, &output_buffer); switch (out_fmt) { .hex, .raw => { diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index f7c4d729bc..2d6dbc02fa 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1134,13 +1134,13 @@ pub const Manifest = struct { /// lock from exclusive to shared. pub fn writeManifest(self: *Manifest) !void { assert(self.have_exclusive_lock); - + const io = self.cache.io; const manifest_file = self.manifest_file.?; if (self.manifest_dirty) { self.manifest_dirty = false; var buffer: [4000]u8 = undefined; - var fw = manifest_file.writer(&buffer); + var fw = manifest_file.writer(io, &buffer); writeDirtyManifestToStream(self, &fw) catch |err| switch (err) { error.WriteFailed => return fw.err.?, else => |e| return e, diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig index 8d5bc27f3a..8fedb7e067 100644 --- a/lib/std/Build/Fuzz.zig +++ b/lib/std/Build/Fuzz.zig @@ -389,6 +389,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO const target = run_step.producer.?.rootModuleTarget(); var debug_info = std.debug.Info.load( fuzz.gpa, + io, rebuilt_exe_path, &gop.value_ptr.coverage, target.ofmt, diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 4f1e83a573..dfa5460981 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -1709,7 +1709,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 { defer b.cache_root.handle.deleteFile(io, tmp_path) catch { // It's fine if the temporary file can't be cleaned up. }; - b.cache_root.handle.rename(io, tmp_path, args_file) catch |rename_err| switch (rename_err) { + b.cache_root.handle.rename(tmp_path, b.cache_root.handle, args_file, io) catch |rename_err| switch (rename_err) { error.PathAlreadyExists => { // The args file was created by another concurrent build process. 
}, @@ -1827,14 +1827,14 @@ pub fn doAtomicSymLinks( // sym link for libfoo.so.1 to libfoo.so.1.2.3 const major_only_path = b.pathJoin(&.{ out_dir, filename_major_only }); const cwd: Io.Dir = .cwd(); - cwd.atomicSymLink(io, out_basename, major_only_path, .{}) catch |err| { + cwd.symLinkAtomic(io, out_basename, major_only_path, .{}) catch |err| { return step.fail("unable to symlink {s} -> {s}: {s}", .{ major_only_path, out_basename, @errorName(err), }); }; // sym link for libfoo.so to libfoo.so.1 const name_only_path = b.pathJoin(&.{ out_dir, filename_name_only }); - cwd.atomicSymLink(io, filename_major_only, name_only_path, .{}) catch |err| { + cwd.symLinkAtomic(io, filename_major_only, name_only_path, .{}) catch |err| { return step.fail("Unable to symlink {s} -> {s}: {s}", .{ name_only_path, filename_major_only, @errorName(err), }); diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index 1416e0e916..676ea4d851 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -498,7 +498,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { }); }; - b.cache_root.handle.rename(io, tmp_sub_path, sub_path) catch |err| switch (err) { + b.cache_root.handle.rename(tmp_sub_path, b.cache_root.handle, sub_path, io) catch |err| switch (err) { error.PathAlreadyExists => { // Other process beat us to it. Clean up the temp file. b.cache_root.handle.deleteFile(io, tmp_sub_path) catch |e| { diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index a1618beb02..e52dd21a96 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1042,27 +1042,21 @@ fn make(step: *Step, options: Step.MakeOptions) !void { if (any_output) { const o_sub_path = "o" ++ fs.path.sep_str ++ &digest; - b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |err| { + b.cache_root.handle.rename(tmp_dir_path, b.cache_root.handle, o_sub_path, io) catch |err| { if (err == error.PathAlreadyExists) { b.cache_root.handle.deleteTree(o_sub_path) catch |del_err| { - return step.fail("unable to remove dir '{f}'{s}: {s}", .{ - b.cache_root, - tmp_dir_path, - @errorName(del_err), + return step.fail("unable to remove dir '{f}'{s}: {t}", .{ + b.cache_root, tmp_dir_path, del_err, }); }; - b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |retry_err| { - return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {s}", .{ - b.cache_root, tmp_dir_path, - b.cache_root, o_sub_path, - @errorName(retry_err), + b.cache_root.handle.rename(tmp_dir_path, b.cache_root.handle, o_sub_path, io) catch |retry_err| { + return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {t}", .{ + b.cache_root, tmp_dir_path, b.cache_root, o_sub_path, retry_err, }); }; } else { - return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {s}", .{ - b.cache_root, tmp_dir_path, - b.cache_root, o_sub_path, - @errorName(err), + return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {t}", .{ + b.cache_root, tmp_dir_path, b.cache_root, o_sub_path, err, }); } }; diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index ed07d04d57..38a7a73588 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -523,7 +523,7 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons if (cached_cwd_path == null) cached_cwd_path = try std.process.getCwdAlloc(gpa); break :cwd cached_cwd_path.?; }; - try archiver.writeFile(io, path.sub_path, &file_reader, @intCast(stat.mtime.toSeconds())); + try 
archiver.writeFile(path.sub_path, &file_reader, @intCast(stat.mtime.toSeconds())); } // intentionally not calling `archiver.finishPedantically` @@ -587,7 +587,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim }); defer poller.deinit(); - try child.stdin.?.writeAll(@ptrCast(@as([]const std.zig.Client.Message.Header, &.{ + try child.stdin.?.writeStreamingAll(io, @ptrCast(@as([]const std.zig.Client.Message.Header, &.{ .{ .tag = .update, .bytes_len = 0 }, .{ .tag = .exit, .bytes_len = 0 }, }))); @@ -638,7 +638,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim child.stdin.?.close(io); child.stdin = null; - switch (try child.wait()) { + switch (try child.wait(io)) { .Exited => |code| { if (code != 0) { log.err( diff --git a/lib/std/Io.zig b/lib/std/Io.zig index 7f5f049d34..193998f037 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -677,7 +677,7 @@ pub const VTable = struct { dirDeleteFile: *const fn (?*anyopaque, Dir, []const u8) Dir.DeleteFileError!void, dirDeleteDir: *const fn (?*anyopaque, Dir, []const u8) Dir.DeleteDirError!void, dirRename: *const fn (?*anyopaque, old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8) Dir.RenameError!void, - dirSymLink: *const fn (?*anyopaque, Dir, target_path: []const u8, sym_link_path: []const u8, Dir.SymLinkFlags) Dir.RenameError!void, + dirSymLink: *const fn (?*anyopaque, Dir, target_path: []const u8, sym_link_path: []const u8, Dir.SymLinkFlags) Dir.SymLinkError!void, dirReadLink: *const fn (?*anyopaque, Dir, sub_path: []const u8, buffer: []u8) Dir.ReadLinkError!usize, dirSetOwner: *const fn (?*anyopaque, Dir, ?File.Uid, ?File.Gid) Dir.SetOwnerError!void, dirSetPermissions: *const fn (?*anyopaque, Dir, Dir.Permissions) Dir.SetPermissionsError!void, diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig index 755ce924ad..7d1f6212dc 100644 --- a/lib/std/Io/Dir.zig +++ b/lib/std/Io/Dir.zig @@ -501,7 +501,7 @@ pub const WriteFileError = File.Writer.Error || File.OpenError; pub fn writeFile(dir: Dir, io: Io, options: WriteFileOptions) WriteFileError!void { var file = try dir.createFile(io, options.sub_path, options.flags); defer file.close(io); - try file.writeAll(io, options.data); + try file.writeStreamingAll(io, options.data); } pub const PrevStatus = enum { @@ -955,6 +955,13 @@ pub fn rename( return io.vtable.dirRename(io.userdata, old_dir, old_sub_path, new_dir, new_sub_path); } +pub fn renameAbsolute(io: Io, old_path: []const u8, new_path: []const u8) RenameError!void { + assert(path.isAbsolute(old_path)); + assert(path.isAbsolute(new_path)); + const my_cwd = cwd(); + return io.vtable.dirRename(io.userdata, my_cwd, old_path, my_cwd, new_path); +} + /// Use with `Dir.symLink`, `Dir.symLinkAtomic`, and `symLinkAbsolute` to /// specify whether the symlink will point to a file or a directory. 
This value /// is ignored on all hosts except Windows where creating symlinks to different @@ -1053,7 +1060,7 @@ pub fn symLinkAtomic( temp_path[dirname.len + 1 ..][0..rand_len].* = std.fmt.hex(random_integer); if (dir.symLink(io, target_path, temp_path, flags)) { - return dir.rename(temp_path, dir, io, sym_link_path); + return dir.rename(temp_path, dir, sym_link_path, io); } else |err| switch (err) { error.PathAlreadyExists => continue, else => |e| return e, diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 59e0990eb0..92992f91b4 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -961,14 +961,14 @@ test "Dir.rename files" { const missing_file_path = try ctx.transformPath("missing_file_name"); const something_else_path = try ctx.transformPath("something_else"); - try testing.expectError(error.FileNotFound, ctx.dir.rename(missing_file_path, something_else_path)); + try testing.expectError(error.FileNotFound, ctx.dir.rename(missing_file_path, ctx.dir, something_else_path, io)); // Renaming files const test_file_name = try ctx.transformPath("test_file"); const renamed_test_file_name = try ctx.transformPath("test_file_renamed"); var file = try ctx.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); - try ctx.dir.rename(test_file_name, renamed_test_file_name); + try ctx.dir.rename(test_file_name, ctx.dir, renamed_test_file_name, io); // Ensure the file was renamed try testing.expectError(error.FileNotFound, ctx.dir.openFile(io, test_file_name, .{})); @@ -976,13 +976,13 @@ test "Dir.rename files" { file.close(io); // Rename to self succeeds - try ctx.dir.rename(renamed_test_file_name, renamed_test_file_name); + try ctx.dir.rename(renamed_test_file_name, ctx.dir, renamed_test_file_name, io); // Rename to existing file succeeds const existing_file_path = try ctx.transformPath("existing_file"); var existing_file = try ctx.dir.createFile(io, existing_file_path, .{ .read = true }); existing_file.close(io); - try ctx.dir.rename(renamed_test_file_name, existing_file_path); + try ctx.dir.rename(renamed_test_file_name, ctx.dir, existing_file_path, io); try testing.expectError(error.FileNotFound, ctx.dir.openFile(io, renamed_test_file_name, .{})); file = try ctx.dir.openFile(io, existing_file_path, .{}); @@ -1007,7 +1007,7 @@ test "Dir.rename directories" { // Renaming directories try ctx.dir.makeDir(test_dir_path); - try ctx.dir.rename(test_dir_path, test_dir_renamed_path); + try ctx.dir.rename(test_dir_path, ctx.dir, test_dir_renamed_path, io); // Ensure the directory was renamed try testing.expectError(error.FileNotFound, ctx.dir.openDir(io, test_dir_path, .{})); @@ -1019,7 +1019,7 @@ test "Dir.rename directories" { dir.close(io); const test_dir_renamed_again_path = try ctx.transformPath("test_dir_renamed_again"); - try ctx.dir.rename(test_dir_renamed_path, test_dir_renamed_again_path); + try ctx.dir.rename(test_dir_renamed_path, ctx.dir, test_dir_renamed_again_path, io); // Ensure the directory was renamed and the file still exists in it try testing.expectError(error.FileNotFound, ctx.dir.openDir(io, test_dir_renamed_path, .{})); @@ -1044,7 +1044,7 @@ test "Dir.rename directory onto empty dir" { try ctx.dir.makeDir(test_dir_path); try ctx.dir.makeDir(target_dir_path); - try ctx.dir.rename(test_dir_path, target_dir_path); + try ctx.dir.rename(test_dir_path, ctx.dir, target_dir_path, io); // Ensure the directory was renamed try testing.expectError(error.FileNotFound, ctx.dir.openDir(io, test_dir_path, .{})); @@ -1072,7 +1072,7 @@ test "Dir.rename directory onto 
non-empty dir" { target_dir.close(io); // Rename should fail with PathAlreadyExists if target_dir is non-empty - try testing.expectError(error.PathAlreadyExists, ctx.dir.rename(test_dir_path, target_dir_path)); + try testing.expectError(error.PathAlreadyExists, ctx.dir.rename(test_dir_path, ctx.dir, target_dir_path, io)); // Ensure the directory was not renamed var dir = try ctx.dir.openDir(io, test_dir_path, .{}); @@ -1094,8 +1094,8 @@ test "Dir.rename file <-> dir" { var file = try ctx.dir.createFile(io, test_file_path, .{ .read = true }); file.close(io); try ctx.dir.makeDir(test_dir_path); - try testing.expectError(error.IsDir, ctx.dir.rename(test_file_path, test_dir_path)); - try testing.expectError(error.NotDir, ctx.dir.rename(test_dir_path, test_file_path)); + try testing.expectError(error.IsDir, ctx.dir.rename(test_file_path, ctx.dir, test_dir_path, io)); + try testing.expectError(error.NotDir, ctx.dir.rename(test_dir_path, ctx.dir, test_file_path, io)); } }.impl); } @@ -1114,7 +1114,7 @@ test "rename" { const renamed_test_file_name = "test_file_renamed"; var file = try tmp_dir1.dir.createFile(io, test_file_name, .{ .read = true }); file.close(io); - try fs.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name); + try Dir.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name, io); // ensure the file was renamed try testing.expectError(error.FileNotFound, tmp_dir1.dir.openFile(io, test_file_name, .{})); @@ -1492,7 +1492,7 @@ test "pwritev, preadv" { var src_file = try tmp.dir.createFile(io, "test.txt", .{ .read = true }); defer src_file.close(io); - var writer = src_file.writer(&.{}); + var writer = src_file.writer(io, &.{}); try writer.seekTo(16); try writer.interface.writeVecAll(&lines); @@ -1593,7 +1593,7 @@ test "sendfile" { var src_file = try dir.createFile(io, "sendfile1.txt", .{ .read = true }); defer src_file.close(io); { - var fw = src_file.writer(&.{}); + var fw = src_file.writer(io, &.{}); try fw.interface.writeVecAll(&vecs); } @@ -1610,7 +1610,7 @@ test "sendfile" { var written_buf: [100]u8 = undefined; var file_reader = src_file.reader(io, &.{}); var fallback_buffer: [50]u8 = undefined; - var file_writer = dest_file.writer(&fallback_buffer); + var file_writer = dest_file.writer(io, &fallback_buffer); try file_writer.interface.writeVecAll(&headers); try file_reader.seekTo(1); try testing.expectEqual(10, try file_writer.interface.sendFileAll(&file_reader, .limited(10))); @@ -1648,7 +1648,7 @@ test "sendfile with buffered data" { try file_reader.interface.fill(8); var fallback_buffer: [32]u8 = undefined; - var file_writer = dest_file.writer(&fallback_buffer); + var file_writer = dest_file.writer(io, &fallback_buffer); try std.testing.expectEqual(4, try file_writer.interface.sendFileAll(&file_reader, .limited(4))); @@ -2051,7 +2051,7 @@ test "'.' and '..' 
in Io.Dir functions" { try ctx.dir.access(io, file_path, .{}); try ctx.dir.copyFile(file_path, ctx.dir, copy_path, .{}); - try ctx.dir.rename(copy_path, rename_path); + try ctx.dir.rename(copy_path, ctx.dir, rename_path, io); const renamed_file = try ctx.dir.openFile(io, rename_path, .{}); renamed_file.close(io); try ctx.dir.deleteFile(rename_path); @@ -2175,7 +2175,7 @@ test "invalid UTF-8/WTF-8 paths" { try testing.expectError(expected_err, ctx.dir.deleteDir(invalid_path)); - try testing.expectError(expected_err, ctx.dir.rename(invalid_path, invalid_path)); + try testing.expectError(expected_err, ctx.dir.rename(invalid_path, ctx.dir, invalid_path, io)); try testing.expectError(expected_err, ctx.dir.symLink(invalid_path, invalid_path, .{})); if (native_os == .wasi) { @@ -2208,7 +2208,7 @@ test "invalid UTF-8/WTF-8 paths" { try testing.expectError(expected_err, ctx.dir.realpathAlloc(testing.allocator, invalid_path)); } - try testing.expectError(expected_err, fs.rename(ctx.dir, invalid_path, ctx.dir, invalid_path)); + try testing.expectError(expected_err, Dir.rename(ctx.dir, invalid_path, ctx.dir, invalid_path, io)); if (native_os != .wasi and ctx.path_type != .relative) { try testing.expectError(expected_err, Dir.copyFileAbsolute(invalid_path, invalid_path, .{})); @@ -2334,7 +2334,7 @@ test "seekTo flushes buffered data" { defer file.close(io); { var buf: [16]u8 = undefined; - var file_writer = File.writer(file, &buf); + var file_writer = file.writer(io, file, &buf); try file_writer.interface.writeAll(contents); try file_writer.seekTo(8); diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index bacfdacf83..756ecfd63f 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -690,7 +690,7 @@ test "rename smoke test" { // Rename the file const new_file_path = try fs.path.join(a, &.{ base_path, "some_other_file" }); defer a.free(new_file_path); - try posix.rename(file_path, new_file_path); + try Io.Dir.renameAbsolute(file_path, new_file_path); } { @@ -717,7 +717,7 @@ test "rename smoke test" { // Rename the directory const new_file_path = try fs.path.join(a, &.{ base_path, "some_other_dir" }); defer a.free(new_file_path); - try posix.rename(file_path, new_file_path); + try Io.Dir.renameAbsolute(file_path, new_file_path); } { diff --git a/lib/std/tar.zig b/lib/std/tar.zig index b2a5306458..5e7215f1d7 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -612,7 +612,7 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp .file => { if (createDirAndFile(io, dir, file_name, fileMode(file.mode, options))) |fs_file| { defer fs_file.close(io); - var file_writer = fs_file.writer(&file_contents_buffer); + var file_writer = fs_file.writer(io, &file_contents_buffer); try it.streamRemaining(file, &file_writer.interface); try file_writer.interface.flush(); } else |err| { diff --git a/lib/std/zip.zig b/lib/std/zip.zig index acb3fc65ab..5a91b73d6a 100644 --- a/lib/std/zip.zig +++ b/lib/std/zip.zig @@ -570,7 +570,7 @@ pub const Iterator = struct { }; defer out_file.close(io); var out_file_buffer: [1024]u8 = undefined; - var file_writer = out_file.writer(&out_file_buffer); + var file_writer = out_file.writer(io, &out_file_buffer); const local_data_file_offset: u64 = @as(u64, self.file_offset) + @as(u64, @sizeOf(LocalFileHeader)) + diff --git a/src/Compilation.zig b/src/Compilation.zig index 7fe217b385..3a6ae263a5 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5303,7 +5303,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void { defer 
tar_file.close(io); var buffer: [1024]u8 = undefined; - var tar_file_writer = tar_file.writer(&buffer); + var tar_file_writer = tar_file.writer(io, &buffer); var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, []const u8) = .empty; defer seen_table.deinit(comp.gpa); @@ -6448,7 +6448,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{}); defer o_dir.close(io); const tmp_basename = fs.path.basename(out_obj_path); - try fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename); + try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename, io); break :blk digest; }; @@ -6696,7 +6696,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{}); defer o_dir.close(io); const tmp_basename = fs.path.basename(out_res_path); - try fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename); + try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename, io); break :blk digest; }; diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 860fb8974e..aaba0c28cf 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -567,7 +567,7 @@ fn runResource( .root_dir = cache_root, .sub_path = try std.fmt.allocPrint(arena, "p" ++ s ++ "{s}", .{computed_package_hash.toSlice()}), }; - renameTmpIntoCache(cache_root.handle, package_sub_path, f.package_root.sub_path) catch |err| { + renameTmpIntoCache(io, cache_root.handle, package_sub_path, f.package_root.sub_path) catch |err| { const src = try cache_root.join(arena, &.{tmp_dir_sub_path}); const dest = try cache_root.join(arena, &.{f.package_root.sub_path}); try eb.addRootErrorMessage(.{ .msg = try eb.printString( @@ -1319,7 +1319,7 @@ fn unzip( defer zip_file.close(io); var zip_file_buffer: [4096]u8 = undefined; var zip_file_reader = b: { - var zip_file_writer = zip_file.writer(&zip_file_buffer); + var zip_file_writer = zip_file.writer(io, &zip_file_buffer); _ = reader.streamRemaining(&zip_file_writer.interface) catch |err| switch (err) { error.ReadFailed => return error.ReadFailed, @@ -1370,7 +1370,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U defer pack_file.close(io); var pack_file_buffer: [4096]u8 = undefined; var pack_file_reader = b: { - var pack_file_writer = pack_file.writer(&pack_file_buffer); + var pack_file_writer = pack_file.writer(io, &pack_file_buffer); const fetch_reader = &resource.fetch_stream.reader; _ = try fetch_reader.streamRemaining(&pack_file_writer.interface); try pack_file_writer.interface.flush(); @@ -1380,7 +1380,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U var index_file = try pack_dir.createFile(io, "pkg.idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [2000]u8 = undefined; - var index_file_writer = index_file.writer(&index_file_buffer); + var index_file_writer = index_file.writer(io, &index_file_buffer); { const index_prog_node = f.prog_node.start("Index pack", 0); defer index_prog_node.end(); @@ -1454,11 +1454,11 @@ fn recursiveDirectoryCopy(f: *Fetch, dir: Io.Dir, tmp_dir: Io.Dir) anyerror!void } } -pub fn renameTmpIntoCache(cache_dir: Io.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void { +pub fn renameTmpIntoCache(io: Io, cache_dir: Io.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void { assert(dest_dir_sub_path[1] == 
fs.path.sep); var handled_missing_dir = false; while (true) { - cache_dir.rename(tmp_dir_sub_path, dest_dir_sub_path) catch |err| switch (err) { + cache_dir.rename(tmp_dir_sub_path, cache_dir, dest_dir_sub_path, io) catch |err| switch (err) { error.FileNotFound => { if (handled_missing_dir) return err; cache_dir.makeDir(dest_dir_sub_path[0..1]) catch |mkd_err| switch (mkd_err) { diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index 3f0ffb04cd..4d5bcfc84b 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -1594,7 +1594,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u var index_file = try git_dir.dir.createFile(io, "testrepo.idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [2000]u8 = undefined; - var index_file_writer = index_file.writer(&index_file_buffer); + var index_file_writer = index_file.writer(io, &index_file_buffer); try indexPack(testing.allocator, format, &pack_file_reader, &index_file_writer); // Arbitrary size limit on files read while checking the repository contents @@ -1730,7 +1730,7 @@ pub fn main() !void { var index_file = try git_dir.createFile(io, "idx", .{ .read = true }); defer index_file.close(io); var index_file_buffer: [4096]u8 = undefined; - var index_file_writer = index_file.writer(&index_file_buffer); + var index_file_writer = index_file.writer(io, &index_file_buffer); try indexPack(allocator, format, &pack_file_reader, &index_file_writer); std.debug.print("Starting checkout...\n", .{}); diff --git a/src/Zcu.zig b/src/Zcu.zig index 1f2fe89236..2891da2407 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -2986,7 +2986,13 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *Io.Reader return zir; } -pub fn saveZirCache(gpa: Allocator, cache_file: Io.File, stat: Io.File.Stat, zir: Zir) (Io.File.WriteError || Allocator.Error)!void { +pub fn saveZirCache( + gpa: Allocator, + io: Io, + cache_file: Io.File, + stat: Io.File.Stat, + zir: Zir, +) (Io.File.WriteError || Allocator.Error)!void { const safety_buffer = if (data_has_safety_tag) try gpa.alloc([8]u8, zir.instructions.len) else @@ -3020,13 +3026,13 @@ pub fn saveZirCache(gpa: Allocator, cache_file: Io.File, stat: Io.File.Stat, zir zir.string_bytes, @ptrCast(zir.extra), }; - var cache_fw = cache_file.writer(&.{}); + var cache_fw = cache_file.writer(io, &.{}); cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) { error.WriteFailed => return cache_fw.err.?, }; } -pub fn saveZoirCache(cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir) Io.File.WriteError!void { +pub fn saveZoirCache(io: Io, cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir) Io.File.WriteError!void { const header: Zoir.Header = .{ .nodes_len = @intCast(zoir.nodes.len), .extra_len = @intCast(zoir.extra.len), @@ -3050,7 +3056,7 @@ pub fn saveZoirCache(cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir) Io.Fil @ptrCast(zoir.compile_errors), @ptrCast(zoir.error_notes), }; - var cache_fw = cache_file.writer(&.{}); + var cache_fw = cache_file.writer(io, &.{}); cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) { error.WriteFailed => return cache_fw.err.?, }; diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 74196705c3..9d6a45ad26 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -278,18 +278,18 @@ pub fn updateFile( switch (file.getMode()) { .zig => { file.zir = try AstGen.generate(gpa, file.tree.?); - Zcu.saveZirCache(gpa, cache_file, stat, file.zir.?) 
catch |err| switch (err) { + Zcu.saveZirCache(gpa, io, cache_file, stat, file.zir.?) catch |err| switch (err) { error.OutOfMemory => |e| return e, - else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {s}", .{ - file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err), + else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {t}", .{ + file.path.fmt(comp), cache_directory, &hex_digest, err, }), }; }, .zon => { file.zoir = try ZonGen.generate(gpa, file.tree.?, .{}); - Zcu.saveZoirCache(cache_file, stat, file.zoir.?) catch |err| { - log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {s}", .{ - file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err), + Zcu.saveZoirCache(io, cache_file, stat, file.zoir.?) catch |err| { + log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {t}", .{ + file.path.fmt(comp), cache_directory, &hex_digest, err, }); }; }, diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig index 93f88b9689..9631ec22f9 100644 --- a/src/libs/mingw.zig +++ b/src/libs/mingw.zig @@ -380,7 +380,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { const lib_final_file = try o_dir.createFile(io, final_lib_basename, .{ .truncate = true }); defer lib_final_file.close(io); var buffer: [1024]u8 = undefined; - var file_writer = lib_final_file.writer(&buffer); + var file_writer = lib_final_file.writer(io, &buffer); try implib.writeCoffArchive(gpa, &file_writer.interface, members); try file_writer.interface.flush(); } diff --git a/src/link.zig b/src/link.zig index 06d18ec2d5..5f3031ca0e 100644 --- a/src/link.zig +++ b/src/link.zig @@ -621,7 +621,7 @@ pub const File = struct { }); defer gpa.free(tmp_sub_path); try emit.root_dir.handle.copyFile(emit.sub_path, emit.root_dir.handle, tmp_sub_path, .{}); - try emit.root_dir.handle.rename(tmp_sub_path, emit.sub_path); + try emit.root_dir.handle.rename(tmp_sub_path, emit.root_dir.handle, emit.sub_path, io); switch (builtin.os.tag) { .linux => std.posix.ptrace(std.os.linux.PTRACE.ATTACH, pid, 0, 0) catch |err| { log.warn("ptrace failure: {s}", .{@errorName(err)}); diff --git a/src/link/Lld.zig b/src/link/Lld.zig index ca15a38bb0..6920c12762 100644 --- a/src/link/Lld.zig +++ b/src/link/Lld.zig @@ -1631,7 +1631,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi { defer rsp_file.close(io); var rsp_file_buffer: [1024]u8 = undefined; - var rsp_file_writer = rsp_file.writer(&rsp_file_buffer); + var rsp_file_writer = rsp_file.writer(io, &rsp_file_buffer); const rsp_writer = &rsp_file_writer.interface; for (argv[2..]) |arg| { try rsp_writer.writeByte('"'); diff --git a/src/main.zig b/src/main.zig index bef3a3efb5..88e236adef 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3412,7 +3412,7 @@ fn buildOutputType( const sub_path = try std.fmt.allocPrint(arena, "tmp" ++ sep ++ "{x}-stdin{s}", .{ &bin_digest, ext.canonicalName(target), }); - try dirs.local_cache.handle.rename(dump_path, sub_path); + try dirs.local_cache.handle.rename(dump_path, dirs.local_cache.handle, sub_path, io); // Convert `sub_path` to be relative to current working directory. 
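For reference, the rename hunks in this commit all converge on the same call shape: the destination directory and sub-path are passed explicitly, with the Io instance as the trailing argument. A minimal sketch of the two spellings used above (the variable names here are illustrative placeholders, not from the patch):

    // Rename within a single directory handle.
    try dir.rename(old_sub_path, dir, new_sub_path, io);

    // Cross-directory rename, as in the Compilation.zig hunks.
    try Io.Dir.rename(tmp_dir, tmp_basename, out_dir, out_basename, io);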
src.src_path = try dirs.local_cache.join(arena, &.{sub_path}); @@ -7216,11 +7216,7 @@ fn createDependenciesModule( const hex_digest = hh.final(); const o_dir_sub_path = try arena.dupe(u8, "o" ++ fs.path.sep_str ++ hex_digest); - try Package.Fetch.renameTmpIntoCache( - dirs.local_cache.handle, - tmp_dir_sub_path, - o_dir_sub_path, - ); + try Package.Fetch.renameTmpIntoCache(io, dirs.local_cache.handle, tmp_dir_sub_path, o_dir_sub_path); const deps_mod = try Package.Module.create(arena, .{ .paths = .{ -- cgit v1.2.3 From 1dcfc8787e86ed94d216976e621a49fc488e8214 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 18:00:55 -0800 Subject: update all readFileAlloc() to accept Io instance --- lib/compiler/std-docs.zig | 6 ++++-- lib/std/Build/Cache.zig | 3 ++- lib/std/Build/Step/CheckFile.zig | 3 ++- lib/std/Build/Step/ConfigHeader.zig | 4 ++-- lib/std/Build/WebServer.zig | 5 +++-- lib/std/Io/Dir.zig | 6 +++--- lib/std/crypto/Certificate/Bundle/macos.zig | 3 +-- lib/std/fs/test.zig | 27 ++++++++++++++------------- lib/std/zig/LibCInstallation.zig | 8 ++------ lib/std/zig/WindowsSdk.zig | 2 +- src/Compilation.zig | 4 ++-- src/Package/Fetch/git.zig | 4 ++-- src/link.zig | 4 ++-- src/link/MachO.zig | 8 +++++--- src/link/MachO/CodeSignature.zig | 16 ++++++++-------- src/main.zig | 21 +++++++++------------ 16 files changed, 62 insertions(+), 62 deletions(-) (limited to 'lib/std/Build/Cache.zig') diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index 8f02f05958..6452496305 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -179,10 +179,11 @@ fn serveDocsFile( content_type: []const u8, ) !void { const gpa = context.gpa; + const io = context.io; // The desired API is actually sendfile, which will require enhancing std.http.Server. // We load the file with every request so that the user can make changes to the file // and refresh the HTML page without restarting this server. - const file_contents = try context.lib_dir.readFileAlloc(name, gpa, .limited(10 * 1024 * 1024)); + const file_contents = try context.lib_dir.readFileAlloc(io, name, gpa, .limited(10 * 1024 * 1024)); defer gpa.free(file_contents); try request.respond(file_contents, .{ .extra_headers = &.{ @@ -255,6 +256,7 @@ fn serveWasm( optimize_mode: std.builtin.OptimizeMode, ) !void { const gpa = context.gpa; + const io = context.io; var arena_instance = std.heap.ArenaAllocator.init(gpa); defer arena_instance.deinit(); @@ -273,7 +275,7 @@ fn serveWasm( }); // std.http.Server does not have a sendfile API yet. 
const bin_path = try wasm_base_path.join(arena, bin_name); - const file_contents = try bin_path.root_dir.handle.readFileAlloc(bin_path.sub_path, gpa, .limited(10 * 1024 * 1024)); + const file_contents = try bin_path.root_dir.handle.readFileAlloc(io, bin_path.sub_path, gpa, .limited(10 * 1024 * 1024)); defer gpa.free(file_contents); try request.respond(file_contents, .{ .extra_headers = &.{ diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 2d6dbc02fa..d2ba33c74d 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1075,7 +1075,8 @@ pub const Manifest = struct { fn addDepFileMaybePost(self: *Manifest, dir: Io.Dir, dep_file_sub_path: []const u8) !void { const gpa = self.cache.gpa; - const dep_file_contents = try dir.readFileAlloc(dep_file_sub_path, gpa, .limited(manifest_file_size_max)); + const io = self.cache.io; + const dep_file_contents = try dir.readFileAlloc(io, dep_file_sub_path, gpa, .limited(manifest_file_size_max)); defer gpa.free(dep_file_contents); var error_buf: std.ArrayList(u8) = .empty; diff --git a/lib/std/Build/Step/CheckFile.zig b/lib/std/Build/Step/CheckFile.zig index 560b6ad050..1c3813ca82 100644 --- a/lib/std/Build/Step/CheckFile.zig +++ b/lib/std/Build/Step/CheckFile.zig @@ -51,11 +51,12 @@ pub fn setName(check_file: *CheckFile, name: []const u8) void { fn make(step: *Step, options: Step.MakeOptions) !void { _ = options; const b = step.owner; + const io = b.graph.io; const check_file: *CheckFile = @fieldParentPtr("step", step); try step.singleUnchangingWatchInput(check_file.source); const src_path = check_file.source.getPath2(b, step); - const contents = Io.Dir.cwd().readFileAlloc(src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(io, src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| { return step.fail("unable to read '{s}': {s}", .{ src_path, @errorName(err), }); diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index 589110d4c4..250bae5009 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -208,7 +208,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .autoconf_undef, .autoconf_at => |file_source| { try bw.writeAll(c_generated_line); const src_path = file_source.getPath2(b, step); - const contents = Io.Dir.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(io, src_path, arena, .limited(config_header.max_bytes)) catch |err| { return step.fail("unable to read autoconf input file '{s}': {s}", .{ src_path, @errorName(err), }); @@ -222,7 +222,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .cmake => |file_source| { try bw.writeAll(c_generated_line); const src_path = file_source.getPath2(b, step); - const contents = Io.Dir.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| { + const contents = Io.Dir.cwd().readFileAlloc(io, src_path, arena, .limited(config_header.max_bytes)) catch |err| { return step.fail("unable to read cmake input file '{s}': {s}", .{ src_path, @errorName(err), }); diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 38a7a73588..72306cbab9 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -469,11 +469,12 @@ pub fn serveFile( content_type: []const u8, ) !void { const gpa = ws.gpa; + const io = ws.graph.io; // The desired API is actually sendfile, which will require enhancing 
http.Server. // We load the file with every request so that the user can make changes to the file // and refresh the HTML page without restarting this server. - const file_contents = path.root_dir.handle.readFileAlloc(path.sub_path, gpa, .limited(10 * 1024 * 1024)) catch |err| { - log.err("failed to read '{f}': {s}", .{ path, @errorName(err) }); + const file_contents = path.root_dir.handle.readFileAlloc(io, path.sub_path, gpa, .limited(10 * 1024 * 1024)) catch |err| { + log.err("failed to read '{f}': {t}", .{ path, err }); return error.AlreadyReported; }; defer gpa.free(file_contents); diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig index 7d1f6212dc..cf5e2b4c72 100644 --- a/lib/std/Io/Dir.zig +++ b/lib/std/Io/Dir.zig @@ -1117,10 +1117,10 @@ pub fn readLink(dir: Dir, io: Io, sub_path: []const u8, buffer: []u8) ReadLinkEr /// On other platforms, `path` is an opaque sequence of bytes with no particular encoding. pub fn readLinkAbsolute(io: Io, absolute_path: []const u8, buffer: []u8) ReadLinkError!usize { assert(path.isAbsolute(absolute_path)); - return io.vtable.dirReadLink(io.userdata, .cwd(), path, buffer); + return io.vtable.dirReadLink(io.userdata, .cwd(), absolute_path, buffer); } -pub const ReadFileAllocError = File.OpenError || File.ReadError || Allocator.Error || error{ +pub const ReadFileAllocError = File.OpenError || File.Reader.Error || Allocator.Error || error{ /// File size reached or exceeded the provided limit. StreamTooLong, }; @@ -1603,7 +1603,7 @@ pub const CopyFileOptions = struct { pub const CopyFileError = File.OpenError || File.StatError || File.Atomic.InitError || File.Atomic.FinishError || - File.ReadError || File.WriteError || error{InvalidFileName}; + File.Reader.Error || File.WriteError || error{InvalidFileName}; /// Atomically creates a new file at `dest_path` within `dest_dir` with the /// same contents as `source_path` within `source_dir`, overwriting any already diff --git a/lib/std/crypto/Certificate/Bundle/macos.zig b/lib/std/crypto/Certificate/Bundle/macos.zig index 444d8da675..086c8feb3f 100644 --- a/lib/std/crypto/Certificate/Bundle/macos.zig +++ b/lib/std/crypto/Certificate/Bundle/macos.zig @@ -17,9 +17,8 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanM "/Library/Keychains/System.keychain", }; - _ = io; // TODO migrate file system to use std.Io for (keychain_paths) |keychain_path| { - const bytes = Io.Dir.cwd().readFileAlloc(keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) { + const bytes = Io.Dir.cwd().readFileAlloc(io, keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) { error.StreamTooLong => return error.FileTooBig, else => |e| return e, }; diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 92992f91b4..bb3a9da0f5 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -767,7 +767,7 @@ test "readFileAlloc" { var file = try tmp_dir.dir.createFile(io, "test_file", .{ .read = true }); defer file.close(io); - const buf1 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024)); + const buf1 = try tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(1024)); defer testing.allocator.free(buf1); try testing.expectEqualStrings("", buf1); @@ -776,7 +776,7 @@ test "readFileAlloc" { { // max_bytes > file_size - const buf2 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024)); + const buf2 = try tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(1024)); defer 
testing.allocator.free(buf2); try testing.expectEqualStrings(write_buf, buf2); } @@ -785,13 +785,13 @@ test "readFileAlloc" { // max_bytes == file_size try testing.expectError( error.StreamTooLong, - tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len)), + tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(write_buf.len)), ); } { // max_bytes == file_size + 1 - const buf2 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len + 1)); + const buf2 = try tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(write_buf.len + 1)); defer testing.allocator.free(buf2); try testing.expectEqualStrings(write_buf, buf2); } @@ -799,7 +799,7 @@ test "readFileAlloc" { // max_bytes < file_size try testing.expectError( error.StreamTooLong, - tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len - 1)), + tmp_dir.dir.readFileAlloc(io, "test_file", testing.allocator, .limited(write_buf.len - 1)), ); } @@ -877,16 +877,16 @@ test "file operations on directories" { switch (native_os) { .dragonfly, .netbsd => { // no error when reading a directory. See https://github.com/ziglang/zig/issues/5732 - const buf = try ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited); + const buf = try ctx.dir.readFileAlloc(io, test_dir_name, testing.allocator, .unlimited); testing.allocator.free(buf); }, .wasi => { // WASI return EBADF, which gets mapped to NotOpenForReading. // See https://github.com/bytecodealliance/wasmtime/issues/1935 - try testing.expectError(error.NotOpenForReading, ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited)); + try testing.expectError(error.NotOpenForReading, ctx.dir.readFileAlloc(io, test_dir_name, testing.allocator, .unlimited)); }, else => { - try testing.expectError(error.IsDir, ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited)); + try testing.expectError(error.IsDir, ctx.dir.readFileAlloc(io, test_dir_name, testing.allocator, .unlimited)); }, } @@ -1679,14 +1679,14 @@ test "copyFile" { try ctx.dir.copyFile(src_file, ctx.dir, dest_file2, .{ .override_mode = File.default_mode }); defer ctx.dir.deleteFile(dest_file2) catch {}; - try expectFileContents(ctx.dir, dest_file, data); - try expectFileContents(ctx.dir, dest_file2, data); + try expectFileContents(io, ctx.dir, dest_file, data); + try expectFileContents(io, ctx.dir, dest_file2, data); } }.impl); } -fn expectFileContents(dir: Dir, file_path: []const u8, data: []const u8) !void { - const contents = try dir.readFileAlloc(file_path, testing.allocator, .limited(1000)); +fn expectFileContents(io: Io, dir: Dir, file_path: []const u8, data: []const u8) !void { + const contents = try dir.readFileAlloc(io, file_path, testing.allocator, .limited(1000)); defer testing.allocator.free(contents); try testing.expectEqualSlices(u8, data, contents); @@ -1695,6 +1695,7 @@ fn expectFileContents(dir: Dir, file_path: []const u8, data: []const u8) !void { test "AtomicFile" { try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { + const io = ctx.io; const allocator = ctx.arena.allocator(); const test_out_file = try ctx.transformPath("tmp_atomic_file_test_dest.txt"); const test_content = @@ -1709,7 +1710,7 @@ test "AtomicFile" { try af.file_writer.interface.writeAll(test_content); try af.finish(); } - const content = try ctx.dir.readFileAlloc(test_out_file, allocator, .limited(9999)); + const content = try ctx.dir.readFileAlloc(io, test_out_file, allocator, 
.limited(9999)); try testing.expectEqualStrings(test_content, content); try ctx.dir.deleteFile(test_out_file); diff --git a/lib/std/zig/LibCInstallation.zig b/lib/std/zig/LibCInstallation.zig index f3dc73838f..1463ff4a40 100644 --- a/lib/std/zig/LibCInstallation.zig +++ b/lib/std/zig/LibCInstallation.zig @@ -37,11 +37,7 @@ pub const FindError = error{ ZigIsTheCCompiler, }; -pub fn parse( - allocator: Allocator, - libc_file: []const u8, - target: *const std.Target, -) !LibCInstallation { +pub fn parse(allocator: Allocator, io: Io, libc_file: []const u8, target: *const std.Target) !LibCInstallation { var self: LibCInstallation = .{}; const fields = std.meta.fields(LibCInstallation); @@ -57,7 +53,7 @@ pub fn parse( } } - const contents = try Io.Dir.cwd().readFileAlloc(libc_file, allocator, .limited(std.math.maxInt(usize))); + const contents = try Io.Dir.cwd().readFileAlloc(io, libc_file, allocator, .limited(std.math.maxInt(usize))); defer allocator.free(contents); var it = std.mem.tokenizeScalar(u8, contents, '\n'); diff --git a/lib/std/zig/WindowsSdk.zig b/lib/std/zig/WindowsSdk.zig index dca474020a..3f6d58b6ba 100644 --- a/lib/std/zig/WindowsSdk.zig +++ b/lib/std/zig/WindowsSdk.zig @@ -775,7 +775,7 @@ const MsvcLibDir = struct { writer.writeByte(std.fs.path.sep) catch unreachable; writer.writeAll("state.json") catch unreachable; - const json_contents = instances_dir.readFileAlloc(writer.buffered(), allocator, .limited(std.math.maxInt(usize))) catch continue; + const json_contents = instances_dir.readFileAlloc(io, writer.buffered(), allocator, .limited(std.math.maxInt(usize))) catch continue; defer allocator.free(json_contents); var parsed = std.json.parseFromSlice(std.json.Value, allocator, json_contents, .{}) catch continue; diff --git a/src/Compilation.zig b/src/Compilation.zig index 3a6ae263a5..79b2d51635 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -6400,7 +6400,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr if (comp.file_system_inputs != null) { // Use the same file size limit as the cache code does for dependency files. 
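For reference, every readFileAlloc call in this commit now threads the Io instance through as the first argument after the directory handle; a minimal sketch of the updated shape, with the allocator, sub-path, and size limit as placeholders:

    const contents = try dir.readFileAlloc(io, dep_basename, gpa, .limited(10 * 1024 * 1024));
    defer gpa.free(contents);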
- const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(dep_basename, gpa, .limited(Cache.manifest_file_size_max)); + const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(io, dep_basename, gpa, .limited(Cache.manifest_file_size_max)); defer gpa.free(dep_file_contents); var str_buf: std.ArrayList(u8) = .empty; @@ -6665,7 +6665,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 // Read depfile and update cache manifest { const dep_basename = fs.path.basename(out_dep_path); - const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(dep_basename, arena, .limited(50 * 1024 * 1024)); + const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(io, dep_basename, arena, .limited(50 * 1024 * 1024)); defer arena.free(dep_file_contents); const value = try std.json.parseFromSliceLeaky(std.json.Value, arena, dep_file_contents, .{}); diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index 4d5bcfc84b..1d6aa1a849 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -1602,7 +1602,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u const max_file_size = 8192; if (!skip_checksums) { - const index_file_data = try git_dir.dir.readFileAlloc("testrepo.idx", testing.allocator, .limited(max_file_size)); + const index_file_data = try git_dir.dir.readFileAlloc(io, "testrepo.idx", testing.allocator, .limited(max_file_size)); defer testing.allocator.free(index_file_data); // testrepo.idx is generated by Git. The index created by this file should // match it exactly. Running `git verify-pack -v testrepo.pack` can verify @@ -1678,7 +1678,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u \\revision 19 \\ ; - const actual_file_contents = try worktree.dir.readFileAlloc("file", testing.allocator, .limited(max_file_size)); + const actual_file_contents = try worktree.dir.readFileAlloc(io, "file", testing.allocator, .limited(max_file_size)); defer testing.allocator.free(actual_file_contents); try testing.expectEqualStrings(expected_file_contents, actual_file_contents); } diff --git a/src/link.zig b/src/link.zig index 5f3031ca0e..ff6d938e6f 100644 --- a/src/link.zig +++ b/src/link.zig @@ -624,12 +624,12 @@ pub const File = struct { try emit.root_dir.handle.rename(tmp_sub_path, emit.root_dir.handle, emit.sub_path, io); switch (builtin.os.tag) { .linux => std.posix.ptrace(std.os.linux.PTRACE.ATTACH, pid, 0, 0) catch |err| { - log.warn("ptrace failure: {s}", .{@errorName(err)}); + log.warn("ptrace failure: {t}", .{err}); }, .maccatalyst, .macos => { const macho_file = base.cast(.macho).?; macho_file.ptraceAttach(pid) catch |err| { - log.warn("attaching failed with error: {s}", .{@errorName(err)}); + log.warn("attaching failed with error: {t}", .{err}); }; }, .windows => unreachable, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 6cd906340a..f996442f24 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -4347,11 +4347,13 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); + const io = comp.io; + const sdk_dir = switch (sdk_layout) { .sdk => comp.sysroot.?, .vendored => fs.path.join(arena, &.{ comp.dirs.zig_lib.path.?, "libc", "darwin" }) catch return null, }; - if (readSdkVersionFromSettings(arena, sdk_dir)) |ver| { + if (readSdkVersionFromSettings(arena, io, sdk_dir)) |ver| { return parseSdkVersion(ver); } else |_| { // Read from settings 
should always succeed when vendored. @@ -4374,9 +4376,9 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi // Official Apple SDKs ship with a `SDKSettings.json` located at the top of SDK fs layout. // Use property `MinimalDisplayName` to determine version. // The file/property is also available with vendored libc. -fn readSdkVersionFromSettings(arena: Allocator, dir: []const u8) ![]const u8 { +fn readSdkVersionFromSettings(arena: Allocator, io: Io, dir: []const u8) ![]const u8 { const sdk_path = try fs.path.join(arena, &.{ dir, "SDKSettings.json" }); - const contents = try Io.Dir.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16))); + const contents = try Io.Dir.cwd().readFileAlloc(io, sdk_path, arena, .limited(std.math.maxInt(u16))); const parsed = try std.json.parseFromSlice(std.json.Value, arena, contents, .{}); if (parsed.value.object.get("MinimalDisplayName")) |ver| return ver.string; return error.SdkVersionFailure; diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index ec516d4af0..0955c823b8 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -17,6 +17,12 @@ const MachO = @import("../MachO.zig"); const hash_size = Sha256.digest_length; +page_size: u16, +code_directory: CodeDirectory, +requirements: ?Requirements = null, +entitlements: ?Entitlements = null, +signature: ?Signature = null, + const Blob = union(enum) { code_directory: *CodeDirectory, requirements: *Requirements, @@ -220,12 +226,6 @@ const Signature = struct { } }; -page_size: u16, -code_directory: CodeDirectory, -requirements: ?Requirements = null, -entitlements: ?Entitlements = null, -signature: ?Signature = null, - pub fn init(page_size: u16) CodeSignature { return .{ .page_size = page_size, @@ -246,8 +246,8 @@ pub fn deinit(self: *CodeSignature, allocator: Allocator) void { } } -pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, path: []const u8) !void { - const inner = try Io.Dir.cwd().readFileAlloc(path, allocator, .limited(std.math.maxInt(u32))); +pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, io: Io, path: []const u8) !void { + const inner = try Io.Dir.cwd().readFileAlloc(io, path, allocator, .limited(std.math.maxInt(u32))); self.entitlements = .{ .inner = inner }; } diff --git a/src/main.zig b/src/main.zig index 88e236adef..9bb88d373b 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1029,9 +1029,8 @@ fn buildOutputType( if (mem.cutPrefix(u8, arg, "@")) |resp_file_path| { // This is a "compiler response file". We must parse the file and treat its // contents as command line parameters. - args_iter.resp_file = initArgIteratorResponseFile(arena, resp_file_path) catch |err| { - fatal("unable to read response file '{s}': {s}", .{ resp_file_path, @errorName(err) }); - }; + args_iter.resp_file = initArgIteratorResponseFile(arena, io, resp_file_path) catch |err| + fatal("unable to read response file '{s}': {t}", .{ resp_file_path, err }); } else if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { try Io.File.stdout().writeAll(usage_build_generic); @@ -5441,7 +5440,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) // that are missing. 
const s = fs.path.sep_str; const tmp_sub_path = "tmp" ++ s ++ results_tmp_file_nonce; - const stdout = dirs.local_cache.handle.readFileAlloc(tmp_sub_path, arena, .limited(50 * 1024 * 1024)) catch |err| { + const stdout = dirs.local_cache.handle.readFileAlloc(io, tmp_sub_path, arena, .limited(50 * 1024 * 1024)) catch |err| { fatal("unable to read results of configure phase from '{f}{s}': {s}", .{ dirs.local_cache, tmp_sub_path, @errorName(err), }); @@ -5822,9 +5821,9 @@ pub fn lldMain( const ArgIteratorResponseFile = process.ArgIteratorGeneral(.{ .comments = true, .single_quotes = true }); /// Initialize the arguments from a Response File. "*.rsp" -fn initArgIteratorResponseFile(allocator: Allocator, resp_file_path: []const u8) !ArgIteratorResponseFile { +fn initArgIteratorResponseFile(allocator: Allocator, io: Io, resp_file_path: []const u8) !ArgIteratorResponseFile { const max_bytes = 10 * 1024 * 1024; // 10 MiB of command line arguments is a reasonable limit - const cmd_line = try Io.Dir.cwd().readFileAlloc(resp_file_path, allocator, .limited(max_bytes)); + const cmd_line = try Io.Dir.cwd().readFileAlloc(io, resp_file_path, allocator, .limited(max_bytes)); errdefer allocator.free(cmd_line); return ArgIteratorResponseFile.initTakeOwnership(allocator, cmd_line); @@ -5952,7 +5951,7 @@ pub const ClangArgIterator = struct { }; } - fn next(self: *ClangArgIterator) !void { + fn next(self: *ClangArgIterator, io: Io) !void { assert(self.has_next); assert(self.next_index < self.argv.len); // In this state we know that the parameter we are looking at is a root parameter @@ -5970,10 +5969,8 @@ pub const ClangArgIterator = struct { const arena = self.arena; const resp_file_path = arg[1..]; - self.arg_iterator_response_file = - initArgIteratorResponseFile(arena, resp_file_path) catch |err| { - fatal("unable to read response file '{s}': {s}", .{ resp_file_path, @errorName(err) }); - }; + self.arg_iterator_response_file = initArgIteratorResponseFile(arena, io, resp_file_path) catch |err| + fatal("unable to read response file '{s}': {t}", .{ resp_file_path, err }); // NOTE: The ArgIteratorResponseFile returns tokens from next() that are slices of an // internal buffer. This internal buffer is arena allocated, so it is not cleaned up here. 
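Several hunks in this commit also replace @errorName(err) with the {t} format specifier, which prints the same error tag name without the extra call; the two equivalent spellings, taken from the response-file hunk above:

    fatal("unable to read response file '{s}': {s}", .{ resp_file_path, @errorName(err) }); // before
    fatal("unable to read response file '{s}': {t}", .{ resp_file_path, err }); // after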
@@ -7405,7 +7402,7 @@ const Templates = struct { } const max_bytes = 10 * 1024 * 1024; - const contents = templates.dir.readFileAlloc(template_path, arena, .limited(max_bytes)) catch |err| { + const contents = templates.dir.readFileAlloc(io, template_path, arena, .limited(max_bytes)) catch |err| { fatal("unable to read template file '{s}': {t}", .{ template_path, err }); }; templates.buffer.clearRetainingCapacity(); -- cgit v1.2.3 From 4a53e5b0b4131c6b8e18bb551e8215e425f8ac71 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 Dec 2025 20:03:50 -0800 Subject: fix a handful of compilation errors related to std.fs migration --- lib/compiler/resinator/cli.zig | 2 +- lib/compiler/resinator/main.zig | 2 +- lib/compiler/std-docs.zig | 18 +++---- lib/std/Build.zig | 2 +- lib/std/Build/Cache.zig | 2 +- lib/std/Build/Step.zig | 2 +- lib/std/Build/Step/Run.zig | 4 +- lib/std/Build/WebServer.zig | 2 +- lib/std/Io/Dir.zig | 7 +-- lib/std/Io/File.zig | 4 ++ lib/std/Io/File/Atomic.zig | 7 ++- lib/std/Io/net/test.zig | 2 +- lib/std/Io/test.zig | 2 +- lib/std/fs/test.zig | 26 +++++----- lib/std/posix/test.zig | 2 +- lib/std/process/Child.zig | 94 +++++++++++++++++----------------- lib/std/tar.zig | 36 ++++++------- src/Compilation.zig | 15 +++--- src/Package/Fetch.zig | 12 ++--- src/link.zig | 14 +++-- src/link/Lld.zig | 8 +-- src/main.zig | 13 ++--- test/src/Cases.zig | 3 +- test/standalone/child_process/main.zig | 2 +- 24 files changed, 148 insertions(+), 133 deletions(-) (limited to 'lib/std/Build/Cache.zig') diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig index 5588390197..d2dd71b1f6 100644 --- a/lib/compiler/resinator/cli.zig +++ b/lib/compiler/resinator/cli.zig @@ -2010,7 +2010,7 @@ test "maybeAppendRC" { // Now delete the file and try again. But this time change the input format // to non-rc. - try tmp.dir.deleteFile("foo"); + try tmp.dir.deleteFile(io, "foo"); options.input_format = .res; try options.maybeAppendRC(io, tmp.dir); try std.testing.expectEqualStrings("foo", options.input_source.filename); diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index afe1dcbe91..6c12903f06 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -440,7 +440,7 @@ const IoStream = struct { // Delete the output file on error file.close(io); // Failing to delete is not really a big deal, so swallow any errors - Io.Dir.cwd().deleteFile(self.name) catch {}; + Io.Dir.cwd().deleteFile(io, self.name) catch {}; }, .stdio, .memory, .closed => return, } diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index 6452496305..538558fe48 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -72,8 +72,8 @@ pub fn main() !void { const url_with_newline = try std.fmt.allocPrint(arena, "http://127.0.0.1:{d}/\n", .{port}); std.Io.File.stdout().writeAll(url_with_newline) catch {}; if (should_open_browser) { - openBrowserTab(gpa, url_with_newline[0 .. url_with_newline.len - 1 :'\n']) catch |err| { - std.log.err("unable to open browser: {s}", .{@errorName(err)}); + openBrowserTab(gpa, io, url_with_newline[0 .. 
url_with_newline.len - 1 :'\n']) catch |err| { + std.log.err("unable to open browser: {t}", .{err}); }; } @@ -89,7 +89,7 @@ pub fn main() !void { while (true) { const connection = try http_server.accept(); _ = std.Thread.spawn(.{}, accept, .{ &context, connection }) catch |err| { - std.log.err("unable to accept connection: {s}", .{@errorName(err)}); + std.log.err("unable to accept connection: {t}", .{err}); connection.stream.close(io); continue; }; @@ -328,7 +328,7 @@ fn buildWasmBinary( child.stdin_behavior = .Pipe; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; - try child.spawn(); + try child.spawn(io); var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{ .stdout = child.stdout.?, @@ -434,13 +434,13 @@ fn sendMessage(io: Io, file: std.Io.File, tag: std.zig.Client.Message.Tag) !void }; } -fn openBrowserTab(gpa: Allocator, url: []const u8) !void { +fn openBrowserTab(gpa: Allocator, io: Io, url: []const u8) !void { // Until https://github.com/ziglang/zig/issues/19205 is implemented, we // spawn a thread for this child process. - _ = try std.Thread.spawn(.{}, openBrowserTabThread, .{ gpa, url }); + _ = try std.Thread.spawn(.{}, openBrowserTabThread, .{ gpa, io, url }); } -fn openBrowserTabThread(gpa: Allocator, url: []const u8) !void { +fn openBrowserTabThread(gpa: Allocator, io: Io, url: []const u8) !void { const main_exe = switch (builtin.os.tag) { .windows => "explorer", .macos => "open", @@ -450,6 +450,6 @@ fn openBrowserTabThread(gpa: Allocator, url: []const u8) !void { child.stdin_behavior = .Ignore; child.stdout_behavior = .Ignore; child.stderr_behavior = .Ignore; - try child.spawn(); - _ = try child.wait(); + try child.spawn(io); + _ = try child.wait(io); } diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 746b41860b..ae2ab1c4d0 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1838,7 +1838,7 @@ pub fn runAllowFail( child.env_map = &b.graph.env_map; try Step.handleVerbose2(b, null, child.env_map, argv); - try child.spawn(); + try child.spawn(io); var stdout_reader = child.stdout.?.readerStreaming(io, &.{}); const stdout = stdout_reader.interface.allocRemaining(b.allocator, .limited(max_output_size)) catch { diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index d2ba33c74d..b97efc7677 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1300,7 +1300,7 @@ fn testGetCurrentFileTimestamp(io: Io, dir: Io.Dir) !Io.Timestamp { }); defer { file.close(io); - dir.deleteFile(test_out_file) catch {}; + dir.deleteFile(io, test_out_file) catch {}; } return (try file.stat(io)).mtime; diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 8115aaa1a1..9c7fcc757f 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -455,7 +455,7 @@ pub fn evalZigProcess( child.request_resource_usage_statistics = true; child.progress_node = prog_node; - child.spawn() catch |err| return s.fail("failed to spawn zig compiler {s}: {t}", .{ argv[0], err }); + child.spawn(io) catch |err| return s.fail("failed to spawn zig compiler {s}: {t}", .{ argv[0], err }); const zp = try gpa.create(ZigProcess); zp.* = .{ diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 19d2e7e61c..95024061d8 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1689,7 +1689,7 @@ fn evalZigTest( }; while (true) { - try child.spawn(); + try child.spawn(io); var poller = std.Io.poll(gpa, StdioPollEnum, .{ .stdout = child.stdout.?, .stderr = child.stderr.?, @@ -2168,7 +2168,7 @@ fn evalGeneric(run: *Run, child: 
*std.process.Child) !EvalGenericResult { const io = b.graph.io; const arena = b.allocator; - try child.spawn(); + try child.spawn(io); errdefer _ = child.kill(io) catch {}; try child.waitForSpawn(); diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index 72306cbab9..5f633c5948 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -580,7 +580,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim child.stdin_behavior = .Pipe; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; - try child.spawn(); + try child.spawn(io); var poller = Io.poll(gpa, enum { stdout, stderr }, .{ .stdout = child.stdout.?, diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig index cf5e2b4c72..d6d6aa1be2 100644 --- a/lib/std/Io/Dir.zig +++ b/lib/std/Io/Dir.zig @@ -1366,7 +1366,7 @@ pub fn deleteTree(dir: Dir, io: Io, sub_path: []const u8) DeleteTreeError!void { => |e| return e, }; } else { - if (parent_dir.deleteFile(name)) { + if (parent_dir.deleteFile(io, name)) { continue :process_stack; } else |err| switch (err) { error.FileNotFound => continue :process_stack, @@ -1477,7 +1477,7 @@ fn deleteTreeMinStackSizeWithKindHint(parent: Dir, io: Io, sub_path: []const u8, dir_name = result; continue :scan_dir; } else { - if (dir.deleteFile(entry.name)) { + if (dir.deleteFile(io, entry.name)) { continue :dir_it; } else |err| switch (err) { error.FileNotFound => continue :dir_it, @@ -1567,7 +1567,7 @@ fn deleteTreeOpenInitialSubpath(dir: Dir, io: Io, sub_path: []const u8, kind_hin => |e| return e, }; } else { - if (dir.deleteFile(sub_path)) { + if (dir.deleteFile(io, sub_path)) { return null; } else |err| switch (err) { error.FileNotFound => return null, @@ -1588,6 +1588,7 @@ fn deleteTreeOpenInitialSubpath(dir: Dir, io: Io, sub_path: []const u8, kind_hin error.FileBusy, error.BadPathName, error.NetworkNotFound, + error.Canceled, error.Unexpected, => |e| return e, } diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index 302d2a8ca5..6d4dd4f323 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -407,6 +407,10 @@ pub const Permissions = std.options.FilePermissions orelse if (is_windows) enum( return @intFromEnum(self); } + pub fn fromMode(mode: std.posix.mode_t) @This() { + return @enumFromInt(mode); + } + /// Returns `true` if and only if no class has write permissions. 
pub fn readOnly(self: @This()) bool { const mode = toMode(self); diff --git a/lib/std/Io/File/Atomic.zig b/lib/std/Io/File/Atomic.zig index ccb81815ed..7d412703ed 100644 --- a/lib/std/Io/File/Atomic.zig +++ b/lib/std/Io/File/Atomic.zig @@ -20,7 +20,7 @@ pub const InitError = File.OpenError; pub fn init( io: Io, dest_basename: []const u8, - mode: File.Mode, + permissions: File.Permissions, dir: Dir, close_dir_on_deinit: bool, write_buffer: []u8, @@ -28,7 +28,10 @@ pub fn init( while (true) { const random_integer = std.crypto.random.int(u64); const tmp_sub_path = std.fmt.hex(random_integer); - const file = dir.createFile(io, &tmp_sub_path, .{ .mode = mode, .exclusive = true }) catch |err| switch (err) { + const file = dir.createFile(io, &tmp_sub_path, .{ + .permissions = permissions, + .exclusive = true, + }) catch |err| switch (err) { error.PathAlreadyExists => continue, else => |e| return e, }; diff --git a/lib/std/Io/net/test.zig b/lib/std/Io/net/test.zig index c9ed0d3284..23edab5a5d 100644 --- a/lib/std/Io/net/test.zig +++ b/lib/std/Io/net/test.zig @@ -278,7 +278,7 @@ test "listen on a unix socket, send bytes, receive bytes" { defer testing.allocator.free(socket_path); const socket_addr = try net.UnixAddress.init(socket_path); - defer Io.Dir.cwd().deleteFile(socket_path) catch {}; + defer Io.Dir.cwd().deleteFile(io, socket_path) catch {}; var server = try socket_addr.listen(io, .{}); defer server.socket.close(io); diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index 9763fb2397..02bdb591d0 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -60,7 +60,7 @@ test "write a file, read it, then delete it" { try expect(mem.eql(u8, contents["begin".len .. contents.len - "end".len], &data)); try expect(mem.eql(u8, contents[contents.len - "end".len ..], "end")); } - try tmp.dir.deleteFile(tmp_file_name); + try tmp.dir.deleteFile(io, tmp_file_name); } test "File seek ops" { diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index edc15526b2..da0a0cff79 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -873,7 +873,7 @@ test "file operations on directories" { try ctx.dir.makeDir(io, test_dir_name, .default_dir); try testing.expectError(error.IsDir, ctx.dir.createFile(io, test_dir_name, .{})); - try testing.expectError(error.IsDir, ctx.dir.deleteFile(test_dir_name)); + try testing.expectError(error.IsDir, ctx.dir.deleteFile(io, test_dir_name)); switch (native_os) { .dragonfly, .netbsd => { // no error when reading a directory. 
See https://github.com/ziglang/zig/issues/5732 @@ -942,7 +942,7 @@ test "deleteDir" { try testing.expectError(error.DirNotEmpty, ctx.dir.deleteDir(test_dir_path)); // deleting an empty directory - try ctx.dir.deleteFile(test_file_path); + try ctx.dir.deleteFile(io, test_file_path); try ctx.dir.deleteDir(test_dir_path); } }.impl); @@ -1671,13 +1671,13 @@ test "copyFile" { const dest_file2 = try ctx.transformPath("tmp_test_copy_file3.txt"); try ctx.dir.writeFile(io, .{ .sub_path = src_file, .data = data }); - defer ctx.dir.deleteFile(src_file) catch {}; + defer ctx.dir.deleteFile(io, src_file) catch {}; try ctx.dir.copyFile(src_file, ctx.dir, dest_file, .{}); - defer ctx.dir.deleteFile(dest_file) catch {}; + defer ctx.dir.deleteFile(io, dest_file) catch {}; try ctx.dir.copyFile(src_file, ctx.dir, dest_file2, .{ .override_mode = File.default_mode }); - defer ctx.dir.deleteFile(dest_file2) catch {}; + defer ctx.dir.deleteFile(io, dest_file2) catch {}; try expectFileContents(io, ctx.dir, dest_file, data); try expectFileContents(io, ctx.dir, dest_file2, data); @@ -1713,7 +1713,7 @@ test "AtomicFile" { const content = try ctx.dir.readFileAlloc(io, test_out_file, allocator, .limited(9999)); try testing.expectEqualStrings(test_content, content); - try ctx.dir.deleteFile(test_out_file); + try ctx.dir.deleteFile(io, test_out_file); } }.impl); } @@ -2055,7 +2055,7 @@ test "'.' and '..' in Io.Dir functions" { try ctx.dir.rename(copy_path, ctx.dir, rename_path, io); const renamed_file = try ctx.dir.openFile(io, rename_path, .{}); renamed_file.close(io); - try ctx.dir.deleteFile(rename_path); + try ctx.dir.deleteFile(io, rename_path); try ctx.dir.writeFile(io, .{ .sub_path = update_path, .data = "something" }); var dir = ctx.dir; @@ -2113,19 +2113,19 @@ test "chmod" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - const file = try tmp.dir.createFile(io, "test_file", .{ .mode = 0o600 }); + const file = try tmp.dir.createFile(io, "test_file", .{ .permissions = .fromMode(0o600) }); defer file.close(io); - try testing.expectEqual(@as(File.Mode, 0o600), (try file.stat(io)).mode & 0o7777); + try testing.expectEqual(@as(posix.mode_t, 0o600), (try file.stat(io)).permissions.toMode() & 0o7777); - try file.chmod(0o644); - try testing.expectEqual(@as(File.Mode, 0o644), (try file.stat(io)).mode & 0o7777); + try file.setPermissions(io, .fromMode(0o644)); + try testing.expectEqual(@as(posix.mode_t, 0o644), (try file.stat(io)).permissions.toMode() & 0o7777); try tmp.dir.makeDir(io, "test_dir", .default_dir); var dir = try tmp.dir.openDir(io, "test_dir", .{ .iterate = true }); defer dir.close(io); - try dir.chmod(0o700); - try testing.expectEqual(@as(File.Mode, 0o700), (try dir.stat(io)).mode & 0o7777); + try dir.setPermissions(io, .fromMode(0o700)); + try testing.expectEqual(@as(posix.mode_t, 0o700), (try dir.stat(io)).permissions.toMode() & 0o7777); } test "chown" { diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 756ecfd63f..b5255ad2a1 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -144,7 +144,7 @@ test "linkat with different directories" { const subdir = try tmp.dir.makeOpenPath("subdir", .{}); - defer tmp.dir.deleteFile(target_name) catch {}; + defer tmp.dir.deleteFile(io, target_name) catch {}; try tmp.dir.writeFile(io, .{ .sub_path = target_name, .data = "example" }); // Test 1: link from file in subdir back up to target in parent directory diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index dbbacc496c..17139e66b8 100644 --- a/lib/std/process/Child.zig 
+++ b/lib/std/process/Child.zig @@ -1,4 +1,4 @@ -const ChildProcess = @This(); +const Child = @This(); const builtin = @import("builtin"); const native_os = builtin.os.tag; @@ -31,7 +31,7 @@ pub const Id = switch (native_os) { id: Id, thread_handle: if (native_os == .windows) windows.HANDLE else void, -allocator: mem.Allocator, +allocator: Allocator, /// The writing end of the child process's standard input pipe. /// Usage requires `stdin_behavior == StdIo.Pipe`. @@ -229,7 +229,7 @@ pub const StdIo = enum { }; /// First argument in argv is the executable. -pub fn init(argv: []const []const u8, allocator: mem.Allocator) ChildProcess { +pub fn init(argv: []const []const u8, allocator: Allocator) Child { return .{ .allocator = allocator, .argv = argv, @@ -252,7 +252,7 @@ pub fn init(argv: []const []const u8, allocator: mem.Allocator) ChildProcess { }; } -pub fn setUserName(self: *ChildProcess, name: []const u8) !void { +pub fn setUserName(self: *Child, name: []const u8) !void { const user_info = try process.getUserInfo(name); self.uid = user_info.uid; self.gid = user_info.gid; @@ -260,7 +260,7 @@ pub fn setUserName(self: *ChildProcess, name: []const u8) !void { /// On success must call `kill` or `wait`. /// After spawning the `id` is available. -pub fn spawn(self: *ChildProcess) SpawnError!void { +pub fn spawn(self: *Child, io: Io) SpawnError!void { if (!process.can_spawn) { @compileError("the target operating system cannot spawn processes"); } @@ -268,17 +268,17 @@ pub fn spawn(self: *ChildProcess) SpawnError!void { if (native_os == .windows) { return self.spawnWindows(); } else { - return self.spawnPosix(); + return self.spawnPosix(io); } } -pub fn spawnAndWait(self: *ChildProcess) SpawnError!Term { - try self.spawn(); - return self.wait(); +pub fn spawnAndWait(child: *Child, io: Io) SpawnError!Term { + try child.spawn(io); + return child.wait(io); } /// Forcibly terminates child process and then cleans up all resources. -pub fn kill(self: *ChildProcess, io: Io) !Term { +pub fn kill(self: *Child, io: Io) !Term { if (native_os == .windows) { return self.killWindows(io, 1); } else { @@ -286,7 +286,7 @@ pub fn kill(self: *ChildProcess, io: Io) !Term { } } -pub fn killWindows(self: *ChildProcess, io: Io, exit_code: windows.UINT) !Term { +pub fn killWindows(self: *Child, io: Io, exit_code: windows.UINT) !Term { if (self.term) |term| { self.cleanupStreams(io); return term; @@ -308,7 +308,7 @@ pub fn killWindows(self: *ChildProcess, io: Io, exit_code: windows.UINT) !Term { return self.term.?; } -pub fn killPosix(self: *ChildProcess, io: Io) !Term { +pub fn killPosix(self: *Child, io: Io) !Term { if (self.term) |term| { self.cleanupStreams(io); return term; @@ -325,7 +325,7 @@ pub const WaitError = SpawnError || std.os.windows.GetProcessMemoryInfoError; /// On some targets, `spawn` may not report all spawn errors, such as `error.InvalidExe`. /// This function will block until any spawn errors can be reported, and return them. -pub fn waitForSpawn(self: *ChildProcess) SpawnError!void { +pub fn waitForSpawn(self: *Child) SpawnError!void { if (native_os == .windows) return; // `spawn` reports everything if (self.term) |term| { _ = term catch |spawn_err| return spawn_err; @@ -355,7 +355,7 @@ pub fn waitForSpawn(self: *ChildProcess) SpawnError!void { } /// Blocks until child process terminates and then cleans up all resources. 
-pub fn wait(self: *ChildProcess, io: Io) WaitError!Term { +pub fn wait(self: *Child, io: Io) WaitError!Term { try self.waitForSpawn(); // report spawn errors if (self.term) |term| { self.cleanupStreams(io); @@ -381,7 +381,7 @@ pub const RunResult = struct { /// /// The process must be started with stdout_behavior and stderr_behavior == .Pipe pub fn collectOutput( - child: ChildProcess, + child: Child, /// Used for `stdout` and `stderr`. allocator: Allocator, stdout: *ArrayList(u8), @@ -446,7 +446,7 @@ pub fn run(allocator: Allocator, io: Io, args: struct { expand_arg0: Arg0Expand = .no_expand, progress_node: std.Progress.Node = std.Progress.Node.none, }) RunError!RunResult { - var child = ChildProcess.init(args.argv, allocator); + var child = Child.init(args.argv, allocator); child.stdin_behavior = .Ignore; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; @@ -461,7 +461,7 @@ pub fn run(allocator: Allocator, io: Io, args: struct { var stderr: ArrayList(u8) = .empty; defer stderr.deinit(allocator); - try child.spawn(); + try child.spawn(io); errdefer { _ = child.kill(io) catch {}; } @@ -474,7 +474,7 @@ pub fn run(allocator: Allocator, io: Io, args: struct { }; } -fn waitUnwrappedWindows(self: *ChildProcess, io: Io) WaitError!void { +fn waitUnwrappedWindows(self: *Child, io: Io) WaitError!void { const result = windows.WaitForSingleObjectEx(self.id, windows.INFINITE, false); self.term = @as(SpawnError!Term, x: { @@ -496,7 +496,7 @@ fn waitUnwrappedWindows(self: *ChildProcess, io: Io) WaitError!void { return result; } -fn waitUnwrappedPosix(self: *ChildProcess, io: Io) void { +fn waitUnwrappedPosix(self: *Child, io: Io) void { const res: posix.WaitPidResult = res: { if (self.request_resource_usage_statistics) { switch (native_os) { @@ -531,11 +531,11 @@ fn waitUnwrappedPosix(self: *ChildProcess, io: Io) void { self.handleWaitResult(status); } -fn handleWaitResult(self: *ChildProcess, status: u32) void { +fn handleWaitResult(self: *Child, status: u32) void { self.term = statusToTerm(status); } -fn cleanupStreams(self: *ChildProcess, io: Io) void { +fn cleanupStreams(self: *Child, io: Io) void { if (self.stdin) |*stdin| { stdin.close(io); self.stdin = null; @@ -561,7 +561,7 @@ fn statusToTerm(status: u32) Term { Term{ .Unknown = status }; } -fn spawnPosix(self: *ChildProcess) SpawnError!void { +fn spawnPosix(self: *Child, io: Io) SpawnError!void { // The child process does need to access (one end of) these pipes. However, // we must initially set CLOEXEC to avoid a race condition. If another thread // is racing to spawn a different child process, we don't want it to inherit @@ -659,7 +659,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { })).ptr; } else { // TODO come up with a solution for this. 
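As the updated signatures above show, spawn, wait, and spawnAndWait now take the Io instance explicitly. A minimal usage sketch under those signatures (the argv literal, gpa, and io are assumed to be in scope at the call site):

    var child = std.process.Child.init(&.{ "zig", "version" }, gpa);
    child.stdin_behavior = .Ignore;
    child.stdout_behavior = .Ignore;
    child.stderr_behavior = .Ignore;
    try child.spawn(io);
    const term = try child.wait(io);
    _ = term;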
- @panic("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process"); + @panic("missing std lib enhancement: std.process.Child implementation has no way to collect the environment variables to forward to the child process"); } }; @@ -671,41 +671,41 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { const pid_result = try posix.fork(); if (pid_result == 0) { // we are the child - setUpChildIo(self.stdin_behavior, stdin_pipe[0], posix.STDIN_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); - setUpChildIo(self.stdout_behavior, stdout_pipe[1], posix.STDOUT_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); - setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); + setUpChildIo(self.stdin_behavior, stdin_pipe[0], posix.STDIN_FILENO, dev_null_fd) catch |err| forkChildErrReport(io, err_pipe[1], err); + setUpChildIo(self.stdout_behavior, stdout_pipe[1], posix.STDOUT_FILENO, dev_null_fd) catch |err| forkChildErrReport(io, err_pipe[1], err); + setUpChildIo(self.stderr_behavior, stderr_pipe[1], posix.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(io, err_pipe[1], err); if (self.cwd_dir) |cwd| { - posix.fchdir(cwd.handle) catch |err| forkChildErrReport(err_pipe[1], err); + posix.fchdir(cwd.handle) catch |err| forkChildErrReport(io, err_pipe[1], err); } else if (self.cwd) |cwd| { - posix.chdir(cwd) catch |err| forkChildErrReport(err_pipe[1], err); + posix.chdir(cwd) catch |err| forkChildErrReport(io, err_pipe[1], err); } // Must happen after fchdir above, the cwd file descriptor might be // equal to prog_fileno and be clobbered by this dup2 call. - if (prog_pipe[1] != -1) posix.dup2(prog_pipe[1], prog_fileno) catch |err| forkChildErrReport(err_pipe[1], err); + if (prog_pipe[1] != -1) posix.dup2(prog_pipe[1], prog_fileno) catch |err| forkChildErrReport(io, err_pipe[1], err); if (self.gid) |gid| { - posix.setregid(gid, gid) catch |err| forkChildErrReport(err_pipe[1], err); + posix.setregid(gid, gid) catch |err| forkChildErrReport(io, err_pipe[1], err); } if (self.uid) |uid| { - posix.setreuid(uid, uid) catch |err| forkChildErrReport(err_pipe[1], err); + posix.setreuid(uid, uid) catch |err| forkChildErrReport(io, err_pipe[1], err); } if (self.pgid) |pid| { - posix.setpgid(0, pid) catch |err| forkChildErrReport(err_pipe[1], err); + posix.setpgid(0, pid) catch |err| forkChildErrReport(io, err_pipe[1], err); } if (self.start_suspended) { - posix.kill(posix.getpid(), .STOP) catch |err| forkChildErrReport(err_pipe[1], err); + posix.kill(posix.getpid(), .STOP) catch |err| forkChildErrReport(io, err_pipe[1], err); } const err = switch (self.expand_arg0) { .expand => posix.execvpeZ_expandArg0(.expand, argv_buf.ptr[0].?, argv_buf.ptr, envp), .no_expand => posix.execvpeZ_expandArg0(.no_expand, argv_buf.ptr[0].?, argv_buf.ptr, envp), }; - forkChildErrReport(err_pipe[1], err); + forkChildErrReport(io, err_pipe[1], err); } // we are the parent @@ -750,7 +750,7 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { self.progress_node.setIpcFd(prog_pipe[0]); } -fn spawnWindows(self: *ChildProcess) SpawnError!void { +fn spawnWindows(self: *Child) SpawnError!void { var saAttr = windows.SECURITY_ATTRIBUTES{ .nLength = @sizeOf(windows.SECURITY_ATTRIBUTES), .bInheritHandle = windows.TRUE, @@ -880,7 +880,7 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void { const app_name_wtf8 = self.argv[0]; const 
app_name_is_absolute = fs.path.isAbsolute(app_name_wtf8); - // the cwd set in ChildProcess is in effect when choosing the executable path + // the cwd set in Child is in effect when choosing the executable path // to match posix semantics var cwd_path_w_needs_free = false; const cwd_path_w = x: { @@ -965,7 +965,7 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void { // If the app name had path separators, that disallows PATH searching, // and there's no need to search the PATH if the app name is absolute. // We still search the path if the cwd is absolute because of the - // "cwd set in ChildProcess is in effect when choosing the executable path + // "cwd set in Child is in effect when choosing the executable path // to match posix semantics" behavior--we don't want to skip searching // the PATH just because we were trying to set the cwd of the child process. if (app_dirname_w != null or app_name_is_absolute) { @@ -1039,8 +1039,8 @@ fn destroyPipe(pipe: [2]posix.fd_t) void { // Child of fork calls this to report an error to the fork parent. // Then the child exits. -fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn { - writeIntFd(fd, @as(ErrInt, @intFromError(err))) catch {}; +fn forkChildErrReport(io: Io, fd: i32, err: Child.SpawnError) noreturn { + writeIntFd(io, fd, @as(ErrInt, @intFromError(err))) catch {}; // If we're linking libc, some naughty applications may have registered atexit handlers // which we really do not want to run in the fork child. I caught LLVM doing this and // it caused a deadlock instead of doing an exit syscall. In the words of Avril Lavigne, @@ -1052,9 +1052,9 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn { posix.system.exit(1); } -fn writeIntFd(fd: i32, value: ErrInt) !void { +fn writeIntFd(io: Io, fd: i32, value: ErrInt) !void { var buffer: [8]u8 = undefined; - var fw: File.Writer = .initStreaming(.{ .handle = fd }, &buffer); + var fw: File.Writer = .initStreaming(.{ .handle = fd }, io, &buffer); fw.interface.writeInt(u64, value, .little) catch unreachable; fw.interface.flush() catch return error.SystemResources; } @@ -1078,7 +1078,7 @@ const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8); /// Note: `app_buf` should not contain any leading path separators. /// Note: If the dir is the cwd, dir_buf should be empty (len = 0). fn windowsCreateProcessPathExt( - allocator: mem.Allocator, + allocator: Allocator, dir_buf: *ArrayList(u16), app_buf: *ArrayList(u16), pathext: [:0]const u16, @@ -1525,9 +1525,9 @@ const WindowsCommandLineCache = struct { script_cmd_line: ?[:0]u16 = null, cmd_exe_path: ?[:0]u16 = null, argv: []const []const u8, - allocator: mem.Allocator, + allocator: Allocator, - fn init(allocator: mem.Allocator, argv: []const []const u8) WindowsCommandLineCache { + fn init(allocator: Allocator, argv: []const []const u8) WindowsCommandLineCache { return .{ .allocator = allocator, .argv = argv, @@ -1571,7 +1571,7 @@ const WindowsCommandLineCache = struct { /// Returns the absolute path of `cmd.exe` within the Windows system directory. /// The caller owns the returned slice. 
-fn windowsCmdExePath(allocator: mem.Allocator) error{ OutOfMemory, Unexpected }![:0]u16 { +fn windowsCmdExePath(allocator: Allocator) error{ OutOfMemory, Unexpected }![:0]u16 { var buf = try ArrayList(u16).initCapacity(allocator, 128); errdefer buf.deinit(allocator); while (true) { @@ -1608,7 +1608,7 @@ const ArgvToCommandLineError = error{ OutOfMemory, InvalidWtf8, InvalidArg0 }; /// /// When executing `.bat`/`.cmd` scripts, use `argvToScriptCommandLineWindows` instead. fn argvToCommandLineWindows( - allocator: mem.Allocator, + allocator: Allocator, argv: []const []const u8, ) ArgvToCommandLineError![:0]u16 { var buf = std.array_list.Managed(u8).init(allocator); @@ -1784,7 +1784,7 @@ const ArgvToScriptCommandLineError = error{ /// Should only be used when spawning `.bat`/`.cmd` scripts, see `argvToCommandLineWindows` otherwise. /// The `.bat`/`.cmd` file must be known to both have the `.bat`/`.cmd` extension and exist on the filesystem. fn argvToScriptCommandLineWindows( - allocator: mem.Allocator, + allocator: Allocator, /// Path to the `.bat`/`.cmd` script. If this path is relative, it is assumed to be relative to the CWD. /// The script must have been verified to exist at this path before calling this function. script_path: []const u16, diff --git a/lib/std/tar.zig b/lib/std/tar.zig index 5e7215f1d7..7bb20a9959 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -610,7 +610,7 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp } }, .file => { - if (createDirAndFile(io, dir, file_name, fileMode(file.mode, options))) |fs_file| { + if (createDirAndFile(io, dir, file_name, filePermissions(file.mode, options))) |fs_file| { defer fs_file.close(io); var file_writer = fs_file.writer(io, &file_contents_buffer); try it.streamRemaining(file, &file_writer.interface); @@ -638,12 +638,12 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp } } -fn createDirAndFile(io: Io, dir: Io.Dir, file_name: []const u8, mode: Io.File.Mode) !Io.File { - const fs_file = dir.createFile(io, file_name, .{ .exclusive = true, .mode = mode }) catch |err| { +fn createDirAndFile(io: Io, dir: Io.Dir, file_name: []const u8, permissions: Io.File.Permissions) !Io.File { + const fs_file = dir.createFile(io, file_name, .{ .exclusive = true, .permissions = permissions }) catch |err| { if (err == error.FileNotFound) { if (std.fs.path.dirname(file_name)) |dir_name| { try dir.makePath(io, dir_name); - return try dir.createFile(io, file_name, .{ .exclusive = true, .mode = mode }); + return try dir.createFile(io, file_name, .{ .exclusive = true, .permissions = permissions }); } } return err; @@ -880,9 +880,9 @@ test "create file and symlink" { var root = testing.tmpDir(.{}); defer root.cleanup(); - var file = try createDirAndFile(io, root.dir, "file1", default_mode); + var file = try createDirAndFile(io, root.dir, "file1", .default_file); file.close(io); - file = try createDirAndFile(io, root.dir, "a/b/c/file2", default_mode); + file = try createDirAndFile(io, root.dir, "a/b/c/file2", .default_file); file.close(io); createDirAndSymlink(io, root.dir, "a/b/c/file2", "symlink1") catch |err| { @@ -894,7 +894,7 @@ test "create file and symlink" { // Danglink symlnik, file created later try createDirAndSymlink(io, root.dir, "../../../g/h/i/file4", "j/k/l/symlink3"); - file = try createDirAndFile(io, root.dir, "g/h/i/file4", default_mode); + file = try createDirAndFile(io, root.dir, "g/h/i/file4", .default_file); file.close(io); } @@ -1118,30 +1118,30 @@ fn 
normalizePath(bytes: []u8) []u8 { return bytes; } -const default_mode = Io.File.default_mode; - // File system mode based on tar header mode and mode_mode options. -fn fileMode(mode: u32, options: PipeOptions) Io.File.Mode { +fn filePermissions(mode: u32, options: PipeOptions) Io.File.Permissions { + const default_mode = 0o666; + if (!std.fs.has_executable_bit or options.mode_mode == .ignore) - return default_mode; + return .fromMode(default_mode); const S = std.posix.S; // The mode from the tar file is inspected for the owner executable bit. if (mode & S.IXUSR == 0) - return default_mode; + return .fromMode(default_mode); // This bit is copied to the group and other executable bits. // Other bits of the mode are left as the default when creating files. - return default_mode | S.IXUSR | S.IXGRP | S.IXOTH; + return .fromMode(default_mode | S.IXUSR | S.IXGRP | S.IXOTH); } -test fileMode { +test filePermissions { if (!std.fs.has_executable_bit) return error.SkipZigTest; - try testing.expectEqual(default_mode, fileMode(0o744, PipeOptions{ .mode_mode = .ignore })); - try testing.expectEqual(0o777, fileMode(0o744, PipeOptions{})); - try testing.expectEqual(0o666, fileMode(0o644, PipeOptions{})); - try testing.expectEqual(0o666, fileMode(0o655, PipeOptions{})); + try testing.expectEqual(0o666, filePermissions(0o744, PipeOptions{ .mode_mode = .ignore })); + try testing.expectEqual(0o777, filePermissions(0o744, PipeOptions{})); + try testing.expectEqual(0o666, filePermissions(0o644, PipeOptions{})); + try testing.expectEqual(0o666, filePermissions(0o655, PipeOptions{})); } test "executable bit" { diff --git a/src/Compilation.zig b/src/Compilation.zig index 617421e279..c63fa9a3c1 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5782,7 +5782,7 @@ pub fn translateC( } // Just to save disk space, we delete the file because it is never needed again. - cache_tmp_dir.deleteFile(dep_basename) catch |err| { + cache_tmp_dir.deleteFile(io, dep_basename) catch |err| { log.warn("failed to delete '{s}': {t}", .{ dep_file_path, err }); }; } @@ -6314,11 +6314,11 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr } // Just to save disk space, we delete the files that are never needed again. 
- defer if (out_diag_path) |diag_file_path| zig_cache_tmp_dir.deleteFile(fs.path.basename(diag_file_path)) catch |err| switch (err) { + defer if (out_diag_path) |diag_file_path| zig_cache_tmp_dir.deleteFile(io, fs.path.basename(diag_file_path)) catch |err| switch (err) { error.FileNotFound => {}, // the file wasn't created due to an error we reported else => log.warn("failed to delete '{s}': {s}", .{ diag_file_path, @errorName(err) }), }; - defer if (out_dep_path) |dep_file_path| zig_cache_tmp_dir.deleteFile(fs.path.basename(dep_file_path)) catch |err| switch (err) { + defer if (out_dep_path) |dep_file_path| zig_cache_tmp_dir.deleteFile(io, fs.path.basename(dep_file_path)) catch |err| switch (err) { error.FileNotFound => {}, // the file wasn't created due to an error we reported else => log.warn("failed to delete '{s}': {s}", .{ dep_file_path, @errorName(err) }), }; @@ -6329,7 +6329,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr child.stdout_behavior = .Inherit; child.stderr_behavior = .Inherit; - const term = child.spawnAndWait() catch |err| { + const term = child.spawnAndWait(io) catch |err| { return comp.failCObj(c_object, "failed to spawn zig clang (passthrough mode) {s}: {s}", .{ argv.items[0], @errorName(err) }); }; switch (term) { @@ -6347,7 +6347,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr child.stdout_behavior = .Ignore; child.stderr_behavior = .Pipe; - try child.spawn(); + try child.spawn(io); var stderr_reader = child.stderr.?.readerStreaming(io, &.{}); const stderr = try stderr_reader.interface.allocRemaining(arena, .limited(std.math.maxInt(u32))); @@ -6723,6 +6723,7 @@ fn spawnZigRc( argv: []const []const u8, child_progress_node: std.Progress.Node, ) !void { + const io = comp.io; var node_name: std.ArrayList(u8) = .empty; defer node_name.deinit(arena); @@ -6732,8 +6733,8 @@ fn spawnZigRc( child.stderr_behavior = .Pipe; child.progress_node = child_progress_node; - child.spawn() catch |err| { - return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {s}", .{ argv[0], @errorName(err) }); + child.spawn(io) catch |err| { + return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {t}", .{ argv[0], err }); }; var poller = std.Io.poll(comp.gpa, enum { stdout, stderr }, .{ diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 3552a7fc06..e9753734e9 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -1347,7 +1347,7 @@ fn unzip( .diagnostics = &diagnostics, }) catch |err| return f.fail(f.location_tok, try eb.printString("zip extract failed: {t}", .{err})); - cache_root.handle.deleteFile(&zip_path) catch |err| + cache_root.handle.deleteFile(io, &zip_path) catch |err| return f.fail(f.location_tok, try eb.printString("delete temporary zip failed: {t}", .{err})); return .{ .root_dir = diagnostics.root_dir }; @@ -1547,7 +1547,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute .fs_path = fs_path, .failure = undefined, // to be populated by the worker }; - group.async(io, workerDeleteFile, .{ root_dir, deleted_file }); + group.async(io, workerDeleteFile, .{ io, root_dir, deleted_file }); try deleted_files.append(deleted_file); continue; } @@ -1669,8 +1669,8 @@ fn workerHashFile(dir: Io.Dir, hashed_file: *HashedFile) void { hashed_file.failure = hashFileFallible(dir, hashed_file); } -fn workerDeleteFile(dir: Io.Dir, deleted_file: *DeletedFile) void { - deleted_file.failure = deleteFileFallible(dir, deleted_file); +fn 
workerDeleteFile(io: Io, dir: Io.Dir, deleted_file: *DeletedFile) void { + deleted_file.failure = deleteFileFallible(io, dir, deleted_file); } fn hashFileFallible(io: Io, dir: Io.Dir, hashed_file: *HashedFile) HashedFile.Error!void { @@ -1712,8 +1712,8 @@ fn hashFileFallible(io: Io, dir: Io.Dir, hashed_file: *HashedFile) HashedFile.Er hashed_file.size = file_size; } -fn deleteFileFallible(dir: Io.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void { - try dir.deleteFile(deleted_file.fs_path); +fn deleteFileFallible(io: Io, dir: Io.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void { + try dir.deleteFile(io, deleted_file.fs_path); } fn setExecutable(file: Io.File) !void { diff --git a/src/link.zig b/src/link.zig index ff6d938e6f..7ebe1c6ba2 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1235,22 +1235,26 @@ pub const File = struct { ty: InternPool.Index, }; - pub fn determineMode( + pub fn determinePermissions( output_mode: std.builtin.OutputMode, link_mode: std.builtin.LinkMode, - ) Io.File.Mode { + ) Io.File.Permissions { // On common systems with a 0o022 umask, 0o777 will still result in a file created // with 0o755 permissions, but it works appropriately if the system is configured // more leniently. As another data point, C's fopen seems to open files with the // 666 mode. - const executable_mode = if (builtin.target.os.tag == .windows) 0 else 0o777; + const executable_mode: Io.FilePermissions = if (builtin.target.os.tag == .windows) + .default_file + else + .fromMode(0o777); + switch (output_mode) { .Lib => return switch (link_mode) { .dynamic => executable_mode, - .static => Io.File.default_mode, + .static => .default_file, }, .Exe => return executable_mode, - .Obj => return Io.File.default_mode, + .Obj => return .default_file, } } diff --git a/src/link/Lld.zig b/src/link/Lld.zig index 6920c12762..e3127b24bf 100644 --- a/src/link/Lld.zig +++ b/src/link/Lld.zig @@ -1608,13 +1608,13 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi child.stdout_behavior = .Inherit; child.stderr_behavior = .Inherit; - break :term child.spawnAndWait(); + break :term child.spawnAndWait(io); } else term: { child.stdin_behavior = .Ignore; child.stdout_behavior = .Ignore; child.stderr_behavior = .Pipe; - child.spawn() catch |err| break :term err; + child.spawn(io) catch |err| break :term err; var stderr_reader = child.stderr.?.readerStreaming(io, &.{}); stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited); break :term child.wait(); @@ -1658,13 +1658,13 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi rsp_child.stdout_behavior = .Inherit; rsp_child.stderr_behavior = .Inherit; - break :term rsp_child.spawnAndWait() catch |err| break :err err; + break :term rsp_child.spawnAndWait(io) catch |err| break :err err; } else { rsp_child.stdin_behavior = .Ignore; rsp_child.stdout_behavior = .Ignore; rsp_child.stderr_behavior = .Pipe; - rsp_child.spawn() catch |err| break :err err; + rsp_child.spawn(io) catch |err| break :err err; var stderr_reader = rsp_child.stderr.?.readerStreaming(io, &.{}); stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited); break :term rsp_child.wait() catch |err| break :err err; diff --git a/src/main.zig b/src/main.zig index a832a79dbe..3835e75949 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4457,7 +4457,7 @@ fn runOrTest( const term_result = t: { std.debug.lockStdErr(); defer std.debug.unlockStdErr(); - break :t child.spawnAndWait(); + break :t child.spawnAndWait(io); }; 
const term = term_result catch |err| { try warnAboutForeignBinaries(io, arena, arg_mode, target, link_libc); @@ -4512,6 +4512,7 @@ fn runOrTestHotSwap( all_args: []const []const u8, runtime_args_start: ?usize, ) !std.process.Child.Id { + const io = comp.io; const lf = comp.bin_file.?; const exe_path = switch (builtin.target.os.tag) { @@ -4593,7 +4594,7 @@ fn runOrTestHotSwap( child.stdout_behavior = .Inherit; child.stderr_behavior = .Inherit; - try child.spawn(); + try child.spawn(io); return child.id; }, @@ -5419,8 +5420,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) const term = t: { std.debug.lockStdErr(); defer std.debug.unlockStdErr(); - break :t child.spawnAndWait() catch |err| { - fatal("failed to spawn build runner {s}: {s}", .{ child_argv.items[0], @errorName(err) }); + break :t child.spawnAndWait(io) catch |err| { + fatal("failed to spawn build runner {s}: {t}", .{ child_argv.items[0], err }); }; }; @@ -5444,7 +5445,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) dirs.local_cache, tmp_sub_path, @errorName(err), }); }; - dirs.local_cache.handle.deleteFile(tmp_sub_path) catch {}; + dirs.local_cache.handle.deleteFile(io, tmp_sub_path) catch {}; var it = mem.splitScalar(u8, stdout, '\n'); var any_errors = false; @@ -5685,7 +5686,7 @@ fn jitCmd( child.stdout_behavior = if (options.capture == null) .Inherit else .Pipe; child.stderr_behavior = .Inherit; - try child.spawn(); + try child.spawn(io); if (options.capture) |ptr| { var stdout_reader = child.stdout.?.readerStreaming(io, &.{}); diff --git a/test/src/Cases.zig b/test/src/Cases.zig index bf3eade8f4..82b59f722d 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -461,6 +461,7 @@ pub fn lowerToBuildSteps( parent_step: *std.Build.Step, options: CaseTestOptions, ) void { + const io = self.io; const host = b.resolveTargetQuery(.{}); const cases_dir_path = b.build_root.join(b.allocator, &.{ "test", "cases" }) catch @panic("OOM"); @@ -595,7 +596,7 @@ pub fn lowerToBuildSteps( }, .Execution => |expected_stdout| no_exec: { const run = if (case.target.result.ofmt == .c) run_step: { - if (getExternalExecutor(&host.result, &case.target.result, .{ .link_libc = true }) != .native) { + if (getExternalExecutor(io, &host.result, &case.target.result, .{ .link_libc = true }) != .native) { // We wouldn't be able to run the compiled C code. 
                    break :no_exec;
                }
            }
diff --git a/test/standalone/child_process/main.zig b/test/standalone/child_process/main.zig
index 2a28845a56..3aaec0f2b2 100644
--- a/test/standalone/child_process/main.zig
+++ b/test/standalone/child_process/main.zig
@@ -29,7 +29,7 @@ pub fn main() !void {
     child.stdin_behavior = .Pipe;
     child.stdout_behavior = .Pipe;
     child.stderr_behavior = .Inherit;
-    try child.spawn();
+    try child.spawn(io);
     const child_stdin = child.stdin.?;
     try child_stdin.writeAll("hello from stdin"); // verified in child
     child_stdin.close(io);
-- cgit v1.2.3

From 16f8af1b9a7a287ac6fdefec5949725c55cbe179 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 11 Dec 2025 23:18:42 -0800
Subject: compiler: update various code to new fs API

---
 lib/compiler/aro/main.zig | 2 +-
 lib/std/Build/Cache.zig | 8 +-
 lib/std/Build/Cache/Path.zig | 4 +-
 lib/std/Io/Dir.zig | 2 +-
 lib/std/crypto/Certificate/Bundle.zig | 3 +-
 lib/std/crypto/Certificate/Bundle/macos.zig | 2 +-
 lib/std/fs/test.zig | 14 +--
 lib/std/posix/test.zig | 2 +-
 lib/std/zip.zig | 4 +-
 src/Compilation.zig | 30 +++---
 src/Package/Fetch.zig | 14 +--
 src/Package/Fetch/git.zig | 4 +-
 src/Zcu.zig | 2 +-
 src/crash_report.zig | 10 +-
 src/fmt.zig | 12 +--
 src/introspect.zig | 54 +++++-----
 src/libs/freebsd.zig | 4 +-
 src/libs/glibc.zig | 4 +-
 src/libs/mingw.zig | 4 +-
 src/libs/netbsd.zig | 4 +-
 src/link.zig | 15 ++-
 src/link/Coff.zig | 4 +-
 src/link/Dwarf.zig | 1 -
 src/link/Elf.zig | 2 +-
 src/link/Elf2.zig | 4 +-
 src/link/MachO.zig | 35 +++++--
 src/link/MappedFile.zig | 7 +-
 src/link/Wasm.zig | 6 +-
 src/main.zig | 155 +++++++++++++---------------
 src/print_env.zig | 17 +--
 src/print_targets.zig | 16 +--
 test/standalone/self_exe_symlink/main.zig | 2 +-
 32 files changed, 228 insertions(+), 219 deletions(-)
(limited to 'lib/std/Build/Cache.zig')

diff --git a/lib/compiler/aro/main.zig b/lib/compiler/aro/main.zig
index d1be1dbb21..9e0440febf 100644
--- a/lib/compiler/aro/main.zig
+++ b/lib/compiler/aro/main.zig
@@ -43,7 +43,7 @@ pub fn main() u8 {
         return 1;
     };
 
-    const aro_name = std.fs.selfExePathAlloc(gpa) catch {
+    const aro_name = process.executablePathAlloc(io, gpa) catch {
         std.debug.print("unable to find Aro executable path\n", .{});
         if (fast_exit) process.exit(1);
         return 1;
diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig
index b97efc7677..e2c848b6fd 100644
--- a/lib/std/Build/Cache.zig
+++ b/lib/std/Build/Cache.zig
@@ -1330,7 +1330,7 @@ test "cache file and then recall it" {
     var cache: Cache = .{
         .io = io,
         .gpa = testing.allocator,
-        .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
+        .manifest_dir = try tmp.dir.makeOpenPath(io, temp_manifest_dir, .{}),
     };
     cache.addPrefix(.{ .path = null, .handle = tmp.dir });
     defer cache.manifest_dir.close(io);
@@ -1396,7 +1396,7 @@ test "check that changing a file makes cache fail" {
     var cache: Cache = .{
         .io = io,
         .gpa = testing.allocator,
-        .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
+        .manifest_dir = try tmp.dir.makeOpenPath(io, temp_manifest_dir, .{}),
     };
     cache.addPrefix(.{ .path = null, .handle = tmp.dir });
     defer cache.manifest_dir.close(io);
@@ -1456,7 +1456,7 @@ test "no file inputs" {
     var cache: Cache = .{
         .io = io,
         .gpa = testing.allocator,
-        .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
+        .manifest_dir = try tmp.dir.makeOpenPath(io, temp_manifest_dir, .{}),
    };
     cache.addPrefix(.{ .path = null, .handle = tmp.dir });
     defer cache.manifest_dir.close(io);
@@ -1515,7 +1515,7 @@ test "Manifest with files added after initial hash work" {
    var
cache: Cache = .{ .io = io, .gpa = testing.allocator, - .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}), + .manifest_dir = try tmp.dir.makeOpenPath(io, temp_manifest_dir, .{}), }; cache.addPrefix(.{ .path = null, .handle = tmp.dir }); defer cache.manifest_dir.close(io); diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index 54104afa41..51f9e7aecd 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -84,14 +84,14 @@ pub fn openDir( return p.root_dir.handle.openDir(io, joined_path, args); } -pub fn makeOpenPath(p: Path, sub_path: []const u8, opts: Io.Dir.OpenOptions) !Io.Dir { +pub fn makeOpenPath(p: Path, io: Io, sub_path: []const u8, opts: Io.Dir.OpenOptions) !Io.Dir { var buf: [fs.max_path_bytes]u8 = undefined; const joined_path = if (p.sub_path.len == 0) sub_path else p: { break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{ p.sub_path, sub_path, }) catch return error.NameTooLong; }; - return p.root_dir.handle.makeOpenPath(joined_path, opts); + return p.root_dir.handle.makeOpenPath(io, joined_path, opts); } pub fn statFile(p: Path, io: Io, sub_path: []const u8) !Io.Dir.Stat { diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig index be82b8b8b2..cd30181900 100644 --- a/lib/std/Io/Dir.zig +++ b/lib/std/Io/Dir.zig @@ -1588,7 +1588,7 @@ pub const CopyFileOptions = struct { pub const CopyFileError = File.OpenError || File.StatError || File.Atomic.InitError || File.Atomic.FinishError || - File.Reader.Error || File.WriteError || error{InvalidFileName}; + File.Reader.Error || File.Writer.Error || error{InvalidFileName}; /// Atomically creates a new file at `dest_path` within `dest_dir` with the /// same contents as `source_path` within `source_dir`, overwriting any already diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig index 1b87c97949..386e1b6b5a 100644 --- a/lib/std/crypto/Certificate/Bundle.zig +++ b/lib/std/crypto/Certificate/Bundle.zig @@ -242,8 +242,7 @@ pub fn addCertsFromFilePath( } pub const AddCertsFromFileError = Allocator.Error || - Io.File.GetSeekPosError || - Io.File.ReadError || + Io.File.Reader.Error || ParseCertError || std.base64.Error || error{ CertificateAuthorityBundleTooBig, MissingEndCertificateMarker, Streaming }; diff --git a/lib/std/crypto/Certificate/Bundle/macos.zig b/lib/std/crypto/Certificate/Bundle/macos.zig index 086c8feb3f..ea8a91702d 100644 --- a/lib/std/crypto/Certificate/Bundle/macos.zig +++ b/lib/std/crypto/Certificate/Bundle/macos.zig @@ -6,7 +6,7 @@ const mem = std.mem; const Allocator = std.mem.Allocator; const Bundle = @import("../Bundle.zig"); -pub const RescanMacError = Allocator.Error || Io.File.OpenError || Io.File.ReadError || Io.File.SeekError || Bundle.ParseCertError || error{EndOfStream}; +pub const RescanMacError = Allocator.Error || Io.File.OpenError || Io.File.Reader.Error || Io.File.SeekError || Bundle.ParseCertError || error{EndOfStream}; pub fn rescanMac(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanMacError!void { cb.bytes.clearRetainingCapacity(); diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 5205e8eacf..1a8e5adfbd 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -213,7 +213,7 @@ test "Dir.readLink" { // test 3: relative path symlink const parent_file = ".." 
++ fs.path.sep_str ++ "target.txt"; const canonical_parent_file = try ctx.toCanonicalPathSep(parent_file); - var subdir = try ctx.dir.makeOpenPath("subdir", .{}); + var subdir = try ctx.dir.makeOpenPath(io, "subdir", .{}); defer subdir.close(io); try setupSymlink(io, subdir, canonical_parent_file, "relative-link.txt", .{}); try testReadLink(io, subdir, canonical_parent_file, "relative-link.txt"); @@ -411,7 +411,7 @@ test "openDir non-cwd parent '..'" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var subdir = try tmp.dir.makeOpenPath("subdir", .{}); + var subdir = try tmp.dir.makeOpenPath(io, "subdir", .{}); defer subdir.close(io); var dir = try subdir.openDir(io, "..", .{}); @@ -613,7 +613,7 @@ test "Dir.Iterator but dir is deleted during iteration" { defer tmp.cleanup(); // Create directory and setup an iterator for it - var subdir = try tmp.dir.makeOpenPath("subdir", .{ .iterate = true }); + var subdir = try tmp.dir.makeOpenPath(io, "subdir", .{ .iterate = true }); defer subdir.close(io); var iterator = subdir.iterate(); @@ -862,7 +862,7 @@ test "makeOpenPath parent dirs do not exist" { var tmp_dir = tmpDir(.{}); defer tmp_dir.cleanup(); - var dir = try tmp_dir.dir.makeOpenPath("root_dir/parent_dir/some_dir", .{}); + var dir = try tmp_dir.dir.makeOpenPath(io, "root_dir/parent_dir/some_dir", .{}); dir.close(io); // double check that the full directory structure was created @@ -1010,7 +1010,7 @@ test "Dir.rename directory onto non-empty dir" { try ctx.dir.makeDir(io, test_dir_path, .default_dir); - var target_dir = try ctx.dir.makeOpenPath(target_dir_path, .{}); + var target_dir = try ctx.dir.makeOpenPath(io, target_dir_path, .{}); var file = try target_dir.createFile(io, "test_file", .{ .read = true }); file.close(io); target_dir.close(io); @@ -1147,7 +1147,7 @@ test "deleteTree does not follow symlinks" { try tmp.dir.makePath(io, "b"); { - var a = try tmp.dir.makeOpenPath("a", .{}); + var a = try tmp.dir.makeOpenPath(io, "a", .{}); defer a.close(io); try setupSymlink(io, a, "../b", "b", .{ .is_directory = true }); @@ -1346,7 +1346,7 @@ test "makepath ignores '.'" { fn testFilenameLimits(io: Io, iterable_dir: Dir, maxed_filename: []const u8) !void { // setup, create a dir and a nested file both with maxed filenames, and walk the dir { - var maxed_dir = try iterable_dir.makeOpenPath(maxed_filename, .{}); + var maxed_dir = try iterable_dir.makeOpenPath(io, maxed_filename, .{}); defer maxed_dir.close(io); try maxed_dir.writeFile(io, .{ .sub_path = maxed_filename, .data = "" }); diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 87361a5038..64511ff4a2 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -142,7 +142,7 @@ test "linkat with different directories" { const target_name = "link-target"; const link_name = "newlink"; - const subdir = try tmp.dir.makeOpenPath("subdir", .{}); + const subdir = try tmp.dir.makeOpenPath(io, "subdir", .{}); defer tmp.dir.deleteFile(io, target_name) catch {}; try tmp.dir.writeFile(io, .{ .sub_path = target_name, .data = "example" }); diff --git a/lib/std/zip.zig b/lib/std/zip.zig index 5a91b73d6a..770bfd8ae7 100644 --- a/lib/std/zip.zig +++ b/lib/std/zip.zig @@ -117,7 +117,7 @@ pub const EndRecord = extern struct { return record; } - pub const FindFileError = File.Reader.SizeError || File.SeekError || File.ReadError || error{ + pub const FindFileError = File.Reader.SizeError || File.SeekError || File.Reader.Error || error{ ZipNoEndRecord, EndOfStream, ReadFailed, @@ -560,7 +560,7 @@ pub const Iterator = struct { const 
out_file = blk: { if (std.fs.path.dirname(filename)) |dirname| { - var parent_dir = try dest.makeOpenPath(dirname, .{}); + var parent_dir = try dest.makeOpenPath(io, dirname, .{}); defer parent_dir.close(io); const basename = std.fs.path.basename(filename); diff --git a/src/Compilation.zig b/src/Compilation.zig index c63fa9a3c1..9e76b8feca 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -832,7 +832,7 @@ pub const Directories = struct { const nonempty_path = if (path.len == 0) "." else path; const handle_or_err = switch (thing) { .@"zig lib" => Io.Dir.cwd().openDir(io, nonempty_path, .{}), - .@"global cache", .@"local cache" => Io.Dir.cwd().makeOpenPath(nonempty_path, .{}), + .@"global cache", .@"local cache" => Io.Dir.cwd().makeOpenPath(io, nonempty_path, .{}), }; return .{ .path = if (path.len == 0) null else path, @@ -2111,7 +2111,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, cache.* = .{ .gpa = gpa, .io = io, - .manifest_dir = options.dirs.local_cache.handle.makeOpenPath("h", .{}) catch |err| { + .manifest_dir = options.dirs.local_cache.handle.makeOpenPath(io, "h", .{}) catch |err| { return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = "h", .err = err } }); }, }; @@ -2161,7 +2161,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, // to redundantly happen for each AstGen operation. const zir_sub_dir = "z"; - var local_zir_dir = options.dirs.local_cache.handle.makeOpenPath(zir_sub_dir, .{}) catch |err| { + var local_zir_dir = options.dirs.local_cache.handle.makeOpenPath(io, zir_sub_dir, .{}) catch |err| { return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = zir_sub_dir, .err = err } }); }; errdefer local_zir_dir.close(io); @@ -2169,7 +2169,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, .handle = local_zir_dir, .path = try options.dirs.local_cache.join(arena, &.{zir_sub_dir}), }; - var global_zir_dir = options.dirs.global_cache.handle.makeOpenPath(zir_sub_dir, .{}) catch |err| { + var global_zir_dir = options.dirs.global_cache.handle.makeOpenPath(io, zir_sub_dir, .{}) catch |err| { return diag.fail(.{ .create_cache_path = .{ .which = .global, .sub = zir_sub_dir, .err = err } }); }; errdefer global_zir_dir.close(io); @@ -2440,7 +2440,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, const digest = hash.final(); const artifact_sub_dir = "o" ++ fs.path.sep_str ++ digest; - var artifact_dir = options.dirs.local_cache.handle.makeOpenPath(artifact_sub_dir, .{}) catch |err| { + var artifact_dir = options.dirs.local_cache.handle.makeOpenPath(io, artifact_sub_dir, .{}) catch |err| { return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = artifact_sub_dir, .err = err } }); }; errdefer artifact_dir.close(io); @@ -2895,7 +2895,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE tmp_dir_rand_int = std.crypto.random.int(u64); const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int); const path = try comp.dirs.local_cache.join(arena, &.{tmp_dir_sub_path}); - const handle = comp.dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{}) catch |err| { + const handle = comp.dirs.local_cache.handle.makeOpenPath(io, tmp_dir_sub_path, .{}) catch |err| { return comp.setMiscFailure(.open_output, "failed to create output directory '{s}': {t}", .{ path, err }); }; break :d .{ .path = path, .handle = handle }; @@ -2976,7 +2976,7 @@ pub fn update(comp: 
*Compilation, main_progress_node: std.Progress.Node) UpdateE tmp_dir_rand_int = std.crypto.random.int(u64); const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int); const path = try comp.dirs.local_cache.join(arena, &.{tmp_dir_sub_path}); - const handle = comp.dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{}) catch |err| { + const handle = comp.dirs.local_cache.handle.makeOpenPath(io, tmp_dir_sub_path, .{}) catch |err| { return comp.setMiscFailure(.open_output, "failed to create output directory '{s}': {t}", .{ path, err }); }; break :d .{ .path = path, .handle = handle }; @@ -5267,7 +5267,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void { const io = comp.io; const docs_path = comp.resolveEmitPath(comp.emit_docs.?); - var out_dir = docs_path.root_dir.handle.makeOpenPath(docs_path.sub_path, .{}) catch |err| { + var out_dir = docs_path.root_dir.handle.makeOpenPath(io, docs_path.sub_path, .{}) catch |err| { return comp.lockAndSetMiscFailure( .docs_copy, "unable to create output directory '{f}': {s}", @@ -5509,7 +5509,7 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU assert(docs_bin_file.sub_path.len > 0); // emitted binary is not a directory const docs_path = comp.resolveEmitPath(comp.emit_docs.?); - var out_dir = docs_path.root_dir.handle.makeOpenPath(docs_path.sub_path, .{}) catch |err| { + var out_dir = docs_path.root_dir.handle.makeOpenPath(io, docs_path.sub_path, .{}) catch |err| { comp.lockAndSetMiscFailure( .docs_copy, "unable to create output directory '{f}': {t}", @@ -5699,7 +5699,7 @@ pub fn translateC( const tmp_basename = std.fmt.hex(std.crypto.random.int(u64)); const tmp_sub_path = "tmp" ++ fs.path.sep_str ++ tmp_basename; const cache_dir = comp.dirs.local_cache.handle; - var cache_tmp_dir = try cache_dir.makeOpenPath(tmp_sub_path, .{}); + var cache_tmp_dir = try cache_dir.makeOpenPath(io, tmp_sub_path, .{}); defer cache_tmp_dir.close(io); const translated_path = try comp.dirs.local_cache.join(arena, &.{ tmp_sub_path, translated_basename }); @@ -6274,7 +6274,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr // We can't know the digest until we do the C compiler invocation, // so we need a temporary filename. const out_obj_path = try comp.tmpFilePath(arena, o_basename); - var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{}); + var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath(io, "tmp", .{}); defer zig_cache_tmp_dir.close(io); const out_diag_path = if (comp.clang_passthrough_mode or !ext.clangSupportsDiagnostics()) @@ -6439,7 +6439,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr // Rename into place. 
const digest = man.final(); const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest }); - var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{}); + var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(io, o_sub_path, .{}); defer o_dir.close(io); const tmp_basename = fs.path.basename(out_obj_path); try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename, io); @@ -6528,7 +6528,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 const digest = man.final(); const o_sub_path = try fs.path.join(arena, &.{ "o", &digest }); - var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{}); + var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(io, o_sub_path, .{}); defer o_dir.close(io); const in_rc_path = try comp.dirs.local_cache.join(comp.gpa, &.{ @@ -6616,7 +6616,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 const rc_basename_noext = src_basename[0 .. src_basename.len - fs.path.extension(src_basename).len]; const digest = if (try man.hit()) man.final() else blk: { - var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{}); + var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath(io, "tmp", .{}); defer zig_cache_tmp_dir.close(io); const res_filename = try std.fmt.allocPrint(arena, "{s}.res", .{rc_basename_noext}); @@ -6687,7 +6687,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 // Rename into place. const digest = man.final(); const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest }); - var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{}); + var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(io, o_sub_path, .{}); defer o_dir.close(io); const tmp_basename = fs.path.basename(out_res_path); try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename, io); diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index d25ee55c32..33214a47db 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -500,12 +500,12 @@ fn runResource( var tmp_directory: Cache.Directory = .{ .path = tmp_directory_path, .handle = handle: { - const dir = cache_root.handle.makeOpenPath(tmp_dir_sub_path, .{ + const dir = cache_root.handle.makeOpenPath(io, tmp_dir_sub_path, .{ .iterate = true, }) catch |err| { try eb.addRootErrorMessage(.{ - .msg = try eb.printString("unable to create temporary directory '{s}': {s}", .{ - tmp_directory_path, @errorName(err), + .msg = try eb.printString("unable to create temporary directory '{s}': {t}", .{ + tmp_directory_path, err, }), }); return error.FetchFailed; @@ -524,7 +524,7 @@ fn runResource( if (native_os == .linux and f.job_queue.work_around_btrfs_bug) { // https://github.com/ziglang/zig/issues/17095 pkg_path.root_dir.handle.close(io); - pkg_path.root_dir.handle = cache_root.handle.makeOpenPath(tmp_dir_sub_path, .{ + pkg_path.root_dir.handle = cache_root.handle.makeOpenPath(io, tmp_dir_sub_path, .{ .iterate = true, }) catch @panic("btrfs workaround failed"); } @@ -1366,7 +1366,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U // we do not attempt to replicate the exact structure of a real .git // directory, since that isn't relevant for fetching a package. 
{ - var pack_dir = try out_dir.makeOpenPath(".git", .{}); + var pack_dir = try out_dir.makeOpenPath(io, ".git", .{}); defer pack_dir.close(io); var pack_file = try pack_dir.createFile(io, "pkg.pack", .{ .read = true }); defer pack_file.close(io); @@ -1743,7 +1743,7 @@ const HashedFile = struct { const Error = Io.File.OpenError || - Io.File.ReadError || + Io.File.Reader.Error || Io.File.StatError || Io.File.ChmodError || Io.Dir.ReadLinkError; @@ -2258,7 +2258,7 @@ const TestFetchBuilder = struct { cache_parent_dir: std.Io.Dir, path_or_url: []const u8, ) !*Fetch { - const cache_dir = try cache_parent_dir.makeOpenPath("zig-global-cache", .{}); + const cache_dir = try cache_parent_dir.makeOpenPath(io, "zig-global-cache", .{}); self.http_client = .{ .allocator = allocator, .io = io }; self.global_cache_directory = .{ .handle = cache_dir, .path = null }; diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index 255f0e6f69..f53c96cc3d 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -1720,10 +1720,10 @@ pub fn main() !void { var pack_file_reader = pack_file.reader(io, &pack_file_buffer); const commit = try Oid.parse(format, args[3]); - var worktree = try Io.Dir.cwd().makeOpenPath(args[4], .{}); + var worktree = try Io.Dir.cwd().makeOpenPath(io, args[4], .{}); defer worktree.close(io); - var git_dir = try worktree.makeOpenPath(".git", .{}); + var git_dir = try worktree.makeOpenPath(io, ".git", .{}); defer git_dir.close(io); std.debug.print("Starting index...\n", .{}); diff --git a/src/Zcu.zig b/src/Zcu.zig index 2891da2407..e0e254d807 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -1200,7 +1200,7 @@ pub const EmbedFile = struct { /// `.none` means the file was not loaded, so `stat` is undefined. val: InternPool.Index, /// If this is `null` and `val` is `.none`, the file has never been loaded. - err: ?(Io.File.OpenError || Io.File.StatError || Io.File.ReadError || error{UnexpectedEof}), + err: ?(Io.File.OpenError || Io.File.StatError || Io.File.Reader.Error || error{UnexpectedEof}), stat: Cache.File.Stat, pub const Index = enum(u32) { diff --git a/src/crash_report.zig b/src/crash_report.zig index d525d4b3b5..76503fdc53 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -95,19 +95,19 @@ fn dumpCrashContext() Io.Writer.Error!void { // TODO: this does mean that a different thread could grab the stderr mutex between the context // and the actual panic printing, which would be quite confusing. 
- const stderr, _ = std.debug.lockStderrWriter(&.{}); + const stderr = std.debug.lockStderrWriter(&.{}); defer std.debug.unlockStderrWriter(); - try stderr.writeAll("Compiler crash context:\n"); + try stderr.interface.writeAll("Compiler crash context:\n"); if (CodegenFunc.current) |*cg| { const func_nav = cg.zcu.funcInfo(cg.func_index).owner_nav; const func_fqn = cg.zcu.intern_pool.getNav(func_nav).fqn; - try stderr.print("Generating function '{f}'\n\n", .{func_fqn.fmt(&cg.zcu.intern_pool)}); + try stderr.interface.print("Generating function '{f}'\n\n", .{func_fqn.fmt(&cg.zcu.intern_pool)}); } else if (AnalyzeBody.current) |anal| { - try dumpCrashContextSema(anal, stderr, &S.crash_heap); + try dumpCrashContextSema(anal, &stderr.interface, &S.crash_heap); } else { - try stderr.writeAll("(no context)\n\n"); + try stderr.interface.writeAll("(no context)\n\n"); } } fn dumpCrashContextSema(anal: *AnalyzeBody, stderr: *Io.Writer, crash_heap: []u8) Io.Writer.Error!void { diff --git a/src/fmt.zig b/src/fmt.zig index 59b4470f81..bec3496e68 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -59,7 +59,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! const arg = args[i]; if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { - try Io.File.stdout().writeAll(usage_fmt); + try Io.File.stdout().writeStreamingAll(io, usage_fmt); return process.cleanExit(); } else if (mem.eql(u8, arg, "--color")) { if (i + 1 >= args.len) { @@ -154,7 +154,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! process.exit(code); } - return Io.File.stdout().writeAll(formatted); + return Io.File.stdout().writeStreamingAll(io, formatted); } if (input_files.items.len == 0) { @@ -162,7 +162,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! 
} var stdout_buffer: [4096]u8 = undefined; - var stdout_writer = Io.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer); var fmt: Fmt = .{ .gpa = gpa, @@ -231,7 +231,7 @@ fn fmtPathDir( if (try fmt.seen.fetchPut(stat.inode, {})) |_| return; var dir_it = dir.iterate(); - while (try dir_it.next()) |entry| { + while (try dir_it.next(io)) |entry| { const is_dir = entry.kind == .directory; if (mem.startsWith(u8, entry.name, ".")) continue; @@ -244,7 +244,7 @@ fn fmtPathDir( try fmtPathDir(fmt, full_path, check_mode, dir, entry.name); } else { fmtPathFile(fmt, full_path, check_mode, dir, entry.name) catch |err| { - std.log.err("unable to format '{s}': {s}", .{ full_path, @errorName(err) }); + std.log.err("unable to format '{s}': {t}", .{ full_path, err }); fmt.any_error = true; return; }; @@ -355,7 +355,7 @@ fn fmtPathFile( try fmt.stdout_writer.interface.print("{s}\n", .{file_path}); fmt.any_error = true; } else { - var af = try dir.atomicFile(sub_path, .{ .mode = stat.mode, .write_buffer = &.{} }); + var af = try dir.atomicFile(io, sub_path, .{ .permissions = stat.permissions, .write_buffer = &.{} }); defer af.deinit(); try af.file_writer.interface.writeAll(fmt.out_buffer.written()); diff --git a/src/introspect.zig b/src/introspect.zig index 3f8308961f..0a57505aeb 100644 --- a/src/introspect.zig +++ b/src/introspect.zig @@ -3,10 +3,9 @@ const build_options = @import("build_options"); const std = @import("std"); const Io = std.Io; +const Dir = std.Io.Dir; const mem = std.mem; const Allocator = std.mem.Allocator; -const os = std.os; -const fs = std.fs; const Cache = std.Build.Cache; const Compilation = @import("Compilation.zig"); @@ -16,11 +15,11 @@ const Package = @import("Package.zig"); /// The path of the returned Directory is relative to `base`. /// The handle of the returned Directory is open. fn testZigInstallPrefix(io: Io, base_dir: Io.Dir) ?Cache.Directory { - const test_index_file = "std" ++ fs.path.sep_str ++ "std.zig"; + const test_index_file = "std" ++ Dir.path.sep_str ++ "std.zig"; zig_dir: { // Try lib/zig/std/std.zig - const lib_zig = "lib" ++ fs.path.sep_str ++ "zig"; + const lib_zig = "lib" ++ Dir.path.sep_str ++ "zig"; var test_zig_dir = base_dir.openDir(io, lib_zig, .{}) catch break :zig_dir; const file = test_zig_dir.openFile(io, test_index_file, .{}) catch { test_zig_dir.close(io); @@ -44,13 +43,13 @@ fn testZigInstallPrefix(io: Io, base_dir: Io.Dir) ?Cache.Directory { pub fn findZigLibDir(gpa: Allocator, io: Io) !Cache.Directory { const cwd_path = try getResolvedCwd(gpa); defer gpa.free(cwd_path); - const self_exe_path = try fs.selfExePathAlloc(gpa); + const self_exe_path = try std.process.executablePathAlloc(io, gpa); defer gpa.free(self_exe_path); return findZigLibDirFromSelfExe(gpa, io, cwd_path, self_exe_path); } -/// Like `std.process.getCwdAlloc`, but also resolves the path with `std.fs.path.resolve`. This +/// Like `std.process.getCwdAlloc`, but also resolves the path with `Dir.path.resolve`. This /// means the path has no repeated separators, no "." or ".." components, and no trailing separator. /// On WASI, "" is returned instead of ".". 
pub fn getResolvedCwd(gpa: Allocator) error{ @@ -68,8 +67,8 @@ pub fn getResolvedCwd(gpa: Allocator) error{ } const cwd = try std.process.getCwdAlloc(gpa); defer gpa.free(cwd); - const resolved = try fs.path.resolve(gpa, &.{cwd}); - std.debug.assert(fs.path.isAbsolute(resolved)); + const resolved = try Dir.path.resolve(gpa, &.{cwd}); + std.debug.assert(Dir.path.isAbsolute(resolved)); return resolved; } @@ -84,12 +83,12 @@ pub fn findZigLibDirFromSelfExe( ) error{ OutOfMemory, FileNotFound }!Cache.Directory { const cwd = Io.Dir.cwd(); var cur_path: []const u8 = self_exe_path; - while (fs.path.dirname(cur_path)) |dirname| : (cur_path = dirname) { + while (Dir.path.dirname(cur_path)) |dirname| : (cur_path = dirname) { var base_dir = cwd.openDir(io, dirname, .{}) catch continue; defer base_dir.close(io); const sub_directory = testZigInstallPrefix(io, base_dir) orelse continue; - const p = try fs.path.join(allocator, &.{ dirname, sub_directory.path.? }); + const p = try Dir.path.join(allocator, &.{ dirname, sub_directory.path.? }); defer allocator.free(p); const resolved = try resolvePath(allocator, cwd_path, &.{p}); @@ -113,18 +112,18 @@ pub fn resolveGlobalCacheDir(allocator: Allocator) ![]u8 { if (builtin.os.tag != .windows) { if (std.zig.EnvVar.XDG_CACHE_HOME.getPosix()) |cache_root| { if (cache_root.len > 0) { - return fs.path.join(allocator, &.{ cache_root, appname }); + return Dir.path.join(allocator, &.{ cache_root, appname }); } } if (std.zig.EnvVar.HOME.getPosix()) |home| { - return fs.path.join(allocator, &.{ home, ".cache", appname }); + return Dir.path.join(allocator, &.{ home, ".cache", appname }); } } - return fs.getAppDataDir(allocator, appname); + return std.fs.getAppDataDir(allocator, appname); } -/// Similar to `fs.path.resolve`, but converts to a cwd-relative path, or, if that would +/// Similar to `Dir.path.resolve`, but converts to a cwd-relative path, or, if that would /// start with a relative up-dir (".."), an absolute path based on the cwd. Also, the cwd /// returns the empty string ("") instead of ".". pub fn resolvePath( @@ -136,7 +135,7 @@ pub fn resolvePath( ) Allocator.Error![]u8 { if (builtin.target.os.tag == .wasi) { std.debug.assert(mem.eql(u8, cwd_resolved, "")); - const res = try fs.path.resolve(gpa, paths); + const res = try Dir.path.resolve(gpa, paths); if (mem.eql(u8, res, ".")) { gpa.free(res); return ""; @@ -146,16 +145,16 @@ pub fn resolvePath( // Heuristic for a fast path: if no component is absolute and ".." never appears, we just need to resolve `paths`. for (paths) |p| { - if (fs.path.isAbsolute(p)) break; // absolute path + if (Dir.path.isAbsolute(p)) break; // absolute path if (mem.indexOf(u8, p, "..") != null) break; // may contain up-dir } else { // no absolute path, no "..". - const res = try fs.path.resolve(gpa, paths); + const res = try Dir.path.resolve(gpa, paths); if (mem.eql(u8, res, ".")) { gpa.free(res); return ""; } - std.debug.assert(!fs.path.isAbsolute(res)); + std.debug.assert(!Dir.path.isAbsolute(res)); std.debug.assert(!isUpDir(res)); return res; } @@ -164,19 +163,19 @@ pub fn resolvePath( // Optimization: `paths` often has just one element. 
const path_resolved = switch (paths.len) { 0 => unreachable, - 1 => try fs.path.resolve(gpa, &.{ cwd_resolved, paths[0] }), + 1 => try Dir.path.resolve(gpa, &.{ cwd_resolved, paths[0] }), else => r: { const all_paths = try gpa.alloc([]const u8, paths.len + 1); defer gpa.free(all_paths); all_paths[0] = cwd_resolved; @memcpy(all_paths[1..], paths); - break :r try fs.path.resolve(gpa, all_paths); + break :r try Dir.path.resolve(gpa, all_paths); }, }; errdefer gpa.free(path_resolved); - std.debug.assert(fs.path.isAbsolute(path_resolved)); - std.debug.assert(fs.path.isAbsolute(cwd_resolved)); + std.debug.assert(Dir.path.isAbsolute(path_resolved)); + std.debug.assert(Dir.path.isAbsolute(cwd_resolved)); if (!std.mem.startsWith(u8, path_resolved, cwd_resolved)) return path_resolved; // not in cwd if (path_resolved.len == cwd_resolved.len) { @@ -184,7 +183,7 @@ pub fn resolvePath( gpa.free(path_resolved); return ""; } - if (path_resolved[cwd_resolved.len] != std.fs.path.sep) return path_resolved; // not in cwd (last component differs) + if (path_resolved[cwd_resolved.len] != Dir.path.sep) return path_resolved; // not in cwd (last component differs) // in cwd; extract sub path const sub_path = try gpa.dupe(u8, path_resolved[cwd_resolved.len + 1 ..]); @@ -192,9 +191,8 @@ pub fn resolvePath( return sub_path; } -/// TODO move this to std.fs.path pub fn isUpDir(p: []const u8) bool { - return mem.startsWith(u8, p, "..") and (p.len == 2 or p[2] == fs.path.sep); + return mem.startsWith(u8, p, "..") and (p.len == 2 or p[2] == Dir.path.sep); } pub const default_local_zig_cache_basename = ".zig-cache"; @@ -205,12 +203,12 @@ pub const default_local_zig_cache_basename = ".zig-cache"; pub fn resolveSuitableLocalCacheDir(arena: Allocator, io: Io, cwd: []const u8) Allocator.Error!?[]u8 { var cur_dir = cwd; while (true) { - const joined = try fs.path.join(arena, &.{ cur_dir, Package.build_zig_basename }); + const joined = try Dir.path.join(arena, &.{ cur_dir, Package.build_zig_basename }); if (Io.Dir.cwd().access(io, joined, .{})) |_| { - return try fs.path.join(arena, &.{ cur_dir, default_local_zig_cache_basename }); + return try Dir.path.join(arena, &.{ cur_dir, default_local_zig_cache_basename }); } else |err| switch (err) { error.FileNotFound => { - cur_dir = fs.path.dirname(cur_dir) orelse return null; + cur_dir = Dir.path.dirname(cur_dir) orelse return null; continue; }, else => return null, diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig index 77bd4372d0..ee638718b7 100644 --- a/src/libs/freebsd.zig +++ b/src/libs/freebsd.zig @@ -444,7 +444,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye var cache: Cache = .{ .gpa = gpa, .io = io, - .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), + .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath(io, "h", .{}), }; cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); @@ -477,7 +477,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest }); var o_directory: Cache.Directory = .{ - .handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}), + .handle = try comp.dirs.global_cache.handle.makeOpenPath(io, o_sub_path, .{}), .path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}), }; defer o_directory.handle.close(io); diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig index a60dc921be..de86413cfd 100644 --- a/src/libs/glibc.zig +++ 
b/src/libs/glibc.zig @@ -679,7 +679,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye var cache: Cache = .{ .gpa = gpa, .io = io, - .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), + .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath(io, "h", .{}), }; cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); @@ -712,7 +712,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest }); var o_directory: Cache.Directory = .{ - .handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}), + .handle = try comp.dirs.global_cache.handle.makeOpenPath(io, o_sub_path, .{}), .path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}), }; defer o_directory.handle.close(io); diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig index 9631ec22f9..05a9de71e7 100644 --- a/src/libs/mingw.zig +++ b/src/libs/mingw.zig @@ -258,7 +258,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { var cache: Cache = .{ .gpa = gpa, .io = io, - .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), + .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath(io, "h", .{}), }; cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); @@ -297,7 +297,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { const digest = man.final(); const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest }); - var o_dir = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}); + var o_dir = try comp.dirs.global_cache.handle.makeOpenPath(io, o_sub_path, .{}); defer o_dir.close(io); const aro = @import("aro"); diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig index fd80616e9d..33dd62d851 100644 --- a/src/libs/netbsd.zig +++ b/src/libs/netbsd.zig @@ -385,7 +385,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye var cache: Cache = .{ .gpa = gpa, .io = io, - .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}), + .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath(io, "h", .{}), }; cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); @@ -418,7 +418,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest }); var o_directory: Cache.Directory = .{ - .handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}), + .handle = try comp.dirs.global_cache.handle.makeOpenPath(io, o_sub_path, .{}), .path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}), }; defer o_directory.handle.close(io); diff --git a/src/link.zig b/src/link.zig index 7ebe1c6ba2..9914b1af3a 100644 --- a/src/link.zig +++ b/src/link.zig @@ -2170,28 +2170,27 @@ fn resolvePathInputLib( }) { var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) { error.FileNotFound => return .no_match, - else => |e| fatal("unable to search for {s} library '{f}': {s}", .{ - @tagName(link_mode), std.fmt.alt(test_path, .formatEscapeChar), @errorName(e), + else => |e| fatal("unable to search for {t} library '{f}': {t}", .{ + link_mode, std.fmt.alt(test_path, .formatEscapeChar), e, }), }; errdefer file.close(io); try ld_script_bytes.resize(gpa, @max(std.elf.MAGIC.len, std.elf.ARMAG.len)); - 
const n = file.preadAll(ld_script_bytes.items, 0) catch |err| fatal("failed to read '{f}': {s}", .{ - std.fmt.alt(test_path, .formatEscapeChar), @errorName(err), - }); + const n = file.readPositionalAll(io, ld_script_bytes.items, 0) catch |err| + fatal("failed to read '{f}': {t}", .{ std.fmt.alt(test_path, .formatEscapeChar), err }); const buf = ld_script_bytes.items[0..n]; if (mem.startsWith(u8, buf, std.elf.MAGIC) or mem.startsWith(u8, buf, std.elf.ARMAG)) { // Appears to be an ELF or archive file. return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, pq.query); } const stat = file.stat(io) catch |err| - fatal("failed to stat {f}: {s}", .{ test_path, @errorName(err) }); + fatal("failed to stat {f}: {t}", .{ test_path, err }); const size = std.math.cast(u32, stat.size) orelse fatal("{f}: linker script too big", .{test_path}); try ld_script_bytes.resize(gpa, size); const buf2 = ld_script_bytes.items[n..]; - const n2 = file.preadAll(buf2, n) catch |err| - fatal("failed to read {f}: {s}", .{ test_path, @errorName(err) }); + const n2 = file.readPositionalAll(io, buf2, n) catch |err| + fatal("failed to read {f}: {t}", .{ test_path, err }); if (n2 != buf2.len) fatal("failed to read {f}: unexpected end of file", .{test_path}); // This `Io` is only used for a mutex, and we know we aren't doing anything async/concurrent. diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 009e59ed0d..2585d43eba 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -636,7 +636,7 @@ fn create( const coff = try arena.create(Coff); const file = try path.root_dir.handle.createFile(io, path.sub_path, .{ .read = true, - .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode), + .permissions = link.File.determinePermissions(comp.config.output_mode, comp.config.link_mode), }); errdefer file.close(io); coff.* = .{ @@ -653,7 +653,7 @@ fn create( .allow_shlib_undefined = false, .stack_size = 0, }, - .mf = try .init(file, comp.gpa), + .mf = try .init(file, comp.gpa, io), .nodes = .empty, .import_table = .{ .ni = .none, diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index c675af3e22..8b05272033 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -52,7 +52,6 @@ pub const UpdateError = error{ codegen.GenerateSymbolError || Io.File.OpenError || Io.File.LengthError || - Io.File.CopyRangeError || Io.File.ReadPositionalError || Io.File.WritePositionalError; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 13a624a295..2916f5ee25 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -320,7 +320,7 @@ pub fn createEmpty( self.base.file = try emit.root_dir.handle.createFile(io, sub_path, .{ .truncate = true, .read = true, - .mode = link.File.determineMode(output_mode, link_mode), + .permissions = link.File.determinePermissions(output_mode, link_mode), }); const gpa = comp.gpa; diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig index 72fdb244a4..fc4794a7b0 100644 --- a/src/link/Elf2.zig +++ b/src/link/Elf2.zig @@ -976,7 +976,7 @@ fn create( const elf = try arena.create(Elf); const file = try path.root_dir.handle.createFile(io, path.sub_path, .{ .read = true, - .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode), + .permissions = link.File.determinePermissions(comp.config.output_mode, comp.config.link_mode), }); errdefer file.close(io); elf.* = .{ @@ -994,7 +994,7 @@ fn create( .stack_size = 0, }, .options = options, - .mf = try .init(file, comp.gpa), + .mf = try .init(file, comp.gpa, io), .ni = .{ .tls = .none, }, diff --git 
a/src/link/MachO.zig b/src/link/MachO.zig index f996442f24..6c380637ee 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -224,7 +224,7 @@ pub fn createEmpty( self.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ .truncate = true, .read = true, - .mode = link.File.determineMode(output_mode, link_mode), + .permissions = link.File.determinePermissions(output_mode, link_mode), }); // Append null file @@ -3157,7 +3157,9 @@ fn detectAllocCollision(self: *MachO, start: u64, size: u64) !?u64 { } } - if (at_end) try self.base.file.?.setEndPos(end); + const comp = self.base.comp; + const io = comp.io; + if (at_end) try self.base.file.?.setLength(io, end); return null; } @@ -3292,7 +3294,7 @@ pub fn reopenDebugInfo(self: *MachO) !void { ); defer gpa.free(d_sym_path); - var d_sym_bundle = try self.base.emit.root_dir.handle.makeOpenPath(d_sym_path, .{}); + var d_sym_bundle = try self.base.emit.root_dir.handle.makeOpenPath(io, d_sym_path, .{}); defer d_sym_bundle.close(io); self.d_sym.?.file = try d_sym_bundle.createFile(io, fs.path.basename(self.base.emit.sub_path), .{ @@ -3303,6 +3305,10 @@ pub fn reopenDebugInfo(self: *MachO) !void { // TODO: move to ZigObject fn initMetadata(self: *MachO, options: InitMetadataOptions) !void { + const comp = self.base.comp; + const gpa = comp.gpa; + const io = comp.io; + if (!self.base.isRelocatable()) { const base_vmaddr = blk: { const pagezero_size = self.pagezero_size orelse default_pagezero_size; @@ -3357,7 +3363,11 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void { if (options.zo.dwarf) |*dwarf| { // Create dSYM bundle. log.debug("creating {s}.dSYM bundle", .{options.emit.sub_path}); - self.d_sym = .{ .allocator = self.base.comp.gpa, .file = null }; + self.d_sym = .{ + .io = io, + .allocator = gpa, + .file = null, + }; try self.reopenDebugInfo(); try self.d_sym.?.initMetadata(self); try dwarf.initMetadata(); @@ -3477,6 +3487,9 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo const seg_id = self.sections.items(.segment_id)[sect_index]; const seg = &self.segments.items[seg_id]; + const comp = self.base.comp; + const io = comp.io; + if (!sect.isZerofill()) { const allocated_size = self.allocatedSize(sect.offset); if (needed_size > allocated_size) { @@ -3498,7 +3511,7 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo sect.offset = @intCast(new_offset); } else if (sect.offset + allocated_size == std.math.maxInt(u64)) { - try self.base.file.?.setEndPos(sect.offset + needed_size); + try self.base.file.?.setLength(io, sect.offset + needed_size); } seg.filesize = needed_size; } @@ -3520,6 +3533,8 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo } fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void { + const comp = self.base.comp; + const io = comp.io; const sect = &self.sections.items(.header)[sect_index]; if (!sect.isZerofill()) { @@ -3547,7 +3562,7 @@ fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void sect.offset = @intCast(new_offset); sect.addr = new_addr; } else if (sect.offset + allocated_size == std.math.maxInt(u64)) { - try self.base.file.?.setEndPos(sect.offset + needed_size); + try self.base.file.?.setLength(io, sect.offset + needed_size); } } sect.size = needed_size; @@ -5346,12 +5361,12 @@ pub fn pwriteAll(macho_file: *MachO, bytes: []const u8, offset: u64) error{LinkF }; } -pub fn setEndPos(macho_file: *MachO, length: u64) error{LinkFailure}!void { 
+pub fn setLength(macho_file: *MachO, length: u64) error{LinkFailure}!void { const comp = macho_file.base.comp; + const io = comp.io; const diags = &comp.link_diags; - macho_file.base.file.?.setEndPos(length) catch |err| { - return diags.fail("failed to set file end pos: {s}", .{@errorName(err)}); - }; + macho_file.base.file.?.setLength(io, length) catch |err| + return diags.fail("failed to set file end pos: {t}", .{err}); } pub fn cast(macho_file: *MachO, comptime T: type, x: anytype) error{LinkFailure}!T { diff --git a/src/link/MappedFile.zig b/src/link/MappedFile.zig index 4e58aae01a..974b03ef7f 100644 --- a/src/link/MappedFile.zig +++ b/src/link/MappedFile.zig @@ -10,6 +10,7 @@ const assert = std.debug.assert; const linux = std.os.linux; const windows = std.os.windows; +io: Io, file: std.Io.File, flags: packed struct { block_size: std.mem.Alignment, @@ -36,8 +37,9 @@ pub const Error = std.posix.MMapError || std.posix.MRemapError || Io.File.Length NoSpaceLeft, }; -pub fn init(file: std.Io.File, gpa: std.mem.Allocator) !MappedFile { +pub fn init(file: std.Io.File, gpa: std.mem.Allocator, io: Io) !MappedFile { var mf: MappedFile = .{ + .io = io, .file = file, .flags = undefined, .section = if (is_windows) windows.INVALID_HANDLE_VALUE else {}, @@ -624,13 +626,14 @@ pub fn addNodeAfter( } fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested_size: u64) !void { + const io = mf.io; const node = ni.get(mf); const old_offset, const old_size = node.location().resolve(mf); const new_size = node.flags.alignment.forward(@intCast(requested_size)); // Resize the entire file if (ni == Node.Index.root) { try mf.ensureCapacityForSetLocation(gpa); - try mf.file.setEndPos(new_size); + try mf.file.setLength(io, new_size); try mf.ensureTotalCapacity(@intCast(new_size)); ni.setLocationAssumeCapacity(mf, old_offset, new_size); return; diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 80a248eb6f..489fb8e6fe 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -3002,11 +3002,11 @@ pub fn createEmpty( wasm.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{ .truncate = true, .read = true, - .mode = if (Io.File.Permissions.has_executable_bit) + .permissions = if (Io.File.Permissions.has_executable_bit) if (target.os.tag == .wasi and output_mode == .Exe) - Io.File.default_mode | 0b001_000_000 + .executable_file else - Io.File.default_mode + .default_file else 0, }); diff --git a/src/main.zig b/src/main.zig index 2eebb6d060..47e5af23da 100644 --- a/src/main.zig +++ b/src/main.zig @@ -335,19 +335,20 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { } else if (mem.eql(u8, cmd, "targets")) { dev.check(.targets_command); const host = std.zig.resolveTargetQueryOrFatal(io, .{}); - var stdout_writer = Io.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer); try @import("print_targets.zig").cmdTargets(arena, io, cmd_args, &stdout_writer.interface, &host); return stdout_writer.interface.flush(); } else if (mem.eql(u8, cmd, "version")) { dev.check(.version_command); - try Io.File.stdout().writeAll(build_options.version ++ "\n"); + try Io.File.stdout().writeStreamingAll(io, build_options.version ++ "\n"); return; } else if (mem.eql(u8, cmd, "env")) { dev.check(.env_command); const host = std.zig.resolveTargetQueryOrFatal(io, .{}); - var stdout_writer = Io.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer); try 
@import("print_env.zig").cmdEnv( arena, + io, &stdout_writer.interface, args, if (native_os == .wasi) wasi_preopens, @@ -361,10 +362,10 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { }); } else if (mem.eql(u8, cmd, "zen")) { dev.check(.zen_command); - return Io.File.stdout().writeAll(info_zen); + return Io.File.stdout().writeStreamingAll(io, info_zen); } else if (mem.eql(u8, cmd, "help") or mem.eql(u8, cmd, "-h") or mem.eql(u8, cmd, "--help")) { dev.check(.help_command); - return Io.File.stdout().writeAll(usage); + return Io.File.stdout().writeStreamingAll(io, usage); } else if (mem.eql(u8, cmd, "ast-check")) { return cmdAstCheck(arena, io, cmd_args); } else if (mem.eql(u8, cmd, "detect-cpu")) { @@ -374,7 +375,7 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { } else if (build_options.enable_debug_extensions and mem.eql(u8, cmd, "dump-zir")) { return cmdDumpZir(arena, io, cmd_args); } else if (build_options.enable_debug_extensions and mem.eql(u8, cmd, "llvm-ints")) { - return cmdDumpLlvmInts(gpa, arena, cmd_args); + return cmdDumpLlvmInts(gpa, arena, io, cmd_args); } else { std.log.info("{s}", .{usage}); fatal("unknown command: {s}", .{args[1]}); @@ -701,7 +702,7 @@ const Emit = union(enum) { yes: []const u8, const OutputToCacheReason = enum { listen, @"zig run", @"zig test" }; - fn resolve(io: Io, emit: Emit, default_basename: []const u8, output_to_cache: ?OutputToCacheReason) Compilation.CreateOptions.Emit { + fn resolve(emit: Emit, io: Io, default_basename: []const u8, output_to_cache: ?OutputToCacheReason) Compilation.CreateOptions.Emit { return switch (emit) { .no => .no, .yes_default_path => if (output_to_cache != null) .yes_cache else .{ .yes_path = default_basename }, @@ -1036,7 +1037,7 @@ fn buildOutputType( fatal("unable to read response file '{s}': {t}", .{ resp_file_path, err }); } else if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { - try Io.File.stdout().writeAll(usage_build_generic); + try Io.File.stdout().writeStreamingAll(io, usage_build_generic); return cleanExit(); } else if (mem.eql(u8, arg, "--")) { if (arg_mode == .run) { @@ -1858,9 +1859,7 @@ fn buildOutputType( var must_link = false; var file_ext: ?Compilation.FileExt = null; while (it.has_next) { - it.next() catch |err| { - fatal("unable to parse command line parameters: {s}", .{@errorName(err)}); - }; + it.next(io) catch |err| fatal("unable to parse command line parameters: {t}", .{err}); switch (it.zig_equivalent) { .target => target_arch_os_abi = it.only_arg, // example: -target riscv64-linux-unknown .o => { @@ -2836,9 +2835,9 @@ fn buildOutputType( } else if (mem.eql(u8, arg, "-V")) { warn("ignoring request for supported emulations: unimplemented", .{}); } else if (mem.eql(u8, arg, "-v")) { - try Io.File.stdout().writeAll("zig ld " ++ build_options.version ++ "\n"); + try Io.File.stdout().writeStreamingAll(io, "zig ld " ++ build_options.version ++ "\n"); } else if (mem.eql(u8, arg, "--version")) { - try Io.File.stdout().writeAll("zig ld " ++ build_options.version ++ "\n"); + try Io.File.stdout().writeStreamingAll(io, "zig ld " ++ build_options.version ++ "\n"); process.exit(0); } else { fatal("unsupported linker arg: {s}", .{arg}); @@ -3077,14 +3076,13 @@ fn buildOutputType( const self_exe_path = switch (native_os) { .wasi => {}, - else => fs.selfExePathAlloc(arena) catch |err| { - fatal("unable to find zig self exe path: {s}", .{@errorName(err)}); - }, + else => process.executablePathAlloc(io, 
arena) catch |err| fatal("unable to find zig self exe path: {t}", .{err}), }; // This `init` calls `fatal` on error. var dirs: Compilation.Directories = .init( arena, + io, override_lib_dir, override_global_cache_dir, s: { @@ -3097,11 +3095,9 @@ fn buildOutputType( if (native_os == .wasi) wasi_preopens, self_exe_path, ); - defer dirs.deinit(); + defer dirs.deinit(io); - if (linker_optimization) |o| { - warn("ignoring deprecated linker optimization setting '{s}'", .{o}); - } + if (linker_optimization) |o| warn("ignoring deprecated linker optimization setting '{s}'", .{o}); create_module.dirs = dirs; create_module.opts.emit_llvm_ir = emit_llvm_ir != .no; @@ -3324,18 +3320,18 @@ fn buildOutputType( }; const default_h_basename = try std.fmt.allocPrint(arena, "{s}.h", .{root_name}); - const emit_h_resolved = emit_h.resolve(default_h_basename, output_to_cache); + const emit_h_resolved = emit_h.resolve(io, default_h_basename, output_to_cache); const default_asm_basename = try std.fmt.allocPrint(arena, "{s}.s", .{root_name}); - const emit_asm_resolved = emit_asm.resolve(default_asm_basename, output_to_cache); + const emit_asm_resolved = emit_asm.resolve(io, default_asm_basename, output_to_cache); const default_llvm_ir_basename = try std.fmt.allocPrint(arena, "{s}.ll", .{root_name}); - const emit_llvm_ir_resolved = emit_llvm_ir.resolve(default_llvm_ir_basename, output_to_cache); + const emit_llvm_ir_resolved = emit_llvm_ir.resolve(io, default_llvm_ir_basename, output_to_cache); const default_llvm_bc_basename = try std.fmt.allocPrint(arena, "{s}.bc", .{root_name}); - const emit_llvm_bc_resolved = emit_llvm_bc.resolve(default_llvm_bc_basename, output_to_cache); + const emit_llvm_bc_resolved = emit_llvm_bc.resolve(io, default_llvm_bc_basename, output_to_cache); - const emit_docs_resolved = emit_docs.resolve("docs", output_to_cache); + const emit_docs_resolved = emit_docs.resolve(io, "docs", output_to_cache); const is_exe_or_dyn_lib = switch (create_module.resolved_options.output_mode) { .Obj => false, @@ -3356,7 +3352,7 @@ fn buildOutputType( const default_implib_basename = try std.fmt.allocPrint(arena, "{s}.lib", .{root_name}); const emit_implib_resolved: Compilation.CreateOptions.Emit = switch (emit_implib) { .no => .no, - .yes => emit_implib.resolve(default_implib_basename, output_to_cache), + .yes => emit_implib.resolve(io, default_implib_basename, output_to_cache), .yes_default_path => emit: { if (output_to_cache != null) break :emit .yes_cache; const p = try fs.path.join(arena, &.{ @@ -3399,7 +3395,7 @@ fn buildOutputType( // for the hashing algorithm here and in the cache are the same. // We are providing our own cache key, because this file has nothing // to do with the cache manifest. 
- var file_writer = f.writer(&.{}); + var file_writer = f.writer(io, &.{}); var buffer: [1000]u8 = undefined; var hasher = file_writer.interface.hashed(Cache.Hasher.init("0123456789abcdef"), &buffer); var stdin_reader = Io.File.stdin().readerStreaming(io, &.{}); @@ -3633,13 +3629,13 @@ fn buildOutputType( if (show_builtin) { const builtin_opts = comp.root_mod.getBuiltinOptions(comp.config); const source = try builtin_opts.generate(arena); - return Io.File.stdout().writeAll(source); + return Io.File.stdout().writeStreamingAll(io, source); } switch (listen) { .none => {}, .stdio => { var stdin_reader = Io.File.stdin().reader(io, &stdin_buffer); - var stdout_writer = Io.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer); try serve( comp, &stdin_reader.interface, @@ -3930,11 +3926,8 @@ fn createModule( } if (target.isMinGW()) { - const exists = mingw.libExists(arena, target, create_module.dirs.zig_lib, lib_name) catch |err| { - fatal("failed to check zig installation for DLL import libs: {s}", .{ - @errorName(err), - }); - }; + const exists = mingw.libExists(arena, io, target, create_module.dirs.zig_lib, lib_name) catch |err| + fatal("failed to check zig installation for DLL import libs: {t}", .{err}); if (exists) { try create_module.windows_libs.put(arena, lib_name, {}); continue; @@ -4009,11 +4002,8 @@ fn createModule( } if (create_module.libc_paths_file) |paths_file| { - create_module.libc_installation = LibCInstallation.parse(arena, paths_file, target) catch |err| { - fatal("unable to parse libc paths file at path {s}: {s}", .{ - paths_file, @errorName(err), - }); - }; + create_module.libc_installation = LibCInstallation.parse(arena, io, paths_file, target) catch |err| + fatal("unable to parse libc paths file at path {s}: {t}", .{ paths_file, err }); } if (target.os.tag == .windows and (target.abi == .msvc or target.abi == .itanium) and @@ -4024,7 +4014,7 @@ fn createModule( .verbose = true, .target = target, }) catch |err| { - fatal("unable to find native libc installation: {s}", .{@errorName(err)}); + fatal("unable to find native libc installation: {t}", .{err}); }; } try create_module.lib_directories.ensureUnusedCapacity(arena, 2); @@ -4163,7 +4153,7 @@ fn serve( var child_pid: ?std.process.Child.Id = null; - const main_progress_node = std.Progress.start(.{}); + const main_progress_node = std.Progress.start(io, .{}); const file_system_inputs = comp.file_system_inputs.?; const IncrementalDebugServer = if (build_options.enable_debug_extensions and !builtin.single_threaded) @@ -4694,7 +4684,7 @@ fn cmdTranslateC( }); }; defer zig_file.close(io); - var stdout_writer = Io.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer); var file_reader = zig_file.reader(io, &.{}); _ = try stdout_writer.interface.sendFileAll(&file_reader, .unlimited); try stdout_writer.interface.flush(); @@ -4744,7 +4734,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! if (mem.eql(u8, arg, "-m") or mem.eql(u8, arg, "--minimal")) { template = .minimal; } else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { - try Io.File.stdout().writeAll(usage_init); + try Io.File.stdout().writeStreamingAll(io, usage_init); return cleanExit(); } else { fatal("unrecognized parameter: '{s}'", .{arg}); @@ -4764,7 +4754,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) ! 
switch (template) { .example => { var templates = findTemplates(gpa, arena, io); - defer templates.deinit(); + defer templates.deinit(io); const s = fs.path.sep_str; const template_paths = [_][]const u8{ @@ -4898,7 +4888,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) const argv_index_exe = child_argv.items.len; _ = try child_argv.addOne(); - const self_exe_path = try fs.selfExePathAlloc(arena); + const self_exe_path = try process.executablePathAlloc(io, arena); try child_argv.append(self_exe_path); const argv_index_zig_lib_dir = child_argv.items.len; @@ -5079,7 +5069,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) const work_around_btrfs_bug = native_os == .linux and EnvVar.ZIG_BTRFS_WORKAROUND.isSet(); - const root_prog_node = std.Progress.start(.{ + const root_prog_node = std.Progress.start(io, .{ .disable_printing = (color == .off), .root_name = "Compile Build Script", }); @@ -5114,7 +5104,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) const paths_file = debug_libc_paths_file orelse break :lci null; if (!build_options.enable_debug_extensions) unreachable; const lci = try arena.create(LibCInstallation); - lci.* = try .parse(arena, paths_file, &resolved_target.result); + lci.* = try .parse(arena, io, paths_file, &resolved_target.result); break :lci lci; }; @@ -5129,6 +5119,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) // This `init` calls `fatal` on error. var dirs: Compilation.Directories = .init( arena, + io, override_lib_dir, override_global_cache_dir, .{ .override = path: { @@ -5138,7 +5129,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) {}, self_exe_path, ); - defer dirs.deinit(); + defer dirs.deinit(io); child_argv.items[argv_index_zig_lib_dir] = dirs.zig_lib.path orelse cwd_path; child_argv.items[argv_index_build_file] = build_root.directory.path orelse cwd_path; @@ -5421,11 +5412,10 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) child.stderr_behavior = .Inherit; const term = t: { - std.debug.lockStdErr(); - defer std.debug.unlockStdErr(); - break :t child.spawnAndWait(io) catch |err| { + _ = std.debug.lockStderrWriter(&.{}); + defer std.debug.unlockStderrWriter(); + break :t child.spawnAndWait(io) catch |err| fatal("failed to spawn build runner {s}: {t}", .{ child_argv.items[0], err }); - }; }; switch (term) { @@ -5517,7 +5507,7 @@ fn jitCmd( dev.check(.jit_command); const color: Color = .auto; - const root_prog_node = if (options.progress_node) |node| node else std.Progress.start(.{ + const root_prog_node = if (options.progress_node) |node| node else std.Progress.start(io, .{ .disable_printing = (color == .off), }); @@ -5529,9 +5519,8 @@ fn jitCmd( .is_explicit_dynamic_linker = false, }; - const self_exe_path = fs.selfExePathAlloc(arena) catch |err| { - fatal("unable to find self exe path: {s}", .{@errorName(err)}); - }; + const self_exe_path = process.executablePathAlloc(io, arena) catch |err| + fatal("unable to find self exe path: {t}", .{err}); const optimize_mode: std.builtin.OptimizeMode = if (EnvVar.ZIG_DEBUG_CMD.isSet()) .Debug @@ -5544,13 +5533,14 @@ fn jitCmd( // This `init` calls `fatal` on error. 
var dirs: Compilation.Directories = .init( arena, + io, override_lib_dir, override_global_cache_dir, .global, if (native_os == .wasi) wasi_preopens, self_exe_path, ); - defer dirs.deinit(); + defer dirs.deinit(io); const thread_limit = @min( @max(std.Thread.getCpuCount() catch 1, 1), @@ -5629,7 +5619,7 @@ fn jitCmd( defer comp.destroy(); if (options.server) { - var stdout_writer = Io.File.stdout().writer(&stdout_buffer); + var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer); var server: std.zig.Server = .{ .out = &stdout_writer.interface, .in = undefined, // won't be receiving messages @@ -5696,7 +5686,7 @@ fn jitCmd( ptr.* = try stdout_reader.interface.allocRemaining(arena, .limited(std.math.maxInt(u32))); } - const term = try child.wait(); + const term = try child.wait(io); switch (term) { .Exited => |code| { if (code == 0) { @@ -6160,7 +6150,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void { const arg = args[i]; if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { - try Io.File.stdout().writeAll(usage_ast_check); + try Io.File.stdout().writeStreamingAll(io, usage_ast_check); return cleanExit(); } else if (mem.eql(u8, arg, "-t")) { want_output_text = true; @@ -6211,7 +6201,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void { const tree = try Ast.parse(arena, source, mode); - var stdout_writer = Io.File.stdout().writerStreaming(&stdout_buffer); + var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer); const stdout_bw = &stdout_writer.interface; switch (mode) { .zig => { @@ -6334,7 +6324,7 @@ fn cmdDetectCpu(io: Io, args: []const []const u8) !void { const arg = args[i]; if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { - try Io.File.stdout().writeAll(detect_cpu_usage); + try Io.File.stdout().writeStreamingAll(io, detect_cpu_usage); return cleanExit(); } else if (mem.eql(u8, arg, "--llvm")) { use_llvm = true; @@ -6355,10 +6345,10 @@ fn cmdDetectCpu(io: Io, args: []const []const u8) !void { const name = llvm.GetHostCPUName() orelse fatal("LLVM could not figure out the host cpu name", .{}); const features = llvm.GetHostCPUFeatures() orelse fatal("LLVM could not figure out the host cpu feature set", .{}); const cpu = try detectNativeCpuWithLLVM(builtin.cpu.arch, name, features); - try printCpu(cpu); + try printCpu(io, cpu); } else { const host_target = std.zig.resolveTargetQueryOrFatal(io, .{}); - try printCpu(host_target.cpu); + try printCpu(io, host_target.cpu); } } @@ -6425,8 +6415,8 @@ fn detectNativeCpuWithLLVM( return result; } -fn printCpu(cpu: std.Target.Cpu) !void { - var stdout_writer = Io.File.stdout().writerStreaming(&stdout_buffer); +fn printCpu(io: Io, cpu: std.Target.Cpu) !void { + var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer); const stdout_bw = &stdout_writer.interface; if (cpu.model.llvm_name) |llvm_name| { @@ -6448,6 +6438,7 @@ fn printCpu(cpu: std.Target.Cpu) !void { fn cmdDumpLlvmInts( gpa: Allocator, arena: Allocator, + io: Io, args: []const []const u8, ) !void { dev.check(.llvm_ints_command); @@ -6475,7 +6466,7 @@ fn cmdDumpLlvmInts( const dl = tm.createTargetDataLayout(); const context = llvm.Context.create(); - var stdout_writer = Io.File.stdout().writerStreaming(&stdout_buffer); + var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer); const stdout_bw = &stdout_writer.interface; for ([_]u16{ 1, 8, 16, 32, 64, 128, 256 }) |bits| { const int_type = 
context.intType(bits); @@ -6501,7 +6492,7 @@ fn cmdDumpZir(arena: Allocator, io: Io, args: []const []const u8) !void { defer f.close(io); const zir = try Zcu.loadZirCache(arena, io, f); - var stdout_writer = Io.File.stdout().writerStreaming(&stdout_buffer); + var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer); const stdout_bw = &stdout_writer.interface; { const instruction_bytes = zir.instructions.len * @@ -6585,7 +6576,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void { var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .empty; try Zcu.mapOldZirToNew(arena, old_zir, new_zir, &inst_map); - var stdout_writer = Io.File.stdout().writerStreaming(&stdout_buffer); + var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer); const stdout_bw = &stdout_writer.interface; { try stdout_bw.print("Instruction mappings:\n", .{}); @@ -6917,7 +6908,7 @@ fn cmdFetch( const arg = args[i]; if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { - try Io.File.stdout().writeAll(usage_fetch); + try Io.File.stdout().writeStreamingAll(io, usage_fetch); return cleanExit(); } else if (mem.eql(u8, arg, "--global-cache-dir")) { if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); @@ -6951,7 +6942,7 @@ fn cmdFetch( try http_client.initDefaultProxies(arena); - var root_prog_node = std.Progress.start(.{ + var root_prog_node = std.Progress.start(io, .{ .root_name = "Fetch", }); defer root_prog_node.end(); @@ -6959,7 +6950,7 @@ fn cmdFetch( var global_cache_directory: Directory = l: { const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena); break :l .{ - .handle = try Io.Dir.cwd().makeOpenPath(p, .{}), + .handle = try Io.Dir.cwd().makeOpenPath(io, p, .{}), .path = p, }; }; @@ -7026,7 +7017,7 @@ fn cmdFetch( const name = switch (save) { .no => { - var stdout = Io.File.stdout().writerStreaming(&stdout_buffer); + var stdout = Io.File.stdout().writerStreaming(io, &stdout_buffer); try stdout.interface.print("{s}\n", .{package_hash_slice}); try stdout.interface.flush(); return cleanExit(); @@ -7044,7 +7035,7 @@ fn cmdFetch( var build_root = try findBuildRoot(arena, io, .{ .cwd_path = cwd_path, }); - defer build_root.deinit(); + defer build_root.deinit(io); // The name to use in case the manifest file needs to be created now. 
const init_root_name = fs.path.basename(build_root.directory.path orelse cwd_path); @@ -7205,7 +7196,7 @@ fn createDependenciesModule( const rand_int = std.crypto.random.int(u64); const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int); { - var tmp_dir = try dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{}); + var tmp_dir = try dirs.local_cache.handle.makeOpenPath(io, tmp_dir_sub_path, .{}); defer tmp_dir.close(io); try tmp_dir.writeFile(io, .{ .sub_path = basename, .data = source }); } @@ -7446,28 +7437,28 @@ fn writeSimpleTemplateFile(io: Io, file_name: []const u8, comptime fmt: []const const f = try Io.Dir.cwd().createFile(io, file_name, .{ .exclusive = true }); defer f.close(io); var buf: [4096]u8 = undefined; - var fw = f.writer(&buf); + var fw = f.writer(io, &buf); try fw.interface.print(fmt, args); try fw.interface.flush(); } fn findTemplates(gpa: Allocator, arena: Allocator, io: Io) Templates { const cwd_path = introspect.getResolvedCwd(arena) catch |err| { - fatal("unable to get cwd: {s}", .{@errorName(err)}); + fatal("unable to get cwd: {t}", .{err}); }; - const self_exe_path = fs.selfExePathAlloc(arena) catch |err| { - fatal("unable to find self exe path: {s}", .{@errorName(err)}); + const self_exe_path = process.executablePathAlloc(io, arena) catch |err| { + fatal("unable to find self exe path: {t}", .{err}); }; var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, io, cwd_path, self_exe_path) catch |err| { - fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) }); + fatal("unable to find zig installation directory '{s}': {t}", .{ self_exe_path, err }); }; const s = fs.path.sep_str; const template_sub_path = "init"; const template_dir = zig_lib_directory.handle.openDir(io, template_sub_path, .{}) catch |err| { const path = zig_lib_directory.path orelse "."; - fatal("unable to open zig project template directory '{s}{s}{s}': {s}", .{ - path, s, template_sub_path, @errorName(err), + fatal("unable to open zig project template directory '{s}{s}{s}': {t}", .{ + path, s, template_sub_path, err, }); }; diff --git a/src/print_env.zig b/src/print_env.zig index e1847688ad..3540a58d18 100644 --- a/src/print_env.zig +++ b/src/print_env.zig @@ -1,13 +1,17 @@ -const std = @import("std"); const builtin = @import("builtin"); -const build_options = @import("build_options"); -const Compilation = @import("Compilation.zig"); + +const std = @import("std"); +const Io = std.Io; const Allocator = std.mem.Allocator; const EnvVar = std.zig.EnvVar; const fatal = std.process.fatal; +const build_options = @import("build_options"); +const Compilation = @import("Compilation.zig"); + pub fn cmdEnv( arena: Allocator, + io: Io, out: *std.Io.Writer, args: []const []const u8, wasi_preopens: switch (builtin.target.os.tag) { @@ -21,20 +25,21 @@ pub fn cmdEnv( const self_exe_path = switch (builtin.target.os.tag) { .wasi => args[0], - else => std.fs.selfExePathAlloc(arena) catch |err| { - fatal("unable to find zig self exe path: {s}", .{@errorName(err)}); + else => std.process.executablePathAlloc(io, arena) catch |err| { + fatal("unable to find zig self exe path: {t}", .{err}); }, }; var dirs: Compilation.Directories = .init( arena, + io, override_lib_dir, override_global_cache_dir, .global, if (builtin.target.os.tag == .wasi) wasi_preopens, if (builtin.target.os.tag != .wasi) self_exe_path, ); - defer dirs.deinit(); + defer dirs.deinit(io); const zig_lib_dir = dirs.zig_lib.path orelse ""; const zig_std_dir = try 
dirs.zig_lib.join(arena, &.{"std"}); diff --git a/src/print_targets.zig b/src/print_targets.zig index 2f80187de1..a5e89506ad 100644 --- a/src/print_targets.zig +++ b/src/print_targets.zig @@ -1,14 +1,16 @@ const std = @import("std"); +const Io = std.Io; const fs = std.fs; const mem = std.mem; const meta = std.meta; const fatal = std.process.fatal; const Allocator = std.mem.Allocator; const Target = std.Target; -const target = @import("target.zig"); const assert = std.debug.assert; + const glibc = @import("libs/glibc.zig"); const introspect = @import("introspect.zig"); +const target = @import("target.zig"); pub fn cmdTargets( allocator: Allocator, @@ -18,19 +20,19 @@ pub fn cmdTargets( native_target: *const Target, ) !void { _ = args; - var zig_lib_directory = introspect.findZigLibDir(allocator) catch |err| { - fatal("unable to find zig installation directory: {s}\n", .{@errorName(err)}); - }; + var zig_lib_directory = introspect.findZigLibDir(allocator, io) catch |err| + fatal("unable to find zig installation directory: {t}", .{err}); defer zig_lib_directory.handle.close(io); defer allocator.free(zig_lib_directory.path.?); const abilists_contents = zig_lib_directory.handle.readFileAlloc( + io, glibc.abilists_path, allocator, .limited(glibc.abilists_max_size), ) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, - else => fatal("unable to read " ++ glibc.abilists_path ++ ": {s}", .{@errorName(err)}), + else => fatal("unable to read " ++ glibc.abilists_path ++ ": {t}", .{err}), }; defer allocator.free(abilists_contents); @@ -49,9 +51,7 @@ pub fn cmdTargets( { var libc_obj = try root_obj.beginTupleField("libc", .{}); for (std.zig.target.available_libcs) |libc| { - const tmp = try std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{ - @tagName(libc.arch), @tagName(libc.os), @tagName(libc.abi), - }); + const tmp = try std.fmt.allocPrint(allocator, "{t}-{t}-{t}", .{ libc.arch, libc.os, libc.abi }); defer allocator.free(tmp); try libc_obj.field(tmp, .{}); } diff --git a/test/standalone/self_exe_symlink/main.zig b/test/standalone/self_exe_symlink/main.zig index 4df5b02720..dfe3acb7a0 100644 --- a/test/standalone/self_exe_symlink/main.zig +++ b/test/standalone/self_exe_symlink/main.zig @@ -9,7 +9,7 @@ pub fn main() !void { defer threaded.deinit(); const io = threaded.io(); - const self_path = try std.fs.selfExePathAlloc(gpa); + const self_path = try std.process.executablePathAlloc(io, gpa); defer gpa.free(self_path); var self_exe = try std.fs.openSelfExe(.{}); -- cgit v1.2.3 From 7ce5ee2e92bf1bf1f39ccc08df19f9a1044e9f2c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 18 Dec 2025 22:03:10 -0800 Subject: std: update remaining unit tests for std.Io API changes --- lib/std/Build/Cache.zig | 12 ++++++------ lib/std/Io/File/Reader.zig | 4 ++-- lib/std/Io/File/Writer.zig | 8 ++++++-- lib/std/Io/test.zig | 39 ++++++++++++++++++--------------------- lib/std/Thread.zig | 18 ++++++++++-------- lib/std/debug.zig | 9 +++++---- 6 files changed, 47 insertions(+), 43 deletions(-) (limited to 'lib/std/Build/Cache.zig') diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index e2c848b6fd..43f8691000 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1384,8 +1384,8 @@ test "check that changing a file makes cache fail" { try tmp.dir.writeFile(io, .{ .sub_path = temp_file, .data = original_temp_file_contents }); // Wait for file timestamps to tick - const initial_time = try testGetCurrentFileTimestamp(tmp.dir); - while ((try 
testGetCurrentFileTimestamp(tmp.dir)).nanoseconds == initial_time.nanoseconds) { + const initial_time = try testGetCurrentFileTimestamp(io, tmp.dir); + while ((try testGetCurrentFileTimestamp(io, tmp.dir)).nanoseconds == initial_time.nanoseconds) { try std.Io.Clock.Duration.sleep(.{ .clock = .boot, .raw = .fromNanoseconds(1) }, io); } @@ -1502,8 +1502,8 @@ test "Manifest with files added after initial hash work" { try tmp.dir.writeFile(io, .{ .sub_path = temp_file2, .data = "Hello world the second!\n" }); // Wait for file timestamps to tick - const initial_time = try testGetCurrentFileTimestamp(tmp.dir); - while ((try testGetCurrentFileTimestamp(tmp.dir)).nanoseconds == initial_time.nanoseconds) { + const initial_time = try testGetCurrentFileTimestamp(io, tmp.dir); + while ((try testGetCurrentFileTimestamp(io, tmp.dir)).nanoseconds == initial_time.nanoseconds) { try std.Io.Clock.Duration.sleep(.{ .clock = .boot, .raw = .fromNanoseconds(1) }, io); } @@ -1553,8 +1553,8 @@ test "Manifest with files added after initial hash work" { try tmp.dir.writeFile(io, .{ .sub_path = temp_file2, .data = "Hello world the second, updated\n" }); // Wait for file timestamps to tick - const initial_time2 = try testGetCurrentFileTimestamp(tmp.dir); - while ((try testGetCurrentFileTimestamp(tmp.dir)).nanoseconds == initial_time2.nanoseconds) { + const initial_time2 = try testGetCurrentFileTimestamp(io, tmp.dir); + while ((try testGetCurrentFileTimestamp(io, tmp.dir)).nanoseconds == initial_time2.nanoseconds) { try std.Io.Clock.Duration.sleep(.{ .clock = .boot, .raw = .fromNanoseconds(1) }, io); } diff --git a/lib/std/Io/File/Reader.zig b/lib/std/Io/File/Reader.zig index 2359fad722..0c573c9ae1 100644 --- a/lib/std/Io/File/Reader.zig +++ b/lib/std/Io/File/Reader.zig @@ -18,8 +18,8 @@ io: Io, file: File, err: ?Error = null, mode: Mode = .positional, -/// Tracks the true seek position in the file. To obtain the logical -/// position, use `logicalPos`. +/// Tracks the true seek position in the file. To obtain the logical position, +/// use `logicalPos`. pos: u64 = 0, size: ?u64 = null, size_err: ?SizeError = null, diff --git a/lib/std/Io/File/Writer.zig b/lib/std/Io/File/Writer.zig index 56a1c09340..3487416719 100644 --- a/lib/std/Io/File/Writer.zig +++ b/lib/std/Io/File/Writer.zig @@ -11,8 +11,8 @@ io: Io, file: File, err: ?Error = null, mode: Mode = .positional, -/// Tracks the true seek position in the file. To obtain the logical -/// position, add the buffer size to this value. +/// Tracks the true seek position in the file. To obtain the logical position, +/// use `logicalPos`. pos: u64 = 0, write_file_err: ?WriteFileError = null, seek_err: ?SeekError = null, @@ -221,6 +221,10 @@ pub fn seekTo(w: *Writer, offset: u64) (SeekError || Io.Writer.Error)!void { try seekToUnbuffered(w, offset); } +pub fn logicalPos(w: *const Writer) u64 { + return w.pos + w.interface.end; +} + /// Asserts that no data is currently buffered. 
pub fn seekToUnbuffered(w: *Writer, offset: u64) SeekError!void { assert(w.interface.buffered().len == 0); diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig index 8f32555b52..ef0d45d953 100644 --- a/lib/std/Io/test.zig +++ b/lib/std/Io/test.zig @@ -64,33 +64,28 @@ test "write a file, read it, then delete it" { try tmp.dir.deleteFile(io, tmp_file_name); } -test "File seek ops" { +test "File.Writer.seekTo" { var tmp = tmpDir(.{}); defer tmp.cleanup(); const io = testing.io; + var data: [8192]u8 = undefined; + @memset(&data, 0x55); + const tmp_file_name = "temp_test_file.txt"; var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); - try file.writeAll(&([_]u8{0x55} ** 8192)); - - // Seek to the end - try file.seekFromEnd(0); - try expect((try file.getPos()) == try file.length(io)); - // Negative delta - try file.seekBy(-4096); - try expect((try file.getPos()) == 4096); - // Positive delta - try file.seekBy(10); - try expect((try file.getPos()) == 4106); - // Absolute position - try file.seekTo(1234); - try expect((try file.getPos()) == 1234); + var fw = file.writerStreaming(io, &.{}); + + try fw.interface.writeAll(&data); + try expect(fw.logicalPos() == try file.length(io)); + try fw.seekTo(1234); + try expect(fw.logicalPos() == 1234); } -test "setLength" { +test "File.setLength" { const io = testing.io; var tmp = tmpDir(.{}); @@ -100,19 +95,21 @@ test "setLength" { var file = try tmp.dir.createFile(io, tmp_file_name, .{}); defer file.close(io); + var fw = file.writerStreaming(io, &.{}); + // Verify that the file size changes and the file offset is not moved try expect((try file.length(io)) == 0); - try expect((try file.getPos()) == 0); + try expect(fw.logicalPos() == 0); try file.setLength(io, 8192); try expect((try file.length(io)) == 8192); - try expect((try file.getPos()) == 0); - try file.seekTo(100); + try expect(fw.logicalPos() == 0); + try fw.seekTo(100); try file.setLength(io, 4096); try expect((try file.length(io)) == 4096); - try expect((try file.getPos()) == 100); + try expect(fw.logicalPos() == 100); try file.setLength(io, 0); try expect((try file.length(io)) == 0); - try expect((try file.getPos()) == 100); + try expect(fw.logicalPos() == 100); } test "legacy setLength" { diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 8453bc4c81..fbce1cd000 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -211,7 +211,7 @@ pub fn setName(self: Thread, io: Io, name: []const u8) SetNameError!void { const file = try Io.Dir.cwd().openFile(io, path, .{ .mode = .write_only }); defer file.close(io); - try file.writeAll(name); + try file.writeStreamingAll(io, name); return; }, .windows => { @@ -1676,14 +1676,14 @@ const LinuxThreadImpl = struct { } }; -fn testThreadName(thread: *Thread) !void { +fn testThreadName(io: Io, thread: *Thread) !void { const testCases = &[_][]const u8{ "mythread", "b" ** max_name_len, }; inline for (testCases) |tc| { - try thread.setName(tc); + try thread.setName(io, tc); var name_buffer: [max_name_len:0]u8 = undefined; @@ -1698,6 +1698,8 @@ fn testThreadName(thread: *Thread) !void { test "setName, getName" { if (builtin.single_threaded) return error.SkipZigTest; + const io = testing.io; + const Context = struct { start_wait_event: ResetEvent = .unset, test_done_event: ResetEvent = .unset, @@ -1711,11 +1713,11 @@ test "setName, getName" { ctx.start_wait_event.wait(); switch (native_os) { - .windows => testThreadName(&ctx.thread) catch |err| switch (err) { + .windows => testThreadName(io, &ctx.thread) catch |err| switch (err) { 
error.Unsupported => return error.SkipZigTest, else => return err, }, - else => try testThreadName(&ctx.thread), + else => try testThreadName(io, &ctx.thread), } // Signal our test is done @@ -1735,14 +1737,14 @@ test "setName, getName" { switch (native_os) { .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => { - const res = thread.setName("foobar"); + const res = thread.setName(io, "foobar"); try std.testing.expectError(error.Unsupported, res); }, - .windows => testThreadName(&thread) catch |err| switch (err) { + .windows => testThreadName(io, &thread) catch |err| switch (err) { error.Unsupported => return error.SkipZigTest, else => return err, }, - else => try testThreadName(&thread), + else => try testThreadName(io, &thread), } context.thread_done_event.set(); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 3850ec3e8c..cef4233495 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -384,12 +384,13 @@ pub fn dumpHexFallible(t: Io.Terminal, bytes: []const u8) !void { } test dumpHexFallible { + const gpa = testing.allocator; const bytes: []const u8 = &.{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x01, 0x12, 0x13 }; - var aw: Writer.Allocating = .init(testing.allocator); + var aw: Writer.Allocating = .init(gpa); defer aw.deinit(); - try dumpHexFallible(&aw.writer, .no_color, bytes); - const expected = try std.fmt.allocPrint(testing.allocator, + try dumpHexFallible(.{ .writer = &aw.writer, .mode = .no_color }, bytes); + const expected = try std.fmt.allocPrint(gpa, \\{x:0>[2]} 00 11 22 33 44 55 66 77 88 99 AA BB CC DD EE FF .."3DUfw........ \\{x:0>[2]} 01 12 13 ... \\ @@ -398,7 +399,7 @@ test dumpHexFallible { @intFromPtr(bytes.ptr) + 16, @sizeOf(usize) * 2, }); - defer testing.allocator.free(expected); + defer gpa.free(expected); try testing.expectEqualStrings(expected, aw.written()); } -- cgit v1.2.3 From a8088306f6223b07ad9b7ae37486bcc9e0ac08c9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 19 Dec 2025 16:42:28 -0800 Subject: std: rename other Dir "make" functions to "create" --- lib/compiler/build_runner.zig | 6 +-- lib/compiler/translate-c/main.zig | 2 +- lib/std/Build.zig | 6 +-- lib/std/Build/Cache.zig | 8 ++-- lib/std/Build/Cache/Path.zig | 8 ++-- lib/std/Build/Step.zig | 6 +-- lib/std/Build/Step/Compile.zig | 4 +- lib/std/Build/Step/ConfigHeader.zig | 2 +- lib/std/Build/Step/ObjCopy.zig | 2 +- lib/std/Build/Step/Options.zig | 4 +- lib/std/Build/Step/Run.zig | 6 +-- lib/std/Build/Step/UpdateSourceFiles.zig | 2 +- lib/std/Build/Step/WriteFile.zig | 8 ++-- lib/std/Io.zig | 6 +-- lib/std/Io/Dir.zig | 40 +++++++++---------- lib/std/Io/Kqueue.zig | 12 +++--- lib/std/Io/Threaded.zig | 65 +++++++++++++++---------------- lib/std/fs/test.zig | 66 ++++++++++++++++---------------- lib/std/posix.zig | 2 +- lib/std/tar.zig | 6 +-- lib/std/testing.zig | 6 +-- lib/std/zip.zig | 4 +- src/Compilation.zig | 34 ++++++++-------- src/Package/Fetch.zig | 12 +++--- src/Package/Fetch/git.zig | 4 +- src/libs/freebsd.zig | 4 +- src/libs/glibc.zig | 4 +- src/libs/mingw.zig | 4 +- src/libs/netbsd.zig | 4 +- src/link/MachO.zig | 2 +- src/main.zig | 8 ++-- 31 files changed, 171 insertions(+), 176 deletions(-) (limited to 'lib/std/Build/Cache.zig') diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 1c30ecaadc..cfe0fee78f 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -66,12 +66,12 @@ pub fn main() !void { const local_cache_directory: 
std.Build.Cache.Directory = .{ .path = cache_root, - .handle = try cwd.makeOpenPath(io, cache_root, .{}), + .handle = try cwd.createDirPathOpen(io, cache_root, .{}), }; const global_cache_directory: std.Build.Cache.Directory = .{ .path = global_cache_root, - .handle = try cwd.makeOpenPath(io, global_cache_root, .{}), + .handle = try cwd.createDirPathOpen(io, global_cache_root, .{}), }; var graph: std.Build.Graph = .{ @@ -80,7 +80,7 @@ pub fn main() !void { .cache = .{ .io = io, .gpa = arena, - .manifest_dir = try local_cache_directory.handle.makeOpenPath(io, "h", .{}), + .manifest_dir = try local_cache_directory.handle.createDirPathOpen(io, "h", .{}), }, .zig_exe = zig_exe, .env_map = try process.getEnvMap(arena), diff --git a/lib/compiler/translate-c/main.zig b/lib/compiler/translate-c/main.zig index d02f21a2a8..80dc67fbfe 100644 --- a/lib/compiler/translate-c/main.zig +++ b/lib/compiler/translate-c/main.zig @@ -253,7 +253,7 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration if (d.output_name) |path| blk: { if (std.mem.eql(u8, path, "-")) break :blk; if (std.fs.path.dirname(path)) |dirname| { - Io.Dir.cwd().makePath(io, dirname) catch |err| + Io.Dir.cwd().createDirPath(io, dirname) catch |err| return d.fatal("failed to create path to '{s}': {s}", .{ path, aro.Driver.errorDescription(err) }); } out_file = Io.Dir.cwd().createFile(io, path, .{}) catch |err| { diff --git a/lib/std/Build.zig b/lib/std/Build.zig index cf0b9e5b0d..4b8909e689 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1701,14 +1701,14 @@ pub fn addCheckFile( return Step.CheckFile.create(b, file_source, options); } -pub fn truncateFile(b: *Build, dest_path: []const u8) (Io.Dir.MakeError || Io.Dir.StatFileError)!void { +pub fn truncateFile(b: *Build, dest_path: []const u8) (Io.Dir.CreateDirError || Io.Dir.StatFileError)!void { const io = b.graph.io; if (b.verbose) log.info("truncate {s}", .{dest_path}); const cwd = Io.Dir.cwd(); var src_file = cwd.createFile(io, dest_path, .{}) catch |err| switch (err) { error.FileNotFound => blk: { if (fs.path.dirname(dest_path)) |dirname| { - try cwd.makePath(io, dirname); + try cwd.createDirPath(io, dirname); } break :blk try cwd.createFile(io, dest_path, .{}); }, @@ -2654,7 +2654,7 @@ pub fn makeTempPath(b: *Build) []const u8 { const rand_int = std.crypto.random.int(u64); const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int); const result_path = b.cache_root.join(b.allocator, &.{tmp_dir_sub_path}) catch @panic("OOM"); - b.cache_root.handle.makePath(io, tmp_dir_sub_path) catch |err| { + b.cache_root.handle.createDirPath(io, tmp_dir_sub_path) catch |err| { std.debug.print("unable to make tmp path '{s}': {t}\n", .{ result_path, err }); }; return result_path; diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 43f8691000..396f204350 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -1330,7 +1330,7 @@ test "cache file and then recall it" { var cache: Cache = .{ .io = io, .gpa = testing.allocator, - .manifest_dir = try tmp.dir.makeOpenPath(io, temp_manifest_dir, .{}), + .manifest_dir = try tmp.dir.createDirPathOpen(io, temp_manifest_dir, .{}), }; cache.addPrefix(.{ .path = null, .handle = tmp.dir }); defer cache.manifest_dir.close(io); @@ -1396,7 +1396,7 @@ test "check that changing a file makes cache fail" { var cache: Cache = .{ .io = io, .gpa = testing.allocator, - .manifest_dir = try tmp.dir.makeOpenPath(io, temp_manifest_dir, .{}), + .manifest_dir = try tmp.dir.createDirPathOpen(io, 
temp_manifest_dir, .{}), }; cache.addPrefix(.{ .path = null, .handle = tmp.dir }); defer cache.manifest_dir.close(io); @@ -1456,7 +1456,7 @@ test "no file inputs" { var cache: Cache = .{ .io = io, .gpa = testing.allocator, - .manifest_dir = try tmp.dir.makeOpenPath(io, temp_manifest_dir, .{}), + .manifest_dir = try tmp.dir.createDirPathOpen(io, temp_manifest_dir, .{}), }; cache.addPrefix(.{ .path = null, .handle = tmp.dir }); defer cache.manifest_dir.close(io); @@ -1515,7 +1515,7 @@ test "Manifest with files added after initial hash work" { var cache: Cache = .{ .io = io, .gpa = testing.allocator, - .manifest_dir = try tmp.dir.makeOpenPath(io, temp_manifest_dir, .{}), + .manifest_dir = try tmp.dir.createDirPathOpen(io, temp_manifest_dir, .{}), }; cache.addPrefix(.{ .path = null, .handle = tmp.dir }); defer cache.manifest_dir.close(io); diff --git a/lib/std/Build/Cache/Path.zig b/lib/std/Build/Cache/Path.zig index 3ef4dec658..2b7814c544 100644 --- a/lib/std/Build/Cache/Path.zig +++ b/lib/std/Build/Cache/Path.zig @@ -84,14 +84,14 @@ pub fn openDir( return p.root_dir.handle.openDir(io, joined_path, args); } -pub fn makeOpenPath(p: Path, io: Io, sub_path: []const u8, opts: Io.Dir.OpenOptions) !Io.Dir { +pub fn createDirPathOpen(p: Path, io: Io, sub_path: []const u8, opts: Io.Dir.OpenOptions) !Io.Dir { var buf: [fs.max_path_bytes]u8 = undefined; const joined_path = if (p.sub_path.len == 0) sub_path else p: { break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{ p.sub_path, sub_path, }) catch return error.NameTooLong; }; - return p.root_dir.handle.makeOpenPath(io, joined_path, opts); + return p.root_dir.handle.createDirPathOpen(io, joined_path, opts); } pub fn statFile(p: Path, io: Io, sub_path: []const u8) !Io.Dir.Stat { @@ -129,14 +129,14 @@ pub fn access(p: Path, io: Io, sub_path: []const u8, flags: Io.Dir.AccessOptions return p.root_dir.handle.access(io, joined_path, flags); } -pub fn makePath(p: Path, io: Io, sub_path: []const u8) !void { +pub fn createDirPath(p: Path, io: Io, sub_path: []const u8) !void { var buf: [fs.max_path_bytes]u8 = undefined; const joined_path = if (p.sub_path.len == 0) sub_path else p: { break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{ p.sub_path, sub_path, }) catch return error.NameTooLong; }; - return p.root_dir.handle.makePath(io, joined_path); + return p.root_dir.handle.createDirPath(io, joined_path); } pub fn toString(p: Path, allocator: Allocator) Allocator.Error![]u8 { diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 74b41634a7..243dee8604 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -516,12 +516,12 @@ pub fn installFile(s: *Step, src_lazy_path: Build.LazyPath, dest_path: []const u return s.fail("unable to update file from '{f}' to '{s}': {t}", .{ src_path, dest_path, err }); } -/// Wrapper around `Io.Dir.makePathStatus` that handles verbose and error output. -pub fn installDir(s: *Step, dest_path: []const u8) !Io.Dir.MakePathStatus { +/// Wrapper around `Io.Dir.createDirPathStatus` that handles verbose and error output. 
+pub fn installDir(s: *Step, dest_path: []const u8) !Io.Dir.CreatePathStatus { const b = s.owner; const io = b.graph.io; try handleVerbose(b, null, &.{ "install", "-d", dest_path }); - return Io.Dir.cwd().makePathStatus(io, dest_path, .default_dir) catch |err| + return Io.Dir.cwd().createDirPathStatus(io, dest_path, .default_dir) catch |err| return s.fail("unable to create dir '{s}': {t}", .{ dest_path, err }); } diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 4752046089..0454e5b79d 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -1669,7 +1669,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 { args_length += arg.len + 1; // +1 to account for null terminator } if (args_length >= 30 * 1024) { - try b.cache_root.handle.makePath(io, "args"); + try b.cache_root.handle.createDirPath(io, "args"); const args_to_escape = zig_args.items[2..]; var escaped_args = try std.array_list.Managed([]const u8).initCapacity(arena, args_to_escape.len); @@ -1706,7 +1706,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 { // The args file is already present from a previous run. } else |err| switch (err) { error.FileNotFound => { - try b.cache_root.handle.makePath(io, "tmp"); + try b.cache_root.handle.createDirPath(io, "tmp"); const rand_int = std.crypto.random.int(u64); const tmp_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int); try b.cache_root.handle.writeFile(io, .{ .sub_path = tmp_path, .data = args }); diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index 250bae5009..b55efc0da4 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -258,7 +258,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { const sub_path = b.pathJoin(&.{ "o", &digest, config_header.include_path }); const sub_path_dirname = std.fs.path.dirname(sub_path).?; - b.cache_root.handle.makePath(io, sub_path_dirname) catch |err| { + b.cache_root.handle.createDirPath(io, sub_path_dirname) catch |err| { return step.fail("unable to make path '{f}{s}': {s}", .{ b.cache_root, sub_path_dirname, @errorName(err), }); diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig index b81f59b9a1..ea0714adf9 100644 --- a/lib/std/Build/Step/ObjCopy.zig +++ b/lib/std/Build/Step/ObjCopy.zig @@ -177,7 +177,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { const cache_path = "o" ++ fs.path.sep_str ++ digest; const full_dest_path = try b.cache_root.join(b.allocator, &.{ cache_path, objcopy.basename }); const full_dest_path_debug = try b.cache_root.join(b.allocator, &.{ cache_path, b.fmt("{s}.debug", .{objcopy.basename}) }); - b.cache_root.handle.makePath(io, cache_path) catch |err| { + b.cache_root.handle.createDirPath(io, cache_path) catch |err| { return step.fail("unable to make path {s}: {s}", .{ cache_path, @errorName(err) }); }; diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index 676ea4d851..610d417aea 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -477,7 +477,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { } else |outer_err| switch (outer_err) { error.FileNotFound => { const sub_dirname = fs.path.dirname(sub_path).?; - b.cache_root.handle.makePath(io, sub_dirname) catch |e| + b.cache_root.handle.createDirPath(io, sub_dirname) catch |e| return step.fail("unable to make path '{f}{s}': {t}", .{ b.cache_root, sub_dirname, e }); const rand_int = 
std.crypto.random.int(u64); @@ -486,7 +486,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void { basename; const tmp_sub_path_dirname = fs.path.dirname(tmp_sub_path).?; - b.cache_root.handle.makePath(io, tmp_sub_path_dirname) catch |err| { + b.cache_root.handle.createDirPath(io, tmp_sub_path_dirname) catch |err| { return step.fail("unable to make temporary directory '{f}{s}': {t}", .{ b.cache_root, tmp_sub_path_dirname, err, }); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 157a0292e7..c0ba7f0cee 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -975,7 +975,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .output_directory => output_sub_path, else => unreachable, }; - b.cache_root.handle.makePath(io, output_sub_dir_path) catch |err| { + b.cache_root.handle.createDirPath(io, output_sub_dir_path) catch |err| { return step.fail("unable to make path '{f}{s}': {s}", .{ b.cache_root, output_sub_dir_path, @errorName(err), }); @@ -1007,7 +1007,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { .output_directory => output_sub_path, else => unreachable, }; - b.cache_root.handle.makePath(io, output_sub_dir_path) catch |err| { + b.cache_root.handle.createDirPath(io, output_sub_dir_path) catch |err| { return step.fail("unable to make path '{f}{s}': {s}", .{ b.cache_root, output_sub_dir_path, @errorName(err), }); @@ -1439,7 +1439,7 @@ fn runCommand( const sub_path = b.pathJoin(&output_components); const sub_path_dirname = Dir.path.dirname(sub_path).?; - b.cache_root.handle.makePath(io, sub_path_dirname) catch |err| { + b.cache_root.handle.createDirPath(io, sub_path_dirname) catch |err| { return step.fail("unable to make path '{f}{s}': {s}", .{ b.cache_root, sub_path_dirname, @errorName(err), }); diff --git a/lib/std/Build/Step/UpdateSourceFiles.zig b/lib/std/Build/Step/UpdateSourceFiles.zig index eb8a6a85dd..1c4c94f9cf 100644 --- a/lib/std/Build/Step/UpdateSourceFiles.zig +++ b/lib/std/Build/Step/UpdateSourceFiles.zig @@ -78,7 +78,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { var any_miss = false; for (usf.output_source_files.items) |output_source_file| { if (fs.path.dirname(output_source_file.sub_path)) |dirname| { - b.build_root.handle.makePath(io, dirname) catch |err| { + b.build_root.handle.createDirPath(io, dirname) catch |err| { return step.fail("unable to make path '{f}{s}': {t}", .{ b.build_root, dirname, err }); }; } diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 53222289e6..145c7f9bb3 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -259,13 +259,13 @@ fn make(step: *Step, options: Step.MakeOptions) !void { write_file.generated_directory.path = try b.cache_root.join(arena, &.{ "o", &digest }); - var cache_dir = b.cache_root.handle.makeOpenPath(io, cache_path, .{}) catch |err| + var cache_dir = b.cache_root.handle.createDirPathOpen(io, cache_path, .{}) catch |err| return step.fail("unable to make path '{f}{s}': {t}", .{ b.cache_root, cache_path, err }); defer cache_dir.close(io); for (write_file.files.items) |file| { if (fs.path.dirname(file.sub_path)) |dirname| { - cache_dir.makePath(io, dirname) catch |err| { + cache_dir.createDirPath(io, dirname) catch |err| { return step.fail("unable to make path '{f}{s}{c}{s}': {t}", .{ b.cache_root, cache_path, fs.path.sep, dirname, err, }); @@ -300,7 +300,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { const dest_dirname = dir.sub_path; if (dest_dirname.len != 0) 
{ - cache_dir.makePath(io, dest_dirname) catch |err| { + cache_dir.createDirPath(io, dest_dirname) catch |err| { return step.fail("unable to make path '{f}{s}{c}{s}': {s}", .{ b.cache_root, cache_path, fs.path.sep, dest_dirname, @errorName(err), }); @@ -315,7 +315,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { const src_entry_path = try src_dir_path.join(arena, entry.path); const dest_path = b.pathJoin(&.{ dest_dirname, entry.path }); switch (entry.kind) { - .directory => try cache_dir.makePath(io, dest_path), + .directory => try cache_dir.createDirPath(io, dest_path), .file => { const prev_status = Io.Dir.updateFile( src_entry_path.root_dir.handle, diff --git a/lib/std/Io.zig b/lib/std/Io.zig index 1d477d330e..ad26893651 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -659,9 +659,9 @@ pub const VTable = struct { futexWaitUncancelable: *const fn (?*anyopaque, ptr: *const u32, expected: u32) void, futexWake: *const fn (?*anyopaque, ptr: *const u32, max_waiters: u32) void, - dirMake: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.MakeError!void, - dirMakePath: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.MakePathError!Dir.MakePathStatus, - dirMakeOpenPath: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions, Dir.OpenOptions) Dir.MakeOpenPathError!Dir, + dirCreateDir: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirError!void, + dirCreateDirPath: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirPathError!Dir.CreatePathStatus, + dirCreateDirPathOpen: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions, Dir.OpenOptions) Dir.CreateDirPathOpenError!Dir, dirOpenDir: *const fn (?*anyopaque, Dir, []const u8, Dir.OpenOptions) Dir.OpenError!Dir, dirStat: *const fn (?*anyopaque, Dir) Dir.StatError!Dir.Stat, dirStatFile: *const fn (?*anyopaque, Dir, []const u8, Dir.StatFileOptions) Dir.StatFileError!File.Stat, diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig index 78637015ba..54cce082da 100644 --- a/lib/std/Io/Dir.zig +++ b/lib/std/Io/Dir.zig @@ -590,7 +590,7 @@ pub fn updateFile( } if (path.dirname(dest_path)) |dirname| { - try dest_dir.makePath(io, dirname); + try dest_dir.createDirPath(io, dirname); } var buffer: [1000]u8 = undefined; // Used only when direct fd-to-fd is not available. @@ -637,7 +637,7 @@ pub fn readFile(dir: Dir, io: Io, file_path: []const u8, buffer: []u8) ReadFileE return buffer[0..n]; } -pub const MakeError = error{ +pub const CreateDirError = error{ /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to create a new directory relative to it. AccessDenied, @@ -663,10 +663,10 @@ pub const MakeError = error{ /// * On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding. /// /// Related: -/// * `makePath` +/// * `createDirPath` /// * `createDirAbsolute` -pub fn createDir(dir: Dir, io: Io, sub_path: []const u8, permissions: Permissions) MakeError!void { - return io.vtable.dirMake(io.userdata, dir, sub_path, permissions); +pub fn createDir(dir: Dir, io: Io, sub_path: []const u8, permissions: Permissions) CreateDirError!void { + return io.vtable.dirCreateDir(io.userdata, dir, sub_path, permissions); } /// Create a new directory, based on an absolute path. @@ -677,14 +677,14 @@ pub fn createDir(dir: Dir, io: Io, sub_path: []const u8, permissions: Permission /// On Windows, `absolute_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/). 
/// On WASI, `absolute_path` should be encoded as valid UTF-8. /// On other platforms, `absolute_path` is an opaque sequence of bytes with no particular encoding. -pub fn createDirAbsolute(io: Io, absolute_path: []const u8, permissions: Permissions) MakeError!void { +pub fn createDirAbsolute(io: Io, absolute_path: []const u8, permissions: Permissions) CreateDirError!void { assert(path.isAbsolute(absolute_path)); return createDir(.cwd(), io, absolute_path, permissions); } test createDirAbsolute {} -pub const MakePathError = MakeError || StatFileError; +pub const CreateDirPathError = CreateDirError || StatFileError; /// Creates parent directories with default permissions as necessary to ensure /// `sub_path` exists as a directory. @@ -710,27 +710,27 @@ pub const MakePathError = MakeError || StatFileError; /// and a `./second` directory. /// /// See also: -/// * `makePathStatus` -pub fn makePath(dir: Dir, io: Io, sub_path: []const u8) MakePathError!void { - _ = try io.vtable.dirMakePath(io.userdata, dir, sub_path, .default_dir); +/// * `createDirPathStatus` +pub fn createDirPath(dir: Dir, io: Io, sub_path: []const u8) CreateDirPathError!void { + _ = try io.vtable.dirCreateDirPath(io.userdata, dir, sub_path, .default_dir); } -pub const MakePathStatus = enum { existed, created }; +pub const CreatePathStatus = enum { existed, created }; -/// Same as `makePath` except returns whether the path already existed or was +/// Same as `createDirPath` except returns whether the path already existed or was /// successfully created. -pub fn makePathStatus(dir: Dir, io: Io, sub_path: []const u8, permissions: Permissions) MakePathError!MakePathStatus { - return io.vtable.dirMakePath(io.userdata, dir, sub_path, permissions); +pub fn createDirPathStatus(dir: Dir, io: Io, sub_path: []const u8, permissions: Permissions) CreateDirPathError!CreatePathStatus { + return io.vtable.dirCreateDirPath(io.userdata, dir, sub_path, permissions); } -pub const MakeOpenPathError = MakeError || OpenError || StatFileError; +pub const CreateDirPathOpenError = CreateDirError || OpenError || StatFileError; -pub const MakeOpenPathOptions = struct { +pub const CreateDirPathOpenOptions = struct { open_options: OpenOptions = .{}, permissions: Permissions = .default_dir, }; -/// Performs the equivalent of `makePath` followed by `openDir`, atomically if possible. +/// Performs the equivalent of `createDirPath` followed by `openDir`, atomically if possible. /// /// When this operation is canceled, it may leave the file system in a /// partially modified state. @@ -738,8 +738,8 @@ pub const MakeOpenPathOptions = struct { /// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/). /// On WASI, `sub_path` should be encoded as valid UTF-8. /// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding. 
-pub fn makeOpenPath(dir: Dir, io: Io, sub_path: []const u8, options: MakeOpenPathOptions) MakeOpenPathError!Dir { - return io.vtable.dirMakeOpenPath(io.userdata, dir, sub_path, options.permissions, options.open_options); +pub fn createDirPathOpen(dir: Dir, io: Io, sub_path: []const u8, options: CreateDirPathOpenOptions) CreateDirPathOpenError!Dir { + return io.vtable.dirCreateDirPathOpen(io.userdata, dir, sub_path, options.permissions, options.open_options); } pub const Stat = File.Stat; @@ -1729,7 +1729,7 @@ pub const AtomicFileOptions = struct { pub fn atomicFile(parent: Dir, io: Io, dest_path: []const u8, options: AtomicFileOptions) !File.Atomic { if (path.dirname(dest_path)) |dirname| { const dir = if (options.make_path) - try parent.makeOpenPath(io, dirname, .{}) + try parent.createDirPathOpen(io, dirname, .{}) else try parent.openDir(io, dirname, .{}); diff --git a/lib/std/Io/Kqueue.zig b/lib/std/Io/Kqueue.zig index 9f9403d5ad..26b8298cab 100644 --- a/lib/std/Io/Kqueue.zig +++ b/lib/std/Io/Kqueue.zig @@ -869,9 +869,9 @@ pub fn io(k: *Kqueue) Io { .conditionWaitUncancelable = conditionWaitUncancelable, .conditionWake = conditionWake, - .dirMake = dirMake, - .dirMakePath = dirMakePath, - .dirMakeOpenPath = dirMakeOpenPath, + .dirCreateDir = dirCreateDir, + .dirCreateDirPath = dirCreateDirPath, + .dirCreateDirPathOpen = dirCreateDirPathOpen, .dirStat = dirStat, .dirStatFile = dirStatFile, @@ -1114,7 +1114,7 @@ fn conditionWake(userdata: ?*anyopaque, cond: *Io.Condition, wake: Io.Condition. k.yield(waiting_fiber, .reschedule); } -fn dirMake(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir.Mode) Dir.MakeError!void { +fn dirCreateDir(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir.Mode) Dir.CreateDirError!void { const k: *Kqueue = @ptrCast(@alignCast(userdata)); _ = k; _ = dir; @@ -1122,7 +1122,7 @@ fn dirMake(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir.Mode _ = mode; @panic("TODO"); } -fn dirMakePath(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir.Mode) Dir.MakeError!void { +fn dirCreateDirPath(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir.Mode) Dir.CreateDirError!void { const k: *Kqueue = @ptrCast(@alignCast(userdata)); _ = k; _ = dir; @@ -1130,7 +1130,7 @@ fn dirMakePath(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir. 
_ = mode; @panic("TODO"); } -fn dirMakeOpenPath(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, options: Dir.OpenOptions) Dir.MakeOpenPathError!Dir { +fn dirCreateDirPathOpen(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, options: Dir.OpenOptions) Dir.CreateDirPathOpenError!Dir { const k: *Kqueue = @ptrCast(@alignCast(userdata)); _ = k; _ = dir; diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index f4d60b3934..4afd8d75d1 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -711,9 +711,9 @@ pub fn io(t: *Threaded) Io { .futexWaitUncancelable = futexWaitUncancelable, .futexWake = futexWake, - .dirMake = dirMake, - .dirMakePath = dirMakePath, - .dirMakeOpenPath = dirMakeOpenPath, + .dirCreateDir = dirCreateDir, + .dirCreateDirPath = dirCreateDirPath, + .dirCreateDirPathOpen = dirCreateDirPathOpen, .dirStat = dirStat, .dirStatFile = dirStatFile, .dirAccess = dirAccess, @@ -846,9 +846,9 @@ pub fn ioBasic(t: *Threaded) Io { .futexWaitUncancelable = futexWaitUncancelable, .futexWake = futexWake, - .dirMake = dirMake, - .dirMakePath = dirMakePath, - .dirMakeOpenPath = dirMakeOpenPath, + .dirCreateDir = dirCreateDir, + .dirCreateDirPath = dirCreateDirPath, + .dirCreateDirPathOpen = dirCreateDirPathOpen, .dirStat = dirStat, .dirStatFile = dirStatFile, .dirAccess = dirAccess, @@ -1507,13 +1507,13 @@ fn futexWake(userdata: ?*anyopaque, ptr: *const u32, max_waiters: u32) void { } } -const dirMake = switch (native_os) { - .windows => dirMakeWindows, - .wasi => dirMakeWasi, - else => dirMakePosix, +const dirCreateDir = switch (native_os) { + .windows => dirCreateDirWindows, + .wasi => dirCreateDirWasi, + else => dirCreateDirPosix, }; -fn dirMakePosix(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions) Dir.MakeError!void { +fn dirCreateDirPosix(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions) Dir.CreateDirError!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); const current_thread = Thread.getCurrent(t); @@ -1559,8 +1559,8 @@ fn dirMakePosix(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissio } } -fn dirMakeWasi(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions) Dir.MakeError!void { - if (builtin.link_libc) return dirMakePosix(userdata, dir, sub_path, permissions); +fn dirCreateDirWasi(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions) Dir.CreateDirError!void { + if (builtin.link_libc) return dirCreateDirPosix(userdata, dir, sub_path, permissions); const t: *Threaded = @ptrCast(@alignCast(userdata)); const current_thread = Thread.getCurrent(t); try current_thread.beginSyscall(); @@ -1601,7 +1601,7 @@ fn dirMakeWasi(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permission } } -fn dirMakeWindows(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions) Dir.MakeError!void { +fn dirCreateDirWindows(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions) Dir.CreateDirError!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); const current_thread = Thread.getCurrent(t); try current_thread.checkCancel(); @@ -1627,19 +1627,19 @@ fn dirMakeWindows(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permiss windows.CloseHandle(sub_dir_handle); } -fn dirMakePath( +fn dirCreateDirPath( userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions, -) Dir.MakePathError!Dir.MakePathStatus { +) 
Dir.CreateDirPathError!Dir.CreatePathStatus { const t: *Threaded = @ptrCast(@alignCast(userdata)); var it = std.fs.path.componentIterator(sub_path); - var status: Dir.MakePathStatus = .existed; + var status: Dir.CreatePathStatus = .existed; var component = it.last() orelse return error.BadPathName; while (true) { - if (dirMake(t, dir, component.path, permissions)) |_| { + if (dirCreateDir(t, dir, component.path, permissions)) |_| { status = .created; } else |err| switch (err) { error.PathAlreadyExists => { @@ -1659,37 +1659,37 @@ fn dirMakePath( } } -const dirMakeOpenPath = switch (native_os) { - .windows => dirMakeOpenPathWindows, - .wasi => dirMakeOpenPathWasi, - else => dirMakeOpenPathPosix, +const dirCreateDirPathOpen = switch (native_os) { + .windows => dirCreateDirPathOpenWindows, + .wasi => dirCreateDirPathOpenWasi, + else => dirCreateDirPathOpenPosix, }; -fn dirMakeOpenPathPosix( +fn dirCreateDirPathOpenPosix( userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions, options: Dir.OpenOptions, -) Dir.MakeOpenPathError!Dir { +) Dir.CreateDirPathOpenError!Dir { const t: *Threaded = @ptrCast(@alignCast(userdata)); const t_io = ioBasic(t); return dirOpenDirPosix(t, dir, sub_path, options) catch |err| switch (err) { error.FileNotFound => { - _ = try dir.makePathStatus(t_io, sub_path, permissions); + _ = try dir.createDirPathStatus(t_io, sub_path, permissions); return dirOpenDirPosix(t, dir, sub_path, options); }, else => |e| return e, }; } -fn dirMakeOpenPathWindows( +fn dirCreateDirPathOpenWindows( userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions, options: Dir.OpenOptions, -) Dir.MakeOpenPathError!Dir { +) Dir.CreateDirPathOpenError!Dir { const t: *Threaded = @ptrCast(@alignCast(userdata)); const current_thread = Thread.getCurrent(t); const w = windows; @@ -1795,18 +1795,18 @@ fn dirMakeOpenPathWindows( } } -fn dirMakeOpenPathWasi( +fn dirCreateDirPathOpenWasi( userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, permissions: Dir.Permissions, options: Dir.OpenOptions, -) Dir.MakeOpenPathError!Dir { +) Dir.CreateDirPathOpenError!Dir { const t: *Threaded = @ptrCast(@alignCast(userdata)); const t_io = ioBasic(t); return dirOpenDirWasi(t, dir, sub_path, options) catch |err| switch (err) { error.FileNotFound => { - _ = try dir.makePathStatus(t_io, sub_path, permissions); + _ = try dir.createDirPathStatus(t_io, sub_path, permissions); return dirOpenDirWasi(t, dir, sub_path, options); }, else => |e| return e, @@ -3352,11 +3352,6 @@ pub fn dirOpenDirWindows( } } -const MakeOpenDirAccessMaskWOptions = struct { - no_follow: bool, - create_disposition: u32, -}; - fn dirClose(userdata: ?*anyopaque, dirs: []const Dir) void { const t: *Threaded = @ptrCast(@alignCast(userdata)); _ = t; diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 644a6fc52d..bcb9048e0e 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -226,7 +226,7 @@ test "Dir.readLink" { // test 3: relative path symlink const parent_file = ".." 
++ Dir.path.sep_str ++ "target.txt"; const canonical_parent_file = try ctx.toCanonicalPathSep(parent_file); - var subdir = try ctx.dir.makeOpenPath(io, "subdir", .{}); + var subdir = try ctx.dir.createDirPathOpen(io, "subdir", .{}); defer subdir.close(io); try setupSymlink(io, subdir, canonical_parent_file, "relative-link.txt", .{}); try testReadLink(io, subdir, canonical_parent_file, "relative-link.txt"); @@ -411,7 +411,7 @@ test "openDir non-cwd parent '..'" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - var subdir = try tmp.dir.makeOpenPath(io, "subdir", .{}); + var subdir = try tmp.dir.createDirPathOpen(io, "subdir", .{}); defer subdir.close(io); var dir = try subdir.openDir(io, "..", .{}); @@ -613,13 +613,13 @@ test "Dir.Iterator but dir is deleted during iteration" { defer tmp.cleanup(); // Create directory and setup an iterator for it - var subdir = try tmp.dir.makeOpenPath(io, "subdir", .{ .open_options = .{ .iterate = true } }); + var subdir = try tmp.dir.createDirPathOpen(io, "subdir", .{ .open_options = .{ .iterate = true } }); defer subdir.close(io); var iterator = subdir.iterate(); // Create something to iterate over within the subdir - try tmp.dir.makePath(io, "subdir" ++ Dir.path.sep_str ++ "b"); + try tmp.dir.createDirPath(io, "subdir" ++ Dir.path.sep_str ++ "b"); // Then, before iterating, delete the directory that we're iterating. // This is a contrived reproduction, but this could happen outside of the program, in another thread, etc. @@ -862,13 +862,13 @@ test "file operations on directories" { }.impl); } -test "makeOpenPath parent dirs do not exist" { +test "createDirPathOpen parent dirs do not exist" { const io = testing.io; var tmp_dir = tmpDir(.{}); defer tmp_dir.cleanup(); - var dir = try tmp_dir.dir.makeOpenPath(io, "root_dir/parent_dir/some_dir", .{}); + var dir = try tmp_dir.dir.createDirPathOpen(io, "root_dir/parent_dir/some_dir", .{}); dir.close(io); // double check that the full directory structure was created @@ -1016,7 +1016,7 @@ test "Dir.rename directory onto non-empty dir" { try ctx.dir.createDir(io, test_dir_path, .default_dir); - var target_dir = try ctx.dir.makeOpenPath(io, target_dir_path, .{}); + var target_dir = try ctx.dir.createDirPathOpen(io, target_dir_path, .{}); var file = try target_dir.createFile(io, "test_file", .{ .read = true }); file.close(io); target_dir.close(io); @@ -1155,9 +1155,9 @@ test "deleteTree does not follow symlinks" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - try tmp.dir.makePath(io, "b"); + try tmp.dir.createDirPath(io, "b"); { - var a = try tmp.dir.makeOpenPath(io, "a", .{}); + var a = try tmp.dir.createDirPathOpen(io, "a", .{}); defer a.close(io); try setupSymlink(io, a, "../b", "b", .{ .is_directory = true }); @@ -1184,7 +1184,7 @@ test "deleteTree on a symlink" { try tmp.dir.access(io, "file", .{}); // Symlink to a directory - try tmp.dir.makePath(io, "dir"); + try tmp.dir.createDirPath(io, "dir"); try setupSymlink(io, tmp.dir, "dir", "dirlink", .{ .is_directory = true }); try tmp.dir.deleteTree(io, "dirlink"); @@ -1192,14 +1192,14 @@ test "deleteTree on a symlink" { try tmp.dir.access(io, "dir", .{}); } -test "makePath, put some files in it, deleteTree" { +test "createDirPath, put some files in it, deleteTree" { try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { const io = ctx.io; const allocator = ctx.arena.allocator(); const dir_path = try ctx.transformPath("os_test_tmp"); - try ctx.dir.makePath(io, try Dir.path.join(allocator, &.{ "os_test_tmp", "b", "c" })); + try 
ctx.dir.createDirPath(io, try Dir.path.join(allocator, &.{ "os_test_tmp", "b", "c" })); try ctx.dir.writeFile(io, .{ .sub_path = try Dir.path.join(allocator, &.{ "os_test_tmp", "b", "c", "file.txt" }), .data = "nonsense", @@ -1215,14 +1215,14 @@ test "makePath, put some files in it, deleteTree" { }.impl); } -test "makePath, put some files in it, deleteTreeMinStackSize" { +test "createDirPath, put some files in it, deleteTreeMinStackSize" { try testWithAllSupportedPathTypes(struct { fn impl(ctx: *TestContext) !void { const io = ctx.io; const allocator = ctx.arena.allocator(); const dir_path = try ctx.transformPath("os_test_tmp"); - try ctx.dir.makePath(io, try Dir.path.join(allocator, &.{ "os_test_tmp", "b", "c" })); + try ctx.dir.createDirPath(io, try Dir.path.join(allocator, &.{ "os_test_tmp", "b", "c" })); try ctx.dir.writeFile(io, .{ .sub_path = try Dir.path.join(allocator, &.{ "os_test_tmp", "b", "c", "file.txt" }), .data = "nonsense", @@ -1238,7 +1238,7 @@ test "makePath, put some files in it, deleteTreeMinStackSize" { }.impl); } -test "makePath in a directory that no longer exists" { +test "createDirPath in a directory that no longer exists" { if (native_os == .windows) return error.SkipZigTest; // Windows returns FileBusy if attempting to remove an open dir const io = testing.io; @@ -1247,10 +1247,10 @@ test "makePath in a directory that no longer exists" { defer tmp.cleanup(); try tmp.parent_dir.deleteTree(io, &tmp.sub_path); - try expectError(error.FileNotFound, tmp.dir.makePath(io, "sub-path")); + try expectError(error.FileNotFound, tmp.dir.createDirPath(io, "sub-path")); } -test "makePath but sub_path contains pre-existing file" { +test "createDirPath but sub_path contains pre-existing file" { const io = testing.io; var tmp = tmpDir(.{}); @@ -1259,7 +1259,7 @@ test "makePath but sub_path contains pre-existing file" { try tmp.dir.createDir(io, "foo", .default_dir); try tmp.dir.writeFile(io, .{ .sub_path = "foo/bar", .data = "" }); - try expectError(error.NotDir, tmp.dir.makePath(io, "foo/bar/baz")); + try expectError(error.NotDir, tmp.dir.createDirPath(io, "foo/bar/baz")); } fn expectDir(io: Io, dir: Dir, path: []const u8) !void { @@ -1279,7 +1279,7 @@ test "makepath existing directories" { try tmpA.createDir(io, "B", .default_dir); const testPath = "A" ++ Dir.path.sep_str ++ "B" ++ Dir.path.sep_str ++ "C"; - try tmp.dir.makePath(io, testPath); + try tmp.dir.createDirPath(io, testPath); try expectDir(io, tmp.dir, testPath); } @@ -1293,7 +1293,7 @@ test "makepath through existing valid symlink" { try tmp.dir.createDir(io, "realfolder", .default_dir); try setupSymlink(io, tmp.dir, "." ++ Dir.path.sep_str ++ "realfolder", "working-symlink", .{}); - try tmp.dir.makePath(io, "working-symlink" ++ Dir.path.sep_str ++ "in-realfolder"); + try tmp.dir.createDirPath(io, "working-symlink" ++ Dir.path.sep_str ++ "in-realfolder"); try expectDir(io, tmp.dir, "realfolder" ++ Dir.path.sep_str ++ "in-realfolder"); } @@ -1309,7 +1309,7 @@ test "makepath relative walks" { }); defer testing.allocator.free(relPath); - try tmp.dir.makePath(io, relPath); + try tmp.dir.createDirPath(io, relPath); // How .. 
is handled is different on Windows than non-Windows switch (native_os) { @@ -1348,7 +1348,7 @@ test "makepath ignores '.'" { }); defer testing.allocator.free(expectedPath); - try tmp.dir.makePath(io, dotPath); + try tmp.dir.createDirPath(io, dotPath); try expectDir(io, tmp.dir, expectedPath); } @@ -1358,7 +1358,7 @@ fn testFilenameLimits(io: Io, iterable_dir: Dir, maxed_filename: []const u8, max { try iterable_dir.writeFile(io, .{ .sub_path = maxed_filename, .data = "" }); - var maxed_dir = try iterable_dir.makeOpenPath(io, maxed_dirname, .{}); + var maxed_dir = try iterable_dir.createDirPathOpen(io, maxed_dirname, .{}); defer maxed_dir.close(io); try maxed_dir.writeFile(io, .{ .sub_path = maxed_filename, .data = "" }); @@ -1511,7 +1511,7 @@ test "access file" { const dir_path = try ctx.transformPath("os_test_tmp"); const file_path = try ctx.transformPath("os_test_tmp" ++ Dir.path.sep_str ++ "file.txt"); - try ctx.dir.makePath(io, dir_path); + try ctx.dir.createDirPath(io, dir_path); try expectError(error.FileNotFound, ctx.dir.access(io, file_path, .{})); try ctx.dir.writeFile(io, .{ .sub_path = file_path, .data = "" }); @@ -1527,7 +1527,7 @@ test "sendfile" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - try tmp.dir.makePath(io, "os_test_tmp"); + try tmp.dir.createDirPath(io, "os_test_tmp"); var dir = try tmp.dir.openDir(io, "os_test_tmp", .{}); defer dir.close(io); @@ -1574,7 +1574,7 @@ test "sendfile with buffered data" { var tmp = tmpDir(.{}); defer tmp.cleanup(); - try tmp.dir.makePath(io, "os_test_tmp"); + try tmp.dir.createDirPath(io, "os_test_tmp"); var dir = try tmp.dir.openDir(io, "os_test_tmp", .{}); defer dir.close(io); @@ -1851,7 +1851,7 @@ test "walker" { }); for (expected_paths.keys()) |key| { - try tmp.dir.makePath(io, key); + try tmp.dir.createDirPath(io, key); } var walker = try tmp.dir.walk(testing.allocator); @@ -1913,7 +1913,7 @@ test "selective walker, skip entries that start with ." { }); for (paths_to_create) |path| { - try tmp.dir.makePath(io, path); + try tmp.dir.createDirPath(io, path); } var walker = try tmp.dir.walkSelectively(testing.allocator); @@ -1959,8 +1959,8 @@ test "walker without fully iterating" { // Create 2 directories inside the tmp directory, but then only iterate once before breaking. // This ensures that walker doesn't try to close the initial directory when not fully iterating. 
- try tmp.dir.makePath(io, "a"); - try tmp.dir.makePath(io, "b"); + try tmp.dir.createDirPath(io, "a"); + try tmp.dir.createDirPath(io, "b"); var num_walked: usize = 0; while (try walker.next(io)) |_| { @@ -2109,8 +2109,8 @@ test "invalid UTF-8/WTF-8 paths" { try expectError(expected_err, ctx.dir.createDir(io, invalid_path, .default_dir)); - try expectError(expected_err, ctx.dir.makePath(io, invalid_path)); - try expectError(expected_err, ctx.dir.makeOpenPath(io, invalid_path, .{})); + try expectError(expected_err, ctx.dir.createDirPath(io, invalid_path)); + try expectError(expected_err, ctx.dir.createDirPathOpen(io, invalid_path, .{})); try expectError(expected_err, ctx.dir.openDir(io, invalid_path, .{})); @@ -2574,7 +2574,7 @@ test "hard link with different directories" { const target_name = "link-target"; const link_name = "newlink"; - const subdir = try tmp.dir.makeOpenPath(io, "subdir", .{}); + const subdir = try tmp.dir.createDirPathOpen(io, "subdir", .{}); defer tmp.dir.deleteFile(io, target_name) catch {}; try tmp.dir.writeFile(io, .{ .sub_path = target_name, .data = "example" }); diff --git a/lib/std/posix.zig b/lib/std/posix.zig index 3997bc70cd..133dfc0293 100644 --- a/lib/std/posix.zig +++ b/lib/std/posix.zig @@ -1079,7 +1079,7 @@ pub fn mkdiratZ(dir_fd: fd_t, sub_dir_path: [*:0]const u8, mode: mode_t) MakeDir } } -pub const MakeDirError = std.Io.Dir.MakeError; +pub const MakeDirError = std.Io.Dir.CreateDirError; /// Create a directory. /// `mode` is ignored on Windows and WASI. diff --git a/lib/std/tar.zig b/lib/std/tar.zig index d45aa99443..024a425919 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -606,7 +606,7 @@ pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOp switch (file.kind) { .directory => { if (file_name.len > 0 and !options.exclude_empty_directories) { - try dir.makePath(io, file_name); + try dir.createDirPath(io, file_name); } }, .file => { @@ -642,7 +642,7 @@ fn createDirAndFile(io: Io, dir: Io.Dir, file_name: []const u8, permissions: Io. 
const fs_file = dir.createFile(io, file_name, .{ .exclusive = true, .permissions = permissions }) catch |err| { if (err == error.FileNotFound) { if (std.fs.path.dirname(file_name)) |dir_name| { - try dir.makePath(io, dir_name); + try dir.createDirPath(io, dir_name); return try dir.createFile(io, file_name, .{ .exclusive = true, .permissions = permissions }); } } @@ -656,7 +656,7 @@ fn createDirAndSymlink(io: Io, dir: Io.Dir, link_name: []const u8, file_name: [] dir.symLink(io, link_name, file_name, .{}) catch |err| { if (err == error.FileNotFound) { if (std.fs.path.dirname(file_name)) |dir_name| { - try dir.makePath(io, dir_name); + try dir.createDirPath(io, dir_name); return try dir.symLink(io, link_name, file_name, .{}); } } diff --git a/lib/std/testing.zig b/lib/std/testing.zig index f3dd114af3..5b5ec852e9 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -635,12 +635,12 @@ pub fn tmpDir(opts: Io.Dir.OpenOptions) TmpDir { _ = std.fs.base64_encoder.encode(&sub_path, &random_bytes); const cwd = Io.Dir.cwd(); - var cache_dir = cwd.makeOpenPath(io, ".zig-cache", .{}) catch + var cache_dir = cwd.createDirPathOpen(io, ".zig-cache", .{}) catch @panic("unable to make tmp dir for testing: unable to make and open .zig-cache dir"); defer cache_dir.close(io); - const parent_dir = cache_dir.makeOpenPath(io, "tmp", .{}) catch + const parent_dir = cache_dir.createDirPathOpen(io, "tmp", .{}) catch @panic("unable to make tmp dir for testing: unable to make and open .zig-cache/tmp dir"); - const dir = parent_dir.makeOpenPath(io, &sub_path, .{ .open_options = opts }) catch + const dir = parent_dir.createDirPathOpen(io, &sub_path, .{ .open_options = opts }) catch @panic("unable to make tmp dir for testing: unable to make and open the tmp dir"); return .{ diff --git a/lib/std/zip.zig b/lib/std/zip.zig index 770bfd8ae7..ff95587e7a 100644 --- a/lib/std/zip.zig +++ b/lib/std/zip.zig @@ -554,13 +554,13 @@ pub const Iterator = struct { if (filename[filename.len - 1] == '/') { if (self.uncompressed_size != 0) return error.ZipBadDirectorySize; - try dest.makePath(io, filename[0 .. filename.len - 1]); + try dest.createDirPath(io, filename[0 .. filename.len - 1]); return; } const out_file = blk: { if (std.fs.path.dirname(filename)) |dirname| { - var parent_dir = try dest.makeOpenPath(io, dirname, .{}); + var parent_dir = try dest.createDirPathOpen(io, dirname, .{}); defer parent_dir.close(io); const basename = std.fs.path.basename(filename); diff --git a/src/Compilation.zig b/src/Compilation.zig index dc254a36c5..8b840b8d45 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -832,7 +832,7 @@ pub const Directories = struct { const nonempty_path = if (path.len == 0) "." 
else path; const handle_or_err = switch (thing) { .@"zig lib" => Io.Dir.cwd().openDir(io, nonempty_path, .{}), - .@"global cache", .@"local cache" => Io.Dir.cwd().makeOpenPath(io, nonempty_path, .{}), + .@"global cache", .@"local cache" => Io.Dir.cwd().createDirPathOpen(io, nonempty_path, .{}), }; return .{ .path = if (path.len == 0) null else path, @@ -1879,7 +1879,7 @@ pub const CreateDiagnostic = union(enum) { pub const CreateCachePath = struct { which: enum { local, global }, sub: []const u8, - err: (Io.Dir.MakeError || Io.Dir.OpenError || Io.Dir.StatFileError), + err: (Io.Dir.CreateDirError || Io.Dir.OpenError || Io.Dir.StatFileError), }; pub fn format(diag: CreateDiagnostic, w: *Writer) Writer.Error!void { switch (diag) { @@ -2120,7 +2120,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, cache.* = .{ .gpa = gpa, .io = io, - .manifest_dir = options.dirs.local_cache.handle.makeOpenPath(io, "h", .{}) catch |err| { + .manifest_dir = options.dirs.local_cache.handle.createDirPathOpen(io, "h", .{}) catch |err| { return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = "h", .err = err } }); }, }; @@ -2170,7 +2170,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, // to redundantly happen for each AstGen operation. const zir_sub_dir = "z"; - var local_zir_dir = options.dirs.local_cache.handle.makeOpenPath(io, zir_sub_dir, .{}) catch |err| { + var local_zir_dir = options.dirs.local_cache.handle.createDirPathOpen(io, zir_sub_dir, .{}) catch |err| { return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = zir_sub_dir, .err = err } }); }; errdefer local_zir_dir.close(io); @@ -2178,7 +2178,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, .handle = local_zir_dir, .path = try options.dirs.local_cache.join(arena, &.{zir_sub_dir}), }; - var global_zir_dir = options.dirs.global_cache.handle.makeOpenPath(io, zir_sub_dir, .{}) catch |err| { + var global_zir_dir = options.dirs.global_cache.handle.createDirPathOpen(io, zir_sub_dir, .{}) catch |err| { return diag.fail(.{ .create_cache_path = .{ .which = .global, .sub = zir_sub_dir, .err = err } }); }; errdefer global_zir_dir.close(io); @@ -2449,7 +2449,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, const digest = hash.final(); const artifact_sub_dir = "o" ++ fs.path.sep_str ++ digest; - var artifact_dir = options.dirs.local_cache.handle.makeOpenPath(io, artifact_sub_dir, .{}) catch |err| { + var artifact_dir = options.dirs.local_cache.handle.createDirPathOpen(io, artifact_sub_dir, .{}) catch |err| { return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = artifact_sub_dir, .err = err } }); }; errdefer artifact_dir.close(io); @@ -2917,7 +2917,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE tmp_dir_rand_int = std.crypto.random.int(u64); const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int); const path = try comp.dirs.local_cache.join(arena, &.{tmp_dir_sub_path}); - const handle = comp.dirs.local_cache.handle.makeOpenPath(io, tmp_dir_sub_path, .{}) catch |err| { + const handle = comp.dirs.local_cache.handle.createDirPathOpen(io, tmp_dir_sub_path, .{}) catch |err| { return comp.setMiscFailure(.open_output, "failed to create output directory '{s}': {t}", .{ path, err }); }; break :d .{ .path = path, .handle = handle }; @@ -2998,7 +2998,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE 
tmp_dir_rand_int = std.crypto.random.int(u64); const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int); const path = try comp.dirs.local_cache.join(arena, &.{tmp_dir_sub_path}); - const handle = comp.dirs.local_cache.handle.makeOpenPath(io, tmp_dir_sub_path, .{}) catch |err| { + const handle = comp.dirs.local_cache.handle.createDirPathOpen(io, tmp_dir_sub_path, .{}) catch |err| { return comp.setMiscFailure(.open_output, "failed to create output directory '{s}': {t}", .{ path, err }); }; break :d .{ .path = path, .handle = handle }; @@ -3437,7 +3437,7 @@ fn renameTmpIntoCache( continue; }, error.FileNotFound => { - try cache_directory.handle.makePath(io, "o"); + try cache_directory.handle.createDirPath(io, "o"); continue; }, else => |e| return e, @@ -5276,7 +5276,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void { const io = comp.io; const docs_path = comp.resolveEmitPath(comp.emit_docs.?); - var out_dir = docs_path.root_dir.handle.makeOpenPath(io, docs_path.sub_path, .{}) catch |err| { + var out_dir = docs_path.root_dir.handle.createDirPathOpen(io, docs_path.sub_path, .{}) catch |err| { return comp.lockAndSetMiscFailure( .docs_copy, "unable to create output directory '{f}': {s}", @@ -5513,7 +5513,7 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU assert(docs_bin_file.sub_path.len > 0); // emitted binary is not a directory const docs_path = comp.resolveEmitPath(comp.emit_docs.?); - var out_dir = docs_path.root_dir.handle.makeOpenPath(io, docs_path.sub_path, .{}) catch |err| { + var out_dir = docs_path.root_dir.handle.createDirPathOpen(io, docs_path.sub_path, .{}) catch |err| { comp.lockAndSetMiscFailure( .docs_copy, "unable to create output directory '{f}': {t}", @@ -5705,7 +5705,7 @@ pub fn translateC( const tmp_basename = std.fmt.hex(std.crypto.random.int(u64)); const tmp_sub_path = "tmp" ++ fs.path.sep_str ++ tmp_basename; const cache_dir = comp.dirs.local_cache.handle; - var cache_tmp_dir = try cache_dir.makeOpenPath(io, tmp_sub_path, .{}); + var cache_tmp_dir = try cache_dir.createDirPathOpen(io, tmp_sub_path, .{}); defer cache_tmp_dir.close(io); const translated_path = try comp.dirs.local_cache.join(arena, &.{ tmp_sub_path, translated_basename }); @@ -6280,7 +6280,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr // We can't know the digest until we do the C compiler invocation, // so we need a temporary filename. const out_obj_path = try comp.tmpFilePath(arena, o_basename); - var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath(io, "tmp", .{}); + var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, "tmp", .{}); defer zig_cache_tmp_dir.close(io); const out_diag_path = if (comp.clang_passthrough_mode or !ext.clangSupportsDiagnostics()) @@ -6445,7 +6445,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr // Rename into place. 
const digest = man.final(); const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest }); - var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(io, o_sub_path, .{}); + var o_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, o_sub_path, .{}); defer o_dir.close(io); const tmp_basename = fs.path.basename(out_obj_path); try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename, io); @@ -6534,7 +6534,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 const digest = man.final(); const o_sub_path = try fs.path.join(arena, &.{ "o", &digest }); - var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(io, o_sub_path, .{}); + var o_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, o_sub_path, .{}); defer o_dir.close(io); const in_rc_path = try comp.dirs.local_cache.join(comp.gpa, &.{ @@ -6622,7 +6622,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 const rc_basename_noext = src_basename[0 .. src_basename.len - fs.path.extension(src_basename).len]; const digest = if (try man.hit()) man.final() else blk: { - var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath(io, "tmp", .{}); + var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, "tmp", .{}); defer zig_cache_tmp_dir.close(io); const res_filename = try std.fmt.allocPrint(arena, "{s}.res", .{rc_basename_noext}); @@ -6693,7 +6693,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 // Rename into place. const digest = man.final(); const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest }); - var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(io, o_sub_path, .{}); + var o_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, o_sub_path, .{}); defer o_dir.close(io); const tmp_basename = fs.path.basename(out_res_path); try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename, io); diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 45fcd66b8c..8a11736e42 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -500,7 +500,7 @@ fn runResource( var tmp_directory: Cache.Directory = .{ .path = tmp_directory_path, .handle = handle: { - const dir = cache_root.handle.makeOpenPath(io, tmp_dir_sub_path, .{ + const dir = cache_root.handle.createDirPathOpen(io, tmp_dir_sub_path, .{ .open_options = .{ .iterate = true }, }) catch |err| { try eb.addRootErrorMessage(.{ @@ -524,7 +524,7 @@ fn runResource( if (native_os == .linux and f.job_queue.work_around_btrfs_bug) { // https://github.com/ziglang/zig/issues/17095 pkg_path.root_dir.handle.close(io); - pkg_path.root_dir.handle = cache_root.handle.makeOpenPath(io, tmp_dir_sub_path, .{ + pkg_path.root_dir.handle = cache_root.handle.createDirPathOpen(io, tmp_dir_sub_path, .{ .open_options = .{ .iterate = true }, }) catch @panic("btrfs workaround failed"); } @@ -1366,7 +1366,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U // we do not attempt to replicate the exact structure of a real .git // directory, since that isn't relevant for fetching a package. 
{ - var pack_dir = try out_dir.makeOpenPath(io, ".git", .{}); + var pack_dir = try out_dir.createDirPathOpen(io, ".git", .{}); defer pack_dir.close(io); var pack_file = try pack_dir.createFile(io, "pkg.pack", .{ .read = true }); defer pack_file.close(io); @@ -1427,7 +1427,7 @@ fn recursiveDirectoryCopy(f: *Fetch, dir: Io.Dir, tmp_dir: Io.Dir) anyerror!void .file => { dir.copyFile(entry.path, tmp_dir, entry.path, io, .{}) catch |err| switch (err) { error.FileNotFound => { - if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(io, dirname); + if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.createDirPath(io, dirname); try dir.copyFile(entry.path, tmp_dir, entry.path, io, .{}); }, else => |e| return e, @@ -1440,7 +1440,7 @@ fn recursiveDirectoryCopy(f: *Fetch, dir: Io.Dir, tmp_dir: Io.Dir) anyerror!void // the destination directory, fail with an error instead. tmp_dir.symLink(io, link_name, entry.path, .{}) catch |err| switch (err) { error.FileNotFound => { - if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(io, dirname); + if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.createDirPath(io, dirname); try tmp_dir.symLink(io, link_name, entry.path, .{}); }, else => |e| return e, @@ -2250,7 +2250,7 @@ const TestFetchBuilder = struct { cache_parent_dir: std.Io.Dir, path_or_url: []const u8, ) !*Fetch { - const cache_dir = try cache_parent_dir.makeOpenPath(io, "zig-global-cache", .{}); + const cache_dir = try cache_parent_dir.createDirPathOpen(io, "zig-global-cache", .{}); self.http_client = .{ .allocator = allocator, .io = io }; self.global_cache_directory = .{ .handle = cache_dir, .path = null }; diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index eb3d2a447e..f241f768f6 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -1720,10 +1720,10 @@ pub fn main() !void { var pack_file_reader = pack_file.reader(io, &pack_file_buffer); const commit = try Oid.parse(format, args[3]); - var worktree = try Io.Dir.cwd().makeOpenPath(io, args[4], .{}); + var worktree = try Io.Dir.cwd().createDirPathOpen(io, args[4], .{}); defer worktree.close(io); - var git_dir = try worktree.makeOpenPath(io, ".git", .{}); + var git_dir = try worktree.createDirPathOpen(io, ".git", .{}); defer git_dir.close(io); std.debug.print("Starting index...\n", .{}); diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig index 44676007e2..ba85f45830 100644 --- a/src/libs/freebsd.zig +++ b/src/libs/freebsd.zig @@ -444,7 +444,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye var cache: Cache = .{ .gpa = gpa, .io = io, - .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath(io, "h", .{}), + .manifest_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, "h", .{}), }; cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); @@ -477,7 +477,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest }); var o_directory: Cache.Directory = .{ - .handle = try comp.dirs.global_cache.handle.makeOpenPath(io, o_sub_path, .{}), + .handle = try comp.dirs.global_cache.handle.createDirPathOpen(io, o_sub_path, .{}), .path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}), }; defer o_directory.handle.close(io); diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig index 8371b3288d..e9b6ce1882 100644 --- a/src/libs/glibc.zig +++ b/src/libs/glibc.zig @@ -679,7 +679,7 @@ pub fn 
buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye var cache: Cache = .{ .gpa = gpa, .io = io, - .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath(io, "h", .{}), + .manifest_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, "h", .{}), }; cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); @@ -712,7 +712,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest }); var o_directory: Cache.Directory = .{ - .handle = try comp.dirs.global_cache.handle.makeOpenPath(io, o_sub_path, .{}), + .handle = try comp.dirs.global_cache.handle.createDirPathOpen(io, o_sub_path, .{}), .path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}), }; defer o_directory.handle.close(io); diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig index c4b002f983..03ed917c4f 100644 --- a/src/libs/mingw.zig +++ b/src/libs/mingw.zig @@ -258,7 +258,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { var cache: Cache = .{ .gpa = gpa, .io = io, - .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath(io, "h", .{}), + .manifest_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, "h", .{}), }; cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); @@ -297,7 +297,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { const digest = man.final(); const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest }); - var o_dir = try comp.dirs.global_cache.handle.makeOpenPath(io, o_sub_path, .{}); + var o_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, o_sub_path, .{}); defer o_dir.close(io); const aro = @import("aro"); diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig index 9c09c35b0a..9e4213d237 100644 --- a/src/libs/netbsd.zig +++ b/src/libs/netbsd.zig @@ -385,7 +385,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye var cache: Cache = .{ .gpa = gpa, .io = io, - .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath(io, "h", .{}), + .manifest_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, "h", .{}), }; cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() }); cache.addPrefix(comp.dirs.zig_lib); @@ -418,7 +418,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest }); var o_directory: Cache.Directory = .{ - .handle = try comp.dirs.global_cache.handle.makeOpenPath(io, o_sub_path, .{}), + .handle = try comp.dirs.global_cache.handle.createDirPathOpen(io, o_sub_path, .{}), .path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}), }; defer o_directory.handle.close(io); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 513c857dbf..97493ada32 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -3319,7 +3319,7 @@ pub fn reopenDebugInfo(self: *MachO) !void { ); defer gpa.free(d_sym_path); - var d_sym_bundle = try self.base.emit.root_dir.handle.makeOpenPath(io, d_sym_path, .{}); + var d_sym_bundle = try self.base.emit.root_dir.handle.createDirPathOpen(io, d_sym_path, .{}); defer d_sym_bundle.close(io); self.d_sym.?.file = try d_sym_bundle.createFile(io, fs.path.basename(self.base.emit.sub_path), .{ diff --git a/src/main.zig b/src/main.zig index fb2a8cfe0f..507b01ab0e 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3381,7 +3381,7 @@ fn 
buildOutputType( const dump_path = try std.fmt.allocPrint(arena, "tmp" ++ sep ++ "{x}-dump-stdin{s}", .{ std.crypto.random.int(u64), ext.canonicalName(target), }); - try dirs.local_cache.handle.makePath(io, "tmp"); + try dirs.local_cache.handle.createDirPath(io, "tmp"); // Note that in one of the happy paths, execve() is used to switch to // clang in which case any cleanup logic that exists for this temporary @@ -6955,7 +6955,7 @@ fn cmdFetch( var global_cache_directory: Directory = l: { const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena); break :l .{ - .handle = try Io.Dir.cwd().makeOpenPath(io, p, .{}), + .handle = try Io.Dir.cwd().createDirPathOpen(io, p, .{}), .path = p, }; }; @@ -7201,7 +7201,7 @@ fn createDependenciesModule( const rand_int = std.crypto.random.int(u64); const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int); { - var tmp_dir = try dirs.local_cache.handle.makeOpenPath(io, tmp_dir_sub_path, .{}); + var tmp_dir = try dirs.local_cache.handle.createDirPathOpen(io, tmp_dir_sub_path, .{}); defer tmp_dir.close(io); try tmp_dir.writeFile(io, .{ .sub_path = basename, .data = source }); } @@ -7396,7 +7396,7 @@ const Templates = struct { fingerprint: Package.Fingerprint, ) !void { if (fs.path.dirname(template_path)) |dirname| { - out_dir.makePath(io, dirname) catch |err| { + out_dir.createDirPath(io, dirname) catch |err| { fatal("unable to make path '{s}': {t}", .{ dirname, err }); }; } -- cgit v1.2.3 From 51a6f3a5251096f346eb28b22e1c20ed2ceb7d9e Mon Sep 17 00:00:00 2001 From: Ryan Liptak Date: Sat, 20 Dec 2025 21:14:25 -0800 Subject: Update a few more callsites for std.Io changes --- lib/compiler/translate-c/main.zig | 2 +- lib/std/Build.zig | 2 +- lib/std/Build/Cache.zig | 2 +- lib/std/zig/LibCInstallation.zig | 2 +- lib/std/zig/WindowsSdk.zig | 24 +++++++++++++----------- src/link/Wasm.zig | 2 +- src/main.zig | 2 +- 7 files changed, 19 insertions(+), 17 deletions(-) (limited to 'lib/std/Build/Cache.zig') diff --git a/lib/compiler/translate-c/main.zig b/lib/compiler/translate-c/main.zig index 80dc67fbfe..76d67afd18 100644 --- a/lib/compiler/translate-c/main.zig +++ b/lib/compiler/translate-c/main.zig @@ -35,7 +35,7 @@ pub fn main() u8 { } var stderr_buf: [1024]u8 = undefined; - var stderr = Io.File.stderr().writer(&stderr_buf); + var stderr = Io.File.stderr().writer(io, &stderr_buf); var diagnostics: aro.Diagnostics = switch (zig_integration) { false => .{ .output = .{ .to_writer = .{ .color = .detect(stderr.file), diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 4b8909e689..29499d8767 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1776,7 +1776,7 @@ fn tryFindProgram(b: *Build, full_path: []const u8) ?[]const u8 { while (it.next()) |ext| { if (!supportedWindowsProgramExtension(ext)) continue; - return fs.realpathAlloc(b.allocator, b.fmt("{s}{s}", .{ full_path, ext })) catch |err| switch (err) { + return fs.realPathFileAlloc(b.graph.io, b.fmt("{s}{s}", .{ full_path, ext }), b.allocator) catch |err| switch (err) { error.OutOfMemory => @panic("OOM"), else => continue, }; diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 396f204350..b384ab13ff 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -298,7 +298,7 @@ pub const Lock = struct { if (builtin.os.tag == .windows) { // Windows does not guarantee that locks are immediately unlocked when // the file handle is closed. See LockFileEx documentation. 
- lock.manifest_file.unlock(); + lock.manifest_file.unlock(io); } lock.manifest_file.close(io); diff --git a/lib/std/zig/LibCInstallation.zig b/lib/std/zig/LibCInstallation.zig index 83ed23be04..f2489f9ee7 100644 --- a/lib/std/zig/LibCInstallation.zig +++ b/lib/std/zig/LibCInstallation.zig @@ -201,7 +201,7 @@ pub fn findNative(gpa: Allocator, io: Io, args: FindNativeOptions) FindError!Lib try self.findNativeMsvcIncludeDir(gpa, io, sdk); try self.findNativeMsvcLibDir(gpa, sdk); try self.findNativeKernel32LibDir(gpa, io, args, sdk); - try self.findNativeIncludeDirWindows(gpa, io, args, sdk); + try self.findNativeIncludeDirWindows(gpa, io, sdk); try self.findNativeCrtDirWindows(gpa, io, args.target, sdk); } else if (is_haiku) { try self.findNativeIncludeDirPosix(gpa, io, args); diff --git a/lib/std/zig/WindowsSdk.zig b/lib/std/zig/WindowsSdk.zig index 5568070793..b0f24c2aca 100644 --- a/lib/std/zig/WindowsSdk.zig +++ b/lib/std/zig/WindowsSdk.zig @@ -24,7 +24,7 @@ const product_version_max_length = version_major_minor_max_length + ".65535".len /// Find path and version of Windows 10 SDK and Windows 8.1 SDK, and find path to MSVC's `lib/` directory. /// Caller owns the result's fields. /// Returns memory allocated by `gpa` -pub fn find(gpa: Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, NotFound, PathTooLong }!WindowsSdk { +pub fn find(gpa: Allocator, io: Io, arch: std.Target.Cpu.Arch) error{ OutOfMemory, NotFound, PathTooLong }!WindowsSdk { if (builtin.os.tag != .windows) return error.NotFound; //note(dimenus): If this key doesn't exist, neither the Win 8 SDK nor the Win 10 SDK is installed @@ -33,7 +33,7 @@ pub fn find(gpa: Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, NotFo }; defer roots_key.closeKey(); - const windows10sdk = Installation.find(gpa, roots_key, "KitsRoot10", "", "v10.0") catch |err| switch (err) { + const windows10sdk = Installation.find(gpa, io, roots_key, "KitsRoot10", "", "v10.0") catch |err| switch (err) { error.InstallationNotFound => null, error.PathTooLong => null, error.VersionTooLong => null, @@ -41,7 +41,7 @@ pub fn find(gpa: Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, NotFo }; errdefer if (windows10sdk) |*w| w.free(gpa); - const windows81sdk = Installation.find(gpa, roots_key, "KitsRoot81", "winver", "v8.1") catch |err| switch (err) { + const windows81sdk = Installation.find(gpa, io, roots_key, "KitsRoot81", "winver", "v8.1") catch |err| switch (err) { error.InstallationNotFound => null, error.PathTooLong => null, error.VersionTooLong => null, @@ -49,7 +49,7 @@ pub fn find(gpa: Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, NotFo }; errdefer if (windows81sdk) |*w| w.free(gpa); - const msvc_lib_dir: ?[]const u8 = MsvcLibDir.find(gpa, arch) catch |err| switch (err) { + const msvc_lib_dir: ?[]const u8 = MsvcLibDir.find(gpa, io, arch) catch |err| switch (err) { error.MsvcLibDirNotFound => null, error.OutOfMemory => return error.OutOfMemory, }; @@ -80,6 +80,7 @@ pub fn free(sdk: WindowsSdk, gpa: Allocator) void { fn iterateAndFilterByVersion( iterator: *Dir.Iterator, gpa: Allocator, + io: Io, prefix: []const u8, ) error{OutOfMemory}![][]const u8 { const Version = struct { @@ -104,7 +105,7 @@ fn iterateAndFilterByVersion( dirs.deinit(); } - iterate: while (iterator.next() catch null) |entry| { + iterate: while (iterator.next(io) catch null) |entry| { if (entry.kind != .directory) continue; if (!std.mem.startsWith(u8, entry.name, prefix)) continue; @@ -420,13 +421,14 @@ pub const Installation = struct { /// Caller owns the 
result's fields. fn find( gpa: Allocator, + io: Io, roots_key: RegistryWtf8, roots_subkey: []const u8, prefix: []const u8, version_key_name: []const u8, ) error{ OutOfMemory, InstallationNotFound, PathTooLong, VersionTooLong }!Installation { roots: { - const installation = findFromRoot(gpa, roots_key, roots_subkey, prefix) catch + const installation = findFromRoot(gpa, io, roots_key, roots_subkey, prefix) catch break :roots; if (installation.isValidVersion()) return installation; installation.free(gpa); @@ -485,7 +487,7 @@ pub const Installation = struct { defer sdk_lib_dir.close(io); var iterator = sdk_lib_dir.iterate(); - const versions = try iterateAndFilterByVersion(&iterator, gpa, prefix); + const versions = try iterateAndFilterByVersion(&iterator, gpa, io, prefix); if (versions.len == 0) return error.InstallationNotFound; defer { for (versions[1..]) |version| gpa.free(version); @@ -673,7 +675,7 @@ const MsvcLibDir = struct { // First, try getting the packages cache path from the registry. // This only seems to exist when the path is different from the default. method1: { - return findInstancesDirViaSetup(gpa) catch |err| switch (err) { + return findInstancesDirViaSetup(gpa, io) catch |err| switch (err) { error.OutOfMemory => |e| return e, error.PathNotFound => break :method1, }; @@ -766,7 +768,7 @@ const MsvcLibDir = struct { var latest_version: u64 = 0; var instances_dir_it = instances_dir.iterateAssumeFirstIteration(); - while (instances_dir_it.next() catch return error.PathNotFound) |entry| { + while (instances_dir_it.next(io) catch return error.PathNotFound) |entry| { if (entry.kind != .directory) continue; var writer: Writer = .fixed(&state_subpath_buf); @@ -828,7 +830,7 @@ const MsvcLibDir = struct { try lib_dir_buf.appendSlice("VC\\Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"); var default_tools_version_buf: [512]u8 = undefined; - const default_tools_version_contents = Dir.cwd().readFile(lib_dir_buf.items, &default_tools_version_buf) catch { + const default_tools_version_contents = Dir.cwd().readFile(io, lib_dir_buf.items, &default_tools_version_buf) catch { return error.PathNotFound; }; var tokenizer = std.mem.tokenizeAny(u8, default_tools_version_contents, " \r\n"); @@ -871,7 +873,7 @@ const MsvcLibDir = struct { defer visualstudio_folder.close(io); var iterator = visualstudio_folder.iterate(); - break :vs_versions try iterateAndFilterByVersion(&iterator, gpa, ""); + break :vs_versions try iterateAndFilterByVersion(&iterator, gpa, io, ""); }; defer { for (vs_versions) |vs_version| gpa.free(vs_version); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 216a641db3..e9c76ff40a 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -3008,7 +3008,7 @@ pub fn createEmpty( else .default_file else - 0, + .default_file, }); wasm.name = emit.sub_path; diff --git a/src/main.zig b/src/main.zig index 507b01ab0e..3a12882468 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4513,7 +4513,7 @@ fn runOrTestHotSwap( // tmp zig-cache and use it to spawn the child process. This way we are free to update // the binary with each requested hot update. .windows => blk: { - try lf.emit.root_dir.handle.copyFile(lf.emit.sub_path, comp.dirs.local_cache.handle, lf.emit.sub_path, .{}); + try lf.emit.root_dir.handle.copyFile(lf.emit.sub_path, comp.dirs.local_cache.handle, lf.emit.sub_path, io, .{}); break :blk try fs.path.join(gpa, &.{ comp.dirs.local_cache.path orelse ".", lf.emit.sub_path }); }, -- cgit v1.2.3