author    Andrew Kelley <andrew@ziglang.org>  2025-12-05 19:08:37 -0800
committer Andrew Kelley <andrew@ziglang.org>  2025-12-23 22:15:07 -0800
commit    aafddc2ea13e40a8262d9378aeca2e097a37ac03 (patch)
tree      46770e51147a635a43c2e7356e62064466b51c34
parent    eab354b2f5d7242c036523394023e9824be7eca9 (diff)
download  zig-aafddc2ea13e40a8262d9378aeca2e097a37ac03.tar.gz
          zig-aafddc2ea13e40a8262d9378aeca2e097a37ac03.zip
update all occurrences of close() to close(io)
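This commit threads an explicit std.Io instance through every File.close and Dir.close call site. A minimal sketch of the call-site migration, assuming std.Io.Threaded as the Io implementation (the same setup used in lib/compiler/objcopy.zig and lib/compiler/std-docs.zig below); the file name is illustrative:

    var threaded: std.Io.Threaded = .init(gpa);
    defer threaded.deinit();
    const io = threaded.io();

    // Opening is unchanged; only the close call now takes the Io instance.
    const file = try std.fs.cwd().openFile("example.txt", .{});
    defer file.close(io); // previously: defer file.close();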
-rw-r--r--  build.zig | 4
-rw-r--r--  lib/compiler/aro/aro/Compilation.zig | 8
-rw-r--r--  lib/compiler/aro/aro/Driver.zig | 12
-rw-r--r--  lib/compiler/aro/aro/Driver/Filesystem.zig | 28
-rw-r--r--  lib/compiler/aro/aro/Toolchain.zig | 3
-rw-r--r--  lib/compiler/objcopy.zig | 4
-rw-r--r--  lib/compiler/resinator/cli.zig | 4
-rw-r--r--  lib/compiler/resinator/compile.zig | 34
-rw-r--r--  lib/compiler/resinator/errors.zig | 4
-rw-r--r--  lib/compiler/resinator/main.zig | 14
-rw-r--r--  lib/compiler/resinator/utils.zig | 9
-rw-r--r--  lib/compiler/std-docs.zig | 33
-rw-r--r--  lib/compiler/translate-c/main.zig | 5
-rw-r--r--  lib/std/Build/Cache/Directory.zig | 4
-rw-r--r--  lib/std/Build/Fuzz.zig | 4
-rw-r--r--  lib/std/Build/Step.zig | 3
-rw-r--r--  lib/std/Build/Step/InstallArtifact.zig | 3
-rw-r--r--  lib/std/Build/Step/InstallDir.zig | 2
-rw-r--r--  lib/std/Build/Step/Run.zig | 20
-rw-r--r--  lib/std/Build/Step/WriteFile.zig | 10
-rw-r--r--  lib/std/Build/Watch/FsEvents.zig | 9
-rw-r--r--  lib/std/Build/WebServer.zig | 7
-rw-r--r--  lib/std/Io/Dir.zig | 41
-rw-r--r--  lib/std/Io/Writer.zig | 6
-rw-r--r--  lib/std/Io/net/test.zig | 10
-rw-r--r--  lib/std/Io/test.zig | 16
-rw-r--r--  lib/std/Thread.zig | 7
-rw-r--r--  lib/std/crypto/Certificate/Bundle.zig | 6
-rw-r--r--  lib/std/crypto/codecs/asn1/test.zig | 2
-rw-r--r--  lib/std/debug.zig | 8
-rw-r--r--  lib/std/debug/ElfFile.zig | 26
-rw-r--r--  lib/std/debug/Info.zig | 21
-rw-r--r--  lib/std/debug/MachOFile.zig | 18
-rw-r--r--  lib/std/debug/SelfInfo/Elf.zig | 10
-rw-r--r--  lib/std/debug/SelfInfo/MachO.zig | 4
-rw-r--r--  lib/std/debug/SelfInfo/Windows.zig | 6
-rw-r--r--  lib/std/dynamic_library.zig | 34
-rw-r--r--  lib/std/fs.zig | 4
-rw-r--r--  lib/std/fs/test.zig | 270
-rw-r--r--  lib/std/http/Client.zig | 4
-rw-r--r--  lib/std/os/linux/IoUring.zig | 85
-rw-r--r--  lib/std/os/linux/test.zig | 12
-rw-r--r--  lib/std/posix/test.zig | 54
-rw-r--r--  lib/std/process.zig | 12
-rw-r--r--  lib/std/process/Child.zig | 43
-rw-r--r--  lib/std/tar.zig | 71
-rw-r--r--  lib/std/tar/test.zig | 8
-rw-r--r--  lib/std/testing.zig | 6
-rw-r--r--  lib/std/zig/LibCInstallation.zig | 71
-rw-r--r--  lib/std/zig/WindowsSdk.zig | 67
-rw-r--r--  lib/std/zig/llvm/Builder.zig | 19
-rw-r--r--  lib/std/zig/system.zig | 4
-rw-r--r--  lib/std/zig/system/linux.zig | 2
-rw-r--r--  lib/std/zip.zig | 6
-rw-r--r--  src/Compilation.zig | 64
-rw-r--r--  src/Package/Fetch.zig | 72
-rw-r--r--  src/Package/Fetch/git.zig | 26
-rw-r--r--  src/Zcu.zig | 10
-rw-r--r--  src/Zcu/PerThread.zig | 6
-rw-r--r--  src/codegen/llvm.zig | 5
-rw-r--r--  src/fmt.zig | 10
-rw-r--r--  src/introspect.zig | 28
-rw-r--r--  src/libs/freebsd.zig | 4
-rw-r--r--  src/libs/glibc.zig | 4
-rw-r--r--  src/libs/mingw.zig | 6
-rw-r--r--  src/libs/netbsd.zig | 4
-rw-r--r--  src/link.zig | 83
-rw-r--r--  src/link/C.zig | 6
-rw-r--r--  src/link/Elf.zig | 6
-rw-r--r--  src/link/Lld.zig | 2
-rw-r--r--  src/link/MachO.zig | 16
-rw-r--r--  src/link/MachO/DebugSymbols.zig | 50
-rw-r--r--  src/link/Wasm.zig | 4
-rw-r--r--  src/main.zig | 135
-rw-r--r--  src/print_targets.zig | 3
75 files changed, 1014 insertions, 707 deletions
diff --git a/build.zig b/build.zig
index ade4825927..7ea515de3e 100644
--- a/build.zig
+++ b/build.zig
@@ -1604,12 +1604,12 @@ fn generateLangRef(b: *std.Build) std.Build.LazyPath {
b.build_root, @errorName(err),
});
};
- defer dir.close();
+ defer dir.close(io);
var wf = b.addWriteFiles();
var it = dir.iterateAssumeFirstIteration();
- while (it.next() catch @panic("failed to read dir")) |entry| {
+ while (it.next(io) catch @panic("failed to read dir")) |entry| {
if (std.mem.startsWith(u8, entry.name, ".") or entry.kind != .file)
continue;
diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig
index d5f4ebe2d9..9fb8123146 100644
--- a/lib/compiler/aro/aro/Compilation.zig
+++ b/lib/compiler/aro/aro/Compilation.zig
@@ -1639,8 +1639,10 @@ fn addSourceFromPathExtra(comp: *Compilation, path: []const u8, kind: Source.Kin
return error.FileNotFound;
}
+ const io = comp.io;
+
const file = try comp.cwd.openFile(path, .{});
- defer file.close();
+ defer file.close(io);
return comp.addSourceFromFile(file, path, kind);
}
@@ -1971,8 +1973,10 @@ fn getPathContents(comp: *Compilation, path: []const u8, limit: Io.Limit) ![]u8
return error.FileNotFound;
}
+ const io = comp.io;
+
const file = try comp.cwd.openFile(path, .{});
- defer file.close();
+ defer file.close(io);
return comp.getFileContents(file, limit);
}
diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig
index 6a399ece39..888ade2be4 100644
--- a/lib/compiler/aro/aro/Driver.zig
+++ b/lib/compiler/aro/aro/Driver.zig
@@ -1286,6 +1286,8 @@ fn processSource(
d.comp.generated_buf.items.len = 0;
const prev_total = d.diagnostics.errors;
+ const io = d.comp.io;
+
var pp = try Preprocessor.initDefault(d.comp);
defer pp.deinit();
@@ -1328,7 +1330,7 @@ fn processSource(
return d.fatal("unable to create dependency file '{s}': {s}", .{ path, errorDescription(er) })
else
std.fs.File.stdout();
- defer if (dep_file_name != null) file.close();
+ defer if (dep_file_name != null) file.close(io);
var file_writer = file.writer(&writer_buf);
dep_file.write(&file_writer.interface) catch
@@ -1353,7 +1355,7 @@ fn processSource(
return d.fatal("unable to create output file '{s}': {s}", .{ some, errorDescription(er) })
else
std.fs.File.stdout();
- defer if (d.output_name != null) file.close();
+ defer if (d.output_name != null) file.close(io);
var file_writer = file.writer(&writer_buf);
pp.prettyPrintTokens(&file_writer.interface, dump_mode) catch
@@ -1404,7 +1406,7 @@ fn processSource(
if (d.only_preprocess_and_compile) {
const out_file = d.comp.cwd.createFile(out_file_name, .{}) catch |er|
return d.fatal("unable to create output file '{s}': {s}", .{ out_file_name, errorDescription(er) });
- defer out_file.close();
+ defer out_file.close(io);
assembly.writeToFile(out_file) catch |er|
return d.fatal("unable to write to output file '{s}': {s}", .{ out_file_name, errorDescription(er) });
@@ -1418,7 +1420,7 @@ fn processSource(
const assembly_out_file_name = try d.getRandomFilename(&assembly_name_buf, ".s");
const out_file = d.comp.cwd.createFile(assembly_out_file_name, .{}) catch |er|
return d.fatal("unable to create output file '{s}': {s}", .{ assembly_out_file_name, errorDescription(er) });
- defer out_file.close();
+ defer out_file.close(io);
assembly.writeToFile(out_file) catch |er|
return d.fatal("unable to write to output file '{s}': {s}", .{ assembly_out_file_name, errorDescription(er) });
try d.invokeAssembler(tc, assembly_out_file_name, out_file_name);
@@ -1454,7 +1456,7 @@ fn processSource(
const out_file = d.comp.cwd.createFile(out_file_name, .{}) catch |er|
return d.fatal("unable to create output file '{s}': {s}", .{ out_file_name, errorDescription(er) });
- defer out_file.close();
+ defer out_file.close(io);
var file_writer = out_file.writer(&writer_buf);
obj.finish(&file_writer.interface) catch
diff --git a/lib/compiler/aro/aro/Driver/Filesystem.zig b/lib/compiler/aro/aro/Driver/Filesystem.zig
index 87092cb235..19ac9bfe41 100644
--- a/lib/compiler/aro/aro/Driver/Filesystem.zig
+++ b/lib/compiler/aro/aro/Driver/Filesystem.zig
@@ -1,8 +1,10 @@
-const std = @import("std");
-const mem = std.mem;
const builtin = @import("builtin");
const is_windows = builtin.os.tag == .windows;
+const std = @import("std");
+const Io = std.Io;
+const mem = std.mem;
+
fn readFileFake(entries: []const Filesystem.Entry, path: []const u8, buf: []u8) ?[]const u8 {
@branchHint(.cold);
for (entries) |entry| {
@@ -96,7 +98,7 @@ fn findProgramByNamePosix(name: []const u8, path: ?[]const u8, buf: []u8) ?[]con
}
pub const Filesystem = union(enum) {
- real: std.fs.Dir,
+ real: std.Io.Dir,
fake: []const Entry,
const Entry = struct {
@@ -121,7 +123,7 @@ pub const Filesystem = union(enum) {
base: []const u8,
i: usize = 0,
- fn next(self: *@This()) !?std.fs.Dir.Entry {
+ fn next(self: *@This()) !?std.Io.Dir.Entry {
while (self.i < self.entries.len) {
const entry = self.entries[self.i];
self.i += 1;
@@ -130,7 +132,7 @@ pub const Filesystem = union(enum) {
const remaining = entry.path[self.base.len + 1 ..];
if (std.mem.indexOfScalar(u8, remaining, std.fs.path.sep) != null) continue;
const extension = std.fs.path.extension(remaining);
- const kind: std.fs.Dir.Entry.Kind = if (extension.len == 0) .directory else .file;
+ const kind: std.Io.Dir.Entry.Kind = if (extension.len == 0) .directory else .file;
return .{ .name = remaining, .kind = kind };
}
}
@@ -140,7 +142,7 @@ pub const Filesystem = union(enum) {
};
const Dir = union(enum) {
- dir: std.fs.Dir,
+ dir: std.Io.Dir,
fake: FakeDir,
pub fn iterate(self: Dir) Iterator {
@@ -150,19 +152,19 @@ pub const Filesystem = union(enum) {
};
}
- pub fn close(self: *Dir) void {
+ pub fn close(self: *Dir, io: Io) void {
switch (self.*) {
- .dir => |*d| d.close(),
+ .dir => |*d| d.close(io),
.fake => {},
}
}
};
const Iterator = union(enum) {
- iterator: std.fs.Dir.Iterator,
+ iterator: std.Io.Dir.Iterator,
fake: FakeDir.Iterator,
- pub fn next(self: *Iterator) std.fs.Dir.Iterator.Error!?std.fs.Dir.Entry {
+ pub fn next(self: *Iterator) std.Io.Dir.Iterator.Error!?std.Io.Dir.Entry {
return switch (self.*) {
.iterator => |*it| it.next(),
.fake => |*it| it.next(),
@@ -208,11 +210,11 @@ pub const Filesystem = union(enum) {
/// Read the file at `path` into `buf`.
/// Returns null if any errors are encountered
/// Otherwise returns a slice of `buf`. If the file is larger than `buf` partial contents are returned
- pub fn readFile(fs: Filesystem, path: []const u8, buf: []u8) ?[]const u8 {
+ pub fn readFile(fs: Filesystem, io: Io, path: []const u8, buf: []u8) ?[]const u8 {
return switch (fs) {
.real => |cwd| {
const file = cwd.openFile(path, .{}) catch return null;
- defer file.close();
+ defer file.close(io);
const bytes_read = file.readAll(buf) catch return null;
return buf[0..bytes_read];
@@ -221,7 +223,7 @@ pub const Filesystem = union(enum) {
};
}
- pub fn openDir(fs: Filesystem, dir_name: []const u8) std.fs.Dir.OpenError!Dir {
+ pub fn openDir(fs: Filesystem, dir_name: []const u8) std.Io.Dir.OpenError!Dir {
return switch (fs) {
.real => |cwd| .{ .dir = try cwd.openDir(dir_name, .{ .access_sub_paths = false, .iterate = true }) },
.fake => |entries| .{ .fake = .{ .entries = entries, .path = dir_name } },
diff --git a/lib/compiler/aro/aro/Toolchain.zig b/lib/compiler/aro/aro/Toolchain.zig
index 326278cc38..ae84369205 100644
--- a/lib/compiler/aro/aro/Toolchain.zig
+++ b/lib/compiler/aro/aro/Toolchain.zig
@@ -497,6 +497,7 @@ pub fn addBuiltinIncludeDir(tc: *const Toolchain) !void {
const comp = d.comp;
const gpa = comp.gpa;
const arena = comp.arena;
+ const io = comp.io;
try d.includes.ensureUnusedCapacity(gpa, 1);
if (d.resource_dir) |resource_dir| {
const path = try std.fs.path.join(arena, &.{ resource_dir, "include" });
@@ -509,7 +510,7 @@ pub fn addBuiltinIncludeDir(tc: *const Toolchain) !void {
var search_path = d.aro_name;
while (std.fs.path.dirname(search_path)) |dirname| : (search_path = dirname) {
var base_dir = d.comp.cwd.openDir(dirname, .{}) catch continue;
- defer base_dir.close();
+ defer base_dir.close(io);
base_dir.access("include/stddef.h", .{}) catch continue;
const path = try std.fs.path.join(arena, &.{ dirname, "include" });
diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig
index 7cf0f14e42..1608c121b1 100644
--- a/lib/compiler/objcopy.zig
+++ b/lib/compiler/objcopy.zig
@@ -152,7 +152,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
const io = threaded.io();
const input_file = fs.cwd().openFile(input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err });
- defer input_file.close();
+ defer input_file.close(io);
const stat = input_file.stat() catch |err| fatal("failed to stat {s}: {t}", .{ input, err });
@@ -180,7 +180,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
const mode = if (out_fmt != .elf or only_keep_debug) fs.File.default_mode else stat.mode;
var output_file = try fs.cwd().createFile(output, .{ .mode = mode });
- defer output_file.close();
+ defer output_file.close(io);
var out = output_file.writer(&output_buffer);
diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig
index 59568e9cef..ffaa62e7ca 100644
--- a/lib/compiler/resinator/cli.zig
+++ b/lib/compiler/resinator/cli.zig
@@ -1991,6 +1991,8 @@ test "parse: input and output formats" {
}
test "maybeAppendRC" {
+ const io = std.testing.io;
+
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
@@ -2001,7 +2003,7 @@ test "maybeAppendRC" {
// Create the file so that it's found. In this scenario, .rc should not get
// appended.
var file = try tmp.dir.createFile("foo", .{});
- file.close();
+ file.close(io);
try options.maybeAppendRC(tmp.dir);
try std.testing.expectEqualStrings("foo", options.input_source.filename);
diff --git a/lib/compiler/resinator/compile.zig b/lib/compiler/resinator/compile.zig
index 08e161e505..7dc77e5ee1 100644
--- a/lib/compiler/resinator/compile.zig
+++ b/lib/compiler/resinator/compile.zig
@@ -34,7 +34,7 @@ const code_pages = @import("code_pages.zig");
const errors = @import("errors.zig");
pub const CompileOptions = struct {
- cwd: std.fs.Dir,
+ cwd: std.Io.Dir,
diagnostics: *Diagnostics,
source_mappings: ?*SourceMappings = null,
/// List of paths (absolute or relative to `cwd`) for every file that the resources within the .rc file depend on.
@@ -107,7 +107,7 @@ pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io
// the cwd so we don't need to add it as a distinct search path.
if (std.fs.path.dirname(root_path)) |root_dir_path| {
var root_dir = try options.cwd.openDir(root_dir_path, .{});
- errdefer root_dir.close();
+ errdefer root_dir.close(io);
try search_dirs.append(allocator, .{ .dir = root_dir, .path = try allocator.dupe(u8, root_dir_path) });
}
}
@@ -136,7 +136,7 @@ pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io
// TODO: maybe a warning that the search path is skipped?
continue;
};
- errdefer dir.close();
+ errdefer dir.close(io);
try search_dirs.append(allocator, .{ .dir = dir, .path = try allocator.dupe(u8, extra_include_path) });
}
for (options.system_include_paths) |system_include_path| {
@@ -144,7 +144,7 @@ pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io
// TODO: maybe a warning that the search path is skipped?
continue;
};
- errdefer dir.close();
+ errdefer dir.close(io);
try search_dirs.append(allocator, .{ .dir = dir, .path = try allocator.dupe(u8, system_include_path) });
}
if (!options.ignore_include_env_var) {
@@ -160,7 +160,7 @@ pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io
var it = std.mem.tokenizeScalar(u8, INCLUDE, delimiter);
while (it.next()) |search_path| {
var dir = openSearchPathDir(options.cwd, search_path) catch continue;
- errdefer dir.close();
+ errdefer dir.close(io);
try search_dirs.append(allocator, .{ .dir = dir, .path = try allocator.dupe(u8, search_path) });
}
}
@@ -196,7 +196,7 @@ pub const Compiler = struct {
arena: Allocator,
allocator: Allocator,
io: Io,
- cwd: std.fs.Dir,
+ cwd: std.Io.Dir,
state: State = .{},
diagnostics: *Diagnostics,
dependencies: ?*Dependencies,
@@ -388,7 +388,9 @@ pub const Compiler = struct {
/// matching file is invalid. That is, it does not do the `cmd` PATH searching
/// thing of continuing to look for matching files until it finds a valid
/// one if a matching file is invalid.
- fn searchForFile(self: *Compiler, path: []const u8) !std.fs.File {
+ fn searchForFile(self: *Compiler, path: []const u8) !std.Io.File {
+ const io = self.io;
+
// If the path is absolute, then it is not resolved relative to any search
// paths, so there's no point in checking them.
//
@@ -405,7 +407,7 @@ pub const Compiler = struct {
// an absolute path.
if (std.fs.path.isAbsolute(path)) {
const file = try utils.openFileNotDir(std.fs.cwd(), path, .{});
- errdefer file.close();
+ errdefer file.close(io);
if (self.dependencies) |dependencies| {
const duped_path = try dependencies.allocator.dupe(u8, path);
@@ -414,10 +416,10 @@ pub const Compiler = struct {
}
}
- var first_error: ?(std.fs.File.OpenError || std.fs.File.StatError) = null;
+ var first_error: ?(std.Io.File.OpenError || std.Io.File.StatError) = null;
for (self.search_dirs) |search_dir| {
if (utils.openFileNotDir(search_dir.dir, path, .{})) |file| {
- errdefer file.close();
+ errdefer file.close(io);
if (self.dependencies) |dependencies| {
const searched_file_path = try std.fs.path.join(dependencies.allocator, &.{
@@ -587,7 +589,7 @@ pub const Compiler = struct {
});
},
};
- defer file_handle.close();
+ defer file_handle.close(io);
var file_buffer: [2048]u8 = undefined;
var file_reader = file_handle.reader(io, &file_buffer);
@@ -2892,9 +2894,9 @@ pub const Compiler = struct {
}
};
-pub const OpenSearchPathError = std.fs.Dir.OpenError;
+pub const OpenSearchPathError = std.Io.Dir.OpenError;
-fn openSearchPathDir(dir: std.fs.Dir, path: []const u8) OpenSearchPathError!std.fs.Dir {
+fn openSearchPathDir(dir: std.Io.Dir, path: []const u8) OpenSearchPathError!std.Io.Dir {
// Validate the search path to avoid possible unreachable on invalid paths,
// see https://github.com/ziglang/zig/issues/15607 for why this is currently necessary.
try validateSearchPath(path);
@@ -2927,11 +2929,11 @@ fn validateSearchPath(path: []const u8) error{BadPathName}!void {
}
pub const SearchDir = struct {
- dir: std.fs.Dir,
+ dir: std.Io.Dir,
path: ?[]const u8,
- pub fn deinit(self: *SearchDir, allocator: Allocator) void {
- self.dir.close();
+ pub fn deinit(self: *SearchDir, allocator: Allocator, io: Io) void {
+ self.dir.close(io);
if (self.path) |path| {
allocator.free(path);
}
diff --git a/lib/compiler/resinator/errors.zig b/lib/compiler/resinator/errors.zig
index 0060990ab6..8509aa610f 100644
--- a/lib/compiler/resinator/errors.zig
+++ b/lib/compiler/resinator/errors.zig
@@ -1221,8 +1221,8 @@ const CorrespondingLines = struct {
};
}
- pub fn deinit(self: *CorrespondingLines) void {
- self.file.close();
+ pub fn deinit(self: *CorrespondingLines, io: Io) void {
+ self.file.close(io);
}
};
diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig
index 6d6819f45a..42308a8987 100644
--- a/lib/compiler/resinator/main.zig
+++ b/lib/compiler/resinator/main.zig
@@ -296,7 +296,7 @@ pub fn main() !void {
error.ParseError, error.CompileError => {
try error_handler.emitDiagnostics(gpa, std.fs.cwd(), final_input, &diagnostics, mapping_results.mappings);
// Delete the output file on error
- res_stream.cleanupAfterError();
+ res_stream.cleanupAfterError(io);
std.process.exit(1);
},
else => |e| return e,
@@ -315,7 +315,7 @@ pub fn main() !void {
try error_handler.emitMessage(gpa, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) });
std.process.exit(1);
};
- defer depfile.close();
+ defer depfile.close(io);
var depfile_buffer: [1024]u8 = undefined;
var depfile_writer = depfile.writer(&depfile_buffer);
@@ -402,7 +402,7 @@ pub fn main() !void {
},
}
// Delete the output file on error
- coff_stream.cleanupAfterError();
+ coff_stream.cleanupAfterError(io);
std.process.exit(1);
};
@@ -434,11 +434,11 @@ const IoStream = struct {
self.source.deinit(allocator);
}
- pub fn cleanupAfterError(self: *IoStream) void {
+ pub fn cleanupAfterError(self: *IoStream, io: Io) void {
switch (self.source) {
.file => |file| {
// Delete the output file on error
- file.close();
+ file.close(io);
// Failing to delete is not really a big deal, so swallow any errors
std.fs.cwd().deleteFile(self.name) catch {};
},
@@ -465,9 +465,9 @@ const IoStream = struct {
}
}
- pub fn deinit(self: *Source, allocator: Allocator) void {
+ pub fn deinit(self: *Source, allocator: Allocator, io: Io) void {
switch (self.*) {
- .file => |file| file.close(),
+ .file => |file| file.close(io),
.stdio => {},
.memory => |*list| list.deinit(allocator),
.closed => {},
diff --git a/lib/compiler/resinator/utils.zig b/lib/compiler/resinator/utils.zig
index 021b8cf4de..f8080539cb 100644
--- a/lib/compiler/resinator/utils.zig
+++ b/lib/compiler/resinator/utils.zig
@@ -1,6 +1,8 @@
-const std = @import("std");
const builtin = @import("builtin");
+const std = @import("std");
+const Io = std.Io;
+
pub const UncheckedSliceWriter = struct {
const Self = @This();
@@ -28,11 +30,12 @@ pub const UncheckedSliceWriter = struct {
/// TODO: Remove once https://github.com/ziglang/zig/issues/5732 is addressed.
pub fn openFileNotDir(
cwd: std.fs.Dir,
+ io: Io,
path: []const u8,
flags: std.fs.File.OpenFlags,
) (std.fs.File.OpenError || std.fs.File.StatError)!std.fs.File {
- const file = try cwd.openFile(path, flags);
- errdefer file.close();
+ const file = try cwd.openFile(io, path, flags);
+ errdefer file.close(io);
// https://github.com/ziglang/zig/issues/5732
if (builtin.os.tag != .windows) {
const stat = try file.stat();
diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig
index 12825146a2..15c17ee59b 100644
--- a/lib/compiler/std-docs.zig
+++ b/lib/compiler/std-docs.zig
@@ -1,12 +1,14 @@
const builtin = @import("builtin");
+
const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Cache = std.Build.Cache;
fn usage() noreturn {
- std.fs.File.stdout().writeAll(
+ std.Io.File.stdout().writeAll(
\\Usage: zig std [options]
\\
\\Options:
@@ -27,6 +29,10 @@ pub fn main() !void {
var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
const gpa = general_purpose_allocator.allocator();
+ var threaded: std.Io.Threaded = .init(gpa);
+ defer threaded.deinit();
+ const io = threaded.io();
+
var argv = try std.process.argsWithAllocator(arena);
defer argv.deinit();
assert(argv.skip());
@@ -35,7 +41,7 @@ pub fn main() !void {
const global_cache_path = argv.next().?;
var lib_dir = try std.fs.cwd().openDir(zig_lib_directory, .{});
- defer lib_dir.close();
+ defer lib_dir.close(io);
var listen_port: u16 = 0;
var force_open_browser: ?bool = null;
@@ -64,7 +70,7 @@ pub fn main() !void {
});
const port = http_server.listen_address.in.getPort();
const url_with_newline = try std.fmt.allocPrint(arena, "http://127.0.0.1:{d}/\n", .{port});
- std.fs.File.stdout().writeAll(url_with_newline) catch {};
+ std.Io.File.stdout().writeAll(url_with_newline) catch {};
if (should_open_browser) {
openBrowserTab(gpa, url_with_newline[0 .. url_with_newline.len - 1 :'\n']) catch |err| {
std.log.err("unable to open browser: {s}", .{@errorName(err)});
@@ -73,6 +79,7 @@ pub fn main() !void {
var context: Context = .{
.gpa = gpa,
+ .io = io,
.zig_exe_path = zig_exe_path,
.global_cache_path = global_cache_path,
.lib_dir = lib_dir,
@@ -83,14 +90,15 @@ pub fn main() !void {
const connection = try http_server.accept();
_ = std.Thread.spawn(.{}, accept, .{ &context, connection }) catch |err| {
std.log.err("unable to accept connection: {s}", .{@errorName(err)});
- connection.stream.close();
+ connection.stream.close(io);
continue;
};
}
}
fn accept(context: *Context, connection: std.net.Server.Connection) void {
- defer connection.stream.close();
+ const io = context.io;
+ defer connection.stream.close(io);
var recv_buffer: [4000]u8 = undefined;
var send_buffer: [4000]u8 = undefined;
@@ -124,6 +132,7 @@ fn accept(context: *Context, connection: std.net.Server.Connection) void {
const Context = struct {
gpa: Allocator,
+ io: Io,
lib_dir: std.fs.Dir,
zig_lib_directory: []const u8,
zig_exe_path: []const u8,
@@ -185,6 +194,7 @@ fn serveDocsFile(
fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void {
const gpa = context.gpa;
+ const io = context.io;
var send_buffer: [0x4000]u8 = undefined;
var response = try request.respondStreaming(&send_buffer, .{
@@ -197,7 +207,7 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void {
});
var std_dir = try context.lib_dir.openDir("std", .{ .iterate = true });
- defer std_dir.close();
+ defer std_dir.close(io);
var walker = try std_dir.walk(gpa);
defer walker.deinit();
@@ -216,11 +226,11 @@ fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void {
else => continue,
}
var file = try entry.dir.openFile(entry.basename, .{});
- defer file.close();
+ defer file.close(io);
const stat = try file.stat();
- var file_reader: std.fs.File.Reader = .{
+ var file_reader: std.Io.File.Reader = .{
.file = file,
- .interface = std.fs.File.Reader.initInterface(&.{}),
+ .interface = std.Io.File.Reader.initInterface(&.{}),
.size = stat.size,
};
try archiver.writeFile(entry.path, &file_reader, stat.mtime);
@@ -283,6 +293,7 @@ fn buildWasmBinary(
optimize_mode: std.builtin.OptimizeMode,
) !Cache.Path {
const gpa = context.gpa;
+ const io = context.io;
var argv: std.ArrayList([]const u8) = .empty;
@@ -371,7 +382,7 @@ fn buildWasmBinary(
}
// Send EOF to stdin.
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
switch (try child.wait()) {
@@ -410,7 +421,7 @@ fn buildWasmBinary(
};
}
-fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
+fn sendMessage(file: std.Io.File, tag: std.zig.Client.Message.Tag) !void {
const header: std.zig.Client.Message.Header = .{
.tag = tag,
.bytes_len = 0,
diff --git a/lib/compiler/translate-c/main.zig b/lib/compiler/translate-c/main.zig
index b0d7a5d9bd..0c72298b30 100644
--- a/lib/compiler/translate-c/main.zig
+++ b/lib/compiler/translate-c/main.zig
@@ -121,6 +121,7 @@ pub const usage =
fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration: bool) !void {
const gpa = d.comp.gpa;
+ const io = d.comp.io;
const aro_args = args: {
var i: usize = 0;
@@ -228,7 +229,7 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration
return d.fatal("unable to create dependency file '{s}': {s}", .{ path, aro.Driver.errorDescription(er) })
else
std.fs.File.stdout();
- defer if (dep_file_name != null) file.close();
+ defer if (dep_file_name != null) file.close(io);
var file_writer = file.writer(&out_buf);
dep_file.write(&file_writer.interface) catch
@@ -246,7 +247,7 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration
var close_out_file = false;
var out_file_path: []const u8 = "<stdout>";
var out_file: std.fs.File = .stdout();
- defer if (close_out_file) out_file.close();
+ defer if (close_out_file) out_file.close(io);
if (d.output_name) |path| blk: {
if (std.mem.eql(u8, path, "-")) break :blk;
diff --git a/lib/std/Build/Cache/Directory.zig b/lib/std/Build/Cache/Directory.zig
index 305ef25361..ce5f5b02bb 100644
--- a/lib/std/Build/Cache/Directory.zig
+++ b/lib/std/Build/Cache/Directory.zig
@@ -52,8 +52,8 @@ pub fn joinZ(self: Directory, allocator: Allocator, paths: []const []const u8) !
/// Whether or not the handle should be closed, or the path should be freed
/// is determined by usage, however this function is provided for convenience
/// if it happens to be what the caller needs.
-pub fn closeAndFree(self: *Directory, gpa: Allocator) void {
- self.handle.close();
+pub fn closeAndFree(self: *Directory, gpa: Allocator, io: Io) void {
+ self.handle.close(io);
if (self.path) |p| gpa.free(p);
self.* = undefined;
}
diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig
index 2897b29969..db83f393fd 100644
--- a/lib/std/Build/Fuzz.zig
+++ b/lib/std/Build/Fuzz.zig
@@ -411,7 +411,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
});
return error.AlreadyReported;
};
- defer coverage_file.close();
+ defer coverage_file.close(io);
const file_size = coverage_file.getEndPos() catch |err| {
log.err("unable to check len of coverage file '{f}': {t}", .{ coverage_file_path, err });
@@ -533,7 +533,7 @@ pub fn waitAndPrintReport(fuzz: *Fuzz) void {
cov.run.step.name, coverage_file_path, err,
});
};
- defer coverage_file.close();
+ defer coverage_file.close(io);
const fuzz_abi = std.Build.abi.fuzz;
var rbuf: [0x1000]u8 = undefined;
diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index 33fe755c2b..acde47071d 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -441,6 +441,7 @@ pub fn evalZigProcess(
assert(argv.len != 0);
const b = s.owner;
const arena = b.allocator;
+ const io = b.graph.io;
try handleChildProcUnsupported(s);
try handleVerbose(s.owner, null, argv);
@@ -474,7 +475,7 @@ pub fn evalZigProcess(
if (!watch) {
// Send EOF to stdin.
- zp.child.stdin.?.close();
+ zp.child.stdin.?.close(io);
zp.child.stdin = null;
const term = zp.child.wait() catch |err| {
diff --git a/lib/std/Build/Step/InstallArtifact.zig b/lib/std/Build/Step/InstallArtifact.zig
index c203ae924b..1cdb232770 100644
--- a/lib/std/Build/Step/InstallArtifact.zig
+++ b/lib/std/Build/Step/InstallArtifact.zig
@@ -119,6 +119,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
_ = options;
const install_artifact: *InstallArtifact = @fieldParentPtr("step", step);
const b = step.owner;
+ const io = b.graph.io;
var all_cached = true;
@@ -168,7 +169,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
src_dir_path, @errorName(err),
});
};
- defer src_dir.close();
+ defer src_dir.close(io);
var it = try src_dir.walk(b.allocator);
next_entry: while (try it.next()) |entry| {
diff --git a/lib/std/Build/Step/InstallDir.zig b/lib/std/Build/Step/InstallDir.zig
index ecb0959cc7..788d5565a7 100644
--- a/lib/std/Build/Step/InstallDir.zig
+++ b/lib/std/Build/Step/InstallDir.zig
@@ -68,7 +68,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
var src_dir = src_dir_path.root_dir.handle.openDir(io, src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| {
return step.fail("unable to open source directory '{f}': {t}", .{ src_dir_path, err });
};
- defer src_dir.close();
+ defer src_dir.close(io);
var it = try src_dir.walk(arena);
var all_cached = true;
next_entry: while (try it.next()) |entry| {
diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig
index 28c09e1faf..e66e30cc79 100644
--- a/lib/std/Build/Step/Run.zig
+++ b/lib/std/Build/Step/Run.zig
@@ -851,7 +851,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
.{ file_path, err },
);
};
- defer file.close();
+ defer file.close(io);
var buf: [1024]u8 = undefined;
var file_reader = file.reader(io, &buf);
@@ -1111,7 +1111,7 @@ pub fn rerunInFuzzMode(
result.writer.writeAll(file_plp.prefix) catch return error.OutOfMemory;
const file = try file_path.root_dir.handle.openFile(file_path.subPathOrDot(), .{});
- defer file.close();
+ defer file.close(io);
var buf: [1024]u8 = undefined;
var file_reader = file.reader(io, &buf);
@@ -1671,8 +1671,10 @@ fn evalZigTest(
options: Step.MakeOptions,
fuzz_context: ?FuzzContext,
) !EvalZigTestResult {
- const gpa = run.step.owner.allocator;
- const arena = run.step.owner.allocator;
+ const step_owner = run.step.owner;
+ const gpa = step_owner.allocator;
+ const arena = step_owner.allocator;
+ const io = step_owner.graph.io;
// We will update this every time a child runs.
run.step.result_peak_rss = 0;
@@ -1724,7 +1726,7 @@ fn evalZigTest(
run.step.result_stderr = try arena.dupe(u8, poller.reader(.stderr).buffered());
// Clean up everything and wait for the child to exit.
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
poller.deinit();
child_killed = true;
@@ -1744,7 +1746,7 @@ fn evalZigTest(
poller.reader(.stderr).tossBuffered();
// Clean up everything and wait for the child to exit.
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
poller.deinit();
child_killed = true;
@@ -2177,7 +2179,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
child.stdin.?.writeAll(bytes) catch |err| {
return run.step.fail("unable to write stdin: {s}", .{@errorName(err)});
};
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
},
.lazy_path => |lazy_path| {
@@ -2185,7 +2187,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
const file = path.root_dir.handle.openFile(path.subPathOrDot(), .{}) catch |err| {
return run.step.fail("unable to open stdin file: {s}", .{@errorName(err)});
};
- defer file.close();
+ defer file.close(io);
// TODO https://github.com/ziglang/zig/issues/23955
var read_buffer: [1024]u8 = undefined;
var file_reader = file.reader(io, &read_buffer);
@@ -2204,7 +2206,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
stdin_writer.err.?,
}),
};
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
},
.none => {},
diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig
index 030c7c6811..201b132271 100644
--- a/lib/std/Build/Step/WriteFile.zig
+++ b/lib/std/Build/Step/WriteFile.zig
@@ -206,7 +206,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
}
}
- const open_dir_cache = try arena.alloc(fs.Dir, write_file.directories.items.len);
+ const open_dir_cache = try arena.alloc(Io.Dir, write_file.directories.items.len);
var open_dirs_count: usize = 0;
defer closeDirs(open_dir_cache[0..open_dirs_count]);
@@ -264,7 +264,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
b.cache_root, cache_path, @errorName(err),
});
};
- defer cache_dir.close();
+ defer cache_dir.close(io);
for (write_file.files.items) |file| {
if (fs.path.dirname(file.sub_path)) |dirname| {
@@ -342,6 +342,8 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
try step.writeManifest(&man);
}
-fn closeDirs(dirs: []fs.Dir) void {
- for (dirs) |*d| d.close();
+fn closeDirs(io: Io, dirs: []Io.Dir) void {
+ var group: Io.Group = .init;
+ defer group.wait();
+ for (dirs) |d| group.async(Io.Dir.close, .{ d, io });
}
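Note that the new closeDirs above also switches from a serial loop to std.Io.Group: group.async spawns each Io.Dir.close call and the deferred group.wait blocks until all of them complete, so the cached directory handles are closed concurrently. A minimal caller-side sketch, assuming the surrounding make step already has an io in scope (the earlier hunks in this file use it for cache_dir.close(io)); the slice name mirrors the code above:

    // Concurrently close every directory handle cached during the step.
    closeDirs(io, open_dir_cache[0..open_dirs_count]);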
diff --git a/lib/std/Build/Watch/FsEvents.zig b/lib/std/Build/Watch/FsEvents.zig
index 6131663993..59238c8725 100644
--- a/lib/std/Build/Watch/FsEvents.zig
+++ b/lib/std/Build/Watch/FsEvents.zig
@@ -78,10 +78,10 @@ const ResolvedSymbols = struct {
kCFAllocatorUseContext: *const CFAllocatorRef,
};
-pub fn init() error{ OpenFrameworkFailed, MissingCoreServicesSymbol }!FsEvents {
+pub fn init(io: Io) error{ OpenFrameworkFailed, MissingCoreServicesSymbol }!FsEvents {
var core_services = std.DynLib.open("/System/Library/Frameworks/CoreServices.framework/CoreServices") catch
return error.OpenFrameworkFailed;
- errdefer core_services.close();
+ errdefer core_services.close(io);
var resolved_symbols: ResolvedSymbols = undefined;
inline for (@typeInfo(ResolvedSymbols).@"struct".fields) |f| {
@@ -102,10 +102,10 @@ pub fn init() error{ OpenFrameworkFailed, MissingCoreServicesSymbol }!FsEvents {
};
}
-pub fn deinit(fse: *FsEvents, gpa: Allocator) void {
+pub fn deinit(fse: *FsEvents, gpa: Allocator, io: Io) void {
dispatch_release(fse.waiting_semaphore);
dispatch_release(fse.dispatch_queue);
- fse.core_services.close();
+ fse.core_services.close(io);
gpa.free(fse.watch_roots);
fse.watch_paths.deinit(gpa);
@@ -487,6 +487,7 @@ const FSEventStreamEventFlags = packed struct(u32) {
};
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const watch_log = std.log.scoped(.watch);
diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig
index 2c865a8889..f91075b444 100644
--- a/lib/std/Build/WebServer.zig
+++ b/lib/std/Build/WebServer.zig
@@ -129,6 +129,7 @@ pub fn init(opts: Options) WebServer {
}
pub fn deinit(ws: *WebServer) void {
const gpa = ws.gpa;
+ const io = ws.graph.io;
gpa.free(ws.step_names_trailing);
gpa.free(ws.step_status_bits);
@@ -139,7 +140,7 @@ pub fn deinit(ws: *WebServer) void {
gpa.free(ws.time_report_update_times);
if (ws.serve_thread) |t| {
- if (ws.tcp_server) |*s| s.stream.close();
+ if (ws.tcp_server) |*s| s.stream.close(io);
t.join();
}
if (ws.tcp_server) |*s| s.deinit();
@@ -507,7 +508,7 @@ pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []cons
log.err("failed to open '{f}': {s}", .{ path, @errorName(err) });
continue;
};
- defer file.close();
+ defer file.close(io);
const stat = try file.stat();
var read_buffer: [1024]u8 = undefined;
var file_reader: Io.File.Reader = .initSize(file.adaptToNewApi(), io, &read_buffer, stat.size);
@@ -634,7 +635,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim
}
// Send EOF to stdin.
- child.stdin.?.close();
+ child.stdin.?.close(io);
child.stdin = null;
switch (try child.wait()) {
diff --git a/lib/std/Io/Dir.zig b/lib/std/Io/Dir.zig
index 348950e20a..5a74c8ca72 100644
--- a/lib/std/Io/Dir.zig
+++ b/lib/std/Io/Dir.zig
@@ -131,7 +131,7 @@ pub const SelectiveWalker = struct {
/// After each call to this function, and on deinit(), the memory returned
/// from this function becomes invalid. A copy must be made in order to keep
/// a reference to the path.
- pub fn next(self: *SelectiveWalker) Error!?Walker.Entry {
+ pub fn next(self: *SelectiveWalker, io: Io) Error!?Walker.Entry {
while (self.stack.items.len > 0) {
const top = &self.stack.items[self.stack.items.len - 1];
var dirname_len = top.dirname_len;
@@ -142,7 +142,7 @@ pub const SelectiveWalker = struct {
// likely just fail with the same error.
var item = self.stack.pop().?;
if (self.stack.items.len != 0) {
- item.iter.dir.close();
+ item.iter.dir.close(io);
}
return err;
}) |entry| {
@@ -164,7 +164,7 @@ pub const SelectiveWalker = struct {
} else {
var item = self.stack.pop().?;
if (self.stack.items.len != 0) {
- item.iter.dir.close();
+ item.iter.dir.close(io);
}
}
}
@@ -172,7 +172,7 @@ pub const SelectiveWalker = struct {
}
/// Traverses into the directory, continuing walking one level down.
- pub fn enter(self: *SelectiveWalker, entry: Walker.Entry) !void {
+ pub fn enter(self: *SelectiveWalker, io: Io, entry: Walker.Entry) !void {
if (entry.kind != .directory) {
@branchHint(.cold);
return;
@@ -184,7 +184,7 @@ pub const SelectiveWalker = struct {
else => |e| return e,
}
};
- errdefer new_dir.close();
+ errdefer new_dir.close(io);
try self.stack.append(self.allocator, .{
.iter = new_dir.iterateAssumeFirstIteration(),
@@ -200,11 +200,11 @@ pub const SelectiveWalker = struct {
/// Leaves the current directory, continuing walking one level up.
/// If the current entry is a directory entry, then the "current directory"
/// will pertain to that entry if `enter` is called before `leave`.
- pub fn leave(self: *SelectiveWalker) void {
+ pub fn leave(self: *SelectiveWalker, io: Io) void {
var item = self.stack.pop().?;
if (self.stack.items.len != 0) {
@branchHint(.likely);
- item.iter.dir.close();
+ item.iter.dir.close(io);
}
}
};
@@ -558,7 +558,8 @@ pub fn makeDir(dir: Dir, io: Io, sub_path: []const u8, permissions: Permissions)
pub const MakePathError = MakeError || StatPathError;
-/// Creates parent directories as necessary to ensure `sub_path` exists as a directory.
+/// Creates parent directories with default permissions as necessary to ensure
+/// `sub_path` exists as a directory.
///
/// Returns success if the path already exists and is a directory.
///
@@ -579,8 +580,11 @@ pub const MakePathError = MakeError || StatPathError;
/// - On other platforms, `..` are not resolved before the path is passed to `mkdirat`,
/// meaning a `sub_path` like "first/../second" will create both a `./first`
/// and a `./second` directory.
-pub fn makePath(dir: Dir, io: Io, sub_path: []const u8, permissions: Permissions) MakePathError!void {
- _ = try io.vtable.dirMakePath(io.userdata, dir, sub_path, permissions);
+///
+/// See also:
+/// * `makePathStatus`
+pub fn makePath(dir: Dir, io: Io, sub_path: []const u8) MakePathError!void {
+ _ = try io.vtable.dirMakePath(io.userdata, dir, sub_path, .default_dir);
}
pub const MakePathStatus = enum { existed, created };
@@ -593,6 +597,11 @@ pub fn makePathStatus(dir: Dir, io: Io, sub_path: []const u8, permissions: Permi
pub const MakeOpenPathError = MakeError || OpenError || StatPathError;
+pub const MakeOpenPathOptions = struct {
+ open_options: OpenOptions = .{},
+ permissions: Permissions = .default_dir,
+};
+
/// Performs the equivalent of `makePath` followed by `openDir`, atomically if possible.
///
/// When this operation is canceled, it may leave the file system in a
@@ -601,8 +610,8 @@ pub const MakeOpenPathError = MakeError || OpenError || StatPathError;
/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
-pub fn makeOpenPath(dir: Dir, io: Io, sub_path: []const u8, permissions: Permissions, options: OpenOptions) MakeOpenPathError!Dir {
- return io.vtable.dirMakeOpenPath(io.userdata, dir, sub_path, permissions, options);
+pub fn makeOpenPath(dir: Dir, io: Io, sub_path: []const u8, options: MakeOpenPathOptions) MakeOpenPathError!Dir {
+ return io.vtable.dirMakeOpenPath(io.userdata, dir, sub_path, options.permissions, options.open_options);
}
pub const Stat = File.Stat;
@@ -1266,10 +1275,10 @@ fn deleteTreeMinStackSizeWithKindHint(parent: Dir, io: Io, sub_path: []const u8,
start_over: while (true) {
var dir = (try parent.deleteTreeOpenInitialSubpath(io, sub_path, kind_hint)) orelse return;
var cleanup_dir_parent: ?Dir = null;
- defer if (cleanup_dir_parent) |*d| d.close();
+ defer if (cleanup_dir_parent) |*d| d.close(io);
var cleanup_dir = true;
- defer if (cleanup_dir) dir.close();
+ defer if (cleanup_dir) dir.close(io);
// Valid use of max_path_bytes because dir_name_buf will only
// ever store a single path component that was returned from the
@@ -1315,7 +1324,7 @@ fn deleteTreeMinStackSizeWithKindHint(parent: Dir, io: Io, sub_path: []const u8,
error.Canceled,
=> |e| return e,
};
- if (cleanup_dir_parent) |*d| d.close();
+ if (cleanup_dir_parent) |*d| d.close(io);
cleanup_dir_parent = dir;
dir = new_dir;
const result = dir_name_buf[0..entry.name.len];
@@ -1354,7 +1363,7 @@ fn deleteTreeMinStackSizeWithKindHint(parent: Dir, io: Io, sub_path: []const u8,
}
// Reached the end of the directory entries, which means we successfully deleted all of them.
// Now to remove the directory itself.
- dir.close();
+ dir.close(io);
cleanup_dir = false;
if (cleanup_dir_parent) |d| {
diff --git a/lib/std/Io/Writer.zig b/lib/std/Io/Writer.zig
index f49ef8eb67..5601293cfb 100644
--- a/lib/std/Io/Writer.zig
+++ b/lib/std/Io/Writer.zig
@@ -2835,7 +2835,7 @@ test "discarding sendFile" {
defer tmp_dir.cleanup();
const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
- defer file.close();
+ defer file.close(io);
var r_buffer: [256]u8 = undefined;
var file_writer: std.fs.File.Writer = .init(file, &r_buffer);
try file_writer.interface.writeByte('h');
@@ -2857,7 +2857,7 @@ test "allocating sendFile" {
defer tmp_dir.cleanup();
const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
- defer file.close();
+ defer file.close(io);
var r_buffer: [2]u8 = undefined;
var file_writer: std.fs.File.Writer = .init(file, &r_buffer);
try file_writer.interface.writeAll("abcd");
@@ -2881,7 +2881,7 @@ test sendFileReading {
defer tmp_dir.cleanup();
const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
- defer file.close();
+ defer file.close(io);
var r_buffer: [2]u8 = undefined;
var file_writer: std.fs.File.Writer = .init(file, &r_buffer);
try file_writer.interface.writeAll("abcd");
diff --git a/lib/std/Io/net/test.zig b/lib/std/Io/net/test.zig
index e234a9edde..5818f6c3f7 100644
--- a/lib/std/Io/net/test.zig
+++ b/lib/std/Io/net/test.zig
@@ -232,8 +232,10 @@ test "listen on an in use port" {
fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
+ const io = testing.io;
+
const connection = try net.tcpConnectToHost(allocator, name, port);
- defer connection.close();
+ defer connection.close(io);
var buf: [100]u8 = undefined;
const len = try connection.read(&buf);
@@ -244,8 +246,10 @@ fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyer
fn testClient(addr: net.IpAddress) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
+ const io = testing.io;
+
const socket_file = try net.tcpConnectToAddress(addr);
- defer socket_file.close();
+ defer socket_file.close(io);
var buf: [100]u8 = undefined;
const len = try socket_file.read(&buf);
@@ -330,7 +334,7 @@ test "non-blocking tcp server" {
try testing.expectError(error.WouldBlock, accept_err);
const socket_file = try net.tcpConnectToAddress(server.socket.address);
- defer socket_file.close();
+ defer socket_file.close(io);
var stream = try server.accept(io);
defer stream.close(io);
diff --git a/lib/std/Io/test.zig b/lib/std/Io/test.zig
index f7965ed14e..9ea2d48ee5 100644
--- a/lib/std/Io/test.zig
+++ b/lib/std/Io/test.zig
@@ -28,7 +28,7 @@ test "write a file, read it, then delete it" {
const tmp_file_name = "temp_test_file.txt";
{
var file = try tmp.dir.createFile(tmp_file_name, .{});
- defer file.close();
+ defer file.close(io);
var file_writer = file.writer(&.{});
const st = &file_writer.interface;
@@ -45,7 +45,7 @@ test "write a file, read it, then delete it" {
{
var file = try tmp.dir.openFile(tmp_file_name, .{});
- defer file.close();
+ defer file.close(io);
const file_size = try file.getEndPos();
const expected_file_size: u64 = "begin".len + data.len + "end".len;
@@ -67,9 +67,11 @@ test "File seek ops" {
var tmp = tmpDir(.{});
defer tmp.cleanup();
+ const io = testing.io;
+
const tmp_file_name = "temp_test_file.txt";
var file = try tmp.dir.createFile(tmp_file_name, .{});
- defer file.close();
+ defer file.close(io);
try file.writeAll(&([_]u8{0x55} ** 8192));
@@ -88,12 +90,14 @@ test "File seek ops" {
}
test "setEndPos" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
const tmp_file_name = "temp_test_file.txt";
var file = try tmp.dir.createFile(tmp_file_name, .{});
- defer file.close();
+ defer file.close(io);
// Verify that the file size changes and the file offset is not moved
try expect((try file.getEndPos()) == 0);
@@ -111,12 +115,14 @@ test "setEndPos" {
}
test "updateTimes" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
const tmp_file_name = "just_a_temporary_file.txt";
var file = try tmp.dir.createFile(tmp_file_name, .{ .read = true });
- defer file.close();
+ defer file.close(io);
const stat_old = try file.stat();
// Set atime and mtime to 5s before
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index 35b268b349..9f532c3bec 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -7,6 +7,7 @@ const target = builtin.target;
const native_os = builtin.os.tag;
const std = @import("std.zig");
+const Io = std.Io;
const math = std.math;
const assert = std.debug.assert;
const posix = std.posix;
@@ -176,7 +177,7 @@ pub const SetNameError = error{
InvalidWtf8,
} || posix.PrctlError || posix.WriteError || std.fs.File.OpenError || std.fmt.BufPrintError;
-pub fn setName(self: Thread, name: []const u8) SetNameError!void {
+pub fn setName(self: Thread, io: Io, name: []const u8) SetNameError!void {
if (name.len > max_name_len) return error.NameTooLong;
const name_with_terminator = blk: {
@@ -208,7 +209,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()});
const file = try std.fs.cwd().openFile(path, .{ .mode = .write_only });
- defer file.close();
+ defer file.close(io);
try file.writeAll(name);
return;
@@ -325,7 +326,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
const io = threaded.ioBasic();
const file = try std.fs.cwd().openFile(path, .{});
- defer file.close();
+ defer file.close(io);
var file_reader = file.readerStreaming(io, &.{});
const data_len = file_reader.interface.readSliceShort(buffer_ptr[0 .. max_name_len + 1]) catch |err| switch (err) {
diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig
index 53fb638250..9541e01db5 100644
--- a/lib/std/crypto/Certificate/Bundle.zig
+++ b/lib/std/crypto/Certificate/Bundle.zig
@@ -181,7 +181,7 @@ pub fn addCertsFromDirPath(
sub_dir_path: []const u8,
) AddCertsFromDirPathError!void {
var iterable_dir = try dir.openDir(sub_dir_path, .{ .iterate = true });
- defer iterable_dir.close();
+ defer iterable_dir.close(io);
return addCertsFromDir(cb, gpa, io, iterable_dir);
}
@@ -194,7 +194,7 @@ pub fn addCertsFromDirPathAbsolute(
) AddCertsFromDirPathError!void {
assert(fs.path.isAbsolute(abs_dir_path));
var iterable_dir = try fs.openDirAbsolute(abs_dir_path, .{ .iterate = true });
- defer iterable_dir.close();
+ defer iterable_dir.close(io);
return addCertsFromDir(cb, gpa, io, now, iterable_dir);
}
@@ -222,7 +222,7 @@ pub fn addCertsFromFilePathAbsolute(
abs_file_path: []const u8,
) AddCertsFromFilePathError!void {
var file = try fs.openFileAbsolute(abs_file_path, .{});
- defer file.close();
+ defer file.close(io);
var file_reader = file.reader(io, &.{});
return addCertsFromFile(cb, gpa, &file_reader, now.toSeconds());
}
diff --git a/lib/std/crypto/codecs/asn1/test.zig b/lib/std/crypto/codecs/asn1/test.zig
index fe12cba819..ff854fcbde 100644
--- a/lib/std/crypto/codecs/asn1/test.zig
+++ b/lib/std/crypto/codecs/asn1/test.zig
@@ -75,6 +75,6 @@ test AllTypes {
// Use this to update test file.
// const dir = try std.fs.cwd().openDir("lib/std/crypto/asn1", .{});
// var file = try dir.createFile(path, .{});
- // defer file.close();
+ // defer file.close(io);
// try file.writeAll(buf);
}
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 7c5993dfcf..0cb96ed593 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -1298,7 +1298,7 @@ test printLineFromFile {
}
{
const file = try test_dir.dir.createFile("line_overlaps_page_boundary.zig", .{});
- defer file.close();
+ defer file.close(io);
const path = try fs.path.join(gpa, &.{ test_dir_path, "line_overlaps_page_boundary.zig" });
defer gpa.free(path);
@@ -1317,7 +1317,7 @@ test printLineFromFile {
}
{
const file = try test_dir.dir.createFile("file_ends_on_page_boundary.zig", .{});
- defer file.close();
+ defer file.close(io);
const path = try fs.path.join(gpa, &.{ test_dir_path, "file_ends_on_page_boundary.zig" });
defer gpa.free(path);
@@ -1331,7 +1331,7 @@ test printLineFromFile {
}
{
const file = try test_dir.dir.createFile("very_long_first_line_spanning_multiple_pages.zig", .{});
- defer file.close();
+ defer file.close(io);
const path = try fs.path.join(gpa, &.{ test_dir_path, "very_long_first_line_spanning_multiple_pages.zig" });
defer gpa.free(path);
@@ -1357,7 +1357,7 @@ test printLineFromFile {
}
{
const file = try test_dir.dir.createFile("file_of_newlines.zig", .{});
- defer file.close();
+ defer file.close(io);
const path = try fs.path.join(gpa, &.{ test_dir_path, "file_of_newlines.zig" });
defer gpa.free(path);
diff --git a/lib/std/debug/ElfFile.zig b/lib/std/debug/ElfFile.zig
index e81943ab49..92bcca1bcf 100644
--- a/lib/std/debug/ElfFile.zig
+++ b/lib/std/debug/ElfFile.zig
@@ -1,5 +1,13 @@
//! A helper type for loading an ELF file and collecting its DWARF debug information, unwind
//! information, and symbol table.
+const ElfFile = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const Endian = std.builtin.Endian;
+const Dwarf = std.debug.Dwarf;
+const Allocator = std.mem.Allocator;
+const elf = std.elf;
is_64: bool,
endian: Endian,
@@ -358,10 +366,17 @@ const Section = struct {
const Array = std.enums.EnumArray(Section.Id, ?Section);
};
-fn loadSeparateDebugFile(arena: Allocator, main_loaded: *LoadInnerResult, opt_crc: ?u32, comptime fmt: []const u8, args: anytype) Allocator.Error!?[]align(std.heap.page_size_min) const u8 {
+fn loadSeparateDebugFile(
+ arena: Allocator,
+ io: Io,
+ main_loaded: *LoadInnerResult,
+ opt_crc: ?u32,
+ comptime fmt: []const u8,
+ args: anytype,
+) Allocator.Error!?[]align(std.heap.page_size_min) const u8 {
const path = try std.fmt.allocPrint(arena, fmt, args);
const elf_file = std.fs.cwd().openFile(path, .{}) catch return null;
- defer elf_file.close();
+ defer elf_file.close(io);
const result = loadInner(arena, elf_file, opt_crc) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
@@ -529,10 +544,3 @@ fn loadInner(
.mapped_mem = mapped_mem,
};
}
-
-const std = @import("std");
-const Endian = std.builtin.Endian;
-const Dwarf = std.debug.Dwarf;
-const ElfFile = @This();
-const Allocator = std.mem.Allocator;
-const elf = std.elf;
diff --git a/lib/std/debug/Info.zig b/lib/std/debug/Info.zig
index 921cd36ab8..9268ca0247 100644
--- a/lib/std/debug/Info.zig
+++ b/lib/std/debug/Info.zig
@@ -5,19 +5,18 @@
//! Unlike `std.debug.SelfInfo`, this API does not assume the debug information
//! in question happens to match the host CPU architecture, OS, or other target
//! properties.
+const Info = @This();
const std = @import("../std.zig");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const Path = std.Build.Cache.Path;
const assert = std.debug.assert;
const Coverage = std.debug.Coverage;
const SourceLocation = std.debug.Coverage.SourceLocation;
-
const ElfFile = std.debug.ElfFile;
const MachOFile = std.debug.MachOFile;
-const Info = @This();
-
impl: union(enum) {
elf: ElfFile,
macho: MachOFile,
@@ -25,13 +24,23 @@ impl: union(enum) {
/// Externally managed, outlives this `Info` instance.
coverage: *Coverage,
-pub const LoadError = std.fs.File.OpenError || ElfFile.LoadError || MachOFile.Error || std.debug.Dwarf.ScanError || error{ MissingDebugInfo, UnsupportedDebugInfo };
+pub const LoadError = error{
+ MissingDebugInfo,
+ UnsupportedDebugInfo,
+} || std.fs.File.OpenError || ElfFile.LoadError || MachOFile.Error || std.debug.Dwarf.ScanError;
-pub fn load(gpa: Allocator, path: Path, coverage: *Coverage, format: std.Target.ObjectFormat, arch: std.Target.Cpu.Arch) LoadError!Info {
+pub fn load(
+ gpa: Allocator,
+ io: Io,
+ path: Path,
+ coverage: *Coverage,
+ format: std.Target.ObjectFormat,
+ arch: std.Target.Cpu.Arch,
+) LoadError!Info {
switch (format) {
.elf => {
var file = try path.root_dir.handle.openFile(path.sub_path, .{});
- defer file.close();
+ defer file.close(io);
var elf_file: ElfFile = try .load(gpa, file, null, &.none);
errdefer elf_file.deinit(gpa);
diff --git a/lib/std/debug/MachOFile.zig b/lib/std/debug/MachOFile.zig
index 3be1b1daff..3f0f620a90 100644
--- a/lib/std/debug/MachOFile.zig
+++ b/lib/std/debug/MachOFile.zig
@@ -27,13 +27,13 @@ pub fn deinit(mf: *MachOFile, gpa: Allocator) void {
posix.munmap(mf.mapped_memory);
}
-pub fn load(gpa: Allocator, path: []const u8, arch: std.Target.Cpu.Arch) Error!MachOFile {
+pub fn load(gpa: Allocator, io: Io, path: []const u8, arch: std.Target.Cpu.Arch) Error!MachOFile {
switch (arch) {
.x86_64, .aarch64 => {},
else => unreachable,
}
- const all_mapped_memory = try mapDebugInfoFile(path);
+ const all_mapped_memory = try mapDebugInfoFile(io, path);
errdefer posix.munmap(all_mapped_memory);
// In most cases, the file we just mapped is a Mach-O binary. However, it could be a "universal
@@ -239,7 +239,7 @@ pub fn load(gpa: Allocator, path: []const u8, arch: std.Target.Cpu.Arch) Error!M
.text_vmaddr = text_vmaddr,
};
}
-pub fn getDwarfForAddress(mf: *MachOFile, gpa: Allocator, vaddr: u64) !struct { *Dwarf, u64 } {
+pub fn getDwarfForAddress(mf: *MachOFile, gpa: Allocator, io: Io, vaddr: u64) !struct { *Dwarf, u64 } {
const symbol = Symbol.find(mf.symbols, vaddr) orelse return error.MissingDebugInfo;
if (symbol.ofile == Symbol.unknown_ofile) return error.MissingDebugInfo;
@@ -254,7 +254,7 @@ pub fn getDwarfForAddress(mf: *MachOFile, gpa: Allocator, vaddr: u64) !struct {
const gop = try mf.ofiles.getOrPut(gpa, symbol.ofile);
if (!gop.found_existing) {
const name = mem.sliceTo(mf.strings[symbol.ofile..], 0);
- gop.value_ptr.* = loadOFile(gpa, name);
+ gop.value_ptr.* = loadOFile(gpa, io, name);
}
const of = &(gop.value_ptr.* catch |err| return err);
@@ -356,7 +356,7 @@ test {
_ = Symbol;
}
-fn loadOFile(gpa: Allocator, o_file_name: []const u8) !OFile {
+fn loadOFile(gpa: Allocator, io: Io, o_file_name: []const u8) !OFile {
const all_mapped_memory, const mapped_ofile = map: {
const open_paren = paren: {
if (std.mem.endsWith(u8, o_file_name, ")")) {
@@ -365,7 +365,7 @@ fn loadOFile(gpa: Allocator, o_file_name: []const u8) !OFile {
}
}
// Not an archive, just a normal path to a .o file
- const m = try mapDebugInfoFile(o_file_name);
+ const m = try mapDebugInfoFile(io, o_file_name);
break :map .{ m, m };
};
@@ -373,7 +373,7 @@ fn loadOFile(gpa: Allocator, o_file_name: []const u8) !OFile {
const archive_path = o_file_name[0..open_paren];
const target_name_in_archive = o_file_name[open_paren + 1 .. o_file_name.len - 1];
- const mapped_archive = try mapDebugInfoFile(archive_path);
+ const mapped_archive = try mapDebugInfoFile(io, archive_path);
errdefer posix.munmap(mapped_archive);
var ar_reader: Io.Reader = .fixed(mapped_archive);
@@ -511,12 +511,12 @@ fn loadOFile(gpa: Allocator, o_file_name: []const u8) !OFile {
}
/// Uses `mmap` to map the file at `path` into memory.
-fn mapDebugInfoFile(path: []const u8) ![]align(std.heap.page_size_min) const u8 {
+fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 {
const file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) {
error.FileNotFound => return error.MissingDebugInfo,
else => return error.ReadFailed,
};
- defer file.close();
+ defer file.close(io);
const file_len = std.math.cast(
usize,
diff --git a/lib/std/debug/SelfInfo/Elf.zig b/lib/std/debug/SelfInfo/Elf.zig
index 59c0b42451..155dac6fb8 100644
--- a/lib/std/debug/SelfInfo/Elf.zig
+++ b/lib/std/debug/SelfInfo/Elf.zig
@@ -319,14 +319,14 @@ const Module = struct {
}
/// Assumes we already hold an exclusive lock.
- fn getLoadedElf(mod: *Module, gpa: Allocator) Error!*LoadedElf {
- if (mod.loaded_elf == null) mod.loaded_elf = loadElf(mod, gpa);
+ fn getLoadedElf(mod: *Module, gpa: Allocator, io: Io) Error!*LoadedElf {
+ if (mod.loaded_elf == null) mod.loaded_elf = loadElf(mod, gpa, io);
return if (mod.loaded_elf.?) |*elf| elf else |err| err;
}
- fn loadElf(mod: *Module, gpa: Allocator) Error!LoadedElf {
+ fn loadElf(mod: *Module, gpa: Allocator, io: Io) Error!LoadedElf {
const load_result = if (mod.name.len > 0) res: {
var file = std.fs.cwd().openFile(mod.name, .{}) catch return error.MissingDebugInfo;
- defer file.close();
+ defer file.close(io);
break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(mod.name));
} else res: {
const path = std.fs.selfExePathAlloc(gpa) catch |err| switch (err) {
@@ -335,7 +335,7 @@ const Module = struct {
};
defer gpa.free(path);
var file = std.fs.cwd().openFile(path, .{}) catch return error.MissingDebugInfo;
- defer file.close();
+ defer file.close(io);
break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(path));
};
diff --git a/lib/std/debug/SelfInfo/MachO.zig b/lib/std/debug/SelfInfo/MachO.zig
index dd11b4c8bf..2491cf416c 100644
--- a/lib/std/debug/SelfInfo/MachO.zig
+++ b/lib/std/debug/SelfInfo/MachO.zig
@@ -615,12 +615,12 @@ test {
}
/// Uses `mmap` to map the file at `path` into memory.
-fn mapDebugInfoFile(path: []const u8) ![]align(std.heap.page_size_min) const u8 {
+fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) const u8 {
const file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) {
error.FileNotFound => return error.MissingDebugInfo,
else => return error.ReadFailed,
};
- defer file.close();
+ defer file.close(io);
const file_end_pos = file.getEndPos() catch |err| switch (err) {
error.Unexpected => |e| return e,
diff --git a/lib/std/debug/SelfInfo/Windows.zig b/lib/std/debug/SelfInfo/Windows.zig
index 0e923a0f16..557f3901eb 100644
--- a/lib/std/debug/SelfInfo/Windows.zig
+++ b/lib/std/debug/SelfInfo/Windows.zig
@@ -207,11 +207,11 @@ const Module = struct {
file: fs.File,
section_handle: windows.HANDLE,
section_view: []const u8,
- fn deinit(mf: *const MappedFile) void {
+ fn deinit(mf: *const MappedFile, io: Io) void {
const process_handle = windows.GetCurrentProcess();
assert(windows.ntdll.NtUnmapViewOfSection(process_handle, @constCast(mf.section_view.ptr)) == .SUCCESS);
windows.CloseHandle(mf.section_handle);
- mf.file.close();
+ mf.file.close(io);
}
};
@@ -447,7 +447,7 @@ const Module = struct {
error.FileNotFound, error.IsDir => break :pdb null,
else => return error.ReadFailed,
};
- errdefer pdb_file.close();
+ errdefer pdb_file.close(io);
const pdb_reader = try arena.create(Io.File.Reader);
pdb_reader.* = pdb_file.reader(io, try arena.alloc(u8, 4096));
diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig
index a3490ed7db..c91056b0ab 100644
--- a/lib/std/dynamic_library.zig
+++ b/lib/std/dynamic_library.zig
@@ -1,10 +1,12 @@
-const std = @import("std.zig");
const builtin = @import("builtin");
+const native_os = builtin.os.tag;
+
+const std = @import("std.zig");
+const Io = std.Io;
const mem = std.mem;
const testing = std.testing;
const elf = std.elf;
const windows = std.os.windows;
-const native_os = builtin.os.tag;
const posix = std.posix;
/// Cross-platform dynamic library loading and symbol lookup.
@@ -38,8 +40,8 @@ pub const DynLib = struct {
}
/// Trusts the file.
- pub fn close(self: *DynLib) void {
- return self.inner.close();
+ pub fn close(self: *DynLib, io: Io) void {
+ return self.inner.close(io);
}
pub fn lookup(self: *DynLib, comptime T: type, name: [:0]const u8) ?T {
@@ -155,23 +157,23 @@ pub const ElfDynLib = struct {
dt_gnu_hash: *elf.gnu_hash.Header,
};
- fn openPath(path: []const u8) !std.fs.Dir {
+ fn openPath(path: []const u8, io: Io) !std.fs.Dir {
if (path.len == 0) return error.NotDir;
var parts = std.mem.tokenizeScalar(u8, path, '/');
var parent = if (path[0] == '/') try std.fs.cwd().openDir("/", .{}) else std.fs.cwd();
while (parts.next()) |part| {
const child = try parent.openDir(part, .{});
- parent.close();
+ parent.close(io);
parent = child;
}
return parent;
}
- fn resolveFromSearchPath(search_path: []const u8, file_name: []const u8, delim: u8) ?posix.fd_t {
+ fn resolveFromSearchPath(io: Io, search_path: []const u8, file_name: []const u8, delim: u8) ?posix.fd_t {
var paths = std.mem.tokenizeScalar(u8, search_path, delim);
while (paths.next()) |p| {
- var dir = openPath(p) catch continue;
+ var dir = openPath(p, io) catch continue;
- defer dir.close();
+ defer dir.close(io);
const fd = posix.openat(dir.fd, file_name, .{
.ACCMODE = .RDONLY,
.CLOEXEC = true,
@@ -181,9 +183,9 @@ pub const ElfDynLib = struct {
return null;
}
- fn resolveFromParent(dir_path: []const u8, file_name: []const u8) ?posix.fd_t {
+ fn resolveFromParent(io: Io, dir_path: []const u8, file_name: []const u8) ?posix.fd_t {
var dir = std.fs.cwd().openDir(dir_path, .{}) catch return null;
- defer dir.close();
+ defer dir.close(io);
return posix.openat(dir.fd, file_name, .{
.ACCMODE = .RDONLY,
.CLOEXEC = true,
@@ -195,7 +197,7 @@ pub const ElfDynLib = struct {
// - DT_RPATH of the calling binary is not used as a search path
// - DT_RUNPATH of the calling binary is not used as a search path
// - /etc/ld.so.cache is not read
- fn resolveFromName(path_or_name: []const u8) !posix.fd_t {
+ fn resolveFromName(io: Io, path_or_name: []const u8) !posix.fd_t {
// If filename contains a slash ("/"), then it is interpreted as a (relative or absolute) pathname
if (std.mem.findScalarPos(u8, path_or_name, 0, '/')) |_| {
return posix.open(path_or_name, .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
@@ -206,21 +208,21 @@ pub const ElfDynLib = struct {
std.os.linux.getegid() == std.os.linux.getgid())
{
if (posix.getenvZ("LD_LIBRARY_PATH")) |ld_library_path| {
- if (resolveFromSearchPath(ld_library_path, path_or_name, ':')) |fd| {
+ if (resolveFromSearchPath(io, ld_library_path, path_or_name, ':')) |fd| {
return fd;
}
}
}
// Lastly the directories /lib and /usr/lib are searched (in this exact order)
- if (resolveFromParent("/lib", path_or_name)) |fd| return fd;
- if (resolveFromParent("/usr/lib", path_or_name)) |fd| return fd;
+ if (resolveFromParent(io, "/lib", path_or_name)) |fd| return fd;
+ if (resolveFromParent(io, "/usr/lib", path_or_name)) |fd| return fd;
return error.FileNotFound;
}
/// Trusts the file. Malicious file will be able to execute arbitrary code.
- pub fn open(path: []const u8) Error!ElfDynLib {
- const fd = try resolveFromName(path);
+ pub fn open(io: Io, path: []const u8) Error!ElfDynLib {
+ const fd = try resolveFromName(io, path);
defer posix.close(fd);
const file: std.fs.File = .{ .handle = fd };
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 72fab9c7c2..9472e5d2a5 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -227,7 +227,7 @@ pub fn deleteFileAbsolute(absolute_path: []const u8) Dir.DeleteFileError!void {
/// On Windows, `absolute_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, `absolute_path` should be encoded as valid UTF-8.
/// On other platforms, `absolute_path` is an opaque sequence of bytes with no particular encoding.
-pub fn deleteTreeAbsolute(absolute_path: []const u8) !void {
+pub fn deleteTreeAbsolute(io: Io, absolute_path: []const u8) !void {
assert(path.isAbsolute(absolute_path));
const dirname = path.dirname(absolute_path) orelse return error{
/// Attempt to remove the root file system path.
@@ -236,7 +236,7 @@ pub fn deleteTreeAbsolute(absolute_path: []const u8) !void {
}.CannotDeleteRootDirectory;
var dir = try cwd().openDir(dirname, .{});
- defer dir.close();
+ defer dir.close(io);
return dir.deleteTree(path.basename(absolute_path));
}
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 52baa0699d..15b8e9b558 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -178,6 +178,8 @@ fn setupSymlinkAbsolute(target: []const u8, link: []const u8, flags: SymLinkFlag
}
test "Dir.readLink" {
+ const io = testing.io;
+
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
// Create some targets
@@ -208,7 +210,7 @@ test "Dir.readLink" {
const parent_file = ".." ++ fs.path.sep_str ++ "target.txt";
const canonical_parent_file = try ctx.toCanonicalPathSep(parent_file);
var subdir = try ctx.dir.makeOpenPath("subdir", .{});
- defer subdir.close();
+ defer subdir.close(io);
try setupSymlink(subdir, canonical_parent_file, "relative-link.txt", .{});
try testReadLink(subdir, canonical_parent_file, "relative-link.txt");
if (builtin.os.tag == .windows) {
@@ -268,6 +270,8 @@ fn testReadLinkAbsolute(target_path: []const u8, symlink_path: []const u8) !void
}
test "File.stat on a File that is a symlink returns Kind.sym_link" {
+ const io = testing.io;
+
// This test requires getting a file descriptor of a symlink which
// is not possible on all targets
switch (builtin.target.os.tag) {
@@ -302,7 +306,7 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" {
.SecurityDescriptor = null,
.SecurityQualityOfService = null,
};
- var io: windows.IO_STATUS_BLOCK = undefined;
+ var io_status_block: windows.IO_STATUS_BLOCK = undefined;
const rc = windows.ntdll.NtCreateFile(
&handle,
.{
@@ -317,7 +321,7 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" {
},
},
&attr,
- &io,
+ &io_status_block,
null,
.{ .NORMAL = true },
.VALID_FLAGS,
@@ -352,7 +356,7 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" {
},
else => unreachable,
};
- defer symlink.close();
+ defer symlink.close(io);
const stat = try symlink.stat();
try testing.expectEqual(File.Kind.sym_link, stat.kind);
@@ -361,6 +365,8 @@ test "File.stat on a File that is a symlink returns Kind.sym_link" {
}
test "openDir" {
+ const io = testing.io;
+
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const allocator = ctx.arena.allocator();
@@ -370,7 +376,7 @@ test "openDir" {
for ([_][]const u8{ "", ".", ".." }) |sub_path| {
const dir_path = try fs.path.join(allocator, &.{ subdir_path, sub_path });
var dir = try ctx.dir.openDir(dir_path, .{});
- defer dir.close();
+ defer dir.close(io);
}
}
}.impl);
@@ -393,6 +399,8 @@ test "openDirAbsolute" {
if (native_os == .wasi) return error.SkipZigTest;
if (native_os == .openbsd) return error.SkipZigTest;
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -404,7 +412,7 @@ test "openDirAbsolute" {
// Can open sub_path
var tmp_sub = try fs.openDirAbsolute(sub_path, .{});
- defer tmp_sub.close();
+ defer tmp_sub.close(io);
const sub_ino = (try tmp_sub.stat()).inode;
@@ -414,7 +422,7 @@ test "openDirAbsolute" {
defer testing.allocator.free(dir_path);
var dir = try fs.openDirAbsolute(dir_path, .{});
- defer dir.close();
+ defer dir.close(io);
const ino = (try dir.stat()).inode;
try testing.expectEqual(tmp_ino, ino);
@@ -426,7 +434,7 @@ test "openDirAbsolute" {
defer testing.allocator.free(dir_path);
var dir = try fs.openDirAbsolute(dir_path, .{});
- defer dir.close();
+ defer dir.close(io);
const ino = (try dir.stat()).inode;
try testing.expectEqual(sub_ino, ino);
@@ -438,7 +446,7 @@ test "openDirAbsolute" {
defer testing.allocator.free(dir_path);
var dir = try fs.openDirAbsolute(dir_path, .{});
- defer dir.close();
+ defer dir.close(io);
const ino = (try dir.stat()).inode;
try testing.expectEqual(tmp_ino, ino);
@@ -446,13 +454,15 @@ test "openDirAbsolute" {
}
test "openDir cwd parent '..'" {
+ const io = testing.io;
+
var dir = fs.cwd().openDir("..", .{}) catch |err| {
if (native_os == .wasi and err == error.PermissionDenied) {
return; // This is okay. WASI disallows escaping from the fs sandbox
}
return err;
};
- defer dir.close();
+ defer dir.close(io);
}
test "openDir non-cwd parent '..'" {
@@ -461,14 +471,16 @@ test "openDir non-cwd parent '..'" {
else => {},
}
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
var subdir = try tmp.dir.makeOpenPath("subdir", .{});
- defer subdir.close();
+ defer subdir.close(io);
var dir = try subdir.openDir("..", .{});
- defer dir.close();
+ defer dir.close(io);
const expected_path = try tmp.dir.realpathAlloc(testing.allocator, ".");
defer testing.allocator.free(expected_path);
@@ -516,12 +528,14 @@ test "readLinkAbsolute" {
}
test "Dir.Iterator" {
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();
// First, create a couple of entries to iterate over.
const file = try tmp_dir.dir.createFile("some_file", .{});
- file.close();
+ file.close(io);
try tmp_dir.dir.makeDir("some_dir");
@@ -546,6 +560,8 @@ test "Dir.Iterator" {
}
test "Dir.Iterator many entries" {
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();
@@ -555,7 +571,7 @@ test "Dir.Iterator many entries" {
while (i < num) : (i += 1) {
const name = try std.fmt.bufPrint(&buf, "{}", .{i});
const file = try tmp_dir.dir.createFile(name, .{});
- file.close();
+ file.close(io);
}
var arena = ArenaAllocator.init(testing.allocator);
@@ -581,12 +597,14 @@ test "Dir.Iterator many entries" {
}
test "Dir.Iterator twice" {
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();
// First, create a couple of entries to iterate over.
const file = try tmp_dir.dir.createFile("some_file", .{});
- file.close();
+ file.close(io);
try tmp_dir.dir.makeDir("some_dir");
@@ -614,12 +632,14 @@ test "Dir.Iterator twice" {
}
test "Dir.Iterator reset" {
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();
// First, create a couple of entries to iterate over.
const file = try tmp_dir.dir.createFile("some_file", .{});
- file.close();
+ file.close(io);
try tmp_dir.dir.makeDir("some_dir");
@@ -650,12 +670,14 @@ test "Dir.Iterator reset" {
}
test "Dir.Iterator but dir is deleted during iteration" {
+ const io = testing.io;
+
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
// Create directory and setup an iterator for it
var subdir = try tmp.dir.makeOpenPath("subdir", .{ .iterate = true });
- defer subdir.close();
+ defer subdir.close(io);
var iterator = subdir.iterate();
@@ -742,11 +764,13 @@ test "Dir.realpath smoke test" {
}
test "readFileAlloc" {
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();
var file = try tmp_dir.dir.createFile("test_file", .{ .read = true });
- defer file.close();
+ defer file.close(io);
const buf1 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024));
defer testing.allocator.free(buf1);
@@ -815,10 +839,12 @@ test "statFile on dangling symlink" {
test "directory operations on files" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
+
const test_file_name = try ctx.transformPath("test_file");
var file = try ctx.dir.createFile(test_file_name, .{ .read = true });
- file.close();
+ file.close(io);
try testing.expectError(error.PathAlreadyExists, ctx.dir.makeDir(test_file_name));
try testing.expectError(error.NotDir, ctx.dir.openDir(test_file_name, .{}));
@@ -833,7 +859,7 @@ test "directory operations on files" {
file = try ctx.dir.openFile(test_file_name, .{});
const stat = try file.stat();
try testing.expectEqual(File.Kind.file, stat.kind);
- file.close();
+ file.close(io);
}
}.impl);
}
@@ -842,6 +868,8 @@ test "file operations on directories" {
// TODO: fix this test on FreeBSD. https://github.com/ziglang/zig/issues/1759
if (native_os == .freebsd) return error.SkipZigTest;
+ const io = testing.io;
+
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const test_dir_name = try ctx.transformPath("test_dir");
@@ -869,7 +897,7 @@ test "file operations on directories" {
if (native_os == .wasi and builtin.link_libc) {
// wasmtime unexpectedly succeeds here, see https://github.com/ziglang/zig/issues/20747
const handle = try ctx.dir.openFile(test_dir_name, .{ .mode = .read_write });
- handle.close();
+ handle.close(io);
} else {
// Note: The `.mode = .read_write` is necessary to ensure the error occurs on all platforms.
// TODO: Add a read-only test as well, see https://github.com/ziglang/zig/issues/5732
@@ -883,21 +911,23 @@ test "file operations on directories" {
// ensure the directory still exists as a sanity check
var dir = try ctx.dir.openDir(test_dir_name, .{});
- dir.close();
+ dir.close(io);
}
}.impl);
}
test "makeOpenPath parent dirs do not exist" {
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();
var dir = try tmp_dir.dir.makeOpenPath("root_dir/parent_dir/some_dir", .{});
- dir.close();
+ dir.close(io);
// double check that the full directory structure was created
var dir_verification = try tmp_dir.dir.openDir("root_dir/parent_dir/some_dir", .{});
- dir_verification.close();
+ dir_verification.close(io);
}
test "deleteDir" {
@@ -924,6 +954,7 @@ test "deleteDir" {
test "Dir.rename files" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
// Rename on Windows can hit intermittent AccessDenied errors
// when certain conditions are true about the host system.
// For now, skip this test when the path type is UNC to avoid them.
@@ -939,13 +970,13 @@ test "Dir.rename files" {
const test_file_name = try ctx.transformPath("test_file");
const renamed_test_file_name = try ctx.transformPath("test_file_renamed");
var file = try ctx.dir.createFile(test_file_name, .{ .read = true });
- file.close();
+ file.close(io);
try ctx.dir.rename(test_file_name, renamed_test_file_name);
// Ensure the file was renamed
try testing.expectError(error.FileNotFound, ctx.dir.openFile(test_file_name, .{}));
file = try ctx.dir.openFile(renamed_test_file_name, .{});
- file.close();
+ file.close(io);
// Rename to self succeeds
try ctx.dir.rename(renamed_test_file_name, renamed_test_file_name);
@@ -953,12 +984,12 @@ test "Dir.rename files" {
// Rename to existing file succeeds
const existing_file_path = try ctx.transformPath("existing_file");
var existing_file = try ctx.dir.createFile(existing_file_path, .{ .read = true });
- existing_file.close();
+ existing_file.close(io);
try ctx.dir.rename(renamed_test_file_name, existing_file_path);
try testing.expectError(error.FileNotFound, ctx.dir.openFile(renamed_test_file_name, .{}));
file = try ctx.dir.openFile(existing_file_path, .{});
- file.close();
+ file.close(io);
}
}.impl);
}
@@ -966,6 +997,8 @@ test "Dir.rename files" {
test "Dir.rename directories" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
+
// Rename on Windows can hit intermittent AccessDenied errors
// when certain conditions are true about the host system.
// For now, skip this test when the path type is UNC to avoid them.
@@ -985,8 +1018,8 @@ test "Dir.rename directories" {
// Put a file in the directory
var file = try dir.createFile("test_file", .{ .read = true });
- file.close();
- dir.close();
+ file.close(io);
+ dir.close(io);
const test_dir_renamed_again_path = try ctx.transformPath("test_dir_renamed_again");
try ctx.dir.rename(test_dir_renamed_path, test_dir_renamed_again_path);
@@ -995,8 +1028,8 @@ test "Dir.rename directories" {
try testing.expectError(error.FileNotFound, ctx.dir.openDir(test_dir_renamed_path, .{}));
dir = try ctx.dir.openDir(test_dir_renamed_again_path, .{});
file = try dir.openFile("test_file", .{});
- file.close();
- dir.close();
+ file.close(io);
+ dir.close(io);
}
}.impl);
}
@@ -1007,6 +1040,8 @@ test "Dir.rename directory onto empty dir" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
+
const test_dir_path = try ctx.transformPath("test_dir");
const target_dir_path = try ctx.transformPath("target_dir_path");
@@ -1017,7 +1052,7 @@ test "Dir.rename directory onto empty dir" {
// Ensure the directory was renamed
try testing.expectError(error.FileNotFound, ctx.dir.openDir(test_dir_path, .{}));
var dir = try ctx.dir.openDir(target_dir_path, .{});
- dir.close();
+ dir.close(io);
}
}.impl);
}
@@ -1028,6 +1063,7 @@ test "Dir.rename directory onto non-empty dir" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const test_dir_path = try ctx.transformPath("test_dir");
const target_dir_path = try ctx.transformPath("target_dir_path");
@@ -1035,15 +1071,15 @@ test "Dir.rename directory onto non-empty dir" {
var target_dir = try ctx.dir.makeOpenPath(target_dir_path, .{});
var file = try target_dir.createFile("test_file", .{ .read = true });
- file.close();
- target_dir.close();
+ file.close(io);
+ target_dir.close(io);
// Rename should fail with PathAlreadyExists if target_dir is non-empty
try testing.expectError(error.PathAlreadyExists, ctx.dir.rename(test_dir_path, target_dir_path));
// Ensure the directory was not renamed
var dir = try ctx.dir.openDir(test_dir_path, .{});
- dir.close();
+ dir.close(io);
}
}.impl);
}
@@ -1054,11 +1090,12 @@ test "Dir.rename file <-> dir" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const test_file_path = try ctx.transformPath("test_file");
const test_dir_path = try ctx.transformPath("test_dir");
var file = try ctx.dir.createFile(test_file_path, .{ .read = true });
- file.close();
+ file.close(io);
try ctx.dir.makeDir(test_dir_path);
try testing.expectError(error.IsDir, ctx.dir.rename(test_file_path, test_dir_path));
try testing.expectError(error.NotDir, ctx.dir.rename(test_dir_path, test_file_path));
@@ -1067,6 +1104,8 @@ test "Dir.rename file <-> dir" {
}
test "rename" {
+ const io = testing.io;
+
var tmp_dir1 = tmpDir(.{});
defer tmp_dir1.cleanup();
@@ -1077,19 +1116,21 @@ test "rename" {
const test_file_name = "test_file";
const renamed_test_file_name = "test_file_renamed";
var file = try tmp_dir1.dir.createFile(test_file_name, .{ .read = true });
- file.close();
+ file.close(io);
try fs.rename(tmp_dir1.dir, test_file_name, tmp_dir2.dir, renamed_test_file_name);
// ensure the file was renamed
try testing.expectError(error.FileNotFound, tmp_dir1.dir.openFile(test_file_name, .{}));
file = try tmp_dir2.dir.openFile(renamed_test_file_name, .{});
- file.close();
+ file.close(io);
}
test "renameAbsolute" {
if (native_os == .wasi) return error.SkipZigTest;
if (native_os == .openbsd) return error.SkipZigTest;
+ const io = testing.io;
+
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();
@@ -1109,7 +1150,7 @@ test "renameAbsolute" {
const test_file_name = "test_file";
const renamed_test_file_name = "test_file_renamed";
var file = try tmp_dir.dir.createFile(test_file_name, .{ .read = true });
- file.close();
+ file.close(io);
try fs.renameAbsolute(
try fs.path.join(allocator, &.{ base_path, test_file_name }),
try fs.path.join(allocator, &.{ base_path, renamed_test_file_name }),
@@ -1120,7 +1161,7 @@ test "renameAbsolute" {
file = try tmp_dir.dir.openFile(renamed_test_file_name, .{});
const stat = try file.stat();
try testing.expectEqual(File.Kind.file, stat.kind);
- file.close();
+ file.close(io);
// Renaming directories
const test_dir_name = "test_dir";
@@ -1134,14 +1175,16 @@ test "renameAbsolute" {
// ensure the directory was renamed
try testing.expectError(error.FileNotFound, tmp_dir.dir.openDir(test_dir_name, .{}));
var dir = try tmp_dir.dir.openDir(renamed_test_dir_name, .{});
- dir.close();
+ dir.close(io);
}
test "openSelfExe" {
if (native_os == .wasi) return error.SkipZigTest;
+ const io = testing.io;
+
const self_exe_file = try std.fs.openSelfExe(.{});
- self_exe_file.close();
+ self_exe_file.close(io);
}
test "selfExePath" {
@@ -1155,13 +1198,15 @@ test "selfExePath" {
}
test "deleteTree does not follow symlinks" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
try tmp.dir.makePath("b");
{
var a = try tmp.dir.makeOpenPath("a", .{});
- defer a.close();
+ defer a.close(io);
try setupSymlink(a, "../b", "b", .{ .is_directory = true });
}
@@ -1257,27 +1302,31 @@ test "makePath but sub_path contains pre-existing file" {
try testing.expectError(error.NotDir, tmp.dir.makePath("foo/bar/baz"));
}
-fn expectDir(dir: Dir, path: []const u8) !void {
+fn expectDir(io: Io, dir: Dir, path: []const u8) !void {
var d = try dir.openDir(path, .{});
- d.close();
+ d.close(io);
}
test "makepath existing directories" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
try tmp.dir.makeDir("A");
var tmpA = try tmp.dir.openDir("A", .{});
- defer tmpA.close();
+ defer tmpA.close(io);
try tmpA.makeDir("B");
const testPath = "A" ++ fs.path.sep_str ++ "B" ++ fs.path.sep_str ++ "C";
try tmp.dir.makePath(testPath);
- try expectDir(tmp.dir, testPath);
+ try expectDir(io, tmp.dir, testPath);
}
test "makepath through existing valid symlink" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1286,10 +1335,12 @@ test "makepath through existing valid symlink" {
try tmp.dir.makePath("working-symlink" ++ fs.path.sep_str ++ "in-realfolder");
- try expectDir(tmp.dir, "realfolder" ++ fs.path.sep_str ++ "in-realfolder");
+ try expectDir(io, tmp.dir, "realfolder" ++ fs.path.sep_str ++ "in-realfolder");
}
test "makepath relative walks" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1305,21 +1356,23 @@ test "makepath relative walks" {
.windows => {
// On Windows, .. is resolved before passing the path to NtCreateFile,
// meaning everything except `first/C` drops out.
- try expectDir(tmp.dir, "first" ++ fs.path.sep_str ++ "C");
+ try expectDir(io, tmp.dir, "first" ++ fs.path.sep_str ++ "C");
try testing.expectError(error.FileNotFound, tmp.dir.access("second", .{}));
try testing.expectError(error.FileNotFound, tmp.dir.access("third", .{}));
},
else => {
- try expectDir(tmp.dir, "first" ++ fs.path.sep_str ++ "A");
- try expectDir(tmp.dir, "first" ++ fs.path.sep_str ++ "B");
- try expectDir(tmp.dir, "first" ++ fs.path.sep_str ++ "C");
- try expectDir(tmp.dir, "second");
- try expectDir(tmp.dir, "third");
+ try expectDir(io, tmp.dir, "first" ++ fs.path.sep_str ++ "A");
+ try expectDir(io, tmp.dir, "first" ++ fs.path.sep_str ++ "B");
+ try expectDir(io, tmp.dir, "first" ++ fs.path.sep_str ++ "C");
+ try expectDir(io, tmp.dir, "second");
+ try expectDir(io, tmp.dir, "third");
},
}
}
test "makepath ignores '.'" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1337,14 +1390,14 @@ test "makepath ignores '.'" {
try tmp.dir.makePath(dotPath);
- try expectDir(tmp.dir, expectedPath);
+ try expectDir(io, tmp.dir, expectedPath);
}
-fn testFilenameLimits(iterable_dir: Dir, maxed_filename: []const u8) !void {
+fn testFilenameLimits(io: Io, iterable_dir: Dir, maxed_filename: []const u8) !void {
// setup, create a dir and a nested file both with maxed filenames, and walk the dir
{
var maxed_dir = try iterable_dir.makeOpenPath(maxed_filename, .{});
- defer maxed_dir.close();
+ defer maxed_dir.close(io);
try maxed_dir.writeFile(.{ .sub_path = maxed_filename, .data = "" });
@@ -1364,6 +1417,8 @@ fn testFilenameLimits(iterable_dir: Dir, maxed_filename: []const u8) !void {
}
test "max file name component lengths" {
+ const io = testing.io;
+
var tmp = tmpDir(.{ .iterate = true });
defer tmp.cleanup();
@@ -1371,16 +1426,16 @@ test "max file name component lengths" {
// U+FFFF is the character with the largest code point that is encoded as a single
// UTF-16 code unit, so Windows allows for NAME_MAX of them.
const maxed_windows_filename = ("\u{FFFF}".*) ** windows.NAME_MAX;
- try testFilenameLimits(tmp.dir, &maxed_windows_filename);
+ try testFilenameLimits(io, tmp.dir, &maxed_windows_filename);
} else if (native_os == .wasi) {
// On WASI, the maxed filename depends on the host OS, so in order for this test to
// work on any host, we need to use a length that will work for all platforms
// (i.e. the minimum max_name_bytes of all supported platforms).
const maxed_wasi_filename = [_]u8{'1'} ** 255;
- try testFilenameLimits(tmp.dir, &maxed_wasi_filename);
+ try testFilenameLimits(io, tmp.dir, &maxed_wasi_filename);
} else {
const maxed_ascii_filename = [_]u8{'1'} ** std.fs.max_name_bytes;
- try testFilenameLimits(tmp.dir, &maxed_ascii_filename);
+ try testFilenameLimits(io, tmp.dir, &maxed_ascii_filename);
}
}
@@ -1399,7 +1454,7 @@ test "writev, readv" {
var read_vecs: [2][]u8 = .{ &buf2, &buf1 };
var src_file = try tmp.dir.createFile("test.txt", .{ .read = true });
- defer src_file.close();
+ defer src_file.close(io);
var writer = src_file.writerStreaming(&.{});
@@ -1429,7 +1484,7 @@ test "pwritev, preadv" {
var read_vecs: [2][]u8 = .{ &buf2, &buf1 };
var src_file = try tmp.dir.createFile("test.txt", .{ .read = true });
- defer src_file.close();
+ defer src_file.close(io);
var writer = src_file.writer(&.{});
@@ -1459,7 +1514,7 @@ test "setEndPos" {
const file_name = "afile.txt";
try tmp.dir.writeFile(.{ .sub_path = file_name, .data = "ninebytes" });
const f = try tmp.dir.openFile(file_name, .{ .mode = .read_write });
- defer f.close();
+ defer f.close(io);
const initial_size = try f.getEndPos();
var buffer: [32]u8 = undefined;
@@ -1522,21 +1577,21 @@ test "sendfile" {
try tmp.dir.makePath("os_test_tmp");
var dir = try tmp.dir.openDir("os_test_tmp", .{});
- defer dir.close();
+ defer dir.close(io);
const line1 = "line1\n";
const line2 = "second line\n";
var vecs = [_][]const u8{ line1, line2 };
var src_file = try dir.createFile("sendfile1.txt", .{ .read = true });
- defer src_file.close();
+ defer src_file.close(io);
{
var fw = src_file.writer(&.{});
try fw.interface.writeVecAll(&vecs);
}
var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true });
- defer dest_file.close();
+ defer dest_file.close(io);
const header1 = "header1\n";
const header2 = "second header\n";
@@ -1569,15 +1624,15 @@ test "sendfile with buffered data" {
try tmp.dir.makePath("os_test_tmp");
var dir = try tmp.dir.openDir("os_test_tmp", .{});
- defer dir.close();
+ defer dir.close(io);
var src_file = try dir.createFile("sendfile1.txt", .{ .read = true });
- defer src_file.close();
+ defer src_file.close(io);
try src_file.writeAll("AAAABBBB");
var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true });
- defer dest_file.close();
+ defer dest_file.close(io);
var src_buffer: [32]u8 = undefined;
var file_reader = src_file.reader(io, &src_buffer);
@@ -1659,10 +1714,11 @@ test "open file with exclusive nonblocking lock twice" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const filename = try ctx.transformPath("file_nonblocking_lock_test.txt");
const file1 = try ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true });
- defer file1.close();
+ defer file1.close(io);
const file2 = ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true });
try testing.expectError(error.WouldBlock, file2);
@@ -1675,10 +1731,11 @@ test "open file with shared and exclusive nonblocking lock" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const filename = try ctx.transformPath("file_nonblocking_lock_test.txt");
const file1 = try ctx.dir.createFile(filename, .{ .lock = .shared, .lock_nonblocking = true });
- defer file1.close();
+ defer file1.close(io);
const file2 = ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true });
try testing.expectError(error.WouldBlock, file2);
@@ -1691,10 +1748,11 @@ test "open file with exclusive and shared nonblocking lock" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const filename = try ctx.transformPath("file_nonblocking_lock_test.txt");
const file1 = try ctx.dir.createFile(filename, .{ .lock = .exclusive, .lock_nonblocking = true });
- defer file1.close();
+ defer file1.close(io);
const file2 = ctx.dir.createFile(filename, .{ .lock = .shared, .lock_nonblocking = true });
try testing.expectError(error.WouldBlock, file2);
@@ -1707,10 +1765,11 @@ test "open file with exclusive lock twice, make sure second lock waits" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const filename = try ctx.transformPath("file_lock_test.txt");
const file = try ctx.dir.createFile(filename, .{ .lock = .exclusive });
- errdefer file.close();
+ errdefer file.close(io);
const S = struct {
fn checkFn(dir: *fs.Dir, path: []const u8, started: *std.Thread.ResetEvent, locked: *std.Thread.ResetEvent) !void {
@@ -1718,7 +1777,7 @@ test "open file with exclusive lock twice, make sure second lock waits" {
const file1 = try dir.createFile(path, .{ .lock = .exclusive });
locked.set();
- file1.close();
+ file1.close(testing.io);
}
};
@@ -1739,7 +1798,7 @@ test "open file with exclusive lock twice, make sure second lock waits" {
try testing.expectError(error.Timeout, locked.timedWait(10 * std.time.ns_per_ms));
// Release the file lock which should unlock the thread to lock it and set the locked event.
- file.close();
+ file.close(io);
locked.wait();
}
}.impl);
@@ -1748,6 +1807,8 @@ test "open file with exclusive lock twice, make sure second lock waits" {
test "open file with exclusive nonblocking lock twice (absolute paths)" {
if (native_os == .wasi) return error.SkipZigTest;
+ const io = testing.io;
+
var random_bytes: [12]u8 = undefined;
std.crypto.random.bytes(&random_bytes);
@@ -1774,18 +1835,19 @@ test "open file with exclusive nonblocking lock twice (absolute paths)" {
.lock = .exclusive,
.lock_nonblocking = true,
});
- file1.close();
+ file1.close(io);
try testing.expectError(error.WouldBlock, file2);
}
test "read from locked file" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+ const io = ctx.io;
const filename = try ctx.transformPath("read_lock_file_test.txt");
{
const f = try ctx.dir.createFile(filename, .{ .read = true });
- defer f.close();
+ defer f.close(io);
var buffer: [1]u8 = undefined;
_ = try f.read(&buffer);
}
@@ -1794,9 +1856,9 @@ test "read from locked file" {
.read = true,
.lock = .exclusive,
});
- defer f.close();
+ defer f.close(io);
const f2 = try ctx.dir.openFile(filename, .{});
- defer f2.close();
+ defer f2.close(io);
var buffer: [1]u8 = undefined;
if (builtin.os.tag == .windows) {
try std.testing.expectError(error.LockViolation, f2.read(&buffer));
@@ -1809,6 +1871,8 @@ test "read from locked file" {
}
test "walker" {
+ const io = testing.io;
+
var tmp = tmpDir(.{ .iterate = true });
defer tmp.cleanup();
@@ -1857,13 +1921,15 @@ test "walker" {
};
// make sure that the entry.dir is the containing dir
var entry_dir = try entry.dir.openDir(entry.basename, .{});
- defer entry_dir.close();
+ defer entry_dir.close(io);
num_walked += 1;
}
try testing.expectEqual(expected_paths.kvs.len, num_walked);
}
test "selective walker, skip entries that start with ." {
+ const io = testing.io;
+
var tmp = tmpDir(.{ .iterate = true });
defer tmp.cleanup();
@@ -1923,7 +1989,7 @@ test "selective walker, skip entries that start with ." {
// make sure that the entry.dir is the containing dir
var entry_dir = try entry.dir.openDir(entry.basename, .{});
- defer entry_dir.close();
+ defer entry_dir.close(io);
num_walked += 1;
}
try testing.expectEqual(expected_paths.kvs.len, num_walked);
@@ -1968,16 +2034,16 @@ test "'.' and '..' in fs.Dir functions" {
try ctx.dir.makeDir(subdir_path);
try ctx.dir.access(subdir_path, .{});
var created_subdir = try ctx.dir.openDir(subdir_path, .{});
- created_subdir.close();
+ created_subdir.close(io);
const created_file = try ctx.dir.createFile(file_path, .{});
- created_file.close();
+ created_file.close(io);
try ctx.dir.access(file_path, .{});
try ctx.dir.copyFile(file_path, ctx.dir, copy_path, .{});
try ctx.dir.rename(copy_path, rename_path);
const renamed_file = try ctx.dir.openFile(rename_path, .{});
- renamed_file.close();
+ renamed_file.close(io);
try ctx.dir.deleteFile(rename_path);
try ctx.dir.writeFile(.{ .sub_path = update_path, .data = "something" });
@@ -1994,6 +2060,8 @@ test "'.' and '..' in absolute functions" {
if (native_os == .wasi) return error.SkipZigTest;
if (native_os == .openbsd) return error.SkipZigTest;
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -2007,11 +2075,11 @@ test "'.' and '..' in absolute functions" {
try fs.makeDirAbsolute(subdir_path);
try fs.accessAbsolute(subdir_path, .{});
var created_subdir = try fs.openDirAbsolute(subdir_path, .{});
- created_subdir.close();
+ created_subdir.close(io);
const created_file_path = try fs.path.join(allocator, &.{ subdir_path, "../file" });
const created_file = try fs.createFileAbsolute(created_file_path, .{});
- created_file.close();
+ created_file.close(io);
try fs.accessAbsolute(created_file_path, .{});
const copied_file_path = try fs.path.join(allocator, &.{ subdir_path, "../copy" });
@@ -2019,7 +2087,7 @@ test "'.' and '..' in absolute functions" {
const renamed_file_path = try fs.path.join(allocator, &.{ subdir_path, "../rename" });
try fs.renameAbsolute(copied_file_path, renamed_file_path);
const renamed_file = try fs.openFileAbsolute(renamed_file_path, .{});
- renamed_file.close();
+ renamed_file.close(io);
try fs.deleteFileAbsolute(renamed_file_path);
try fs.deleteDirAbsolute(subdir_path);
@@ -2029,11 +2097,13 @@ test "chmod" {
if (native_os == .windows or native_os == .wasi)
return error.SkipZigTest;
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
const file = try tmp.dir.createFile("test_file", .{ .mode = 0o600 });
- defer file.close();
+ defer file.close(io);
try testing.expectEqual(@as(File.Mode, 0o600), (try file.stat()).mode & 0o7777);
try file.chmod(0o644);
@@ -2041,7 +2111,7 @@ test "chmod" {
try tmp.dir.makeDir("test_dir");
var dir = try tmp.dir.openDir("test_dir", .{ .iterate = true });
- defer dir.close();
+ defer dir.close(io);
try dir.chmod(0o700);
try testing.expectEqual(@as(File.Mode, 0o700), (try dir.stat()).mode & 0o7777);
@@ -2051,17 +2121,19 @@ test "chown" {
if (native_os == .windows or native_os == .wasi)
return error.SkipZigTest;
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
const file = try tmp.dir.createFile("test_file", .{});
- defer file.close();
+ defer file.close(io);
try file.chown(null, null);
try tmp.dir.makeDir("test_dir");
var dir = try tmp.dir.openDir("test_dir", .{ .iterate = true });
- defer dir.close();
+ defer dir.close(io);
try dir.chown(null, null);
}
@@ -2157,7 +2229,7 @@ test "read file non vectored" {
const contents = "hello, world!\n";
const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
- defer file.close();
+ defer file.close(io);
{
var file_writer: std.fs.File.Writer = .init(file, &.{});
try file_writer.interface.writeAll(contents);
@@ -2189,7 +2261,7 @@ test "seek keeping partial buffer" {
const contents = "0123456789";
const file = try tmp_dir.dir.createFile("input.txt", .{ .read = true });
- defer file.close();
+ defer file.close(io);
{
var file_writer: std.fs.File.Writer = .init(file, &.{});
try file_writer.interface.writeAll(contents);
@@ -2231,7 +2303,7 @@ test "seekBy" {
try tmp_dir.dir.writeFile(.{ .sub_path = "blah.txt", .data = "let's test seekBy" });
const f = try tmp_dir.dir.openFile("blah.txt", .{ .mode = .read_only });
- defer f.close();
+ defer f.close(io);
var reader = f.readerStreaming(io, &.{});
try reader.seekBy(2);
@@ -2250,7 +2322,7 @@ test "seekTo flushes buffered data" {
const contents = "data";
const file = try tmp.dir.createFile("seek.bin", .{ .read = true });
- defer file.close();
+ defer file.close(io);
{
var buf: [16]u8 = undefined;
var file_writer = std.fs.File.writer(file, &buf);
@@ -2277,9 +2349,9 @@ test "File.Writer sendfile with buffered contents" {
{
try tmp_dir.dir.writeFile(.{ .sub_path = "a", .data = "bcd" });
const in = try tmp_dir.dir.openFile("a", .{});
- defer in.close();
+ defer in.close(io);
const out = try tmp_dir.dir.createFile("b", .{});
- defer out.close();
+ defer out.close(io);
var in_buf: [2]u8 = undefined;
var in_r = in.reader(io, &in_buf);
@@ -2294,7 +2366,7 @@ test "File.Writer sendfile with buffered contents" {
}
var check = try tmp_dir.dir.openFile("b", .{});
- defer check.close();
+ defer check.close(io);
var check_buf: [4]u8 = undefined;
var check_r = check.reader(io, &check_buf);
try testing.expectEqualStrings("abcd", try check_r.interface.take(4));
diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig
index 10ab23f476..7f8c9827ef 100644
--- a/lib/std/http/Client.zig
+++ b/lib/std/http/Client.zig
@@ -1473,6 +1473,8 @@ pub const ConnectUnixError = Allocator.Error || std.posix.SocketError || error{N
///
/// This function is threadsafe.
pub fn connectUnix(client: *Client, path: []const u8) ConnectUnixError!*Connection {
+ const io = client.io;
+
if (client.connection_pool.findConnection(.{
.host = path,
.port = 0,
@@ -1485,7 +1487,7 @@ pub fn connectUnix(client: *Client, path: []const u8) ConnectUnixError!*Connecti
conn.* = .{ .data = undefined };
const stream = try Io.net.connectUnixSocket(path);
- errdefer stream.close();
+ errdefer stream.close(io);
conn.data = .{
.stream = stream,
diff --git a/lib/std/os/linux/IoUring.zig b/lib/std/os/linux/IoUring.zig
index c927dab376..e4a5bd3738 100644
--- a/lib/std/os/linux/IoUring.zig
+++ b/lib/std/os/linux/IoUring.zig
@@ -1,13 +1,16 @@
const IoUring = @This();
-const std = @import("std");
+
const builtin = @import("builtin");
+const is_linux = builtin.os.tag == .linux;
+
+const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const mem = std.mem;
const net = std.Io.net;
const posix = std.posix;
const linux = std.os.linux;
const testing = std.testing;
-const is_linux = builtin.os.tag == .linux;
const page_size_min = std.heap.page_size_min;
fd: linux.fd_t = -1,
@@ -1975,6 +1978,8 @@ test "readv" {
test "writev/fsync/readv" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(4, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -1987,7 +1992,7 @@ test "writev/fsync/readv" {
const path = "test_io_uring_writev_fsync_readv";
const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true });
- defer file.close();
+ defer file.close(io);
const fd = file.handle;
const buffer_write = [_]u8{42} ** 128;
@@ -2045,6 +2050,8 @@ test "writev/fsync/readv" {
test "write/read" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(2, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2056,7 +2063,7 @@ test "write/read" {
defer tmp.cleanup();
const path = "test_io_uring_write_read";
const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true });
- defer file.close();
+ defer file.close(io);
const fd = file.handle;
const buffer_write = [_]u8{97} ** 20;
@@ -2092,6 +2099,8 @@ test "write/read" {
test "splice/read" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(4, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2102,12 +2111,12 @@ test "splice/read" {
var tmp = std.testing.tmpDir(.{});
const path_src = "test_io_uring_splice_src";
const file_src = try tmp.dir.createFile(path_src, .{ .read = true, .truncate = true });
- defer file_src.close();
+ defer file_src.close(io);
const fd_src = file_src.handle;
const path_dst = "test_io_uring_splice_dst";
const file_dst = try tmp.dir.createFile(path_dst, .{ .read = true, .truncate = true });
- defer file_dst.close();
+ defer file_dst.close(io);
const fd_dst = file_dst.handle;
const buffer_write = [_]u8{97} ** 20;
@@ -2163,6 +2172,8 @@ test "splice/read" {
test "write_fixed/read_fixed" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(2, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2175,7 +2186,7 @@ test "write_fixed/read_fixed" {
const path = "test_io_uring_write_read_fixed";
const file = try tmp.dir.createFile(path, .{ .read = true, .truncate = true });
- defer file.close();
+ defer file.close(io);
const fd = file.handle;
var raw_buffers: [2][11]u8 = undefined;
@@ -2282,6 +2293,8 @@ test "openat" {
test "close" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2294,7 +2307,7 @@ test "close" {
const path = "test_io_uring_close";
const file = try tmp.dir.createFile(path, .{});
- errdefer file.close();
+ errdefer file.close(io);
const sqe_close = try ring.close(0x44444444, file.handle);
try testing.expectEqual(linux.IORING_OP.CLOSE, sqe_close.opcode);
@@ -2313,6 +2326,8 @@ test "close" {
test "accept/connect/send/recv" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2321,7 +2336,7 @@ test "accept/connect/send/recv" {
defer ring.deinit();
const socket_test_harness = try createSocketTestHarness(&ring);
- defer socket_test_harness.close();
+ defer socket_test_harness.close(io);
const buffer_send = [_]u8{ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 };
var buffer_recv = [_]u8{ 0, 1, 0, 1, 0 };
@@ -2573,6 +2588,8 @@ test "timeout_remove" {
test "accept/connect/recv/link_timeout" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2581,7 +2598,7 @@ test "accept/connect/recv/link_timeout" {
defer ring.deinit();
const socket_test_harness = try createSocketTestHarness(&ring);
- defer socket_test_harness.close();
+ defer socket_test_harness.close(io);
var buffer_recv = [_]u8{ 0, 1, 0, 1, 0 };
@@ -2622,6 +2639,8 @@ test "accept/connect/recv/link_timeout" {
test "fallocate" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2634,7 +2653,7 @@ test "fallocate" {
const path = "test_io_uring_fallocate";
const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
- defer file.close();
+ defer file.close(io);
try testing.expectEqual(@as(u64, 0), (try file.stat()).size);
@@ -2668,6 +2687,8 @@ test "fallocate" {
test "statx" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2679,7 +2700,7 @@ test "statx" {
defer tmp.cleanup();
const path = "test_io_uring_statx";
const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
- defer file.close();
+ defer file.close(io);
try testing.expectEqual(@as(u64, 0), (try file.stat()).size);
@@ -2725,6 +2746,8 @@ test "statx" {
test "accept/connect/recv/cancel" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2733,7 +2756,7 @@ test "accept/connect/recv/cancel" {
defer ring.deinit();
const socket_test_harness = try createSocketTestHarness(&ring);
- defer socket_test_harness.close();
+ defer socket_test_harness.close(io);
var buffer_recv = [_]u8{ 0, 1, 0, 1, 0 };
@@ -2929,6 +2952,8 @@ test "shutdown" {
test "renameat" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2945,7 +2970,7 @@ test "renameat" {
// Write old file with data
const old_file = try tmp.dir.createFile(old_path, .{ .truncate = true, .mode = 0o666 });
- defer old_file.close();
+ defer old_file.close(io);
try old_file.writeAll("hello");
// Submit renameat
@@ -2987,6 +3012,8 @@ test "renameat" {
test "unlinkat" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -3002,7 +3029,7 @@ test "unlinkat" {
// Write old file with data
const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
- defer file.close();
+ defer file.close(io);
// Submit unlinkat
@@ -3083,6 +3110,8 @@ test "mkdirat" {
test "symlinkat" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -3097,7 +3126,7 @@ test "symlinkat" {
const link_path = "test_io_uring_symlinkat_link";
const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
- defer file.close();
+ defer file.close(io);
// Submit symlinkat
@@ -3131,6 +3160,8 @@ test "symlinkat" {
test "linkat" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -3147,7 +3178,7 @@ test "linkat" {
// Write file with data
const first_file = try tmp.dir.createFile(first_path, .{ .truncate = true, .mode = 0o666 });
- defer first_file.close();
+ defer first_file.close(io);
try first_file.writeAll("hello");
// Submit linkat
@@ -3407,6 +3438,8 @@ test "remove_buffers" {
test "provide_buffers: accept/connect/send/recv" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -3443,7 +3476,7 @@ test "provide_buffers: accept/connect/send/recv" {
}
const socket_test_harness = try createSocketTestHarness(&ring);
- defer socket_test_harness.close();
+ defer socket_test_harness.close(io);
// Do 4 send on the socket
@@ -3696,6 +3729,8 @@ test "accept multishot" {
test "accept/connect/send_zc/recv" {
try skipKernelLessThan(.{ .major = 6, .minor = 0, .patch = 0 });
+ const io = testing.io;
+
var ring = IoUring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -3704,7 +3739,7 @@ test "accept/connect/send_zc/recv" {
defer ring.deinit();
const socket_test_harness = try createSocketTestHarness(&ring);
- defer socket_test_harness.close();
+ defer socket_test_harness.close(io);
const buffer_send = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe };
var buffer_recv = [_]u8{0} ** 10;
@@ -4105,6 +4140,8 @@ inline fn skipKernelLessThan(required: std.SemanticVersion) !void {
test BufferGroup {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
// Init IoUring
var ring = IoUring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
@@ -4132,7 +4169,7 @@ test BufferGroup {
// Create client/server fds
const fds = try createSocketTestHarness(&ring);
- defer fds.close();
+ defer fds.close(io);
const data = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe };
// Client sends data
@@ -4170,6 +4207,8 @@ test BufferGroup {
test "ring mapped buffers recv" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -4196,7 +4235,7 @@ test "ring mapped buffers recv" {
// create client/server fds
const fds = try createSocketTestHarness(&ring);
- defer fds.close();
+ defer fds.close(io);
// for random user_data in sqe/cqe
var Rnd = std.Random.DefaultPrng.init(std.testing.random_seed);
@@ -4259,6 +4298,8 @@ test "ring mapped buffers recv" {
test "ring mapped buffers multishot recv" {
if (!is_linux) return error.SkipZigTest;
+ const io = testing.io;
+
var ring = IoUring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -4285,7 +4326,7 @@ test "ring mapped buffers multishot recv" {
// create client/server fds
const fds = try createSocketTestHarness(&ring);
- defer fds.close();
+ defer fds.close(io);
// for random user_data in sqe/cqe
var Rnd = std.Random.DefaultPrng.init(std.testing.random_seed);
diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig
index 7399907477..39606ddfac 100644
--- a/lib/std/os/linux/test.zig
+++ b/lib/std/os/linux/test.zig
@@ -12,12 +12,14 @@ const fs = std.fs;
test "fallocate" {
if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://codeberg.org/ziglang/zig/issues/30220
+ const io = std.testing.io;
+
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
const path = "test_fallocate";
const file = try tmp.dir.createFile(path, .{ .truncate = true, .mode = 0o666 });
- defer file.close();
+ defer file.close(io);
try expect((try file.stat()).size == 0);
@@ -77,12 +79,14 @@ test "timer" {
}
test "statx" {
+ const io = std.testing.io;
+
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
const tmp_file_name = "just_a_temporary_file.txt";
var file = try tmp.dir.createFile(tmp_file_name, .{});
- defer file.close();
+ defer file.close(io);
var buf: linux.Statx = undefined;
switch (linux.errno(linux.statx(file.handle, "", linux.AT.EMPTY_PATH, .BASIC_STATS, &buf))) {
@@ -111,12 +115,14 @@ test "user and group ids" {
}
test "fadvise" {
+ const io = std.testing.io;
+
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
const tmp_file_name = "temp_posix_fadvise.txt";
var file = try tmp.dir.createFile(tmp_file_name, .{});
- defer file.close();
+ defer file.close(io);
var buf: [2048]u8 = undefined;
try file.writeAll(&buf);
diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig
index 3bb5e64c73..8889e50ea3 100644
--- a/lib/std/posix/test.zig
+++ b/lib/std/posix/test.zig
@@ -148,6 +148,8 @@ test "linkat with different directories" {
else => return error.SkipZigTest,
}
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -163,10 +165,10 @@ test "linkat with different directories" {
try posix.linkat(tmp.dir.fd, target_name, subdir.fd, link_name, 0);
const efd = try tmp.dir.openFile(target_name, .{});
- defer efd.close();
+ defer efd.close(io);
const nfd = try subdir.openFile(link_name, .{});
- defer nfd.close();
+ defer nfd.close(io);
{
const eino, _ = try getLinkInfo(efd.handle);
@@ -381,6 +383,8 @@ test "mmap" {
if (native_os == .windows or native_os == .wasi)
return error.SkipZigTest;
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -413,7 +417,7 @@ test "mmap" {
// Create a file used for testing mmap() calls with a file descriptor
{
const file = try tmp.dir.createFile(test_out_file, .{});
- defer file.close();
+ defer file.close(io);
var stream = file.writer(&.{});
@@ -426,7 +430,7 @@ test "mmap" {
// Map the whole file
{
const file = try tmp.dir.openFile(test_out_file, .{});
- defer file.close();
+ defer file.close(io);
const data = try posix.mmap(
null,
@@ -451,7 +455,7 @@ test "mmap" {
// Map the upper half of the file
{
const file = try tmp.dir.openFile(test_out_file, .{});
- defer file.close();
+ defer file.close(io);
const data = try posix.mmap(
null,
@@ -476,13 +480,15 @@ test "fcntl" {
if (native_os == .windows or native_os == .wasi)
return error.SkipZigTest;
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
const test_out_file = "os_tmp_test";
const file = try tmp.dir.createFile(test_out_file, .{});
- defer file.close();
+ defer file.close(io);
// Note: The test assumes createFile opens the file with CLOEXEC
{
@@ -526,12 +532,14 @@ test "fsync" {
else => return error.SkipZigTest,
}
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
const test_out_file = "os_tmp_test";
const file = try tmp.dir.createFile(test_out_file, .{});
- defer file.close();
+ defer file.close(io);
try posix.fsync(file.handle);
try posix.fdatasync(file.handle);
@@ -646,22 +654,24 @@ test "dup & dup2" {
else => return error.SkipZigTest,
}
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
{
var file = try tmp.dir.createFile("os_dup_test", .{});
- defer file.close();
+ defer file.close(io);
var duped = std.fs.File{ .handle = try posix.dup(file.handle) };
- defer duped.close();
+ defer duped.close(io);
try duped.writeAll("dup");
// Tests aren't run in parallel so using the next fd shouldn't be an issue.
const new_fd = duped.handle + 1;
try posix.dup2(file.handle, new_fd);
var dup2ed = std.fs.File{ .handle = new_fd };
- defer dup2ed.close();
+ defer dup2ed.close(io);
try dup2ed.writeAll("dup2");
}
@@ -687,11 +697,13 @@ test "getppid" {
test "writev longer than IOV_MAX" {
if (native_os == .windows or native_os == .wasi) return error.SkipZigTest;
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
var file = try tmp.dir.createFile("pwritev", .{});
- defer file.close();
+ defer file.close(io);
const iovecs = [_]posix.iovec_const{.{ .base = "a", .len = 1 }} ** (posix.IOV_MAX + 1);
const amt = try file.writev(&iovecs);
@@ -709,12 +721,14 @@ test "POSIX file locking with fcntl" {
return error.SkipZigTest;
}
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
// Create a temporary lock file
var file = try tmp.dir.createFile("lock", .{ .read = true });
- defer file.close();
+ defer file.close(io);
try file.setEndPos(2);
const fd = file.handle;
@@ -905,21 +919,25 @@ test "timerfd" {
}
test "isatty" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
var file = try tmp.dir.createFile("foo", .{});
- defer file.close();
+ defer file.close(io);
try expectEqual(posix.isatty(file.handle), false);
}
test "pread with empty buffer" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
var file = try tmp.dir.createFile("pread_empty", .{ .read = true });
- defer file.close();
+ defer file.close(io);
const bytes = try a.alloc(u8, 0);
defer a.free(bytes);
@@ -929,11 +947,13 @@ test "pread with empty buffer" {
}
test "write with empty buffer" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
var file = try tmp.dir.createFile("write_empty", .{});
- defer file.close();
+ defer file.close(io);
const bytes = try a.alloc(u8, 0);
defer a.free(bytes);
@@ -943,11 +963,13 @@ test "write with empty buffer" {
}
test "pwrite with empty buffer" {
+ const io = testing.io;
+
var tmp = tmpDir(.{});
defer tmp.cleanup();
var file = try tmp.dir.createFile("pwrite_empty", .{});
- defer file.close();
+ defer file.close(io);
const bytes = try a.alloc(u8, 0);
defer a.free(bytes);
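// A minimal sketch (not part of this patch) of the test pattern these hunks converge
// on: tests grab the Io instance from std.testing.io and thread it into close().
// The file name "example" and the write are illustrative assumptions.
const std = @import("std");
const testing = std.testing;

test "File.close takes an Io instance" {
    const io = testing.io;
    var tmp = testing.tmpDir(.{});
    defer tmp.cleanup();
    const file = try tmp.dir.createFile("example", .{});
    // Previously `defer file.close();`.
    defer file.close(io);
    try file.writeAll("hello");
}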
diff --git a/lib/std/process.zig b/lib/std/process.zig
index a0a26c766f..a8dede6ad4 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -1,12 +1,14 @@
-const std = @import("std.zig");
const builtin = @import("builtin");
+const native_os = builtin.os.tag;
+
+const std = @import("std.zig");
+const Io = std.Io;
const fs = std.fs;
const mem = std.mem;
const math = std.math;
-const Allocator = mem.Allocator;
+const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const testing = std.testing;
-const native_os = builtin.os.tag;
const posix = std.posix;
const windows = std.os.windows;
const unicode = std.unicode;
@@ -1571,9 +1573,9 @@ pub fn getUserInfo(name: []const u8) !UserInfo {
/// TODO this reads /etc/passwd. But sometimes the user/id mapping is in something else
/// like NIS, AD, etc. See `man nss` or look at an strace for `id myuser`.
-pub fn posixGetUserInfo(name: []const u8) !UserInfo {
+pub fn posixGetUserInfo(io: Io, name: []const u8) !UserInfo {
const file = try std.fs.openFileAbsolute("/etc/passwd", .{});
- defer file.close();
+ defer file.close(io);
var buffer: [4096]u8 = undefined;
var file_reader = file.reader(&buffer);
return posixGetUserInfoPasswdStream(name, &file_reader.interface) catch |err| switch (err) {
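// A minimal usage sketch for the posixGetUserInfo signature above; the user name
// "root" and wrapping the call in a test via std.testing.io are assumptions, and the
// call only makes sense on hosts that actually have /etc/passwd.
const std = @import("std");

test "posixGetUserInfo now takes Io first" {
    const io = std.testing.io;
    // Previously `posixGetUserInfo(name)`.
    const info = try std.process.posixGetUserInfo(io, "root");
    _ = info;
}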
diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig
index 6dad72df44..be3026ff10 100644
--- a/lib/std/process/Child.zig
+++ b/lib/std/process/Child.zig
@@ -4,6 +4,7 @@ const builtin = @import("builtin");
const native_os = builtin.os.tag;
const std = @import("../std.zig");
+const Io = std.Io;
const unicode = std.unicode;
const fs = std.fs;
const process = std.process;
@@ -277,17 +278,17 @@ pub fn spawnAndWait(self: *ChildProcess) SpawnError!Term {
}
/// Forcibly terminates child process and then cleans up all resources.
-pub fn kill(self: *ChildProcess) !Term {
+pub fn kill(self: *ChildProcess, io: Io) !Term {
if (native_os == .windows) {
- return self.killWindows(1);
+ return self.killWindows(io, 1);
} else {
- return self.killPosix();
+ return self.killPosix(io);
}
}
-pub fn killWindows(self: *ChildProcess, exit_code: windows.UINT) !Term {
+pub fn killWindows(self: *ChildProcess, io: Io, exit_code: windows.UINT) !Term {
if (self.term) |term| {
- self.cleanupStreams();
+ self.cleanupStreams(io);
return term;
}
@@ -303,20 +304,20 @@ pub fn killWindows(self: *ChildProcess, exit_code: windows.UINT) !Term {
},
else => return err,
};
- try self.waitUnwrappedWindows();
+ try self.waitUnwrappedWindows(io);
return self.term.?;
}
-pub fn killPosix(self: *ChildProcess) !Term {
+pub fn killPosix(self: *ChildProcess, io: Io) !Term {
if (self.term) |term| {
- self.cleanupStreams();
+ self.cleanupStreams(io);
return term;
}
posix.kill(self.id, posix.SIG.TERM) catch |err| switch (err) {
error.ProcessNotFound => return error.AlreadyTerminated,
else => return err,
};
- self.waitUnwrappedPosix();
+ self.waitUnwrappedPosix(io);
return self.term.?;
}
@@ -354,15 +355,15 @@ pub fn waitForSpawn(self: *ChildProcess) SpawnError!void {
}
/// Blocks until child process terminates and then cleans up all resources.
-pub fn wait(self: *ChildProcess) WaitError!Term {
+pub fn wait(self: *ChildProcess, io: Io) WaitError!Term {
try self.waitForSpawn(); // report spawn errors
if (self.term) |term| {
- self.cleanupStreams();
+ self.cleanupStreams(io);
return term;
}
switch (native_os) {
- .windows => try self.waitUnwrappedWindows(),
- else => self.waitUnwrappedPosix(),
+ .windows => try self.waitUnwrappedWindows(io),
+ else => self.waitUnwrappedPosix(io),
}
self.id = undefined;
return self.term.?;
@@ -474,7 +475,7 @@ pub fn run(args: struct {
};
}
-fn waitUnwrappedWindows(self: *ChildProcess) WaitError!void {
+fn waitUnwrappedWindows(self: *ChildProcess, io: Io) WaitError!void {
const result = windows.WaitForSingleObjectEx(self.id, windows.INFINITE, false);
self.term = @as(SpawnError!Term, x: {
@@ -492,11 +493,11 @@ fn waitUnwrappedWindows(self: *ChildProcess) WaitError!void {
posix.close(self.id);
posix.close(self.thread_handle);
- self.cleanupStreams();
+ self.cleanupStreams(io);
return result;
}
-fn waitUnwrappedPosix(self: *ChildProcess) void {
+fn waitUnwrappedPosix(self: *ChildProcess, io: Io) void {
const res: posix.WaitPidResult = res: {
if (self.request_resource_usage_statistics) {
switch (native_os) {
@@ -527,7 +528,7 @@ fn waitUnwrappedPosix(self: *ChildProcess) void {
break :res posix.waitpid(self.id, 0);
};
const status = res.status;
- self.cleanupStreams();
+ self.cleanupStreams(io);
self.handleWaitResult(status);
}
@@ -535,17 +536,17 @@ fn handleWaitResult(self: *ChildProcess, status: u32) void {
self.term = statusToTerm(status);
}
-fn cleanupStreams(self: *ChildProcess) void {
+fn cleanupStreams(self: *ChildProcess, io: Io) void {
if (self.stdin) |*stdin| {
- stdin.close();
+ stdin.close(io);
self.stdin = null;
}
if (self.stdout) |*stdout| {
- stdout.close();
+ stdout.close(io);
self.stdout = null;
}
if (self.stderr) |*stderr| {
- stderr.close();
+ stderr.close(io);
self.stderr = null;
}
}
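// A minimal sketch of the reworked Child API, assuming `init` and `spawn` keep their
// existing signatures (they are not modified by this commit) and that the host can
// run /bin/true.
const std = @import("std");

test "Child.wait takes Io" {
    const io = std.testing.io;
    var child = std.process.Child.init(&.{"/bin/true"}, std.testing.allocator);
    try child.spawn();
    // Previously `child.wait()`; the Io instance now reaches cleanupStreams and the
    // platform-specific wait paths.
    _ = try child.wait(io);
}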
diff --git a/lib/std/tar.zig b/lib/std/tar.zig
index bf96aed35c..d861314fec 100644
--- a/lib/std/tar.zig
+++ b/lib/std/tar.zig
@@ -16,6 +16,7 @@
//! pax reference: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const testing = std.testing;
@@ -302,7 +303,7 @@ pub const FileKind = enum {
/// Iterator over entries in the tar file represented by reader.
pub const Iterator = struct {
- reader: *std.Io.Reader,
+ reader: *Io.Reader,
diagnostics: ?*Diagnostics = null,
// buffers for header and file attributes
@@ -328,7 +329,7 @@ pub const Iterator = struct {
/// Iterates over files in tar archive.
/// `next` returns each file in tar archive.
- pub fn init(reader: *std.Io.Reader, options: Options) Iterator {
+ pub fn init(reader: *Io.Reader, options: Options) Iterator {
return .{
.reader = reader,
.diagnostics = options.diagnostics,
@@ -473,7 +474,7 @@ pub const Iterator = struct {
return null;
}
- pub fn streamRemaining(it: *Iterator, file: File, w: *std.Io.Writer) std.Io.Reader.StreamError!void {
+ pub fn streamRemaining(it: *Iterator, file: File, w: *Io.Writer) Io.Reader.StreamError!void {
try it.reader.streamExact64(w, file.size);
it.unread_file_bytes = 0;
}
@@ -499,14 +500,14 @@ const pax_max_size_attr_len = 64;
pub const PaxIterator = struct {
size: usize, // cumulative size of all pax attributes
- reader: *std.Io.Reader,
+ reader: *Io.Reader,
const Self = @This();
const Attribute = struct {
kind: PaxAttributeKind,
len: usize, // length of the attribute value
- reader: *std.Io.Reader, // reader positioned at value start
+ reader: *Io.Reader, // reader positioned at value start
// Copies pax attribute value into destination buffer.
// Must be called with destination buffer of size at least Attribute.len.
@@ -573,13 +574,13 @@ pub const PaxIterator = struct {
}
// Checks that each record ends with a newline.
- fn validateAttributeEnding(reader: *std.Io.Reader) !void {
+ fn validateAttributeEnding(reader: *Io.Reader) !void {
if (try reader.takeByte() != '\n') return error.PaxInvalidAttributeEnd;
}
};
/// Saves tar file content to the file systems.
-pub fn pipeToFileSystem(dir: std.fs.Dir, reader: *std.Io.Reader, options: PipeOptions) !void {
+pub fn pipeToFileSystem(io: Io, dir: Io.Dir, reader: *Io.Reader, options: PipeOptions) !void {
var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
var file_contents_buffer: [1024]u8 = undefined;
@@ -610,7 +611,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: *std.Io.Reader, options: PipeOp
},
.file => {
if (createDirAndFile(dir, file_name, fileMode(file.mode, options))) |fs_file| {
- defer fs_file.close();
+ defer fs_file.close(io);
var file_writer = fs_file.writer(&file_contents_buffer);
try it.streamRemaining(file, &file_writer.interface);
try file_writer.interface.flush();
@@ -637,7 +638,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: *std.Io.Reader, options: PipeOp
}
}
-fn createDirAndFile(dir: std.fs.Dir, file_name: []const u8, mode: std.fs.File.Mode) !std.fs.File {
+fn createDirAndFile(dir: Io.Dir, file_name: []const u8, mode: Io.File.Mode) !Io.File {
const fs_file = dir.createFile(file_name, .{ .exclusive = true, .mode = mode }) catch |err| {
if (err == error.FileNotFound) {
if (std.fs.path.dirname(file_name)) |dir_name| {
@@ -651,7 +652,7 @@ fn createDirAndFile(dir: std.fs.Dir, file_name: []const u8, mode: std.fs.File.Mo
}
// Creates a symbolic link at path `file_name` which points to `link_name`.
-fn createDirAndSymlink(dir: std.fs.Dir, link_name: []const u8, file_name: []const u8) !void {
+fn createDirAndSymlink(dir: Io.Dir, link_name: []const u8, file_name: []const u8) !void {
dir.symLink(link_name, file_name, .{}) catch |err| {
if (err == error.FileNotFound) {
if (std.fs.path.dirname(file_name)) |dir_name| {
@@ -783,7 +784,7 @@ test PaxIterator {
var buffer: [1024]u8 = undefined;
outer: for (cases) |case| {
- var reader: std.Io.Reader = .fixed(case.data);
+ var reader: Io.Reader = .fixed(case.data);
var it: PaxIterator = .{
.size = case.data.len,
.reader = &reader,
@@ -874,13 +875,15 @@ test "header parse mode" {
}
test "create file and symlink" {
+ const io = testing.io;
+
var root = testing.tmpDir(.{});
defer root.cleanup();
var file = try createDirAndFile(root.dir, "file1", default_mode);
- file.close();
+ file.close(io);
file = try createDirAndFile(root.dir, "a/b/c/file2", default_mode);
- file.close();
+ file.close(io);
createDirAndSymlink(root.dir, "a/b/c/file2", "symlink1") catch |err| {
// On Windows when developer mode is not enabled
@@ -892,7 +895,7 @@ test "create file and symlink" {
// Dangling symlink, file created later
try createDirAndSymlink(root.dir, "../../../g/h/i/file4", "j/k/l/symlink3");
file = try createDirAndFile(root.dir, "g/h/i/file4", default_mode);
- file.close();
+ file.close(io);
}
test Iterator {
@@ -916,7 +919,7 @@ test Iterator {
// example/empty/
const data = @embedFile("tar/testdata/example.tar");
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
// User provided buffers to the iterator
var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
@@ -942,7 +945,7 @@ test Iterator {
.file => {
try testing.expectEqualStrings("example/a/file", file.name);
var buf: [16]u8 = undefined;
- var w: std.Io.Writer = .fixed(&buf);
+ var w: Io.Writer = .fixed(&buf);
try it.streamRemaining(file, &w);
try testing.expectEqualStrings("content\n", w.buffered());
},
@@ -955,6 +958,7 @@ test Iterator {
}
test pipeToFileSystem {
+ const io = testing.io;
// Example tar file is created from this tree structure:
// $ tree example
// example
@@ -975,14 +979,14 @@ test pipeToFileSystem {
// example/empty/
const data = @embedFile("tar/testdata/example.tar");
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{ .follow_symlinks = false });
defer tmp.cleanup();
const dir = tmp.dir;
// Save tar from reader to the file system `dir`
- pipeToFileSystem(dir, &reader, .{
+ pipeToFileSystem(io, dir, &reader, .{
.mode_mode = .ignore,
.strip_components = 1,
.exclude_empty_directories = true,
@@ -1005,8 +1009,9 @@ test pipeToFileSystem {
}
test "pipeToFileSystem root_dir" {
+ const io = testing.io;
const data = @embedFile("tar/testdata/example.tar");
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
// with strip_components = 1
{
@@ -1015,7 +1020,7 @@ test "pipeToFileSystem root_dir" {
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- pipeToFileSystem(tmp.dir, &reader, .{
+ pipeToFileSystem(io, tmp.dir, &reader, .{
.strip_components = 1,
.diagnostics = &diagnostics,
}) catch |err| {
@@ -1037,7 +1042,7 @@ test "pipeToFileSystem root_dir" {
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- pipeToFileSystem(tmp.dir, &reader, .{
+ pipeToFileSystem(io, tmp.dir, &reader, .{
.strip_components = 0,
.diagnostics = &diagnostics,
}) catch |err| {
@@ -1053,43 +1058,46 @@ test "pipeToFileSystem root_dir" {
}
test "findRoot with single file archive" {
+ const io = testing.io;
const data = @embedFile("tar/testdata/22752.tar");
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- try pipeToFileSystem(tmp.dir, &reader, .{ .diagnostics = &diagnostics });
+ try pipeToFileSystem(io, tmp.dir, &reader, .{ .diagnostics = &diagnostics });
try testing.expectEqualStrings("", diagnostics.root_dir);
}
test "findRoot without explicit root dir" {
+ const io = testing.io;
const data = @embedFile("tar/testdata/19820.tar");
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- try pipeToFileSystem(tmp.dir, &reader, .{ .diagnostics = &diagnostics });
+ try pipeToFileSystem(io, tmp.dir, &reader, .{ .diagnostics = &diagnostics });
try testing.expectEqualStrings("root", diagnostics.root_dir);
}
test "pipeToFileSystem strip_components" {
+ const io = testing.io;
const data = @embedFile("tar/testdata/example.tar");
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{ .follow_symlinks = false });
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- pipeToFileSystem(tmp.dir, &reader, .{
+ pipeToFileSystem(io, tmp.dir, &reader, .{
.strip_components = 3,
.diagnostics = &diagnostics,
}) catch |err| {
@@ -1110,10 +1118,10 @@ fn normalizePath(bytes: []u8) []u8 {
return bytes;
}
-const default_mode = std.fs.File.default_mode;
+const default_mode = Io.File.default_mode;
// File system mode based on tar header mode and mode_mode options.
-fn fileMode(mode: u32, options: PipeOptions) std.fs.File.Mode {
+fn fileMode(mode: u32, options: PipeOptions) Io.File.Mode {
if (!std.fs.has_executable_bit or options.mode_mode == .ignore)
return default_mode;
@@ -1139,16 +1147,17 @@ test fileMode {
test "executable bit" {
if (!std.fs.has_executable_bit) return error.SkipZigTest;
+ const io = testing.io;
const S = std.posix.S;
const data = @embedFile("tar/testdata/example.tar");
for ([_]PipeOptions.ModeMode{ .ignore, .executable_bit_only }) |opt| {
- var reader: std.Io.Reader = .fixed(data);
+ var reader: Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{ .follow_symlinks = false });
//defer tmp.cleanup();
- pipeToFileSystem(tmp.dir, &reader, .{
+ pipeToFileSystem(io, tmp.dir, &reader, .{
.strip_components = 1,
.exclude_empty_directories = true,
.mode_mode = opt,
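// A minimal sketch mirroring the tests above: pipeToFileSystem now takes the Io
// instance as its first argument. The @embedFile path assumes the snippet lives next
// to lib/std/tar.zig, and symlink creation must be permitted on the host (the real
// tests catch the Windows developer-mode errors instead of using `try`).
const std = @import("std");
const testing = std.testing;

test "pipeToFileSystem takes Io first" {
    const io = testing.io;
    const data = @embedFile("tar/testdata/example.tar");
    var reader: std.Io.Reader = .fixed(data);
    var tmp = testing.tmpDir(.{ .follow_symlinks = false });
    defer tmp.cleanup();
    // Previously `pipeToFileSystem(tmp.dir, &reader, ...)`.
    try std.tar.pipeToFileSystem(io, tmp.dir, &reader, .{
        .mode_mode = .ignore,
        .strip_components = 1,
        .exclude_empty_directories = true,
    });
}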
diff --git a/lib/std/tar/test.zig b/lib/std/tar/test.zig
index 780e2b844c..b3fbd8f4b3 100644
--- a/lib/std/tar/test.zig
+++ b/lib/std/tar/test.zig
@@ -424,6 +424,7 @@ test "insufficient buffer in Header name filed" {
}
test "should not overwrite existing file" {
+ const io = testing.io;
// Starting from this folder structure:
// $ tree root
// root
@@ -469,17 +470,18 @@ test "should not overwrite existing file" {
defer root.cleanup();
try testing.expectError(
error.PathAlreadyExists,
- tar.pipeToFileSystem(root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }),
+ tar.pipeToFileSystem(io, root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }),
);
// Unpack with strip_components = 0 should pass
r = .fixed(data);
var root2 = std.testing.tmpDir(.{});
defer root2.cleanup();
- try tar.pipeToFileSystem(root2.dir, &r, .{ .mode_mode = .ignore, .strip_components = 0 });
+ try tar.pipeToFileSystem(io, root2.dir, &r, .{ .mode_mode = .ignore, .strip_components = 0 });
}
test "case sensitivity" {
+ const io = testing.io;
// Mimicking issue #18089, this tar contains the same file name in two
// case-sensitive variants. Should fail on case-insensitive file systems.
//
@@ -495,7 +497,7 @@ test "case sensitivity" {
var root = std.testing.tmpDir(.{});
defer root.cleanup();
- tar.pipeToFileSystem(root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }) catch |err| {
+ tar.pipeToFileSystem(io, root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }) catch |err| {
// on a case-insensitive fs we fail when attempting to overwrite the existing file
try testing.expectEqual(error.PathAlreadyExists, err);
return;
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 186cafad59..63131d771c 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -613,9 +613,9 @@ pub const TmpDir = struct {
const sub_path_len = std.fs.base64_encoder.calcSize(random_bytes_count);
pub fn cleanup(self: *TmpDir) void {
- self.dir.close();
+ self.dir.close(io);
self.parent_dir.deleteTree(&self.sub_path) catch {};
- self.parent_dir.close();
+ self.parent_dir.close(io);
self.* = undefined;
}
};
@@ -629,7 +629,7 @@ pub fn tmpDir(opts: std.fs.Dir.OpenOptions) TmpDir {
const cwd = std.fs.cwd();
var cache_dir = cwd.makeOpenPath(".zig-cache", .{}) catch
@panic("unable to make tmp dir for testing: unable to make and open .zig-cache dir");
- defer cache_dir.close();
+ defer cache_dir.close(io);
const parent_dir = cache_dir.makeOpenPath("tmp", .{}) catch
@panic("unable to make tmp dir for testing: unable to make and open .zig-cache/tmp dir");
const dir = parent_dir.makeOpenPath(&sub_path, opts) catch
diff --git a/lib/std/zig/LibCInstallation.zig b/lib/std/zig/LibCInstallation.zig
index 2ab4e48570..c8bde2ab02 100644
--- a/lib/std/zig/LibCInstallation.zig
+++ b/lib/std/zig/LibCInstallation.zig
@@ -1,4 +1,18 @@
//! See the render function implementation for documentation of the fields.
+const LibCInstallation = @This();
+
+const builtin = @import("builtin");
+const is_darwin = builtin.target.os.tag.isDarwin();
+const is_windows = builtin.target.os.tag == .windows;
+const is_haiku = builtin.target.os.tag == .haiku;
+
+const std = @import("std");
+const Io = std.Io;
+const Target = std.Target;
+const fs = std.fs;
+const Allocator = std.mem.Allocator;
+const Path = std.Build.Cache.Path;
+const log = std.log.scoped(.libc_installation);
include_dir: ?[]const u8 = null,
sys_include_dir: ?[]const u8 = null,
@@ -157,6 +171,7 @@ pub fn render(self: LibCInstallation, out: *std.Io.Writer) !void {
pub const FindNativeOptions = struct {
allocator: Allocator,
+ io: Io,
target: *const std.Target,
/// If enabled, will print human-friendly errors to stderr.
@@ -165,29 +180,32 @@ pub const FindNativeOptions = struct {
/// Finds the default, native libc.
pub fn findNative(args: FindNativeOptions) FindError!LibCInstallation {
+ const gpa = args.allocator;
+ const io = args.io;
+
var self: LibCInstallation = .{};
if (is_darwin and args.target.os.tag.isDarwin()) {
- if (!std.zig.system.darwin.isSdkInstalled(args.allocator))
+ if (!std.zig.system.darwin.isSdkInstalled(gpa))
return error.DarwinSdkNotFound;
- const sdk = std.zig.system.darwin.getSdk(args.allocator, args.target) orelse
+ const sdk = std.zig.system.darwin.getSdk(gpa, args.target) orelse
return error.DarwinSdkNotFound;
- defer args.allocator.free(sdk);
+ defer gpa.free(sdk);
- self.include_dir = try fs.path.join(args.allocator, &.{
+ self.include_dir = try fs.path.join(gpa, &.{
sdk, "usr/include",
});
- self.sys_include_dir = try fs.path.join(args.allocator, &.{
+ self.sys_include_dir = try fs.path.join(gpa, &.{
sdk, "usr/include",
});
return self;
} else if (is_windows) {
- const sdk = std.zig.WindowsSdk.find(args.allocator, args.target.cpu.arch) catch |err| switch (err) {
+ const sdk = std.zig.WindowsSdk.find(gpa, io, args.target.cpu.arch) catch |err| switch (err) {
error.NotFound => return error.WindowsSdkNotFound,
error.PathTooLong => return error.WindowsSdkNotFound,
error.OutOfMemory => return error.OutOfMemory,
};
- defer sdk.free(args.allocator);
+ defer sdk.free(gpa);
try self.findNativeMsvcIncludeDir(args, sdk);
try self.findNativeMsvcLibDir(args, sdk);
@@ -197,16 +215,16 @@ pub fn findNative(args: FindNativeOptions) FindError!LibCInstallation {
} else if (is_haiku) {
try self.findNativeIncludeDirPosix(args);
try self.findNativeGccDirHaiku(args);
- self.crt_dir = try args.allocator.dupeZ(u8, "/system/develop/lib");
+ self.crt_dir = try gpa.dupeZ(u8, "/system/develop/lib");
} else if (builtin.target.os.tag == .illumos) {
// There is only one libc, and its headers/libraries are always in the same spot.
- self.include_dir = try args.allocator.dupeZ(u8, "/usr/include");
- self.sys_include_dir = try args.allocator.dupeZ(u8, "/usr/include");
- self.crt_dir = try args.allocator.dupeZ(u8, "/usr/lib/64");
+ self.include_dir = try gpa.dupeZ(u8, "/usr/include");
+ self.sys_include_dir = try gpa.dupeZ(u8, "/usr/include");
+ self.crt_dir = try gpa.dupeZ(u8, "/usr/lib/64");
} else if (std.process.can_spawn) {
try self.findNativeIncludeDirPosix(args);
switch (builtin.target.os.tag) {
- .freebsd, .netbsd, .openbsd, .dragonfly => self.crt_dir = try args.allocator.dupeZ(u8, "/usr/lib"),
+ .freebsd, .netbsd, .openbsd, .dragonfly => self.crt_dir = try gpa.dupeZ(u8, "/usr/lib"),
.linux => try self.findNativeCrtDirPosix(args),
else => {},
}
@@ -229,6 +247,7 @@ pub fn deinit(self: *LibCInstallation, allocator: Allocator) void {
fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) FindError!void {
const allocator = args.allocator;
+ const io = args.io;
// Detect infinite loops.
var env_map = std.process.getEnvMap(allocator) catch |err| switch (err) {
@@ -326,7 +345,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
else => return error.FileSystem,
};
- defer search_dir.close();
+ defer search_dir.close(io);
if (self.include_dir == null) {
if (search_dir.access(include_dir_example_file, .{})) |_| {
@@ -361,6 +380,7 @@ fn findNativeIncludeDirWindows(
sdk: std.zig.WindowsSdk,
) FindError!void {
const allocator = args.allocator;
+ const io = args.io;
var install_buf: [2]std.zig.WindowsSdk.Installation = undefined;
const installs = fillInstallations(&install_buf, sdk);
@@ -380,7 +400,7 @@ fn findNativeIncludeDirWindows(
else => return error.FileSystem,
};
- defer dir.close();
+ defer dir.close(io);
dir.access("stdlib.h", .{}) catch |err| switch (err) {
error.FileNotFound => continue,
@@ -400,6 +420,7 @@ fn findNativeCrtDirWindows(
sdk: std.zig.WindowsSdk,
) FindError!void {
const allocator = args.allocator;
+ const io = args.io;
var install_buf: [2]std.zig.WindowsSdk.Installation = undefined;
const installs = fillInstallations(&install_buf, sdk);
@@ -427,7 +448,7 @@ fn findNativeCrtDirWindows(
else => return error.FileSystem,
};
- defer dir.close();
+ defer dir.close(io);
dir.access("ucrt.lib", .{}) catch |err| switch (err) {
error.FileNotFound => continue,
@@ -467,6 +488,7 @@ fn findNativeKernel32LibDir(
sdk: std.zig.WindowsSdk,
) FindError!void {
const allocator = args.allocator;
+ const io = args.io;
var install_buf: [2]std.zig.WindowsSdk.Installation = undefined;
const installs = fillInstallations(&install_buf, sdk);
@@ -494,7 +516,7 @@ fn findNativeKernel32LibDir(
else => return error.FileSystem,
};
- defer dir.close();
+ defer dir.close(io);
dir.access("kernel32.lib", .{}) catch |err| switch (err) {
error.FileNotFound => continue,
@@ -513,6 +535,7 @@ fn findNativeMsvcIncludeDir(
sdk: std.zig.WindowsSdk,
) FindError!void {
const allocator = args.allocator;
+ const io = args.io;
const msvc_lib_dir = sdk.msvc_lib_dir orelse return error.LibCStdLibHeaderNotFound;
const up1 = fs.path.dirname(msvc_lib_dir) orelse return error.LibCStdLibHeaderNotFound;
@@ -529,7 +552,7 @@ fn findNativeMsvcIncludeDir(
else => return error.FileSystem,
};
- defer dir.close();
+ defer dir.close(io);
dir.access("vcruntime.h", .{}) catch |err| switch (err) {
error.FileNotFound => return error.LibCStdLibHeaderNotFound,
@@ -1015,17 +1038,3 @@ pub fn resolveCrtPaths(
},
}
}
-
-const LibCInstallation = @This();
-const std = @import("std");
-const builtin = @import("builtin");
-const Target = std.Target;
-const fs = std.fs;
-const Allocator = std.mem.Allocator;
-const Path = std.Build.Cache.Path;
-
-const is_darwin = builtin.target.os.tag.isDarwin();
-const is_windows = builtin.target.os.tag == .windows;
-const is_haiku = builtin.target.os.tag == .haiku;
-
-const log = std.log.scoped(.libc_installation);
diff --git a/lib/std/zig/WindowsSdk.zig b/lib/std/zig/WindowsSdk.zig
index 89d608633c..a7b16e1bed 100644
--- a/lib/std/zig/WindowsSdk.zig
+++ b/lib/std/zig/WindowsSdk.zig
@@ -1,7 +1,10 @@
const WindowsSdk = @This();
const builtin = @import("builtin");
+
const std = @import("std");
+const Io = std.Io;
const Writer = std.Io.Writer;
+const Allocator = std.mem.Allocator;
windows10sdk: ?Installation,
windows81sdk: ?Installation,
@@ -20,7 +23,7 @@ const product_version_max_length = version_major_minor_max_length + ".65535".len
/// Find path and version of Windows 10 SDK and Windows 8.1 SDK, and find path to MSVC's `lib/` directory.
/// Caller owns the result's fields.
/// After finishing work, call `free(allocator)`.
-pub fn find(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, NotFound, PathTooLong }!WindowsSdk {
+pub fn find(allocator: Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, NotFound, PathTooLong }!WindowsSdk {
if (builtin.os.tag != .windows) return error.NotFound;
//note(dimenus): If this key doesn't exist, neither the Win 8 SDK nor the Win 10 SDK is installed
@@ -58,7 +61,7 @@ pub fn find(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutO
};
}
-pub fn free(sdk: WindowsSdk, allocator: std.mem.Allocator) void {
+pub fn free(sdk: WindowsSdk, allocator: Allocator) void {
if (sdk.windows10sdk) |*w10sdk| {
w10sdk.free(allocator);
}
@@ -75,7 +78,7 @@ pub fn free(sdk: WindowsSdk, allocator: std.mem.Allocator) void {
/// Caller owns result.
fn iterateAndFilterByVersion(
iterator: *std.fs.Dir.Iterator,
- allocator: std.mem.Allocator,
+ allocator: Allocator,
prefix: []const u8,
) error{OutOfMemory}![][]const u8 {
const Version = struct {
@@ -174,7 +177,7 @@ const RegistryWtf8 = struct {
/// Get string from registry.
/// Caller owns result.
- pub fn getString(reg: RegistryWtf8, allocator: std.mem.Allocator, subkey: []const u8, value_name: []const u8) error{ OutOfMemory, ValueNameNotFound, NotAString, StringNotFound }![]u8 {
+ pub fn getString(reg: RegistryWtf8, allocator: Allocator, subkey: []const u8, value_name: []const u8) error{ OutOfMemory, ValueNameNotFound, NotAString, StringNotFound }![]u8 {
const subkey_wtf16le: [:0]const u16 = subkey_wtf16le: {
var subkey_wtf16le_buf: [RegistryWtf16Le.key_name_max_len]u16 = undefined;
const subkey_wtf16le_len: usize = std.unicode.wtf8ToWtf16Le(subkey_wtf16le_buf[0..], subkey) catch unreachable;
@@ -282,7 +285,7 @@ const RegistryWtf16Le = struct {
}
/// Get string ([:0]const u16) from registry.
- fn getString(reg: RegistryWtf16Le, allocator: std.mem.Allocator, subkey_wtf16le: [:0]const u16, value_name_wtf16le: [:0]const u16) error{ OutOfMemory, ValueNameNotFound, NotAString, StringNotFound }![]const u16 {
+ fn getString(reg: RegistryWtf16Le, allocator: Allocator, subkey_wtf16le: [:0]const u16, value_name_wtf16le: [:0]const u16) error{ OutOfMemory, ValueNameNotFound, NotAString, StringNotFound }![]const u16 {
var actual_type: windows.ULONG = undefined;
// Calculating length to allocate
@@ -416,7 +419,7 @@ pub const Installation = struct {
/// Caller owns the result's fields.
/// After finishing work, call `free(allocator)`.
fn find(
- allocator: std.mem.Allocator,
+ allocator: Allocator,
roots_key: RegistryWtf8,
roots_subkey: []const u8,
prefix: []const u8,
@@ -437,7 +440,8 @@ pub const Installation = struct {
}
fn findFromRoot(
- allocator: std.mem.Allocator,
+ allocator: Allocator,
+ io: Io,
roots_key: RegistryWtf8,
roots_subkey: []const u8,
prefix: []const u8,
@@ -478,7 +482,7 @@ pub const Installation = struct {
error.NameTooLong => return error.PathTooLong,
else => return error.InstallationNotFound,
};
- defer sdk_lib_dir.close();
+ defer sdk_lib_dir.close(io);
var iterator = sdk_lib_dir.iterate();
const versions = try iterateAndFilterByVersion(&iterator, allocator, prefix);
@@ -495,7 +499,7 @@ pub const Installation = struct {
}
fn findFromInstallationFolder(
- allocator: std.mem.Allocator,
+ allocator: Allocator,
version_key_name: []const u8,
) error{ OutOfMemory, InstallationNotFound, PathTooLong, VersionTooLong }!Installation {
var key_name_buf: [RegistryWtf16Le.key_name_max_len]u8 = undefined;
@@ -597,14 +601,14 @@ pub const Installation = struct {
return (reg_value == 1);
}
- fn free(install: Installation, allocator: std.mem.Allocator) void {
+ fn free(install: Installation, allocator: Allocator) void {
allocator.free(install.path);
allocator.free(install.version);
}
};
const MsvcLibDir = struct {
- fn findInstancesDirViaSetup(allocator: std.mem.Allocator) error{ OutOfMemory, PathNotFound }!std.fs.Dir {
+ fn findInstancesDirViaSetup(allocator: Allocator) error{ OutOfMemory, PathNotFound }!std.fs.Dir {
const vs_setup_key_path = "SOFTWARE\\Microsoft\\VisualStudio\\Setup";
const vs_setup_key = RegistryWtf8.openKey(windows.HKEY_LOCAL_MACHINE, vs_setup_key_path, .{}) catch |err| switch (err) {
error.KeyNotFound => return error.PathNotFound,
@@ -629,7 +633,7 @@ const MsvcLibDir = struct {
return std.fs.openDirAbsolute(instances_path, .{ .iterate = true }) catch return error.PathNotFound;
}
- fn findInstancesDirViaCLSID(allocator: std.mem.Allocator) error{ OutOfMemory, PathNotFound }!std.fs.Dir {
+ fn findInstancesDirViaCLSID(allocator: Allocator) error{ OutOfMemory, PathNotFound }!std.fs.Dir {
const setup_configuration_clsid = "{177f0c4a-1cd3-4de7-a32c-71dbbb9fa36d}";
const setup_config_key = RegistryWtf8.openKey(windows.HKEY_CLASSES_ROOT, "CLSID\\" ++ setup_configuration_clsid, .{}) catch |err| switch (err) {
error.KeyNotFound => return error.PathNotFound,
@@ -665,7 +669,7 @@ const MsvcLibDir = struct {
return std.fs.openDirAbsolute(instances_path, .{ .iterate = true }) catch return error.PathNotFound;
}
- fn findInstancesDir(allocator: std.mem.Allocator) error{ OutOfMemory, PathNotFound }!std.fs.Dir {
+ fn findInstancesDir(allocator: Allocator) error{ OutOfMemory, PathNotFound }!std.fs.Dir {
// First, try getting the packages cache path from the registry.
// This only seems to exist when the path is different from the default.
method1: {
@@ -748,13 +752,13 @@ const MsvcLibDir = struct {
///
/// The logic in this function is intended to match what ISetupConfiguration does
/// under-the-hood, as verified using Procmon.
- fn findViaCOM(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
+ fn findViaCOM(allocator: Allocator, io: Io, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
// Typically `%PROGRAMDATA%\Microsoft\VisualStudio\Packages\_Instances`
// This will contain directories with names of instance IDs like 80a758ca,
// which will contain `state.json` files that have the version and
// installation directory.
var instances_dir = try findInstancesDir(allocator);
- defer instances_dir.close();
+ defer instances_dir.close(io);
var state_subpath_buf: [std.fs.max_name_bytes + 32]u8 = undefined;
var latest_version_lib_dir: std.ArrayList(u8) = .empty;
@@ -791,7 +795,7 @@ const MsvcLibDir = struct {
const installation_path = parsed.value.object.get("installationPath") orelse continue;
if (installation_path != .string) continue;
- const lib_dir_path = libDirFromInstallationPath(allocator, installation_path.string, arch) catch |err| switch (err) {
+ const lib_dir_path = libDirFromInstallationPath(allocator, io, installation_path.string, arch) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.PathNotFound => continue,
};
@@ -806,7 +810,12 @@ const MsvcLibDir = struct {
return latest_version_lib_dir.toOwnedSlice(allocator);
}
- fn libDirFromInstallationPath(allocator: std.mem.Allocator, installation_path: []const u8, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
+ fn libDirFromInstallationPath(
+ allocator: Allocator,
+ io: Io,
+ installation_path: []const u8,
+ arch: std.Target.Cpu.Arch,
+ ) error{ OutOfMemory, PathNotFound }![]const u8 {
var lib_dir_buf = try std.array_list.Managed(u8).initCapacity(allocator, installation_path.len + 64);
errdefer lib_dir_buf.deinit();
@@ -837,7 +846,7 @@ const MsvcLibDir = struct {
else => unreachable,
});
- if (!verifyLibDir(lib_dir_buf.items)) {
+ if (!verifyLibDir(io, lib_dir_buf.items)) {
return error.PathNotFound;
}
@@ -845,7 +854,7 @@ const MsvcLibDir = struct {
}
// https://learn.microsoft.com/en-us/visualstudio/install/tools-for-managing-visual-studio-instances?view=vs-2022#editing-the-registry-for-a-visual-studio-instance
- fn findViaRegistry(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
+ fn findViaRegistry(allocator: Allocator, io: Io, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
// %localappdata%\Microsoft\VisualStudio\
// %appdata%\Local\Microsoft\VisualStudio\
@@ -859,7 +868,7 @@ const MsvcLibDir = struct {
var visualstudio_folder = std.fs.openDirAbsolute(visualstudio_folder_path, .{
.iterate = true,
}) catch return error.PathNotFound;
- defer visualstudio_folder.close();
+ defer visualstudio_folder.close(io);
var iterator = visualstudio_folder.iterate();
break :vs_versions try iterateAndFilterByVersion(&iterator, allocator, "");
@@ -926,14 +935,14 @@ const MsvcLibDir = struct {
};
errdefer allocator.free(msvc_dir);
- if (!verifyLibDir(msvc_dir)) {
+ if (!verifyLibDir(io, msvc_dir)) {
return error.PathNotFound;
}
return msvc_dir;
}
- fn findViaVs7Key(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
+ fn findViaVs7Key(allocator: Allocator, io: Io, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
var base_path: std.array_list.Managed(u8) = base_path: {
try_env: {
var env_map = std.process.getEnvMap(allocator) catch |err| switch (err) {
@@ -989,7 +998,7 @@ const MsvcLibDir = struct {
else => unreachable,
});
- if (!verifyLibDir(base_path.items)) {
+ if (!verifyLibDir(io, base_path.items)) {
return error.PathNotFound;
}
@@ -997,11 +1006,11 @@ const MsvcLibDir = struct {
return full_path;
}
- fn verifyLibDir(lib_dir_path: []const u8) bool {
+ fn verifyLibDir(io: Io, lib_dir_path: []const u8) bool {
std.debug.assert(std.fs.path.isAbsolute(lib_dir_path)); // should be already handled in `findVia*`
var dir = std.fs.openDirAbsolute(lib_dir_path, .{}) catch return false;
- defer dir.close();
+ defer dir.close(io);
const stat = dir.statFile("vcruntime.lib") catch return false;
if (stat.kind != .file)
@@ -1012,12 +1021,12 @@ const MsvcLibDir = struct {
/// Find path to MSVC's `lib/` directory.
/// Caller owns the result.
- pub fn find(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, MsvcLibDirNotFound }![]const u8 {
- const full_path = MsvcLibDir.findViaCOM(allocator, arch) catch |err1| switch (err1) {
+ pub fn find(allocator: Allocator, io: Io, arch: std.Target.Cpu.Arch) error{ OutOfMemory, MsvcLibDirNotFound }![]const u8 {
+ const full_path = MsvcLibDir.findViaCOM(allocator, io, arch) catch |err1| switch (err1) {
error.OutOfMemory => return error.OutOfMemory,
- error.PathNotFound => MsvcLibDir.findViaRegistry(allocator, arch) catch |err2| switch (err2) {
+ error.PathNotFound => MsvcLibDir.findViaRegistry(allocator, io, arch) catch |err2| switch (err2) {
error.OutOfMemory => return error.OutOfMemory,
- error.PathNotFound => MsvcLibDir.findViaVs7Key(allocator, arch) catch |err3| switch (err3) {
+ error.PathNotFound => MsvcLibDir.findViaVs7Key(allocator, io, arch) catch |err3| switch (err3) {
error.OutOfMemory => return error.OutOfMemory,
error.PathNotFound => return error.MsvcLibDirNotFound,
},
diff --git a/lib/std/zig/llvm/Builder.zig b/lib/std/zig/llvm/Builder.zig
index 587ac82c70..9a52ae2c81 100644
--- a/lib/std/zig/llvm/Builder.zig
+++ b/lib/std/zig/llvm/Builder.zig
@@ -1,14 +1,17 @@
+const builtin = @import("builtin");
+const Builder = @This();
+
const std = @import("../../std.zig");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
-const bitcode_writer = @import("bitcode_writer.zig");
-const Builder = @This();
-const builtin = @import("builtin");
const DW = std.dwarf;
-const ir = @import("ir.zig");
const log = std.log.scoped(.llvm);
const Writer = std.Io.Writer;
+const bitcode_writer = @import("bitcode_writer.zig");
+const ir = @import("ir.zig");
+
gpa: Allocator,
strip: bool,
@@ -9579,11 +9582,11 @@ pub fn dump(b: *Builder) void {
b.printToFile(stderr, &buffer) catch {};
}
-pub fn printToFilePath(b: *Builder, dir: std.fs.Dir, path: []const u8) !void {
+pub fn printToFilePath(b: *Builder, io: Io, dir: std.fs.Dir, path: []const u8) !void {
var buffer: [4000]u8 = undefined;
- const file = try dir.createFile(path, .{});
- defer file.close();
- try b.printToFile(file, &buffer);
+ const file = try dir.createFile(io, path, .{});
+ defer file.close(io);
+ try b.printToFile(io, file, &buffer);
}
pub fn printToFile(b: *Builder, file: std.fs.File, buffer: []u8) !void {
diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig
index 290cd8d388..4f0c11797d 100644
--- a/lib/std/zig/system.zig
+++ b/lib/std/zig/system.zig
@@ -847,7 +847,7 @@ fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion {
error.Unexpected => |e| return e,
error.Canceled => |e| return e,
};
- defer file.close();
+ defer file.close(io);
// Empirically, glibc 2.34 libc.so .dynstr section is 32441 bytes on my system.
var buffer: [8000]u8 = undefined;
@@ -1051,7 +1051,7 @@ fn detectAbiAndDynamicLinker(io: Io, cpu: Target.Cpu, os: Target.Os, query: Targ
else => |e| return e,
};
var is_elf_file = false;
- defer if (!is_elf_file) file.close();
+ defer if (!is_elf_file) file.close(io);
file_reader = .initAdapted(file, io, &file_reader_buffer);
file_name = undefined; // it aliases file_reader_buffer
diff --git a/lib/std/zig/system/linux.zig b/lib/std/zig/system/linux.zig
index 6b4f0cf6f9..668e5a1d99 100644
--- a/lib/std/zig/system/linux.zig
+++ b/lib/std/zig/system/linux.zig
@@ -447,7 +447,7 @@ pub fn detectNativeCpuAndFeatures(io: Io) ?Target.Cpu {
var file = fs.openFileAbsolute("/proc/cpuinfo", .{}) catch |err| switch (err) {
else => return null,
};
- defer file.close();
+ defer file.close(io);
var buffer: [4096]u8 = undefined; // "flags" lines can get pretty long.
var file_reader = file.reader(io, &buffer);
diff --git a/lib/std/zip.zig b/lib/std/zip.zig
index 583377e00a..c2dbaf5b81 100644
--- a/lib/std/zip.zig
+++ b/lib/std/zip.zig
@@ -554,17 +554,19 @@ pub const Iterator = struct {
return;
}
+ const io = stream.io;
+
const out_file = blk: {
if (std.fs.path.dirname(filename)) |dirname| {
var parent_dir = try dest.makeOpenPath(dirname, .{});
- defer parent_dir.close();
+ defer parent_dir.close(io);
const basename = std.fs.path.basename(filename);
break :blk try parent_dir.createFile(basename, .{ .exclusive = true });
}
break :blk try dest.createFile(filename, .{ .exclusive = true });
};
- defer out_file.close();
+ defer out_file.close(io);
var out_file_buffer: [1024]u8 = undefined;
var file_writer = out_file.writer(&out_file_buffer);
const local_data_file_offset: u64 =
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 931a0b2d14..df64dee19f 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -721,13 +721,13 @@ pub const Directories = struct {
/// This may be the same as `global_cache`.
local_cache: Cache.Directory,
- pub fn deinit(dirs: *Directories) void {
+ pub fn deinit(dirs: *Directories, io: Io) void {
// The local and global caches could be the same.
const close_local = dirs.local_cache.handle.fd != dirs.global_cache.handle.fd;
- dirs.global_cache.handle.close();
- if (close_local) dirs.local_cache.handle.close();
- dirs.zig_lib.handle.close();
+ dirs.global_cache.handle.close(io);
+ if (close_local) dirs.local_cache.handle.close(io);
+ dirs.zig_lib.handle.close(io);
}
/// Returns a `Directories` where `local_cache` is replaced with `global_cache`, intended for
@@ -1105,7 +1105,7 @@ pub const CObject = struct {
if (diag.src_loc.offset == 0 or diag.src_loc.column == 0) break :source_line 0;
const file = fs.cwd().openFile(file_name, .{}) catch break :source_line 0;
- defer file.close();
+ defer file.close(io);
var buffer: [1024]u8 = undefined;
var file_reader = file.reader(io, &buffer);
file_reader.seekTo(diag.src_loc.offset + 1 - diag.src_loc.column) catch break :source_line 0;
@@ -1180,7 +1180,7 @@ pub const CObject = struct {
var buffer: [1024]u8 = undefined;
const file = try fs.cwd().openFile(path, .{});
- defer file.close();
+ defer file.close(io);
var file_reader = file.reader(io, &buffer);
var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = &file_reader.interface });
defer bc.deinit();
@@ -1617,13 +1617,13 @@ const CacheUse = union(CacheMode) {
}
};
- fn deinit(cu: CacheUse) void {
+ fn deinit(cu: CacheUse, io: Io) void {
switch (cu) {
.none => |none| {
assert(none.tmp_artifact_directory == null);
},
.incremental => |incremental| {
- incremental.artifact_directory.handle.close();
+ incremental.artifact_directory.handle.close(io);
},
.whole => |whole| {
assert(whole.tmp_artifact_directory == null);
@@ -2113,7 +2113,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
cache.addPrefix(options.dirs.zig_lib);
cache.addPrefix(options.dirs.local_cache);
cache.addPrefix(options.dirs.global_cache);
- errdefer cache.manifest_dir.close();
+ errdefer cache.manifest_dir.close(io);
// This is shared hasher state common to zig source and all C source files.
cache.hash.addBytes(build_options.version);
@@ -2157,7 +2157,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
var local_zir_dir = options.dirs.local_cache.handle.makeOpenPath(zir_sub_dir, .{}) catch |err| {
return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = zir_sub_dir, .err = err } });
};
- errdefer local_zir_dir.close();
+ errdefer local_zir_dir.close(io);
const local_zir_cache: Cache.Directory = .{
.handle = local_zir_dir,
.path = try options.dirs.local_cache.join(arena, &.{zir_sub_dir}),
@@ -2165,7 +2165,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
var global_zir_dir = options.dirs.global_cache.handle.makeOpenPath(zir_sub_dir, .{}) catch |err| {
return diag.fail(.{ .create_cache_path = .{ .which = .global, .sub = zir_sub_dir, .err = err } });
};
- errdefer global_zir_dir.close();
+ errdefer global_zir_dir.close(io);
const global_zir_cache: Cache.Directory = .{
.handle = global_zir_dir,
.path = try options.dirs.global_cache.join(arena, &.{zir_sub_dir}),
@@ -2436,7 +2436,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
var artifact_dir = options.dirs.local_cache.handle.makeOpenPath(artifact_sub_dir, .{}) catch |err| {
return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = artifact_sub_dir, .err = err } });
};
- errdefer artifact_dir.close();
+ errdefer artifact_dir.close(io);
const artifact_directory: Cache.Directory = .{
.handle = artifact_dir,
.path = try options.dirs.local_cache.join(arena, &.{artifact_sub_dir}),
@@ -2689,6 +2689,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
pub fn destroy(comp: *Compilation) void {
const gpa = comp.gpa;
+ const io = comp.io;
if (comp.bin_file) |lf| lf.destroy();
if (comp.zcu) |zcu| zcu.deinit();
@@ -2760,7 +2761,7 @@ pub fn destroy(comp: *Compilation) void {
comp.clearMiscFailures();
- comp.cache_parent.manifest_dir.close();
+ comp.cache_parent.manifest_dir.close(io);
}
pub fn clearMiscFailures(comp: *Compilation) void {
@@ -2791,10 +2792,12 @@ pub fn hotCodeSwap(
}
fn cleanupAfterUpdate(comp: *Compilation, tmp_dir_rand_int: u64) void {
+ const io = comp.io;
+
switch (comp.cache_use) {
.none => |none| {
if (none.tmp_artifact_directory) |*tmp_dir| {
- tmp_dir.handle.close();
+ tmp_dir.handle.close(io);
none.tmp_artifact_directory = null;
if (dev.env == .bootstrap) {
// zig1 uses `CacheMode.none`, but it doesn't need to know how to delete
@@ -2834,7 +2837,7 @@ fn cleanupAfterUpdate(comp: *Compilation, tmp_dir_rand_int: u64) void {
comp.bin_file = null;
}
if (whole.tmp_artifact_directory) |*tmp_dir| {
- tmp_dir.handle.close();
+ tmp_dir.handle.close(io);
whole.tmp_artifact_directory = null;
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int);
comp.dirs.local_cache.handle.deleteTree(tmp_dir_sub_path) catch |err| {
@@ -3152,7 +3155,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
// the file handle and re-open it in the follow up call to
// `makeWritable`.
if (lf.file) |f| {
- f.close();
+ f.close(io);
lf.file = null;
if (lf.closeDebugInfo()) break :w .lf_and_debug;
@@ -3165,7 +3168,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
// Rename the temporary directory into place.
// Close tmp dir and link.File to avoid open handle during rename.
- whole.tmp_artifact_directory.?.handle.close();
+ whole.tmp_artifact_directory.?.handle.close(io);
whole.tmp_artifact_directory = null;
const s = fs.path.sep_str;
const tmp_dir_sub_path = "tmp" ++ s ++ std.fmt.hex(tmp_dir_rand_int);
@@ -5258,6 +5261,7 @@ fn workerDocsCopy(comp: *Compilation) void {
fn docsCopyFallible(comp: *Compilation) anyerror!void {
const zcu = comp.zcu orelse return comp.lockAndSetMiscFailure(.docs_copy, "no Zig code to document", .{});
+ const io = comp.io;
const docs_path = comp.resolveEmitPath(comp.emit_docs.?);
var out_dir = docs_path.root_dir.handle.makeOpenPath(docs_path.sub_path, .{}) catch |err| {
@@ -5267,7 +5271,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
.{ docs_path, @errorName(err) },
);
};
- defer out_dir.close();
+ defer out_dir.close(io);
for (&[_][]const u8{ "docs/main.js", "docs/index.html" }) |sub_path| {
const basename = fs.path.basename(sub_path);
@@ -5287,7 +5291,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
.{ docs_path, @errorName(err) },
);
};
- defer tar_file.close();
+ defer tar_file.close(io);
var buffer: [1024]u8 = undefined;
var tar_file_writer = tar_file.writer(&buffer);
@@ -5331,7 +5335,7 @@ fn docsCopyModule(
} catch |err| {
return comp.lockAndSetMiscFailure(.docs_copy, "unable to open directory '{f}': {t}", .{ root.fmt(comp), err });
};
- defer mod_dir.close();
+ defer mod_dir.close(io);
var walker = try mod_dir.walk(comp.gpa);
defer walker.deinit();
@@ -5355,7 +5359,7 @@ fn docsCopyModule(
root.fmt(comp), entry.path, err,
});
};
- defer file.close();
+ defer file.close(io);
const stat = try file.stat();
var file_reader: fs.File.Reader = .initSize(file.adaptToNewApi(), io, &buffer, stat.size);
@@ -5510,7 +5514,7 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
);
return error.AlreadyReported;
};
- defer out_dir.close();
+ defer out_dir.close(io);
crt_file.full_object_path.root_dir.handle.copyFile(
crt_file.full_object_path.sub_path,
@@ -5693,7 +5697,7 @@ pub fn translateC(
const tmp_sub_path = "tmp" ++ fs.path.sep_str ++ tmp_basename;
const cache_dir = comp.dirs.local_cache.handle;
var cache_tmp_dir = try cache_dir.makeOpenPath(tmp_sub_path, .{});
- defer cache_tmp_dir.close();
+ defer cache_tmp_dir.close(io);
const translated_path = try comp.dirs.local_cache.join(arena, &.{ tmp_sub_path, translated_basename });
const source_path = switch (source) {
@@ -6268,7 +6272,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
// so we need a temporary filename.
const out_obj_path = try comp.tmpFilePath(arena, o_basename);
var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{});
- defer zig_cache_tmp_dir.close();
+ defer zig_cache_tmp_dir.close(io);
const out_diag_path = if (comp.clang_passthrough_mode or !ext.clangSupportsDiagnostics())
null
@@ -6433,7 +6437,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
const digest = man.final();
const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest });
var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
- defer o_dir.close();
+ defer o_dir.close(io);
const tmp_basename = fs.path.basename(out_obj_path);
try fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename);
break :blk digest;
@@ -6477,8 +6481,6 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const tracy_trace = trace(@src());
defer tracy_trace.end();
- const io = comp.io;
-
const src_path = switch (win32_resource.src) {
.rc => |rc_src| rc_src.src_path,
.manifest => |src_path| src_path,
@@ -6487,6 +6489,8 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
log.debug("updating win32 resource: {s}", .{src_path});
+ const io = comp.io;
+
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
@@ -6522,7 +6526,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const o_sub_path = try fs.path.join(arena, &.{ "o", &digest });
var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
- defer o_dir.close();
+ defer o_dir.close(io);
const in_rc_path = try comp.dirs.local_cache.join(comp.gpa, &.{
o_sub_path, rc_basename,
@@ -6610,7 +6614,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const digest = if (try man.hit()) man.final() else blk: {
var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{});
- defer zig_cache_tmp_dir.close();
+ defer zig_cache_tmp_dir.close(io);
const res_filename = try std.fmt.allocPrint(arena, "{s}.res", .{rc_basename_noext});
@@ -6681,7 +6685,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const digest = man.final();
const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest });
var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
- defer o_dir.close();
+ defer o_dir.close(io);
const tmp_basename = fs.path.basename(out_res_path);
try fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename);
break :blk digest;
diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig
index 9b7c83cc39..58f970abe5 100644
--- a/src/Package/Fetch.zig
+++ b/src/Package/Fetch.zig
@@ -513,7 +513,7 @@ fn runResource(
break :handle dir;
},
};
- defer tmp_directory.handle.close();
+ defer tmp_directory.handle.close(io);
// Fetch and unpack a resource into a temporary directory.
var unpack_result = try unpackResource(f, resource, uri_path, tmp_directory);
@@ -523,7 +523,7 @@ fn runResource(
// Apply btrfs workaround if needed. Reopen tmp_directory.
if (native_os == .linux and f.job_queue.work_around_btrfs_bug) {
// https://github.com/ziglang/zig/issues/17095
- pkg_path.root_dir.handle.close();
+ pkg_path.root_dir.handle.close(io);
pkg_path.root_dir.handle = cache_root.handle.makeOpenPath(tmp_dir_sub_path, .{
.iterate = true,
}) catch @panic("btrfs workaround failed");
@@ -885,7 +885,7 @@ const Resource = union(enum) {
file: fs.File.Reader,
http_request: HttpRequest,
git: Git,
- dir: fs.Dir,
+ dir: Io.Dir,
const Git = struct {
session: git.Session,
@@ -908,7 +908,7 @@ const Resource = union(enum) {
.git => |*git_resource| {
git_resource.fetch_stream.deinit();
},
- .dir => |*dir| dir.close(),
+ .dir => |*dir| dir.close(io),
}
resource.* = undefined;
}
@@ -1247,13 +1247,14 @@ fn unpackResource(
}
}
-fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *Io.Reader) RunError!UnpackResult {
+fn unpackTarball(f: *Fetch, out_dir: Io.Dir, reader: *Io.Reader) RunError!UnpackResult {
const eb = &f.error_bundle;
const arena = f.arena.allocator();
+ const io = f.job_queue.io;
var diagnostics: std.tar.Diagnostics = .{ .allocator = arena };
- std.tar.pipeToFileSystem(out_dir, reader, .{
+ std.tar.pipeToFileSystem(io, out_dir, reader, .{
.diagnostics = &diagnostics,
.strip_components = 0,
.mode_mode = .ignore,
@@ -1280,7 +1281,7 @@ fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *Io.Reader) RunError!Unpack
fn unzip(
f: *Fetch,
- out_dir: fs.Dir,
+ out_dir: Io.Dir,
reader: *Io.Reader,
) error{ ReadFailed, OutOfMemory, Canceled, FetchFailed }!UnpackResult {
// We write the entire contents to a file first because zip files
@@ -1314,7 +1315,7 @@ fn unzip(
),
};
};
- defer zip_file.close();
+ defer zip_file.close(io);
var zip_file_buffer: [4096]u8 = undefined;
var zip_file_reader = b: {
var zip_file_writer = zip_file.writer(&zip_file_buffer);
@@ -1349,7 +1350,7 @@ fn unzip(
return .{ .root_dir = diagnostics.root_dir };
}
-fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!UnpackResult {
+fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!UnpackResult {
const io = f.job_queue.io;
const arena = f.arena.allocator();
// TODO don't try to get a gpa from an arena. expose this dependency higher up
@@ -1363,9 +1364,9 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
// directory, since that isn't relevant for fetching a package.
{
var pack_dir = try out_dir.makeOpenPath(".git", .{});
- defer pack_dir.close();
+ defer pack_dir.close(io);
var pack_file = try pack_dir.createFile("pkg.pack", .{ .read = true });
- defer pack_file.close();
+ defer pack_file.close(io);
var pack_file_buffer: [4096]u8 = undefined;
var pack_file_reader = b: {
var pack_file_writer = pack_file.writer(&pack_file_buffer);
@@ -1376,7 +1377,7 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
};
var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true });
- defer index_file.close();
+ defer index_file.close(io);
var index_file_buffer: [2000]u8 = undefined;
var index_file_writer = index_file.writer(&index_file_buffer);
{
@@ -1393,7 +1394,7 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
try repository.init(gpa, object_format, &pack_file_reader, &index_file_reader);
defer repository.deinit();
var diagnostics: git.Diagnostics = .{ .allocator = arena };
- try repository.checkout(out_dir, resource.want_oid, &diagnostics);
+ try repository.checkout(io, out_dir, resource.want_oid, &diagnostics);
if (diagnostics.errors.items.len > 0) {
try res.allocErrors(arena, diagnostics.errors.items.len, "unable to unpack packfile");
@@ -1411,7 +1412,7 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
return res;
}
-fn recursiveDirectoryCopy(f: *Fetch, dir: fs.Dir, tmp_dir: fs.Dir) anyerror!void {
+fn recursiveDirectoryCopy(f: *Fetch, dir: Io.Dir, tmp_dir: Io.Dir) anyerror!void {
const gpa = f.arena.child_allocator;
// Recursive directory copy.
var it = try dir.walk(gpa);
@@ -1451,7 +1452,7 @@ fn recursiveDirectoryCopy(f: *Fetch, dir: fs.Dir, tmp_dir: fs.Dir) anyerror!void
}
}
-pub fn renameTmpIntoCache(cache_dir: fs.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void {
+pub fn renameTmpIntoCache(cache_dir: Io.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void {
assert(dest_dir_sub_path[1] == fs.path.sep);
var handled_missing_dir = false;
while (true) {
@@ -1660,15 +1661,15 @@ fn dumpHashInfo(all_files: []const *const HashedFile) !void {
try w.flush();
}
-fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile) void {
+fn workerHashFile(dir: Io.Dir, hashed_file: *HashedFile) void {
hashed_file.failure = hashFileFallible(dir, hashed_file);
}
-fn workerDeleteFile(dir: fs.Dir, deleted_file: *DeletedFile) void {
+fn workerDeleteFile(dir: Io.Dir, deleted_file: *DeletedFile) void {
deleted_file.failure = deleteFileFallible(dir, deleted_file);
}
-fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
+fn hashFileFallible(io: Io, dir: Io.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
var buf: [8000]u8 = undefined;
var hasher = Package.Hash.Algo.init(.{});
hasher.update(hashed_file.normalized_path);
@@ -1677,7 +1678,7 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
switch (hashed_file.kind) {
.file => {
var file = try dir.openFile(hashed_file.fs_path, .{});
- defer file.close();
+ defer file.close(io);
// Hard-coded false executable bit: https://github.com/ziglang/zig/issues/17463
hasher.update(&.{ 0, 0 });
var file_header: FileHeader = .{};
@@ -1707,7 +1708,7 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
hashed_file.size = file_size;
}
-fn deleteFileFallible(dir: fs.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void {
+fn deleteFileFallible(dir: Io.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void {
try dir.deleteFile(deleted_file.fs_path);
}
@@ -1724,8 +1725,8 @@ const DeletedFile = struct {
failure: Error!void,
const Error =
- fs.Dir.DeleteFileError ||
- fs.Dir.DeleteDirError;
+ Io.Dir.DeleteFileError ||
+ Io.Dir.DeleteDirError;
};
const HashedFile = struct {
@@ -1741,7 +1742,7 @@ const HashedFile = struct {
fs.File.ReadError ||
fs.File.StatError ||
fs.File.ChmodError ||
- fs.Dir.ReadLinkError;
+ Io.Dir.ReadLinkError;
const Kind = enum { file, link };
@@ -2074,7 +2075,7 @@ test "tarball with duplicate paths" {
defer tmp.cleanup();
const tarball_name = "duplicate_paths.tar.gz";
- try saveEmbedFile(tarball_name, tmp.dir);
+ try saveEmbedFile(io, tarball_name, tmp.dir);
const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
defer gpa.free(tarball_path);
@@ -2107,7 +2108,7 @@ test "tarball with excluded duplicate paths" {
defer tmp.cleanup();
const tarball_name = "duplicate_paths_excluded.tar.gz";
- try saveEmbedFile(tarball_name, tmp.dir);
+ try saveEmbedFile(io, tarball_name, tmp.dir);
const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
defer gpa.free(tarball_path);
@@ -2153,7 +2154,7 @@ test "tarball without root folder" {
defer tmp.cleanup();
const tarball_name = "no_root.tar.gz";
- try saveEmbedFile(tarball_name, tmp.dir);
+ try saveEmbedFile(io, tarball_name, tmp.dir);
const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
defer gpa.free(tarball_path);
@@ -2186,7 +2187,7 @@ test "set executable bit based on file content" {
defer tmp.cleanup();
const tarball_name = "executables.tar.gz";
- try saveEmbedFile(tarball_name, tmp.dir);
+ try saveEmbedFile(io, tarball_name, tmp.dir);
const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
defer gpa.free(tarball_path);
@@ -2210,7 +2211,7 @@ test "set executable bit based on file content" {
);
var out = try fb.packageDir();
- defer out.close();
+ defer out.close(io);
const S = std.posix.S;
// expect executable bit not set
try std.testing.expect((try out.statFile("file1")).mode & S.IXUSR == 0);
@@ -2231,11 +2232,11 @@ test "set executable bit based on file content" {
// -rwxrwxr-x 1 17 Apr script_with_shebang_without_exec_bit
}
-fn saveEmbedFile(comptime tarball_name: []const u8, dir: fs.Dir) !void {
+fn saveEmbedFile(io: Io, comptime tarball_name: []const u8, dir: Io.Dir) !void {
//const tarball_name = "duplicate_paths_excluded.tar.gz";
const tarball_content = @embedFile("Fetch/testdata/" ++ tarball_name);
var tmp_file = try dir.createFile(tarball_name, .{});
- defer tmp_file.close();
+ defer tmp_file.close(io);
try tmp_file.writeAll(tarball_content);
}
@@ -2250,7 +2251,7 @@ const TestFetchBuilder = struct {
self: *TestFetchBuilder,
allocator: std.mem.Allocator,
io: Io,
- cache_parent_dir: std.fs.Dir,
+ cache_parent_dir: std.Io.Dir,
path_or_url: []const u8,
) !*Fetch {
const cache_dir = try cache_parent_dir.makeOpenPath("zig-global-cache", .{});
@@ -2301,14 +2302,15 @@ const TestFetchBuilder = struct {
}
fn deinit(self: *TestFetchBuilder) void {
+ const io = self.job_queue.io;
self.fetch.deinit();
self.job_queue.deinit();
self.fetch.prog_node.end();
- self.global_cache_directory.handle.close();
+ self.global_cache_directory.handle.close(io);
self.http_client.deinit();
}
- fn packageDir(self: *TestFetchBuilder) !fs.Dir {
+ fn packageDir(self: *TestFetchBuilder) !Io.Dir {
const root = self.fetch.package_root;
return try root.root_dir.handle.openDir(root.sub_path, .{ .iterate = true });
}
@@ -2316,8 +2318,10 @@ const TestFetchBuilder = struct {
 // Test helper, asserts that package dir contains expected_files.
// expected_files must be sorted.
fn expectPackageFiles(self: *TestFetchBuilder, expected_files: []const []const u8) !void {
+ const io = self.job_queue.io;
+
var package_dir = try self.packageDir();
- defer package_dir.close();
+ defer package_dir.close(io);
var actual_files: std.ArrayList([]u8) = .empty;
defer actual_files.deinit(std.testing.allocator);
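
The mechanical change in this file follows the rest of the commit: the `std.Io` instance (here `f.job_queue.io`) is threaded to every point where a directory or file handle is released, and `fs.Dir`/`fs.File` parameters become `Io.Dir`/`Io.File`. A minimal sketch of the resulting call shape, using a hypothetical helper rather than code from the source:

    const std = @import("std");
    const Io = std.Io;

    // Hypothetical helper: opening and writing look the same as before,
    // but releasing each handle now takes the `Io` instance.
    fn writeTarballCopy(io: Io, dir: Io.Dir, name: []const u8, bytes: []const u8) !void {
        var out_dir = try dir.makeOpenPath("out", .{});
        defer out_dir.close(io); // was: out_dir.close();

        var file = try out_dir.createFile(name, .{});
        defer file.close(io); // was: file.close();

        try file.writeAll(bytes);
    }
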
diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig
index a2ea870c3f..864865bd19 100644
--- a/src/Package/Fetch/git.zig
+++ b/src/Package/Fetch/git.zig
@@ -213,6 +213,7 @@ pub const Repository = struct {
/// Checks out the repository at `commit_oid` to `worktree`.
pub fn checkout(
repository: *Repository,
+ io: Io,
worktree: std.fs.Dir,
commit_oid: Oid,
diagnostics: *Diagnostics,
@@ -223,12 +224,13 @@ pub const Repository = struct {
if (commit_object.type != .commit) return error.NotACommit;
break :tree_oid try getCommitTree(repository.odb.format, commit_object.data);
};
- try repository.checkoutTree(worktree, tree_oid, "", diagnostics);
+ try repository.checkoutTree(io, worktree, tree_oid, "", diagnostics);
}
/// Checks out the tree at `tree_oid` to `worktree`.
fn checkoutTree(
repository: *Repository,
+ io: Io,
dir: std.fs.Dir,
tree_oid: Oid,
current_path: []const u8,
@@ -253,10 +255,10 @@ pub const Repository = struct {
.directory => {
try dir.makeDir(entry.name);
var subdir = try dir.openDir(entry.name, .{});
- defer subdir.close();
+ defer subdir.close(io);
const sub_path = try std.fs.path.join(repository.odb.allocator, &.{ current_path, entry.name });
defer repository.odb.allocator.free(sub_path);
- try repository.checkoutTree(subdir, entry.oid, sub_path, diagnostics);
+ try repository.checkoutTree(io, subdir, entry.oid, sub_path, diagnostics);
},
.file => {
try repository.odb.seekOid(entry.oid);
@@ -271,7 +273,7 @@ pub const Repository = struct {
} });
continue;
};
- defer file.close();
+ defer file.close(io);
try file.writeAll(file_object.data);
},
.symlink => {
@@ -1583,14 +1585,14 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
var git_dir = testing.tmpDir(.{});
defer git_dir.cleanup();
var pack_file = try git_dir.dir.createFile("testrepo.pack", .{ .read = true });
- defer pack_file.close();
+ defer pack_file.close(io);
try pack_file.writeAll(testrepo_pack);
var pack_file_buffer: [2000]u8 = undefined;
var pack_file_reader = pack_file.reader(io, &pack_file_buffer);
var index_file = try git_dir.dir.createFile("testrepo.idx", .{ .read = true });
- defer index_file.close();
+ defer index_file.close(io);
var index_file_buffer: [2000]u8 = undefined;
var index_file_writer = index_file.writer(&index_file_buffer);
try indexPack(testing.allocator, format, &pack_file_reader, &index_file_writer);
@@ -1621,7 +1623,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- try repository.checkout(worktree.dir, commit_id, &diagnostics);
+ try repository.checkout(io, worktree.dir, commit_id, &diagnostics);
try testing.expect(diagnostics.errors.items.len == 0);
const expected_files: []const []const u8 = &.{
@@ -1713,20 +1715,20 @@ pub fn main() !void {
const format = std.meta.stringToEnum(Oid.Format, args[1]) orelse return error.InvalidFormat;
var pack_file = try std.fs.cwd().openFile(args[2], .{});
- defer pack_file.close();
+ defer pack_file.close(io);
var pack_file_buffer: [4096]u8 = undefined;
var pack_file_reader = pack_file.reader(io, &pack_file_buffer);
const commit = try Oid.parse(format, args[3]);
var worktree = try std.fs.cwd().makeOpenPath(args[4], .{});
- defer worktree.close();
+ defer worktree.close(io);
var git_dir = try worktree.makeOpenPath(".git", .{});
- defer git_dir.close();
+ defer git_dir.close(io);
std.debug.print("Starting index...\n", .{});
var index_file = try git_dir.createFile("idx", .{ .read = true });
- defer index_file.close();
+ defer index_file.close(io);
var index_file_buffer: [4096]u8 = undefined;
var index_file_writer = index_file.writer(&index_file_buffer);
try indexPack(allocator, format, &pack_file_reader, &index_file_writer);
@@ -1738,7 +1740,7 @@ pub fn main() !void {
defer repository.deinit();
var diagnostics: Diagnostics = .{ .allocator = allocator };
defer diagnostics.deinit();
- try repository.checkout(worktree, commit, &diagnostics);
+ try repository.checkout(io, worktree, commit, &diagnostics);
for (diagnostics.errors.items) |err| {
std.debug.print("Diagnostic: {}\n", .{err});
diff --git a/src/Zcu.zig b/src/Zcu.zig
index 137b4d8b59..58d884afe3 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -1078,7 +1078,7 @@ pub const File = struct {
const dir, const sub_path = file.path.openInfo(zcu.comp.dirs);
break :f try dir.openFile(sub_path, .{});
};
- defer f.close();
+ defer f.close(io);
const stat = f.stat() catch |err| switch (err) {
error.Streaming => {
@@ -2813,8 +2813,8 @@ pub fn init(zcu: *Zcu, gpa: Allocator, io: Io, thread_count: usize) !void {
pub fn deinit(zcu: *Zcu) void {
const comp = zcu.comp;
- const gpa = comp.gpa;
const io = comp.io;
+ const gpa = zcu.gpa;
{
const pt: Zcu.PerThread = .activate(zcu, .main);
defer pt.deactivate();
@@ -2835,8 +2835,8 @@ pub fn deinit(zcu: *Zcu) void {
}
zcu.embed_table.deinit(gpa);
- zcu.local_zir_cache.handle.close();
- zcu.global_zir_cache.handle.close();
+ zcu.local_zir_cache.handle.close(io);
+ zcu.global_zir_cache.handle.close(io);
for (zcu.failed_analysis.values()) |value| value.destroy(gpa);
for (zcu.failed_codegen.values()) |value| value.destroy(gpa);
@@ -2900,7 +2900,7 @@ pub fn deinit(zcu: *Zcu) void {
if (zcu.resolved_references) |*r| r.deinit(gpa);
- if (zcu.comp.debugIncremental()) {
+ if (comp.debugIncremental()) {
zcu.incremental_debug_state.deinit(gpa);
}
}
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 2ad5bac01c..d2ca004058 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -96,7 +96,7 @@ pub fn updateFile(
const dir, const sub_path = file.path.openInfo(comp.dirs);
break :f try dir.openFile(sub_path, .{});
};
- defer source_file.close();
+ defer source_file.close(io);
const stat = try source_file.stat();
@@ -215,7 +215,7 @@ pub fn updateFile(
else => |e| return e, // Retryable errors are handled at callsite.
};
};
- defer cache_file.close();
+ defer cache_file.close(io);
// Under `--time-report`, ignore cache hits; do the work anyway for those juicy numbers.
const ignore_hit = comp.time_report != null;
@@ -2468,7 +2468,7 @@ fn updateEmbedFileInner(
const dir, const sub_path = ef.path.openInfo(zcu.comp.dirs);
break :f try dir.openFile(sub_path, .{});
};
- defer file.close();
+ defer file.close(io);
const stat: Cache.File.Stat = .fromFs(try file.stat());
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 5dc55b74f6..cb4fe0459f 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -799,6 +799,7 @@ pub const Object = struct {
pub fn emit(o: *Object, pt: Zcu.PerThread, options: EmitOptions) error{ LinkFailure, OutOfMemory }!void {
const zcu = pt.zcu;
const comp = zcu.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
{
@@ -979,7 +980,7 @@ pub const Object = struct {
if (options.pre_bc_path) |path| {
var file = std.fs.cwd().createFile(path, .{}) catch |err|
return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) });
- defer file.close();
+ defer file.close(io);
const ptr: [*]const u8 = @ptrCast(bitcode.ptr);
file.writeAll(ptr[0..(bitcode.len * 4)]) catch |err|
@@ -992,7 +993,7 @@ pub const Object = struct {
if (options.post_bc_path) |path| {
var file = std.fs.cwd().createFile(path, .{}) catch |err|
return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) });
- defer file.close();
+ defer file.close(io);
const ptr: [*]const u8 = @ptrCast(bitcode.ptr);
file.writeAll(ptr[0..(bitcode.len * 4)]) catch |err|
diff --git a/src/fmt.zig b/src/fmt.zig
index 80925200d6..907c7885ad 100644
--- a/src/fmt.zig
+++ b/src/fmt.zig
@@ -187,7 +187,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
// On Windows, statFile does not work for directories
error.IsDir => dir: {
var dir = try fs.cwd().openDir(file_path, .{});
- defer dir.close();
+ defer dir.close(io);
break :dir try dir.stat();
},
else => |e| return e,
@@ -222,8 +222,10 @@ fn fmtPathDir(
parent_dir: fs.Dir,
parent_sub_path: []const u8,
) !void {
+ const io = fmt.io;
+
var dir = try parent_dir.openDir(parent_sub_path, .{ .iterate = true });
- defer dir.close();
+ defer dir.close(io);
const stat = try dir.stat();
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
@@ -262,7 +264,7 @@ fn fmtPathFile(
const source_file = try dir.openFile(sub_path, .{});
var file_closed = false;
- errdefer if (!file_closed) source_file.close();
+ errdefer if (!file_closed) source_file.close(io);
const stat = try source_file.stat();
@@ -280,7 +282,7 @@ fn fmtPathFile(
};
defer gpa.free(source_code);
- source_file.close();
+ source_file.close(io);
file_closed = true;
// Add to set after no longer possible to get error.IsDir.
diff --git a/src/introspect.zig b/src/introspect.zig
index 8467b566c6..9b6797e7d8 100644
--- a/src/introspect.zig
+++ b/src/introspect.zig
@@ -1,18 +1,21 @@
-const std = @import("std");
const builtin = @import("builtin");
+const build_options = @import("build_options");
+
+const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
-const Allocator = mem.Allocator;
+const Allocator = std.mem.Allocator;
const os = std.os;
const fs = std.fs;
const Cache = std.Build.Cache;
+
const Compilation = @import("Compilation.zig");
const Package = @import("Package.zig");
-const build_options = @import("build_options");
/// Returns the sub_path that worked, or `null` if none did.
/// The path of the returned Directory is relative to `base`.
/// The handle of the returned Directory is open.
-fn testZigInstallPrefix(base_dir: fs.Dir) ?Cache.Directory {
+fn testZigInstallPrefix(io: Io, base_dir: Io.Dir) ?Cache.Directory {
const test_index_file = "std" ++ fs.path.sep_str ++ "std.zig";
zig_dir: {
@@ -20,31 +23,31 @@ fn testZigInstallPrefix(base_dir: fs.Dir) ?Cache.Directory {
const lib_zig = "lib" ++ fs.path.sep_str ++ "zig";
var test_zig_dir = base_dir.openDir(lib_zig, .{}) catch break :zig_dir;
const file = test_zig_dir.openFile(test_index_file, .{}) catch {
- test_zig_dir.close();
+ test_zig_dir.close(io);
break :zig_dir;
};
- file.close();
+ file.close(io);
return .{ .handle = test_zig_dir, .path = lib_zig };
}
// Try lib/std/std.zig
var test_zig_dir = base_dir.openDir("lib", .{}) catch return null;
const file = test_zig_dir.openFile(test_index_file, .{}) catch {
- test_zig_dir.close();
+ test_zig_dir.close(io);
return null;
};
- file.close();
+ file.close(io);
return .{ .handle = test_zig_dir, .path = "lib" };
}
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
-pub fn findZigLibDir(gpa: Allocator) !Cache.Directory {
+pub fn findZigLibDir(gpa: Allocator, io: Io) !Cache.Directory {
const cwd_path = try getResolvedCwd(gpa);
defer gpa.free(cwd_path);
const self_exe_path = try fs.selfExePathAlloc(gpa);
defer gpa.free(self_exe_path);
- return findZigLibDirFromSelfExe(gpa, cwd_path, self_exe_path);
+ return findZigLibDirFromSelfExe(gpa, io, cwd_path, self_exe_path);
}
/// Like `std.process.getCwdAlloc`, but also resolves the path with `std.fs.path.resolve`. This
@@ -73,6 +76,7 @@ pub fn getResolvedCwd(gpa: Allocator) error{
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
pub fn findZigLibDirFromSelfExe(
allocator: Allocator,
+ io: Io,
/// The return value of `getResolvedCwd`.
/// Passed as an argument to avoid pointlessly repeating the call.
cwd_path: []const u8,
@@ -82,9 +86,9 @@ pub fn findZigLibDirFromSelfExe(
var cur_path: []const u8 = self_exe_path;
while (fs.path.dirname(cur_path)) |dirname| : (cur_path = dirname) {
var base_dir = cwd.openDir(dirname, .{}) catch continue;
- defer base_dir.close();
+ defer base_dir.close(io);
- const sub_directory = testZigInstallPrefix(base_dir) orelse continue;
+ const sub_directory = testZigInstallPrefix(io, base_dir) orelse continue;
const p = try fs.path.join(allocator, &.{ dirname, sub_directory.path.? });
defer allocator.free(p);
diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig
index afeb5b3282..8c5e0afe4b 100644
--- a/src/libs/freebsd.zig
+++ b/src/libs/freebsd.zig
@@ -449,7 +449,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
cache.addPrefix(comp.dirs.zig_lib);
cache.addPrefix(comp.dirs.global_cache);
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
var man = cache.obtain();
defer man.deinit();
@@ -480,7 +480,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}),
.path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}),
};
- defer o_directory.handle.close();
+ defer o_directory.handle.close(io);
const abilists_contents = man.files.keys()[abilists_index].contents.?;
const metadata = try loadMetaData(gpa, abilists_contents);
diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig
index 64d0fdbeac..bec20ff3d4 100644
--- a/src/libs/glibc.zig
+++ b/src/libs/glibc.zig
@@ -684,7 +684,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
cache.addPrefix(comp.dirs.zig_lib);
cache.addPrefix(comp.dirs.global_cache);
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
var man = cache.obtain();
defer man.deinit();
@@ -715,7 +715,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}),
.path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}),
};
- defer o_directory.handle.close();
+ defer o_directory.handle.close(io);
const abilists_contents = man.files.keys()[abilists_index].contents.?;
const metadata = try loadMetaData(gpa, abilists_contents);
diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig
index b3c018996a..005696e1fc 100644
--- a/src/libs/mingw.zig
+++ b/src/libs/mingw.zig
@@ -262,7 +262,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
cache.addPrefix(comp.dirs.zig_lib);
cache.addPrefix(comp.dirs.global_cache);
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
cache.hash.addBytes(build_options.version);
cache.hash.addOptionalBytes(comp.dirs.zig_lib.path);
@@ -297,7 +297,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
const digest = man.final();
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
var o_dir = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{});
- defer o_dir.close();
+ defer o_dir.close(io);
const aro = @import("aro");
var diagnostics: aro.Diagnostics = .{
@@ -377,7 +377,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
{
const lib_final_file = try o_dir.createFile(final_lib_basename, .{ .truncate = true });
- defer lib_final_file.close();
+ defer lib_final_file.close(io);
var buffer: [1024]u8 = undefined;
var file_writer = lib_final_file.writer(&buffer);
try implib.writeCoffArchive(gpa, &file_writer.interface, members);
diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig
index 8d35e3bd71..67e6a2f903 100644
--- a/src/libs/netbsd.zig
+++ b/src/libs/netbsd.zig
@@ -390,7 +390,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
cache.addPrefix(comp.dirs.zig_lib);
cache.addPrefix(comp.dirs.global_cache);
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
var man = cache.obtain();
defer man.deinit();
@@ -421,7 +421,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}),
.path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}),
};
- defer o_directory.handle.close();
+ defer o_directory.handle.close(io);
const abilists_contents = man.files.keys()[abilists_index].contents.?;
const metadata = try loadMetaData(gpa, abilists_contents);
diff --git a/src/link.zig b/src/link.zig
index 6ac96504c7..ef095987c9 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -687,7 +687,7 @@ pub const File = struct {
.lld => assert(base.file == null),
.elf => if (base.file) |f| {
dev.check(.elf_linker);
- f.close();
+ f.close(io);
base.file = null;
if (base.child_pid) |pid| {
@@ -701,7 +701,7 @@ pub const File = struct {
},
.macho, .wasm => if (base.file) |f| {
dev.checkAny(&.{ .coff_linker, .macho_linker, .plan9_linker, .wasm_linker });
- f.close();
+ f.close(io);
base.file = null;
if (base.child_pid) |pid| {
@@ -866,8 +866,9 @@ pub const File = struct {
}
pub fn destroy(base: *File) void {
+ const io = base.comp.io;
base.releaseLock();
- if (base.file) |f| f.close();
+ if (base.file) |f| f.close(io);
switch (base.tag) {
.plan9 => unreachable,
inline else => |tag| {
@@ -1060,9 +1061,10 @@ pub const File = struct {
/// Opens a path as an object file and parses it into the linker.
fn openLoadObject(base: *File, path: Path) anyerror!void {
if (base.tag == .lld) return;
+ const io = base.comp.io;
const diags = &base.comp.link_diags;
- const input = try openObjectInput(diags, path);
- errdefer input.object.file.close();
+ const input = try openObjectInput(io, diags, path);
+ errdefer input.object.file.close(io);
try loadInput(base, input);
}
@@ -1070,21 +1072,22 @@ pub const File = struct {
/// If `query` is non-null, allows GNU ld scripts.
fn openLoadArchive(base: *File, path: Path, opt_query: ?UnresolvedInput.Query) anyerror!void {
if (base.tag == .lld) return;
+ const io = base.comp.io;
if (opt_query) |query| {
- const archive = try openObject(path, query.must_link, query.hidden);
- errdefer archive.file.close();
+ const archive = try openObject(io, path, query.must_link, query.hidden);
+ errdefer archive.file.close(io);
loadInput(base, .{ .archive = archive }) catch |err| switch (err) {
error.BadMagic, error.UnexpectedEndOfFile => {
if (base.tag != .elf and base.tag != .elf2) return err;
try loadGnuLdScript(base, path, query, archive.file);
- archive.file.close();
+ archive.file.close(io);
return;
},
else => return err,
};
} else {
- const archive = try openObject(path, false, false);
- errdefer archive.file.close();
+ const archive = try openObject(io, path, false, false);
+ errdefer archive.file.close(io);
try loadInput(base, .{ .archive = archive });
}
}
@@ -1093,13 +1096,14 @@ pub const File = struct {
/// Handles GNU ld scripts.
fn openLoadDso(base: *File, path: Path, query: UnresolvedInput.Query) anyerror!void {
if (base.tag == .lld) return;
- const dso = try openDso(path, query.needed, query.weak, query.reexport);
- errdefer dso.file.close();
+ const io = base.comp.io;
+ const dso = try openDso(io, path, query.needed, query.weak, query.reexport);
+ errdefer dso.file.close(io);
loadInput(base, .{ .dso = dso }) catch |err| switch (err) {
error.BadMagic, error.UnexpectedEndOfFile => {
if (base.tag != .elf and base.tag != .elf2) return err;
try loadGnuLdScript(base, path, query, dso.file);
- dso.file.close();
+ dso.file.close(io);
return;
},
else => return err,
@@ -1735,6 +1739,7 @@ pub fn hashInputs(man: *Cache.Manifest, link_inputs: []const Input) !void {
pub fn resolveInputs(
gpa: Allocator,
arena: Allocator,
+ io: Io,
target: *const std.Target,
/// This function mutates this array but does not take ownership.
/// Allocated with `gpa`.
@@ -1784,6 +1789,7 @@ pub fn resolveInputs(
for (lib_directories) |lib_directory| switch (try resolveLibInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&checked_paths,
@@ -1810,6 +1816,7 @@ pub fn resolveInputs(
for (lib_directories) |lib_directory| switch (try resolveLibInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&checked_paths,
@@ -1837,6 +1844,7 @@ pub fn resolveInputs(
switch (try resolveLibInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&checked_paths,
@@ -1855,6 +1863,7 @@ pub fn resolveInputs(
switch (try resolveLibInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&checked_paths,
@@ -1886,6 +1895,7 @@ pub fn resolveInputs(
if (try resolvePathInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&ld_script_bytes,
@@ -1903,6 +1913,7 @@ pub fn resolveInputs(
switch ((try resolvePathInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&ld_script_bytes,
@@ -1930,6 +1941,7 @@ pub fn resolveInputs(
if (try resolvePathInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&ld_script_bytes,
@@ -1969,6 +1981,7 @@ const fatal = std.process.fatal;
fn resolveLibInput(
gpa: Allocator,
arena: Allocator,
+ io: Io,
/// Allocated via `gpa`.
unresolved_inputs: *std.ArrayList(UnresolvedInput),
/// Allocated via `gpa`.
@@ -1998,7 +2011,7 @@ fn resolveLibInput(
error.FileNotFound => break :tbd,
else => |e| fatal("unable to search for tbd library '{f}': {s}", .{ test_path, @errorName(e) }),
};
- errdefer file.close();
+ errdefer file.close(io);
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
}
@@ -2013,7 +2026,7 @@ fn resolveLibInput(
}),
};
try checked_paths.print(gpa, "\n {f}", .{test_path});
- switch (try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, .{
+ switch (try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, .{
.path = test_path,
.query = name_query.query,
}, link_mode, color)) {
@@ -2036,7 +2049,7 @@ fn resolveLibInput(
test_path, @errorName(e),
}),
};
- errdefer file.close();
+ errdefer file.close(io);
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
}
@@ -2052,7 +2065,7 @@ fn resolveLibInput(
error.FileNotFound => break :mingw,
else => |e| fatal("unable to search for static library '{f}': {s}", .{ test_path, @errorName(e) }),
};
- errdefer file.close();
+ errdefer file.close(io);
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
}
@@ -2087,6 +2100,7 @@ fn finishResolveLibInput(
fn resolvePathInput(
gpa: Allocator,
arena: Allocator,
+ io: Io,
/// Allocated with `gpa`.
unresolved_inputs: *std.ArrayList(UnresolvedInput),
/// Allocated with `gpa`.
@@ -2098,12 +2112,12 @@ fn resolvePathInput(
color: std.zig.Color,
) Allocator.Error!?ResolveLibInputResult {
switch (Compilation.classifyFileExt(pq.path.sub_path)) {
- .static_library => return try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .static, color),
- .shared_library => return try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .dynamic, color),
+ .static_library => return try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .static, color),
+ .shared_library => return try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .dynamic, color),
.object => {
var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err|
fatal("failed to open object {f}: {s}", .{ pq.path, @errorName(err) });
- errdefer file.close();
+ errdefer file.close(io);
try resolved_inputs.append(gpa, .{ .object = .{
.path = pq.path,
.file = file,
@@ -2115,7 +2129,7 @@ fn resolvePathInput(
.res => {
var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err|
fatal("failed to open windows resource {f}: {s}", .{ pq.path, @errorName(err) });
- errdefer file.close();
+ errdefer file.close(io);
try resolved_inputs.append(gpa, .{ .res = .{
.path = pq.path,
.file = file,
@@ -2129,6 +2143,7 @@ fn resolvePathInput(
fn resolvePathInputLib(
gpa: Allocator,
arena: Allocator,
+ io: Io,
/// Allocated with `gpa`.
unresolved_inputs: *std.ArrayList(UnresolvedInput),
/// Allocated with `gpa`.
@@ -2155,7 +2170,7 @@ fn resolvePathInputLib(
@tagName(link_mode), std.fmt.alt(test_path, .formatEscapeChar), @errorName(e),
}),
};
- errdefer file.close();
+ errdefer file.close(io);
try ld_script_bytes.resize(gpa, @max(std.elf.MAGIC.len, std.elf.ARMAG.len));
const n = file.preadAll(ld_script_bytes.items, 0) catch |err| fatal("failed to read '{f}': {s}", .{
std.fmt.alt(test_path, .formatEscapeChar), @errorName(err),
@@ -2223,7 +2238,7 @@ fn resolvePathInputLib(
} });
}
}
- file.close();
+ file.close(io);
return .ok;
}
@@ -2233,13 +2248,13 @@ fn resolvePathInputLib(
@tagName(link_mode), test_path, @errorName(e),
}),
};
- errdefer file.close();
+ errdefer file.close(io);
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, pq.query);
}
-pub fn openObject(path: Path, must_link: bool, hidden: bool) !Input.Object {
+pub fn openObject(io: Io, path: Path, must_link: bool, hidden: bool) !Input.Object {
var file = try path.root_dir.handle.openFile(path.sub_path, .{});
- errdefer file.close();
+ errdefer file.close(io);
return .{
.path = path,
.file = file,
@@ -2248,9 +2263,9 @@ pub fn openObject(path: Path, must_link: bool, hidden: bool) !Input.Object {
};
}
-pub fn openDso(path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso {
+pub fn openDso(io: Io, path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso {
var file = try path.root_dir.handle.openFile(path.sub_path, .{});
- errdefer file.close();
+ errdefer file.close(io);
return .{
.path = path,
.file = file,
@@ -2260,20 +2275,20 @@ pub fn openDso(path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso
};
}
-pub fn openObjectInput(diags: *Diags, path: Path) error{LinkFailure}!Input {
- return .{ .object = openObject(path, false, false) catch |err| {
+pub fn openObjectInput(io: Io, diags: *Diags, path: Path) error{LinkFailure}!Input {
+ return .{ .object = openObject(io, path, false, false) catch |err| {
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
} };
}
-pub fn openArchiveInput(diags: *Diags, path: Path, must_link: bool, hidden: bool) error{LinkFailure}!Input {
- return .{ .archive = openObject(path, must_link, hidden) catch |err| {
+pub fn openArchiveInput(io: Io, diags: *Diags, path: Path, must_link: bool, hidden: bool) error{LinkFailure}!Input {
+ return .{ .archive = openObject(io, path, must_link, hidden) catch |err| {
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
} };
}
-pub fn openDsoInput(diags: *Diags, path: Path, needed: bool, weak: bool, reexport: bool) error{LinkFailure}!Input {
- return .{ .dso = openDso(path, needed, weak, reexport) catch |err| {
+pub fn openDsoInput(io: Io, diags: *Diags, path: Path, needed: bool, weak: bool, reexport: bool) error{LinkFailure}!Input {
+ return .{ .dso = openDso(io, path, needed, weak, reexport) catch |err| {
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
} };
}
diff --git a/src/link/C.zig b/src/link/C.zig
index ce48e85851..04c92443e5 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -124,6 +124,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*C {
+ const io = comp.io;
const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .c);
const optimize_mode = comp.root_mod.optimize_mode;
@@ -139,7 +140,7 @@ pub fn createEmpty(
// Truncation is done on `flush`.
.truncate = false,
});
- errdefer file.close();
+ errdefer file.close(io);
const c_file = try arena.create(C);
@@ -763,6 +764,7 @@ pub fn flushEmitH(zcu: *Zcu) !void {
if (true) return; // emit-h is regressed
const emit_h = zcu.emit_h orelse return;
+ const io = zcu.comp.io;
// We collect a list of buffers to write, and write them all at once with pwritev 😎
const num_buffers = emit_h.decl_table.count() + 1;
@@ -795,7 +797,7 @@ pub fn flushEmitH(zcu: *Zcu) !void {
// make it easier on the file system by doing 1 reallocation instead of two.
.truncate = false,
});
- defer file.close();
+ defer file.close(io);
try file.setEndPos(file_size);
try file.pwritevAll(all_buffers.items, 0);
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 69acbe034b..ae7d631f09 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -406,10 +406,12 @@ pub fn open(
}
pub fn deinit(self: *Elf) void {
- const gpa = self.base.comp.gpa;
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
for (self.file_handles.items) |fh| {
- fh.close();
+ fh.close(io);
}
self.file_handles.deinit(gpa);
diff --git a/src/link/Lld.zig b/src/link/Lld.zig
index 2345090482..66b032e0a9 100644
--- a/src/link/Lld.zig
+++ b/src/link/Lld.zig
@@ -1628,7 +1628,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
defer comp.dirs.local_cache.handle.deleteFileZ(rsp_path) catch |err|
log.warn("failed to delete response file {s}: {s}", .{ rsp_path, @errorName(err) });
{
- defer rsp_file.close();
+ defer rsp_file.close(io);
var rsp_file_buffer: [1024]u8 = undefined;
var rsp_file_writer = rsp_file.writer(&rsp_file_buffer);
const rsp_writer = &rsp_file_writer.interface;
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 2c4ffd6632..471465cea1 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -267,14 +267,16 @@ pub fn open(
}
pub fn deinit(self: *MachO) void {
- const gpa = self.base.comp.gpa;
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
if (self.d_sym) |*d_sym| {
d_sym.deinit();
}
for (self.file_handles.items) |handle| {
- handle.close();
+ handle.close(io);
}
self.file_handles.deinit(gpa);
@@ -3257,8 +3259,10 @@ const InitMetadataOptions = struct {
};
pub fn closeDebugInfo(self: *MachO) bool {
+ const comp = self.base.comp;
+ const io = comp.io;
const d_sym = &(self.d_sym orelse return false);
- d_sym.file.?.close();
+ d_sym.file.?.close(io);
d_sym.file = null;
return true;
}
@@ -3269,7 +3273,9 @@ pub fn reopenDebugInfo(self: *MachO) !void {
assert(!self.base.comp.config.use_llvm);
assert(self.base.comp.config.debug_format == .dwarf);
- const gpa = self.base.comp.gpa;
+ const comp = self.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const sep = fs.path.sep_str;
const d_sym_path = try std.fmt.allocPrint(
gpa,
@@ -3279,7 +3285,7 @@ pub fn reopenDebugInfo(self: *MachO) !void {
defer gpa.free(d_sym_path);
var d_sym_bundle = try self.base.emit.root_dir.handle.makeOpenPath(d_sym_path, .{});
- defer d_sym_bundle.close();
+ defer d_sym_bundle.close(io);
self.d_sym.?.file = try d_sym_bundle.createFile(fs.path.basename(self.base.emit.sub_path), .{
.truncate = false,
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 5d7b9b88c3..8a97f2844f 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -1,5 +1,28 @@
+const DebugSymbols = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const fs = std.fs;
+const log = std.log.scoped(.link_dsym);
+const macho = std.macho;
+const makeStaticString = MachO.makeStaticString;
+const math = std.math;
+const mem = std.mem;
+const Writer = std.Io.Writer;
+const Allocator = std.mem.Allocator;
+
+const link = @import("../../link.zig");
+const MachO = @import("../MachO.zig");
+const StringTable = @import("../StringTable.zig");
+const Type = @import("../../Type.zig");
+const trace = @import("../../tracy.zig").trace;
+const load_commands = @import("load_commands.zig");
+const padToIdeal = MachO.padToIdeal;
+
+io: Io,
allocator: Allocator,
-file: ?fs.File,
+file: ?Io.File,
symtab_cmd: macho.symtab_command = .{},
uuid_cmd: macho.uuid_command = .{ .uuid = [_]u8{0} ** 16 },
@@ -208,7 +231,8 @@ pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void {
pub fn deinit(self: *DebugSymbols) void {
const gpa = self.allocator;
- if (self.file) |file| file.close();
+ const io = self.io;
+ if (self.file) |file| file.close(io);
self.segments.deinit(gpa);
self.sections.deinit(gpa);
self.relocs.deinit(gpa);
@@ -443,25 +467,3 @@ pub fn getSection(self: DebugSymbols, sect: u8) macho.section_64 {
assert(sect < self.sections.items.len);
return self.sections.items[sect];
}
-
-const DebugSymbols = @This();
-
-const std = @import("std");
-const build_options = @import("build_options");
-const assert = std.debug.assert;
-const fs = std.fs;
-const link = @import("../../link.zig");
-const load_commands = @import("load_commands.zig");
-const log = std.log.scoped(.link_dsym);
-const macho = std.macho;
-const makeStaticString = MachO.makeStaticString;
-const math = std.math;
-const mem = std.mem;
-const padToIdeal = MachO.padToIdeal;
-const trace = @import("../../tracy.zig").trace;
-const Writer = std.Io.Writer;
-
-const Allocator = mem.Allocator;
-const MachO = @import("../MachO.zig");
-const StringTable = @import("../StringTable.zig");
-const Type = @import("../../Type.zig");
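
Where a handle is released in a `deinit` that has no natural `Io` parameter, the commit stores the instance on the struct instead, as `DebugSymbols` now does with its `io` field. A minimal sketch of that shape, with illustrative names not taken from the source:

    const std = @import("std");
    const Io = std.Io;

    const Owner = struct {
        io: Io,
        file: ?Io.File,

        // Teardown is self-contained: the stored `io` closes the handle,
        // so callers do not have to pass it to deinit.
        fn deinit(self: *Owner) void {
            if (self.file) |file| file.close(self.io);
            self.* = undefined;
        }
    };
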
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 92307ec40c..160e6cdcc6 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -3032,7 +3032,7 @@ fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void {
const io = wasm.base.comp.io;
const gc_sections = wasm.base.gc_sections;
- defer obj.file.close();
+ defer obj.file.close(io);
var file_reader = obj.file.reader(io, &.{});
@@ -3060,7 +3060,7 @@ fn parseArchive(wasm: *Wasm, obj: link.Input.Object) !void {
const io = wasm.base.comp.io;
const gc_sections = wasm.base.gc_sections;
- defer obj.file.close();
+ defer obj.file.close(io);
var file_reader = obj.file.reader(io, &.{});
diff --git a/src/main.zig b/src/main.zig
index a897f2a847..3ca64881f8 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -328,21 +328,21 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.prepend_global_cache_path = true,
});
} else if (mem.eql(u8, cmd, "init")) {
- return cmdInit(gpa, arena, cmd_args);
+ return cmdInit(gpa, arena, io, cmd_args);
} else if (mem.eql(u8, cmd, "targets")) {
dev.check(.targets_command);
const host = std.zig.resolveTargetQueryOrFatal(io, .{});
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
- try @import("print_targets.zig").cmdTargets(arena, cmd_args, &stdout_writer.interface, &host);
+ var stdout_writer = Io.File.stdout().writer(&stdout_buffer);
+ try @import("print_targets.zig").cmdTargets(arena, io, cmd_args, &stdout_writer.interface, &host);
return stdout_writer.interface.flush();
} else if (mem.eql(u8, cmd, "version")) {
dev.check(.version_command);
- try fs.File.stdout().writeAll(build_options.version ++ "\n");
+ try Io.File.stdout().writeAll(build_options.version ++ "\n");
return;
} else if (mem.eql(u8, cmd, "env")) {
dev.check(.env_command);
const host = std.zig.resolveTargetQueryOrFatal(io, .{});
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(&stdout_buffer);
try @import("print_env.zig").cmdEnv(
arena,
&stdout_writer.interface,
@@ -358,10 +358,10 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
});
} else if (mem.eql(u8, cmd, "zen")) {
dev.check(.zen_command);
- return fs.File.stdout().writeAll(info_zen);
+ return Io.File.stdout().writeAll(info_zen);
} else if (mem.eql(u8, cmd, "help") or mem.eql(u8, cmd, "-h") or mem.eql(u8, cmd, "--help")) {
dev.check(.help_command);
- return fs.File.stdout().writeAll(usage);
+ return Io.File.stdout().writeAll(usage);
} else if (mem.eql(u8, cmd, "ast-check")) {
return cmdAstCheck(arena, io, cmd_args);
} else if (mem.eql(u8, cmd, "detect-cpu")) {
@@ -698,7 +698,7 @@ const Emit = union(enum) {
yes: []const u8,
const OutputToCacheReason = enum { listen, @"zig run", @"zig test" };
- fn resolve(emit: Emit, default_basename: []const u8, output_to_cache: ?OutputToCacheReason) Compilation.CreateOptions.Emit {
+ fn resolve(io: Io, emit: Emit, default_basename: []const u8, output_to_cache: ?OutputToCacheReason) Compilation.CreateOptions.Emit {
return switch (emit) {
.no => .no,
.yes_default_path => if (output_to_cache != null) .yes_cache else .{ .yes_path = default_basename },
@@ -716,7 +716,7 @@ const Emit = union(enum) {
var dir = fs.cwd().openDir(dir_path, .{}) catch |err| {
fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) });
};
- dir.close();
+ dir.close(io);
}
break :e .{ .yes_path = path };
},
@@ -1034,7 +1034,7 @@ fn buildOutputType(
};
} else if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_build_generic);
+ try Io.File.stdout().writeAll(usage_build_generic);
return cleanExit();
} else if (mem.eql(u8, arg, "--")) {
if (arg_mode == .run) {
@@ -2834,9 +2834,9 @@ fn buildOutputType(
} else if (mem.eql(u8, arg, "-V")) {
warn("ignoring request for supported emulations: unimplemented", .{});
} else if (mem.eql(u8, arg, "-v")) {
- try fs.File.stdout().writeAll("zig ld " ++ build_options.version ++ "\n");
+ try Io.File.stdout().writeAll("zig ld " ++ build_options.version ++ "\n");
} else if (mem.eql(u8, arg, "--version")) {
- try fs.File.stdout().writeAll("zig ld " ++ build_options.version ++ "\n");
+ try Io.File.stdout().writeAll("zig ld " ++ build_options.version ++ "\n");
process.exit(0);
} else {
fatal("unsupported linker arg: {s}", .{arg});
@@ -3251,8 +3251,8 @@ fn buildOutputType(
}
}
- var cleanup_emit_bin_dir: ?fs.Dir = null;
- defer if (cleanup_emit_bin_dir) |*dir| dir.close();
+ var cleanup_emit_bin_dir: ?Io.Dir = null;
+ defer if (cleanup_emit_bin_dir) |*dir| dir.close(io);
// For `zig run` and `zig test`, we don't want to put the binary in the cwd by default. So, if
// the binary is requested with no explicit path (as is the default), we emit to the cache.
@@ -3307,7 +3307,7 @@ fn buildOutputType(
var dir = fs.cwd().openDir(dir_path, .{}) catch |err| {
fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) });
};
- dir.close();
+ dir.close(io);
}
break :emit .{ .yes_path = path };
},
@@ -3390,7 +3390,7 @@ fn buildOutputType(
// will be a hash of its contents — so multiple invocations of
// `zig cc -` will result in the same temp file name.
var f = try dirs.local_cache.handle.createFile(dump_path, .{});
- defer f.close();
+ defer f.close(io);
// Re-using the hasher from Cache, since the functional requirements
// for the hashing algorithm here and in the cache are the same.
@@ -3399,7 +3399,7 @@ fn buildOutputType(
var file_writer = f.writer(&.{});
var buffer: [1000]u8 = undefined;
var hasher = file_writer.interface.hashed(Cache.Hasher.init("0123456789abcdef"), &buffer);
- var stdin_reader = fs.File.stdin().readerStreaming(io, &.{});
+ var stdin_reader = Io.File.stdin().readerStreaming(io, &.{});
_ = hasher.writer.sendFileAll(&stdin_reader, .unlimited) catch |err| switch (err) {
error.WriteFailed => fatal("failed to write {s}: {t}", .{ dump_path, file_writer.err.? }),
else => fatal("failed to pipe stdin to {s}: {t}", .{ dump_path, err }),
@@ -3630,13 +3630,13 @@ fn buildOutputType(
if (show_builtin) {
const builtin_opts = comp.root_mod.getBuiltinOptions(comp.config);
const source = try builtin_opts.generate(arena);
- return fs.File.stdout().writeAll(source);
+ return Io.File.stdout().writeAll(source);
}
switch (listen) {
.none => {},
.stdio => {
- var stdin_reader = fs.File.stdin().reader(io, &stdin_buffer);
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var stdin_reader = Io.File.stdin().reader(io, &stdin_buffer);
+ var stdout_writer = Io.File.stdout().writer(&stdout_buffer);
try serve(
comp,
&stdin_reader.interface,
@@ -4034,6 +4034,7 @@ fn createModule(
link.resolveInputs(
gpa,
arena,
+ io,
target,
&unresolved_link_inputs,
&create_module.link_inputs,
@@ -4689,8 +4690,8 @@ fn cmdTranslateC(
@errorName(err),
});
};
- defer zig_file.close();
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ defer zig_file.close(io);
+ var stdout_writer = Io.File.stdout().writer(&stdout_buffer);
var file_reader = zig_file.reader(io, &.{});
_ = try stdout_writer.interface.sendFileAll(&file_reader, .unlimited);
try stdout_writer.interface.flush();
@@ -4728,7 +4729,7 @@ const usage_init =
\\
;
-fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
+fn cmdInit(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !void {
dev.check(.init_command);
var template: enum { example, minimal } = .example;
@@ -4740,7 +4741,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
if (mem.eql(u8, arg, "-m") or mem.eql(u8, arg, "--minimal")) {
template = .minimal;
} else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_init);
+ try Io.File.stdout().writeAll(usage_init);
return cleanExit();
} else {
fatal("unrecognized parameter: '{s}'", .{arg});
@@ -4759,7 +4760,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
switch (template) {
.example => {
- var templates = findTemplates(gpa, arena);
+ var templates = findTemplates(gpa, arena, io);
defer templates.deinit();
const s = fs.path.sep_str;
@@ -4789,7 +4790,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
return cleanExit();
},
.minimal => {
- writeSimpleTemplateFile(Package.Manifest.basename,
+ writeSimpleTemplateFile(io, Package.Manifest.basename,
\\.{{
\\ .name = .{s},
\\ .version = "0.0.1",
@@ -4806,7 +4807,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
else => fatal("failed to create '{s}': {s}", .{ Package.Manifest.basename, @errorName(err) }),
error.PathAlreadyExists => fatal("refusing to overwrite '{s}'", .{Package.Manifest.basename}),
};
- writeSimpleTemplateFile(Package.build_zig_basename,
+ writeSimpleTemplateFile(io, Package.build_zig_basename,
\\const std = @import("std");
\\
\\pub fn build(b: *std.Build) void {{
@@ -5203,8 +5204,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
.parent = root_mod,
});
- var cleanup_build_dir: ?fs.Dir = null;
- defer if (cleanup_build_dir) |*dir| dir.close();
+ var cleanup_build_dir: ?Io.Dir = null;
+ defer if (cleanup_build_dir) |*dir| dir.close(io);
if (dev.env.supports(.fetch_command)) {
const fetch_prog_node = root_prog_node.start("Fetch Packages", 0);
@@ -5296,6 +5297,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
try job_queue.createDependenciesSource(&source_buf);
const deps_mod = try createDependenciesModule(
arena,
+ io,
source_buf.items,
root_mod,
dirs,
@@ -5357,6 +5359,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
}
} else try createEmptyDependenciesModule(
arena,
+ io,
root_mod,
dirs,
config,
@@ -5623,7 +5626,7 @@ fn jitCmd(
defer comp.destroy();
if (options.server) {
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(&stdout_buffer);
var server: std.zig.Server = .{
.out = &stdout_writer.interface,
.in = undefined, // won't be receiving messages
@@ -6156,7 +6159,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_ast_check);
+ try Io.File.stdout().writeAll(usage_ast_check);
return cleanExit();
} else if (mem.eql(u8, arg, "-t")) {
want_output_text = true;
@@ -6187,9 +6190,9 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
break :file fs.cwd().openFile(p, .{}) catch |err| {
fatal("unable to open file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) });
};
- } else fs.File.stdin();
- defer if (zig_source_path != null) f.close();
- var file_reader: fs.File.Reader = f.reader(io, &stdin_buffer);
+ } else Io.File.stdin();
+ defer if (zig_source_path != null) f.close(io);
+ var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer);
break :s std.zig.readSourceFileToEndAlloc(arena, &file_reader) catch |err| {
fatal("unable to load file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) });
};
@@ -6207,7 +6210,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
const tree = try Ast.parse(arena, source, mode);
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(&stdout_buffer);
const stdout_bw = &stdout_writer.interface;
switch (mode) {
.zig => {
@@ -6330,7 +6333,7 @@ fn cmdDetectCpu(io: Io, args: []const []const u8) !void {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(detect_cpu_usage);
+ try Io.File.stdout().writeAll(detect_cpu_usage);
return cleanExit();
} else if (mem.eql(u8, arg, "--llvm")) {
use_llvm = true;
@@ -6422,7 +6425,7 @@ fn detectNativeCpuWithLLVM(
}
fn printCpu(cpu: std.Target.Cpu) !void {
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(&stdout_buffer);
const stdout_bw = &stdout_writer.interface;
if (cpu.model.llvm_name) |llvm_name| {
@@ -6471,7 +6474,7 @@ fn cmdDumpLlvmInts(
const dl = tm.createTargetDataLayout();
const context = llvm.Context.create();
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(&stdout_buffer);
const stdout_bw = &stdout_writer.interface;
for ([_]u16{ 1, 8, 16, 32, 64, 128, 256 }) |bits| {
const int_type = context.intType(bits);
@@ -6494,10 +6497,10 @@ fn cmdDumpZir(arena: Allocator, io: Io, args: []const []const u8) !void {
var f = fs.cwd().openFile(cache_file, .{}) catch |err| {
fatal("unable to open zir cache file for dumping '{s}': {s}", .{ cache_file, @errorName(err) });
};
- defer f.close();
+ defer f.close(io);
const zir = try Zcu.loadZirCache(arena, io, f);
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(&stdout_buffer);
const stdout_bw = &stdout_writer.interface;
{
const instruction_bytes = zir.instructions.len *
@@ -6540,16 +6543,16 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void {
const old_source = source: {
var f = fs.cwd().openFile(old_source_path, .{}) catch |err|
fatal("unable to open old source file '{s}': {s}", .{ old_source_path, @errorName(err) });
- defer f.close();
- var file_reader: fs.File.Reader = f.reader(io, &stdin_buffer);
+ defer f.close(io);
+ var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer);
break :source std.zig.readSourceFileToEndAlloc(arena, &file_reader) catch |err|
fatal("unable to read old source file '{s}': {s}", .{ old_source_path, @errorName(err) });
};
const new_source = source: {
var f = fs.cwd().openFile(new_source_path, .{}) catch |err|
fatal("unable to open new source file '{s}': {s}", .{ new_source_path, @errorName(err) });
- defer f.close();
- var file_reader: fs.File.Reader = f.reader(io, &stdin_buffer);
+ defer f.close(io);
+ var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer);
break :source std.zig.readSourceFileToEndAlloc(arena, &file_reader) catch |err|
fatal("unable to read new source file '{s}': {s}", .{ new_source_path, @errorName(err) });
};
@@ -6581,7 +6584,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void {
var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .empty;
try Zcu.mapOldZirToNew(arena, old_zir, new_zir, &inst_map);
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(&stdout_buffer);
const stdout_bw = &stdout_writer.interface;
{
try stdout_bw.print("Instruction mappings:\n", .{});
@@ -6912,7 +6915,7 @@ fn cmdFetch(
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_fetch);
+ try Io.File.stdout().writeAll(usage_fetch);
return cleanExit();
} else if (mem.eql(u8, arg, "--global-cache-dir")) {
if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
@@ -6958,7 +6961,7 @@ fn cmdFetch(
.path = p,
};
};
- defer global_cache_directory.handle.close();
+ defer global_cache_directory.handle.close(io);
var job_queue: Package.Fetch.JobQueue = .{
.io = io,
@@ -7021,7 +7024,7 @@ fn cmdFetch(
const name = switch (save) {
.no => {
- var stdout = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout = Io.File.stdout().writerStreaming(&stdout_buffer);
try stdout.interface.print("{s}\n", .{package_hash_slice});
try stdout.interface.flush();
return cleanExit();
@@ -7043,7 +7046,7 @@ fn cmdFetch(
// The name to use in case the manifest file needs to be created now.
const init_root_name = fs.path.basename(build_root.directory.path orelse cwd_path);
- var manifest, var ast = try loadManifest(gpa, arena, .{
+ var manifest, var ast = try loadManifest(gpa, arena, io, .{
.root_name = try sanitizeExampleName(arena, init_root_name),
.dir = build_root.directory.handle,
.color = color,
@@ -7168,6 +7171,7 @@ fn cmdFetch(
fn createEmptyDependenciesModule(
arena: Allocator,
+ io: Io,
main_mod: *Package.Module,
dirs: Compilation.Directories,
global_options: Compilation.Config,
@@ -7176,6 +7180,7 @@ fn createEmptyDependenciesModule(
try Package.Fetch.JobQueue.createEmptyDependenciesSource(&source);
_ = try createDependenciesModule(
arena,
+ io,
source.items,
main_mod,
dirs,
@@ -7187,6 +7192,7 @@ fn createEmptyDependenciesModule(
/// build runner to obtain via `@import("@dependencies")`.
fn createDependenciesModule(
arena: Allocator,
+ io: Io,
source: []const u8,
main_mod: *Package.Module,
dirs: Compilation.Directories,
@@ -7198,7 +7204,7 @@ fn createDependenciesModule(
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
{
var tmp_dir = try dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{});
- defer tmp_dir.close();
+ defer tmp_dir.close(io);
try tmp_dir.writeFile(.{ .sub_path = basename, .data = source });
}

@@ -7232,10 +7238,10 @@
const BuildRoot = struct {
directory: Cache.Directory,
build_zig_basename: []const u8,
- cleanup_build_dir: ?fs.Dir,
+ cleanup_build_dir: ?Io.Dir,

- fn deinit(br: *BuildRoot) void {
- if (br.cleanup_build_dir) |*dir| dir.close();
+ fn deinit(br: *BuildRoot, io: Io) void {
+ if (br.cleanup_build_dir) |*dir| dir.close(io);
br.* = undefined;
}
};
@@ -7304,13 +7310,14 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {

const LoadManifestOptions = struct {
root_name: []const u8,
- dir: fs.Dir,
+ dir: Io.Dir,
color: Color,
};

fn loadManifest(
gpa: Allocator,
arena: Allocator,
+ io: Io,
options: LoadManifestOptions,
) !struct { Package.Manifest, Ast } {
const manifest_bytes = while (true) {
@@ -7322,7 +7329,7 @@ fn loadManifest(
0,
) catch |err| switch (err) {
error.FileNotFound => {
- writeSimpleTemplateFile(Package.Manifest.basename,
+ writeSimpleTemplateFile(io, Package.Manifest.basename,
\\.{{
\\ .name = .{s},
\\ .version = "{s}",
@@ -7374,12 +7381,12 @@

const Templates = struct {
zig_lib_directory: Cache.Directory,
- dir: fs.Dir,
+ dir: Io.Dir,
buffer: std.array_list.Managed(u8),

- fn deinit(templates: *Templates) void {
- templates.zig_lib_directory.handle.close();
- templates.dir.close();
+ fn deinit(templates: *Templates, io: Io) void {
+ templates.zig_lib_directory.handle.close(io);
+ templates.dir.close(io);
templates.buffer.deinit();
templates.* = undefined;
}
@@ -7387,7 +7394,7 @@ const Templates = struct {
fn write(
templates: *Templates,
arena: Allocator,
- out_dir: fs.Dir,
+ out_dir: Io.Dir,
root_name: []const u8,
template_path: []const u8,
fingerprint: Package.Fingerprint,
@@ -7435,23 +7442,23 @@ const Templates = struct {
});
}
};
-fn writeSimpleTemplateFile(file_name: []const u8, comptime fmt: []const u8, args: anytype) !void {
+fn writeSimpleTemplateFile(io: Io, file_name: []const u8, comptime fmt: []const u8, args: anytype) !void {
const f = try fs.cwd().createFile(file_name, .{ .exclusive = true });
- defer f.close();
+ defer f.close(io);
var buf: [4096]u8 = undefined;
var fw = f.writer(&buf);
try fw.interface.print(fmt, args);
try fw.interface.flush();
}

-fn findTemplates(gpa: Allocator, arena: Allocator) Templates {
+fn findTemplates(gpa: Allocator, arena: Allocator, io: Io) Templates {
const cwd_path = introspect.getResolvedCwd(arena) catch |err| {
fatal("unable to get cwd: {s}", .{@errorName(err)});
};
const self_exe_path = fs.selfExePathAlloc(arena) catch |err| {
fatal("unable to find self exe path: {s}", .{@errorName(err)});
};
- var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, cwd_path, self_exe_path) catch |err| {
+ var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, io, cwd_path, self_exe_path) catch |err| {
fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) });
};

diff --git a/src/print_targets.zig b/src/print_targets.zig
index d9118b901b..2f80187de1 100644
--- a/src/print_targets.zig
+++ b/src/print_targets.zig
@@ -12,6 +12,7 @@ const introspect = @import("introspect.zig");

pub fn cmdTargets(
allocator: Allocator,
+ io: Io,
args: []const []const u8,
out: *std.Io.Writer,
native_target: *const Target,
@@ -20,7 +21,7 @@ pub fn cmdTargets(
var zig_lib_directory = introspect.findZigLibDir(allocator) catch |err| {
fatal("unable to find zig installation directory: {s}\n", .{@errorName(err)});
};
- defer zig_lib_directory.handle.close();
+ defer zig_lib_directory.handle.close(io);
defer allocator.free(zig_lib_directory.path.?);

const abilists_contents = zig_lib_directory.handle.readFileAlloc(