Diffstat (limited to 'src')
-rw-r--r--  src/Air/print.zig | 28
-rw-r--r--  src/Builtin.zig | 5
-rw-r--r--  src/Compilation.zig | 436
-rw-r--r--  src/InternPool.zig | 60
-rw-r--r--  src/Package/Fetch.zig | 246
-rw-r--r--  src/Package/Fetch/git.zig | 90
-rw-r--r--  src/Sema.zig | 8
-rw-r--r--  src/Zcu.zig | 37
-rw-r--r--  src/Zcu/PerThread.zig | 87
-rw-r--r--  src/codegen/aarch64/Select.zig | 10
-rw-r--r--  src/codegen/llvm.zig | 38
-rw-r--r--  src/crash_report.zig | 13
-rw-r--r--  src/fmt.zig | 70
-rw-r--r--  src/introspect.zig | 98
-rw-r--r--  src/libs/freebsd.zig | 28
-rw-r--r--  src/libs/glibc.zig | 32
-rw-r--r--  src/libs/mingw.zig | 65
-rw-r--r--  src/libs/mingw/def.zig | 32
-rw-r--r--  src/libs/netbsd.zig | 26
-rw-r--r--  src/link.zig | 224
-rw-r--r--  src/link/C.zig | 17
-rw-r--r--  src/link/Coff.zig | 74
-rw-r--r--  src/link/Dwarf.zig | 107
-rw-r--r--  src/link/Elf.zig | 113
-rw-r--r--  src/link/Elf/Archive.zig | 53
-rw-r--r--  src/link/Elf/AtomList.zig | 12
-rw-r--r--  src/link/Elf/Object.zig | 112
-rw-r--r--  src/link/Elf/SharedObject.zig | 53
-rw-r--r--  src/link/Elf/ZigObject.zig | 28
-rw-r--r--  src/link/Elf/file.zig | 35
-rw-r--r--  src/link/Elf/relocatable.zig | 68
-rw-r--r--  src/link/Elf2.zig | 78
-rw-r--r--  src/link/Lld.zig | 64
-rw-r--r--  src/link/MachO.zig | 235
-rw-r--r--  src/link/MachO/Archive.zig | 7
-rw-r--r--  src/link/MachO/CodeSignature.zig | 41
-rw-r--r--  src/link/MachO/DebugSymbols.zig | 91
-rw-r--r--  src/link/MachO/Dylib.zig | 20
-rw-r--r--  src/link/MachO/Object.zig | 153
-rw-r--r--  src/link/MachO/ZigObject.zig | 21
-rw-r--r--  src/link/MachO/fat.zig | 18
-rw-r--r--  src/link/MachO/file.zig | 3
-rw-r--r--  src/link/MachO/hasher.zig | 48
-rw-r--r--  src/link/MachO/relocatable.zig | 30
-rw-r--r--  src/link/MachO/uuid.zig | 33
-rw-r--r--  src/link/MappedFile.zig | 87
-rw-r--r--  src/link/Queue.zig | 4
-rw-r--r--  src/link/SpirV.zig | 8
-rw-r--r--  src/link/Wasm.zig | 58
-rw-r--r--  src/link/Wasm/Flush.zig | 14
-rw-r--r--  src/link/tapi.zig | 14
-rw-r--r--  src/main.zig | 499
-rw-r--r--  src/print_env.zig | 17
-rw-r--r--  src/print_targets.zig | 19
54 files changed, 2135 insertions, 1732 deletions
diff --git a/src/Air/print.zig b/src/Air/print.zig
index 95c8a1fcda..98b0a0b242 100644
--- a/src/Air/print.zig
+++ b/src/Air/print.zig
@@ -9,7 +9,7 @@ const Type = @import("../Type.zig");
const Air = @import("../Air.zig");
const InternPool = @import("../InternPool.zig");
-pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air.Liveness) void {
+pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air.Liveness) !void {
comptime assert(build_options.enable_debug_extensions);
const instruction_bytes = air.instructions.len *
// Here we don't use @sizeOf(Air.Inst.Data) because it would include
@@ -24,7 +24,7 @@ pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air
liveness_special_bytes + tomb_bytes;
// zig fmt: off
- stream.print(
+ try stream.print(
\\# Total AIR+Liveness bytes: {Bi}
\\# AIR Instructions: {d} ({Bi})
\\# AIR Extra Data: {d} ({Bi})
@@ -39,7 +39,7 @@ pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air
tomb_bytes,
if (liveness) |l| l.extra.len else 0, liveness_extra_bytes,
if (liveness) |l| l.special.count() else 0, liveness_special_bytes,
- }) catch return;
+ });
// zig fmt: on
var writer: Writer = .{
@@ -50,7 +50,7 @@ pub fn write(air: Air, stream: *std.Io.Writer, pt: Zcu.PerThread, liveness: ?Air
.indent = 2,
.skip_body = false,
};
- writer.writeBody(stream, air.getMainBody()) catch return;
+ try writer.writeBody(stream, air.getMainBody());
}
pub fn writeInst(
@@ -73,15 +73,23 @@ pub fn writeInst(
}
pub fn dump(air: Air, pt: Zcu.PerThread, liveness: ?Air.Liveness) void {
- const stderr_bw, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- air.write(stderr_bw, pt, liveness);
+ const comp = pt.zcu.comp;
+ const io = comp.io;
+ var buffer: [512]u8 = undefined;
+    const stderr = io.lockStderr(&buffer, null) catch return;
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+    air.write(w, pt, liveness) catch return;
}
pub fn dumpInst(air: Air, inst: Air.Inst.Index, pt: Zcu.PerThread, liveness: ?Air.Liveness) void {
- const stderr_bw, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- air.writeInst(stderr_bw, inst, pt, liveness);
+ const comp = pt.zcu.comp;
+ const io = comp.io;
+ var buffer: [512]u8 = undefined;
+    const stderr = io.lockStderr(&buffer, null) catch return;
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ air.writeInst(w, inst, pt, liveness);
}
const Writer = struct {
diff --git a/src/Builtin.zig b/src/Builtin.zig
index b0077f2276..a097e88734 100644
--- a/src/Builtin.zig
+++ b/src/Builtin.zig
@@ -313,8 +313,9 @@ pub fn updateFileOnDisk(file: *File, comp: *Compilation) !void {
assert(file.source != null);
const root_dir, const sub_path = file.path.openInfo(comp.dirs);
+ const io = comp.io;
- if (root_dir.statFile(sub_path)) |stat| {
+ if (root_dir.statFile(io, sub_path, .{})) |stat| {
if (stat.size != file.source.?.len) {
std.log.warn(
"the cached file '{f}' had the wrong size. Expected {d}, found {d}. " ++
@@ -342,7 +343,7 @@ pub fn updateFileOnDisk(file: *File, comp: *Compilation) !void {
}
// `make_path` matters because the dir hasn't actually been created yet.
- var af = try root_dir.atomicFile(sub_path, .{ .make_path = true, .write_buffer = &.{} });
+ var af = try root_dir.atomicFile(io, sub_path, .{ .make_path = true, .write_buffer = &.{} });
defer af.deinit();
try af.file_writer.interface.writeAll(file.source.?);
af.finish() catch |err| switch (err) {
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 931a0b2d14..8e005992ec 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -446,11 +446,11 @@ pub const Path = struct {
}
/// Given a `Path`, returns the directory handle and sub path to be used to open the path.
- pub fn openInfo(p: Path, dirs: Directories) struct { fs.Dir, []const u8 } {
+ pub fn openInfo(p: Path, dirs: Directories) struct { Io.Dir, []const u8 } {
const dir = switch (p.root) {
.none => {
const cwd_sub_path = absToCwdRelative(p.sub_path, dirs.cwd);
- return .{ fs.cwd(), cwd_sub_path };
+ return .{ Io.Dir.cwd(), cwd_sub_path };
},
.zig_lib => dirs.zig_lib.handle,
.global_cache => dirs.global_cache.handle,
@@ -721,13 +721,13 @@ pub const Directories = struct {
/// This may be the same as `global_cache`.
local_cache: Cache.Directory,
- pub fn deinit(dirs: *Directories) void {
+ pub fn deinit(dirs: *Directories, io: Io) void {
// The local and global caches could be the same.
- const close_local = dirs.local_cache.handle.fd != dirs.global_cache.handle.fd;
+ const close_local = dirs.local_cache.handle.handle != dirs.global_cache.handle.handle;
- dirs.global_cache.handle.close();
- if (close_local) dirs.local_cache.handle.close();
- dirs.zig_lib.handle.close();
+ dirs.global_cache.handle.close(io);
+ if (close_local) dirs.local_cache.handle.close(io);
+ dirs.zig_lib.handle.close(io);
}
/// Returns a `Directories` where `local_cache` is replaced with `global_cache`, intended for
@@ -745,6 +745,7 @@ pub const Directories = struct {
/// Uses `std.process.fatal` on error conditions.
pub fn init(
arena: Allocator,
+ io: Io,
override_zig_lib: ?[]const u8,
override_global_cache: ?[]const u8,
local_cache_strat: union(enum) {
@@ -768,30 +769,30 @@ pub const Directories = struct {
};
const zig_lib: Cache.Directory = d: {
- if (override_zig_lib) |path| break :d openUnresolved(arena, cwd, path, .@"zig lib");
+ if (override_zig_lib) |path| break :d openUnresolved(arena, io, cwd, path, .@"zig lib");
if (wasi) break :d openWasiPreopen(wasi_preopens, "/lib");
- break :d introspect.findZigLibDirFromSelfExe(arena, cwd, self_exe_path) catch |err| {
- fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) });
+ break :d introspect.findZigLibDirFromSelfExe(arena, io, cwd, self_exe_path) catch |err| {
+ fatal("unable to find zig installation directory '{s}': {t}", .{ self_exe_path, err });
};
};
const global_cache: Cache.Directory = d: {
- if (override_global_cache) |path| break :d openUnresolved(arena, cwd, path, .@"global cache");
+ if (override_global_cache) |path| break :d openUnresolved(arena, io, cwd, path, .@"global cache");
if (wasi) break :d openWasiPreopen(wasi_preopens, "/cache");
const path = introspect.resolveGlobalCacheDir(arena) catch |err| {
- fatal("unable to resolve zig cache directory: {s}", .{@errorName(err)});
+ fatal("unable to resolve zig cache directory: {t}", .{err});
};
- break :d openUnresolved(arena, cwd, path, .@"global cache");
+ break :d openUnresolved(arena, io, cwd, path, .@"global cache");
};
const local_cache: Cache.Directory = switch (local_cache_strat) {
- .override => |path| openUnresolved(arena, cwd, path, .@"local cache"),
+ .override => |path| openUnresolved(arena, io, cwd, path, .@"local cache"),
.search => d: {
- const maybe_path = introspect.resolveSuitableLocalCacheDir(arena, cwd) catch |err| {
- fatal("unable to resolve zig cache directory: {s}", .{@errorName(err)});
+ const maybe_path = introspect.resolveSuitableLocalCacheDir(arena, io, cwd) catch |err| {
+ fatal("unable to resolve zig cache directory: {t}", .{err});
};
const path = maybe_path orelse break :d global_cache;
- break :d openUnresolved(arena, cwd, path, .@"local cache");
+ break :d openUnresolved(arena, io, cwd, path, .@"local cache");
},
.global => global_cache,
};
@@ -814,18 +815,24 @@ pub const Directories = struct {
return .{
.path = if (std.mem.eql(u8, name, ".")) null else name,
.handle = .{
- .fd = preopens.find(name) orelse fatal("WASI preopen not found: '{s}'", .{name}),
+ .handle = preopens.find(name) orelse fatal("WASI preopen not found: '{s}'", .{name}),
},
};
}
- fn openUnresolved(arena: Allocator, cwd: []const u8, unresolved_path: []const u8, thing: enum { @"zig lib", @"global cache", @"local cache" }) Cache.Directory {
+ fn openUnresolved(
+ arena: Allocator,
+ io: Io,
+ cwd: []const u8,
+ unresolved_path: []const u8,
+ thing: enum { @"zig lib", @"global cache", @"local cache" },
+ ) Cache.Directory {
const path = introspect.resolvePath(arena, cwd, &.{unresolved_path}) catch |err| {
fatal("unable to resolve {s} directory: {s}", .{ @tagName(thing), @errorName(err) });
};
const nonempty_path = if (path.len == 0) "." else path;
const handle_or_err = switch (thing) {
- .@"zig lib" => fs.cwd().openDir(nonempty_path, .{}),
- .@"global cache", .@"local cache" => fs.cwd().makeOpenPath(nonempty_path, .{}),
+ .@"zig lib" => Io.Dir.cwd().openDir(io, nonempty_path, .{}),
+ .@"global cache", .@"local cache" => Io.Dir.cwd().createDirPathOpen(io, nonempty_path, .{}),
};
return .{
.path = if (path.len == 0) null else path,
@@ -912,8 +919,8 @@ pub const CrtFile = struct {
lock: Cache.Lock,
full_object_path: Cache.Path,
- pub fn deinit(self: *CrtFile, gpa: Allocator) void {
- self.lock.release();
+ pub fn deinit(self: *CrtFile, gpa: Allocator, io: Io) void {
+ self.lock.release(io);
gpa.free(self.full_object_path.sub_path);
self.* = undefined;
}
@@ -1104,8 +1111,8 @@ pub const CObject = struct {
const source_line = source_line: {
if (diag.src_loc.offset == 0 or diag.src_loc.column == 0) break :source_line 0;
- const file = fs.cwd().openFile(file_name, .{}) catch break :source_line 0;
- defer file.close();
+ const file = Io.Dir.cwd().openFile(io, file_name, .{}) catch break :source_line 0;
+ defer file.close(io);
var buffer: [1024]u8 = undefined;
var file_reader = file.reader(io, &buffer);
file_reader.seekTo(diag.src_loc.offset + 1 - diag.src_loc.column) catch break :source_line 0;
@@ -1179,8 +1186,8 @@ pub const CObject = struct {
};
var buffer: [1024]u8 = undefined;
- const file = try fs.cwd().openFile(path, .{});
- defer file.close();
+ const file = try Io.Dir.cwd().openFile(io, path, .{});
+ defer file.close(io);
var file_reader = file.reader(io, &buffer);
var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = &file_reader.interface });
defer bc.deinit();
@@ -1310,7 +1317,7 @@ pub const CObject = struct {
};
/// Returns if there was failure.
- pub fn clearStatus(self: *CObject, gpa: Allocator) bool {
+ pub fn clearStatus(self: *CObject, gpa: Allocator, io: Io) bool {
switch (self.status) {
.new => return false,
.failure, .failure_retryable => {
@@ -1319,15 +1326,15 @@ pub const CObject = struct {
},
.success => |*success| {
gpa.free(success.object_path.sub_path);
- success.lock.release();
+ success.lock.release(io);
self.status = .new;
return false;
},
}
}
- pub fn destroy(self: *CObject, gpa: Allocator) void {
- _ = self.clearStatus(gpa);
+ pub fn destroy(self: *CObject, gpa: Allocator, io: Io) void {
+ _ = self.clearStatus(gpa, io);
gpa.destroy(self);
}
};
@@ -1357,7 +1364,7 @@ pub const Win32Resource = struct {
},
/// Returns true if there was failure.
- pub fn clearStatus(self: *Win32Resource, gpa: Allocator) bool {
+ pub fn clearStatus(self: *Win32Resource, gpa: Allocator, io: Io) bool {
switch (self.status) {
.new => return false,
.failure, .failure_retryable => {
@@ -1366,15 +1373,15 @@ pub const Win32Resource = struct {
},
.success => |*success| {
gpa.free(success.res_path);
- success.lock.release();
+ success.lock.release(io);
self.status = .new;
return false;
},
}
}
- pub fn destroy(self: *Win32Resource, gpa: Allocator) void {
- _ = self.clearStatus(gpa);
+ pub fn destroy(self: *Win32Resource, gpa: Allocator, io: Io) void {
+ _ = self.clearStatus(gpa, io);
gpa.destroy(self);
}
};
@@ -1603,9 +1610,9 @@ const CacheUse = union(CacheMode) {
/// Prevents other processes from clobbering files in the output directory.
lock: ?Cache.Lock,
- fn releaseLock(whole: *Whole) void {
+ fn releaseLock(whole: *Whole, io: Io) void {
if (whole.lock) |*lock| {
- lock.release();
+ lock.release(io);
whole.lock = null;
}
}
@@ -1617,17 +1624,17 @@ const CacheUse = union(CacheMode) {
}
};
- fn deinit(cu: CacheUse) void {
+ fn deinit(cu: CacheUse, io: Io) void {
switch (cu) {
.none => |none| {
assert(none.tmp_artifact_directory == null);
},
.incremental => |incremental| {
- incremental.artifact_directory.handle.close();
+ incremental.artifact_directory.handle.close(io);
},
.whole => |whole| {
assert(whole.tmp_artifact_directory == null);
- whole.releaseLock();
+ whole.releaseLock(io);
},
}
}
@@ -1872,7 +1879,7 @@ pub const CreateDiagnostic = union(enum) {
pub const CreateCachePath = struct {
which: enum { local, global },
sub: []const u8,
- err: (fs.Dir.MakeError || fs.Dir.OpenError || fs.Dir.StatFileError),
+ err: (Io.Dir.CreateDirError || Io.Dir.OpenError || Io.Dir.StatFileError),
};
pub fn format(diag: CreateDiagnostic, w: *Writer) Writer.Error!void {
switch (diag) {
@@ -1896,13 +1903,17 @@ pub const CreateDiagnostic = union(enum) {
return error.CreateFail;
}
};
-pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, options: CreateOptions) error{
+
+pub const CreateError = error{
OutOfMemory,
+ Canceled,
Unexpected,
CurrentWorkingDirectoryUnlinked,
/// An error has been stored to `diag`.
CreateFail,
-}!*Compilation {
+};
+
+pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, options: CreateOptions) CreateError!*Compilation {
const output_mode = options.config.output_mode;
const is_dyn_lib = switch (output_mode) {
.Obj, .Exe => false,
@@ -1950,6 +1961,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
const libc_dirs = std.zig.LibCDirs.detect(
arena,
+ io,
options.dirs.zig_lib.path.?,
target,
options.root_mod.resolved_target.is_native_abi,
@@ -2080,13 +2092,17 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
}
if (options.verbose_llvm_cpu_features) {
- if (options.root_mod.resolved_target.llvm_cpu_features) |cf| print: {
- const stderr_w, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- stderr_w.print("compilation: {s}\n", .{options.root_name}) catch break :print;
- stderr_w.print(" target: {s}\n", .{try target.zigTriple(arena)}) catch break :print;
- stderr_w.print(" cpu: {s}\n", .{target.cpu.model.name}) catch break :print;
- stderr_w.print(" features: {s}\n", .{cf}) catch {};
+ if (options.root_mod.resolved_target.llvm_cpu_features) |cf| {
+ const stderr = try io.lockStderr(&.{}, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ printVerboseLlvmCpuFeatures(w, arena, options.root_name, target, cf) catch |err| switch (err) {
+ error.WriteFailed => switch (stderr.file_writer.err.?) {
+ error.Canceled => |e| return e,
+ else => {},
+ },
+ error.OutOfMemory => |e| return e,
+ };
}
}
@@ -2104,16 +2120,16 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
cache.* = .{
.gpa = gpa,
.io = io,
- .manifest_dir = options.dirs.local_cache.handle.makeOpenPath("h", .{}) catch |err| {
+ .manifest_dir = options.dirs.local_cache.handle.createDirPathOpen(io, "h", .{}) catch |err| {
return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = "h", .err = err } });
},
};
// These correspond to std.zig.Server.Message.PathPrefix.
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() });
cache.addPrefix(options.dirs.zig_lib);
cache.addPrefix(options.dirs.local_cache);
cache.addPrefix(options.dirs.global_cache);
- errdefer cache.manifest_dir.close();
+ errdefer cache.manifest_dir.close(io);
// This is shared hasher state common to zig source and all C source files.
cache.hash.addBytes(build_options.version);
@@ -2154,18 +2170,18 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
// to redundantly happen for each AstGen operation.
const zir_sub_dir = "z";
- var local_zir_dir = options.dirs.local_cache.handle.makeOpenPath(zir_sub_dir, .{}) catch |err| {
+ var local_zir_dir = options.dirs.local_cache.handle.createDirPathOpen(io, zir_sub_dir, .{}) catch |err| {
return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = zir_sub_dir, .err = err } });
};
- errdefer local_zir_dir.close();
+ errdefer local_zir_dir.close(io);
const local_zir_cache: Cache.Directory = .{
.handle = local_zir_dir,
.path = try options.dirs.local_cache.join(arena, &.{zir_sub_dir}),
};
- var global_zir_dir = options.dirs.global_cache.handle.makeOpenPath(zir_sub_dir, .{}) catch |err| {
+ var global_zir_dir = options.dirs.global_cache.handle.createDirPathOpen(io, zir_sub_dir, .{}) catch |err| {
return diag.fail(.{ .create_cache_path = .{ .which = .global, .sub = zir_sub_dir, .err = err } });
};
- errdefer global_zir_dir.close();
+ errdefer global_zir_dir.close(io);
const global_zir_cache: Cache.Directory = .{
.handle = global_zir_dir,
.path = try options.dirs.global_cache.join(arena, &.{zir_sub_dir}),
@@ -2433,10 +2449,10 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
const digest = hash.final();
const artifact_sub_dir = "o" ++ fs.path.sep_str ++ digest;
- var artifact_dir = options.dirs.local_cache.handle.makeOpenPath(artifact_sub_dir, .{}) catch |err| {
+ var artifact_dir = options.dirs.local_cache.handle.createDirPathOpen(io, artifact_sub_dir, .{}) catch |err| {
return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = artifact_sub_dir, .err = err } });
};
- errdefer artifact_dir.close();
+ errdefer artifact_dir.close(io);
const artifact_directory: Cache.Directory = .{
.handle = artifact_dir,
.path = try options.dirs.local_cache.join(arena, &.{artifact_sub_dir}),
@@ -2687,12 +2703,26 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
return comp;
}
+fn printVerboseLlvmCpuFeatures(
+ w: *Writer,
+ arena: Allocator,
+ root_name: []const u8,
+ target: *const std.Target,
+ cf: [*:0]const u8,
+) (Writer.Error || Allocator.Error)!void {
+ try w.print("compilation: {s}\n", .{root_name});
+ try w.print(" target: {s}\n", .{try target.zigTriple(arena)});
+ try w.print(" cpu: {s}\n", .{target.cpu.model.name});
+ try w.print(" features: {s}\n", .{cf});
+}
+
pub fn destroy(comp: *Compilation) void {
const gpa = comp.gpa;
+ const io = comp.io;
if (comp.bin_file) |lf| lf.destroy();
if (comp.zcu) |zcu| zcu.deinit();
- comp.cache_use.deinit();
+ comp.cache_use.deinit(io);
for (&comp.work_queues) |*work_queue| work_queue.deinit(gpa);
comp.c_object_work_queue.deinit(gpa);
@@ -2705,36 +2735,36 @@ pub fn destroy(comp: *Compilation) void {
var it = comp.crt_files.iterator();
while (it.next()) |entry| {
gpa.free(entry.key_ptr.*);
- entry.value_ptr.deinit(gpa);
+ entry.value_ptr.deinit(gpa, io);
}
comp.crt_files.deinit(gpa);
}
- if (comp.libcxx_static_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.libcxxabi_static_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.libunwind_static_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.tsan_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.ubsan_rt_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.ubsan_rt_obj) |*crt_file| crt_file.deinit(gpa);
- if (comp.zigc_static_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.compiler_rt_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.compiler_rt_obj) |*crt_file| crt_file.deinit(gpa);
- if (comp.compiler_rt_dyn_lib) |*crt_file| crt_file.deinit(gpa);
- if (comp.fuzzer_lib) |*crt_file| crt_file.deinit(gpa);
+ if (comp.libcxx_static_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.libcxxabi_static_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.libunwind_static_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.tsan_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.ubsan_rt_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.ubsan_rt_obj) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.zigc_static_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.compiler_rt_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.compiler_rt_obj) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.compiler_rt_dyn_lib) |*crt_file| crt_file.deinit(gpa, io);
+ if (comp.fuzzer_lib) |*crt_file| crt_file.deinit(gpa, io);
if (comp.glibc_so_files) |*glibc_file| {
- glibc_file.deinit(gpa);
+ glibc_file.deinit(gpa, io);
}
if (comp.freebsd_so_files) |*freebsd_file| {
- freebsd_file.deinit(gpa);
+ freebsd_file.deinit(gpa, io);
}
if (comp.netbsd_so_files) |*netbsd_file| {
- netbsd_file.deinit(gpa);
+ netbsd_file.deinit(gpa, io);
}
for (comp.c_object_table.keys()) |key| {
- key.destroy(gpa);
+ key.destroy(gpa, io);
}
comp.c_object_table.deinit(gpa);
@@ -2744,7 +2774,7 @@ pub fn destroy(comp: *Compilation) void {
comp.failed_c_objects.deinit(gpa);
for (comp.win32_resource_table.keys()) |key| {
- key.destroy(gpa);
+ key.destroy(gpa, io);
}
comp.win32_resource_table.deinit(gpa);
@@ -2760,7 +2790,7 @@ pub fn destroy(comp: *Compilation) void {
comp.clearMiscFailures();
- comp.cache_parent.manifest_dir.close();
+ comp.cache_parent.manifest_dir.close(io);
}
pub fn clearMiscFailures(comp: *Compilation) void {
@@ -2791,10 +2821,12 @@ pub fn hotCodeSwap(
}
fn cleanupAfterUpdate(comp: *Compilation, tmp_dir_rand_int: u64) void {
+ const io = comp.io;
+
switch (comp.cache_use) {
.none => |none| {
if (none.tmp_artifact_directory) |*tmp_dir| {
- tmp_dir.handle.close();
+ tmp_dir.handle.close(io);
none.tmp_artifact_directory = null;
if (dev.env == .bootstrap) {
// zig1 uses `CacheMode.none`, but it doesn't need to know how to delete
@@ -2813,12 +2845,9 @@ fn cleanupAfterUpdate(comp: *Compilation, tmp_dir_rand_int: u64) void {
return;
}
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int);
- comp.dirs.local_cache.handle.deleteTree(tmp_dir_sub_path) catch |err| {
- log.warn("failed to delete temporary directory '{s}{c}{s}': {s}", .{
- comp.dirs.local_cache.path orelse ".",
- fs.path.sep,
- tmp_dir_sub_path,
- @errorName(err),
+ comp.dirs.local_cache.handle.deleteTree(io, tmp_dir_sub_path) catch |err| {
+ log.warn("failed to delete temporary directory '{s}{c}{s}': {t}", .{
+ comp.dirs.local_cache.path orelse ".", fs.path.sep, tmp_dir_sub_path, err,
});
};
}
@@ -2834,15 +2863,12 @@ fn cleanupAfterUpdate(comp: *Compilation, tmp_dir_rand_int: u64) void {
comp.bin_file = null;
}
if (whole.tmp_artifact_directory) |*tmp_dir| {
- tmp_dir.handle.close();
+ tmp_dir.handle.close(io);
whole.tmp_artifact_directory = null;
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int);
- comp.dirs.local_cache.handle.deleteTree(tmp_dir_sub_path) catch |err| {
- log.warn("failed to delete temporary directory '{s}{c}{s}': {s}", .{
- comp.dirs.local_cache.path orelse ".",
- fs.path.sep,
- tmp_dir_sub_path,
- @errorName(err),
+ comp.dirs.local_cache.handle.deleteTree(io, tmp_dir_sub_path) catch |err| {
+ log.warn("failed to delete temporary directory '{s}{c}{s}': {t}", .{
+ comp.dirs.local_cache.path orelse ".", fs.path.sep, tmp_dir_sub_path, err,
});
};
}
@@ -2891,7 +2917,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
tmp_dir_rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int);
const path = try comp.dirs.local_cache.join(arena, &.{tmp_dir_sub_path});
- const handle = comp.dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{}) catch |err| {
+ const handle = comp.dirs.local_cache.handle.createDirPathOpen(io, tmp_dir_sub_path, .{}) catch |err| {
return comp.setMiscFailure(.open_output, "failed to create output directory '{s}': {t}", .{ path, err });
};
break :d .{ .path = path, .handle = handle };
@@ -2901,7 +2927,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
.whole => |whole| {
assert(comp.bin_file == null);
// We are about to obtain this lock, so here we give other processes a chance first.
- whole.releaseLock();
+ whole.releaseLock(io);
man = comp.cache_parent.obtain();
whole.cache_manifest = &man;
@@ -2972,7 +2998,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
tmp_dir_rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(tmp_dir_rand_int);
const path = try comp.dirs.local_cache.join(arena, &.{tmp_dir_sub_path});
- const handle = comp.dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{}) catch |err| {
+ const handle = comp.dirs.local_cache.handle.createDirPathOpen(io, tmp_dir_sub_path, .{}) catch |err| {
return comp.setMiscFailure(.open_output, "failed to create output directory '{s}': {t}", .{ path, err });
};
break :d .{ .path = path, .handle = handle };
@@ -3087,17 +3113,12 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
}
if (build_options.enable_debug_extensions and comp.verbose_intern_pool) {
- std.debug.print("intern pool stats for '{s}':\n", .{
- comp.root_name,
- });
+ std.debug.print("intern pool stats for '{s}':\n", .{comp.root_name});
zcu.intern_pool.dump();
}
if (build_options.enable_debug_extensions and comp.verbose_generic_instances) {
- std.debug.print("generic instances for '{s}:0x{x}':\n", .{
- comp.root_name,
- @intFromPtr(zcu),
- });
+ std.debug.print("generic instances for '{s}:0x{x}':\n", .{ comp.root_name, @intFromPtr(zcu) });
zcu.intern_pool.dumpGenericInstances(gpa);
}
}
@@ -3152,7 +3173,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
// the file handle and re-open it in the follow up call to
// `makeWritable`.
if (lf.file) |f| {
- f.close();
+ f.close(io);
lf.file = null;
if (lf.closeDebugInfo()) break :w .lf_and_debug;
@@ -3165,12 +3186,12 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
// Rename the temporary directory into place.
// Close tmp dir and link.File to avoid open handle during rename.
- whole.tmp_artifact_directory.?.handle.close();
+ whole.tmp_artifact_directory.?.handle.close(io);
whole.tmp_artifact_directory = null;
const s = fs.path.sep_str;
const tmp_dir_sub_path = "tmp" ++ s ++ std.fmt.hex(tmp_dir_rand_int);
const o_sub_path = "o" ++ s ++ hex_digest;
- renameTmpIntoCache(comp.dirs.local_cache, tmp_dir_sub_path, o_sub_path) catch |err| {
+ renameTmpIntoCache(io, comp.dirs.local_cache, tmp_dir_sub_path, o_sub_path) catch |err| {
return comp.setMiscFailure(
.rename_results,
"failed to rename compilation results ('{f}{s}') into local cache ('{f}{s}'): {t}",
@@ -3300,11 +3321,8 @@ pub fn resolveEmitPathFlush(
},
}
}
-fn flush(
- comp: *Compilation,
- arena: Allocator,
- tid: Zcu.PerThread.Id,
-) Allocator.Error!void {
+
+fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id) (Io.Cancelable || Allocator.Error)!void {
const io = comp.io;
if (comp.zcu) |zcu| {
if (zcu.llvm_object) |llvm_object| {
@@ -3370,7 +3388,7 @@ fn flush(
// This is needed before reading the error flags.
lf.flush(arena, tid, comp.link_prog_node) catch |err| switch (err) {
error.LinkFailure => {}, // Already reported.
- error.OutOfMemory => return error.OutOfMemory,
+ error.OutOfMemory, error.Canceled => |e| return e,
};
}
if (comp.zcu) |zcu| {
@@ -3389,17 +3407,19 @@ fn flush(
/// implementation at the bottom of this function.
/// This function is only called when CacheMode is `whole`.
fn renameTmpIntoCache(
+ io: Io,
cache_directory: Cache.Directory,
tmp_dir_sub_path: []const u8,
o_sub_path: []const u8,
) !void {
var seen_eaccess = false;
while (true) {
- fs.rename(
+ Io.Dir.rename(
cache_directory.handle,
tmp_dir_sub_path,
cache_directory.handle,
o_sub_path,
+ io,
) catch |err| switch (err) {
// On Windows, rename fails with `AccessDenied` rather than `PathAlreadyExists`.
// See https://github.com/ziglang/zig/issues/8362
@@ -3407,17 +3427,17 @@ fn renameTmpIntoCache(
.windows => {
if (seen_eaccess) return error.AccessDenied;
seen_eaccess = true;
- try cache_directory.handle.deleteTree(o_sub_path);
+ try cache_directory.handle.deleteTree(io, o_sub_path);
continue;
},
else => return error.AccessDenied,
},
error.PathAlreadyExists => {
- try cache_directory.handle.deleteTree(o_sub_path);
+ try cache_directory.handle.deleteTree(io, o_sub_path);
continue;
},
error.FileNotFound => {
- try cache_directory.handle.makePath("o");
+ try cache_directory.handle.createDirPath(io, "o");
continue;
},
else => |e| return e,
@@ -3592,6 +3612,7 @@ fn emitFromCObject(
new_ext: []const u8,
unresolved_emit_path: []const u8,
) Allocator.Error!void {
+ const io = comp.io;
// The dirname and stem (i.e. everything but the extension), of the sub path of the C object.
// We'll append `new_ext` to it to get the path to the right thing (asm, LLVM IR, etc).
const c_obj_dir_and_stem: []const u8 = p: {
@@ -3601,23 +3622,18 @@ fn emitFromCObject(
};
const src_path: Cache.Path = .{
.root_dir = c_obj_path.root_dir,
- .sub_path = try std.fmt.allocPrint(arena, "{s}{s}", .{
- c_obj_dir_and_stem,
- new_ext,
- }),
+ .sub_path = try std.fmt.allocPrint(arena, "{s}{s}", .{ c_obj_dir_and_stem, new_ext }),
};
const emit_path = comp.resolveEmitPath(unresolved_emit_path);
- src_path.root_dir.handle.copyFile(
+ Io.Dir.copyFile(
+ src_path.root_dir.handle,
src_path.sub_path,
emit_path.root_dir.handle,
emit_path.sub_path,
+ io,
.{},
- ) catch |err| log.err("unable to copy '{f}' to '{f}': {s}", .{
- src_path,
- emit_path,
- @errorName(err),
- });
+ ) catch |err| log.err("unable to copy '{f}' to '{f}': {t}", .{ src_path, emit_path, err });
}
/// Having the file open for writing is problematic as far as executing the
@@ -3673,6 +3689,7 @@ pub fn saveState(comp: *Compilation) !void {
const lf = comp.bin_file orelse return;
const gpa = comp.gpa;
+ const io = comp.io;
var bufs = std.array_list.Managed([]const u8).init(gpa);
defer bufs.deinit();
@@ -3893,7 +3910,7 @@ pub fn saveState(comp: *Compilation) !void {
// Using an atomic file prevents a crash or power failure from corrupting
// the previous incremental compilation state.
var write_buffer: [1024]u8 = undefined;
- var af = try lf.emit.root_dir.handle.atomicFile(basename, .{ .write_buffer = &write_buffer });
+ var af = try lf.emit.root_dir.handle.atomicFile(io, basename, .{ .write_buffer = &write_buffer });
defer af.deinit();
try af.file_writer.interface.writeVecAll(bufs.items);
try af.finish();
@@ -4251,12 +4268,13 @@ pub fn getAllErrorsAlloc(comp: *Compilation) error{OutOfMemory}!ErrorBundle {
// However, we haven't reported any such error.
// This is a compiler bug.
print_ctx: {
- var stderr_w, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- stderr_w.writeAll("referenced transitive analysis errors, but none actually emitted\n") catch break :print_ctx;
- stderr_w.print("{f} [transitive failure]\n", .{zcu.fmtAnalUnit(failed_unit)}) catch break :print_ctx;
+ const stderr = std.debug.lockStderr(&.{}).terminal();
+ defer std.debug.unlockStderr();
+ const w = stderr.writer;
+ w.writeAll("referenced transitive analysis errors, but none actually emitted\n") catch break :print_ctx;
+ w.print("{f} [transitive failure]\n", .{zcu.fmtAnalUnit(failed_unit)}) catch break :print_ctx;
while (ref) |r| {
- stderr_w.print("referenced by: {f}{s}\n", .{
+ w.print("referenced by: {f}{s}\n", .{
zcu.fmtAnalUnit(r.referencer),
if (zcu.transitive_failed_analysis.contains(r.referencer)) " [transitive failure]" else "",
}) catch break :print_ctx;
@@ -5038,7 +5056,9 @@ fn dispatchPrelinkWork(comp: *Compilation, main_progress_node: std.Progress.Node
}
prelink_group.wait(io);
- comp.link_queue.finishPrelinkQueue(comp);
+ comp.link_queue.finishPrelinkQueue(comp) catch |err| switch (err) {
+ error.Canceled => return,
+ };
}
const JobError = Allocator.Error || Io.Cancelable;
@@ -5211,13 +5231,10 @@ fn processOneJob(
}
}
-fn createDepFile(
- comp: *Compilation,
- depfile: []const u8,
- binfile: Cache.Path,
-) anyerror!void {
+fn createDepFile(comp: *Compilation, depfile: []const u8, binfile: Cache.Path) anyerror!void {
+ const io = comp.io;
var buf: [4096]u8 = undefined;
- var af = try std.fs.cwd().atomicFile(depfile, .{ .write_buffer = &buf });
+ var af = try Io.Dir.cwd().atomicFile(io, depfile, .{ .write_buffer = &buf });
defer af.deinit();
comp.writeDepFile(binfile, &af.file_writer.interface) catch return af.file_writer.err.?;
@@ -5258,39 +5275,35 @@ fn workerDocsCopy(comp: *Compilation) void {
fn docsCopyFallible(comp: *Compilation) anyerror!void {
const zcu = comp.zcu orelse return comp.lockAndSetMiscFailure(.docs_copy, "no Zig code to document", .{});
+ const io = comp.io;
const docs_path = comp.resolveEmitPath(comp.emit_docs.?);
- var out_dir = docs_path.root_dir.handle.makeOpenPath(docs_path.sub_path, .{}) catch |err| {
+ var out_dir = docs_path.root_dir.handle.createDirPathOpen(io, docs_path.sub_path, .{}) catch |err| {
return comp.lockAndSetMiscFailure(
.docs_copy,
"unable to create output directory '{f}': {s}",
.{ docs_path, @errorName(err) },
);
};
- defer out_dir.close();
+ defer out_dir.close(io);
for (&[_][]const u8{ "docs/main.js", "docs/index.html" }) |sub_path| {
const basename = fs.path.basename(sub_path);
- comp.dirs.zig_lib.handle.copyFile(sub_path, out_dir, basename, .{}) catch |err| {
- comp.lockAndSetMiscFailure(.docs_copy, "unable to copy {s}: {s}", .{
- sub_path,
- @errorName(err),
- });
- return;
- };
+ comp.dirs.zig_lib.handle.copyFile(sub_path, out_dir, basename, io, .{}) catch |err|
+ return comp.lockAndSetMiscFailure(.docs_copy, "unable to copy {s}: {t}", .{ sub_path, err });
}
- var tar_file = out_dir.createFile("sources.tar", .{}) catch |err| {
+ var tar_file = out_dir.createFile(io, "sources.tar", .{}) catch |err| {
return comp.lockAndSetMiscFailure(
.docs_copy,
"unable to create '{f}/sources.tar': {s}",
.{ docs_path, @errorName(err) },
);
};
- defer tar_file.close();
+ defer tar_file.close(io);
var buffer: [1024]u8 = undefined;
- var tar_file_writer = tar_file.writer(&buffer);
+ var tar_file_writer = tar_file.writer(io, &buffer);
var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, []const u8) = .empty;
defer seen_table.deinit(comp.gpa);
@@ -5321,17 +5334,17 @@ fn docsCopyModule(
comp: *Compilation,
module: *Package.Module,
name: []const u8,
- tar_file_writer: *fs.File.Writer,
+ tar_file_writer: *Io.File.Writer,
) !void {
const io = comp.io;
const root = module.root;
var mod_dir = d: {
const root_dir, const sub_path = root.openInfo(comp.dirs);
- break :d root_dir.openDir(sub_path, .{ .iterate = true });
+ break :d root_dir.openDir(io, sub_path, .{ .iterate = true });
} catch |err| {
return comp.lockAndSetMiscFailure(.docs_copy, "unable to open directory '{f}': {t}", .{ root.fmt(comp), err });
};
- defer mod_dir.close();
+ defer mod_dir.close(io);
var walker = try mod_dir.walk(comp.gpa);
defer walker.deinit();
@@ -5341,7 +5354,7 @@ fn docsCopyModule(
var buffer: [1024]u8 = undefined;
- while (try walker.next()) |entry| {
+ while (try walker.next(io)) |entry| {
switch (entry.kind) {
.file => {
if (!std.mem.endsWith(u8, entry.basename, ".zig")) continue;
@@ -5350,14 +5363,14 @@ fn docsCopyModule(
},
else => continue,
}
- var file = mod_dir.openFile(entry.path, .{}) catch |err| {
+ var file = mod_dir.openFile(io, entry.path, .{}) catch |err| {
return comp.lockAndSetMiscFailure(.docs_copy, "unable to open {f}{s}: {t}", .{
root.fmt(comp), entry.path, err,
});
};
- defer file.close();
- const stat = try file.stat();
- var file_reader: fs.File.Reader = .initSize(file.adaptToNewApi(), io, &buffer, stat.size);
+ defer file.close(io);
+ const stat = try file.stat(io);
+ var file_reader: Io.File.Reader = .initSize(file, io, &buffer, stat.size);
archiver.writeFileTimestamp(entry.path, &file_reader, stat.mtime) catch |err| {
return comp.lockAndSetMiscFailure(.docs_copy, "unable to archive {f}{s}: {t}", .{
@@ -5496,13 +5509,13 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
try comp.updateSubCompilation(sub_compilation, .docs_wasm, prog_node);
var crt_file = try sub_compilation.toCrtFile();
- defer crt_file.deinit(gpa);
+ defer crt_file.deinit(gpa, io);
const docs_bin_file = crt_file.full_object_path;
assert(docs_bin_file.sub_path.len > 0); // emitted binary is not a directory
const docs_path = comp.resolveEmitPath(comp.emit_docs.?);
- var out_dir = docs_path.root_dir.handle.makeOpenPath(docs_path.sub_path, .{}) catch |err| {
+ var out_dir = docs_path.root_dir.handle.createDirPathOpen(io, docs_path.sub_path, .{}) catch |err| {
comp.lockAndSetMiscFailure(
.docs_copy,
"unable to create output directory '{f}': {t}",
@@ -5510,12 +5523,14 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
);
return error.AlreadyReported;
};
- defer out_dir.close();
+ defer out_dir.close(io);
- crt_file.full_object_path.root_dir.handle.copyFile(
+ Io.Dir.copyFile(
+ crt_file.full_object_path.root_dir.handle,
crt_file.full_object_path.sub_path,
out_dir,
"main.wasm",
+ io,
.{},
) catch |err| {
comp.lockAndSetMiscFailure(.docs_copy, "unable to copy '{f}' to '{f}': {t}", .{
@@ -5692,8 +5707,8 @@ pub fn translateC(
const tmp_basename = std.fmt.hex(std.crypto.random.int(u64));
const tmp_sub_path = "tmp" ++ fs.path.sep_str ++ tmp_basename;
const cache_dir = comp.dirs.local_cache.handle;
- var cache_tmp_dir = try cache_dir.makeOpenPath(tmp_sub_path, .{});
- defer cache_tmp_dir.close();
+ var cache_tmp_dir = try cache_dir.createDirPathOpen(io, tmp_sub_path, .{});
+ defer cache_tmp_dir.close(io);
const translated_path = try comp.dirs.local_cache.join(arena, &.{ tmp_sub_path, translated_basename });
const source_path = switch (source) {
@@ -5702,7 +5717,7 @@ pub fn translateC(
const out_h_sub_path = tmp_sub_path ++ fs.path.sep_str ++ cimport_basename;
const out_h_path = try comp.dirs.local_cache.join(arena, &.{out_h_sub_path});
if (comp.verbose_cimport) log.info("writing C import source to {s}", .{out_h_path});
- try cache_dir.writeFile(.{ .sub_path = out_h_sub_path, .data = c_src });
+ try cache_dir.writeFile(io, .{ .sub_path = out_h_sub_path, .data = c_src });
break :path out_h_path;
},
.path => |p| p,
@@ -5749,7 +5764,7 @@ pub fn translateC(
try argv.appendSlice(comp.global_cc_argv);
try argv.appendSlice(owner_mod.cc_argv);
try argv.appendSlice(&.{ source_path, "-o", translated_path });
- if (comp.verbose_cimport) dump_argv(argv.items);
+ if (comp.verbose_cimport) try dumpArgv(io, argv.items);
}
var stdout: []u8 = undefined;
@@ -5775,7 +5790,7 @@ pub fn translateC(
}
// Just to save disk space, we delete the file because it is never needed again.
- cache_tmp_dir.deleteFile(dep_basename) catch |err| {
+ cache_tmp_dir.deleteFile(io, dep_basename) catch |err| {
log.warn("failed to delete '{s}': {t}", .{ dep_file_path, err });
};
}
@@ -5805,7 +5820,7 @@ pub fn translateC(
const o_sub_path = "o" ++ fs.path.sep_str ++ hex_digest;
if (comp.verbose_cimport) log.info("renaming {s} to {s}", .{ tmp_sub_path, o_sub_path });
- try renameTmpIntoCache(comp.dirs.local_cache, tmp_sub_path, o_sub_path);
+ try renameTmpIntoCache(io, comp.dirs.local_cache, tmp_sub_path, o_sub_path);
return .{
.digest = bin_digest,
@@ -6144,7 +6159,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
const gpa = comp.gpa;
const io = comp.io;
- if (c_object.clearStatus(gpa)) {
+ if (c_object.clearStatus(gpa, io)) {
// There was previous failure.
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
@@ -6257,7 +6272,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
}
if (comp.verbose_cc) {
- dump_argv(argv.items);
+ try dumpArgv(io, argv.items);
}
const err = std.process.execv(arena, argv.items);
@@ -6267,8 +6282,8 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
// We can't know the digest until we do the C compiler invocation,
// so we need a temporary filename.
const out_obj_path = try comp.tmpFilePath(arena, o_basename);
- var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{});
- defer zig_cache_tmp_dir.close();
+ var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, "tmp", .{});
+ defer zig_cache_tmp_dir.close(io);
const out_diag_path = if (comp.clang_passthrough_mode or !ext.clangSupportsDiagnostics())
null
@@ -6303,15 +6318,15 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
}
if (comp.verbose_cc) {
- dump_argv(argv.items);
+ try dumpArgv(io, argv.items);
}
// Just to save disk space, we delete the files that are never needed again.
- defer if (out_diag_path) |diag_file_path| zig_cache_tmp_dir.deleteFile(fs.path.basename(diag_file_path)) catch |err| switch (err) {
+ defer if (out_diag_path) |diag_file_path| zig_cache_tmp_dir.deleteFile(io, fs.path.basename(diag_file_path)) catch |err| switch (err) {
error.FileNotFound => {}, // the file wasn't created due to an error we reported
else => log.warn("failed to delete '{s}': {s}", .{ diag_file_path, @errorName(err) }),
};
- defer if (out_dep_path) |dep_file_path| zig_cache_tmp_dir.deleteFile(fs.path.basename(dep_file_path)) catch |err| switch (err) {
+ defer if (out_dep_path) |dep_file_path| zig_cache_tmp_dir.deleteFile(io, fs.path.basename(dep_file_path)) catch |err| switch (err) {
error.FileNotFound => {}, // the file wasn't created due to an error we reported
else => log.warn("failed to delete '{s}': {s}", .{ dep_file_path, @errorName(err) }),
};
@@ -6322,7 +6337,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
- const term = child.spawnAndWait() catch |err| {
+ const term = child.spawnAndWait(io) catch |err| {
return comp.failCObj(c_object, "failed to spawn zig clang (passthrough mode) {s}: {s}", .{ argv.items[0], @errorName(err) });
};
switch (term) {
@@ -6340,12 +6355,12 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
child.stdout_behavior = .Ignore;
child.stderr_behavior = .Pipe;
- try child.spawn();
+ try child.spawn(io);
var stderr_reader = child.stderr.?.readerStreaming(io, &.{});
const stderr = try stderr_reader.interface.allocRemaining(arena, .limited(std.math.maxInt(u32)));
- const term = child.wait() catch |err| {
+ const term = child.wait(io) catch |err| {
return comp.failCObj(c_object, "failed to spawn zig clang {s}: {s}", .{ argv.items[0], @errorName(err) });
};
@@ -6387,7 +6402,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
if (comp.file_system_inputs != null) {
// Use the same file size limit as the cache code does for dependency files.
- const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(dep_basename, gpa, .limited(Cache.manifest_file_size_max));
+ const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(io, dep_basename, gpa, .limited(Cache.manifest_file_size_max));
defer gpa.free(dep_file_contents);
var str_buf: std.ArrayList(u8) = .empty;
@@ -6432,10 +6447,10 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
// Rename into place.
const digest = man.final();
const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest });
- var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
- defer o_dir.close();
+ var o_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, o_sub_path, .{});
+ defer o_dir.close(io);
const tmp_basename = fs.path.basename(out_obj_path);
- try fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename);
+ try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename, io);
break :blk digest;
};
@@ -6477,8 +6492,6 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const tracy_trace = trace(@src());
defer tracy_trace.end();
- const io = comp.io;
-
const src_path = switch (win32_resource.src) {
.rc => |rc_src| rc_src.src_path,
.manifest => |src_path| src_path,
@@ -6487,11 +6500,13 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
log.debug("updating win32 resource: {s}", .{src_path});
+ const io = comp.io;
+
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- if (win32_resource.clearStatus(comp.gpa)) {
+ if (win32_resource.clearStatus(comp.gpa, io)) {
// There was previous failure.
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
@@ -6521,8 +6536,8 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const digest = man.final();
const o_sub_path = try fs.path.join(arena, &.{ "o", &digest });
- var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
- defer o_dir.close();
+ var o_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, o_sub_path, .{});
+ defer o_dir.close(io);
const in_rc_path = try comp.dirs.local_cache.join(comp.gpa, &.{
o_sub_path, rc_basename,
@@ -6559,7 +6574,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
resource_id, resource_type, fmtRcEscape(src_path),
});
- try o_dir.writeFile(.{ .sub_path = rc_basename, .data = input });
+ try o_dir.writeFile(io, .{ .sub_path = rc_basename, .data = input });
var argv = std.array_list.Managed([]const u8).init(comp.gpa);
defer argv.deinit();
@@ -6609,8 +6624,8 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const rc_basename_noext = src_basename[0 .. src_basename.len - fs.path.extension(src_basename).len];
const digest = if (try man.hit()) man.final() else blk: {
- var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{});
- defer zig_cache_tmp_dir.close();
+ var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, "tmp", .{});
+ defer zig_cache_tmp_dir.close(io);
const res_filename = try std.fmt.allocPrint(arena, "{s}.res", .{rc_basename_noext});
@@ -6652,7 +6667,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
// Read depfile and update cache manifest
{
const dep_basename = fs.path.basename(out_dep_path);
- const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(dep_basename, arena, .limited(50 * 1024 * 1024));
+ const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(io, dep_basename, arena, .limited(50 * 1024 * 1024));
defer arena.free(dep_file_contents);
const value = try std.json.parseFromSliceLeaky(std.json.Value, arena, dep_file_contents, .{});
@@ -6680,10 +6695,10 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
// Rename into place.
const digest = man.final();
const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest });
- var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
- defer o_dir.close();
+ var o_dir = try comp.dirs.local_cache.handle.createDirPathOpen(io, o_sub_path, .{});
+ defer o_dir.close(io);
const tmp_basename = fs.path.basename(out_res_path);
- try fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename);
+ try Io.Dir.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename, io);
break :blk digest;
};
@@ -6716,6 +6731,7 @@ fn spawnZigRc(
argv: []const []const u8,
child_progress_node: std.Progress.Node,
) !void {
+ const io = comp.io;
var node_name: std.ArrayList(u8) = .empty;
defer node_name.deinit(arena);
@@ -6725,8 +6741,8 @@ fn spawnZigRc(
child.stderr_behavior = .Pipe;
child.progress_node = child_progress_node;
- child.spawn() catch |err| {
- return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {s}", .{ argv[0], @errorName(err) });
+ child.spawn(io) catch |err| {
+ return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {t}", .{ argv[0], err });
};
var poller = std.Io.poll(comp.gpa, enum { stdout, stderr }, .{
@@ -6758,7 +6774,7 @@ fn spawnZigRc(
// Just in case there's a failure that didn't send an ErrorBundle (e.g. an error return trace)
const stderr = poller.reader(.stderr);
- const term = child.wait() catch |err| {
+ const term = child.wait(io) catch |err| {
return comp.failWin32Resource(win32_resource, "unable to wait for {s} rc: {s}", .{ argv[0], @errorName(err) });
};
@@ -7765,17 +7781,25 @@ pub fn lockAndSetMiscFailure(
return setMiscFailure(comp, tag, format, args);
}
-pub fn dump_argv(argv: []const []const u8) void {
+pub fn dumpArgv(io: Io, argv: []const []const u8) Io.Cancelable!void {
var buffer: [64]u8 = undefined;
- const stderr, _ = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
- nosuspend {
- for (argv, 0..) |arg, i| {
- if (i != 0) stderr.writeByte(' ') catch return;
- stderr.writeAll(arg) catch return;
- }
- stderr.writeByte('\n') catch return;
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ return dumpArgvWriter(w, argv) catch |err| switch (err) {
+ error.WriteFailed => switch (stderr.file_writer.err.?) {
+ error.Canceled => return error.Canceled,
+ else => return,
+ },
+ };
+}
+
+fn dumpArgvWriter(w: *Io.Writer, argv: []const []const u8) Io.Writer.Error!void {
+ for (argv, 0..) |arg, i| {
+ if (i != 0) try w.writeByte(' ');
+ try w.writeAll(arg);
}
+ try w.writeByte('\n');
}
pub fn getZigBackend(comp: Compilation) std.builtin.CompilerBackend {
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 5568c493d9..69a64cbc45 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -1,20 +1,21 @@
//! All interned objects have both a value and a type.
//! This data structure is self-contained.
+const InternPool = @This();
const builtin = @import("builtin");
+
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Cache = std.Build.Cache;
-const Io = std.Io;
const Limb = std.math.big.Limb;
const Hash = std.hash.Wyhash;
+const Zir = std.zig.Zir;
-const InternPool = @This();
const Zcu = @import("Zcu.zig");
-const Zir = std.zig.Zir;
/// One item per thread, indexed by `tid`, which is dense and unique per thread.
locals: []Local,
@@ -11166,11 +11167,15 @@ pub fn mutateVarInit(ip: *InternPool, io: Io, index: Index, init_index: Index) v
}
pub fn dump(ip: *const InternPool) void {
- dumpStatsFallible(ip, std.heap.page_allocator) catch return;
- dumpAllFallible(ip) catch return;
+ var buffer: [4096]u8 = undefined;
+ const stderr = std.debug.lockStderr(&buffer);
+ defer std.debug.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ dumpStatsFallible(ip, w, std.heap.page_allocator) catch return;
+ dumpAllFallible(ip, w) catch return;
}
-fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
+fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !void {
var items_len: usize = 0;
var extra_len: usize = 0;
var limbs_len: usize = 0;
@@ -11423,18 +11428,13 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
};
counts.sort(SortContext{ .map = &counts });
const len = @min(50, counts.count());
- std.debug.print(" top 50 tags:\n", .{});
+ try w.print(" top 50 tags:\n", .{});
for (counts.keys()[0..len], counts.values()[0..len]) |tag, stats| {
- std.debug.print(" {s}: {d} occurrences, {d} total bytes\n", .{
- @tagName(tag), stats.count, stats.bytes,
- });
+ try w.print(" {t}: {d} occurrences, {d} total bytes\n", .{ tag, stats.count, stats.bytes });
}
}
-fn dumpAllFallible(ip: *const InternPool) anyerror!void {
- var buffer: [4096]u8 = undefined;
- const stderr_bw, _ = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
+fn dumpAllFallible(ip: *const InternPool, w: *Io.Writer) anyerror!void {
for (ip.locals, 0..) |*local, tid| {
const items = local.shared.items.view();
for (
@@ -11443,12 +11443,12 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
0..,
) |tag, data, index| {
const i = Index.Unwrapped.wrap(.{ .tid = @enumFromInt(tid), .index = @intCast(index) }, ip);
- try stderr_bw.print("${d} = {s}(", .{ i, @tagName(tag) });
+ try w.print("${d} = {s}(", .{ i, @tagName(tag) });
switch (tag) {
.removed => {},
- .simple_type => try stderr_bw.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(@intFromEnum(i))))}),
- .simple_value => try stderr_bw.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(@intFromEnum(i))))}),
+ .simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(@intFromEnum(i))))}),
+ .simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(@intFromEnum(i))))}),
.type_int_signed,
.type_int_unsigned,
@@ -11521,23 +11521,27 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
.func_coerced,
.union_value,
.memoized_call,
- => try stderr_bw.print("{d}", .{data}),
+ => try w.print("{d}", .{data}),
.opt_null,
.type_slice,
.only_possible_value,
- => try stderr_bw.print("${d}", .{data}),
+ => try w.print("${d}", .{data}),
}
- try stderr_bw.writeAll(")\n");
+ try w.writeAll(")\n");
}
}
}
pub fn dumpGenericInstances(ip: *const InternPool, allocator: Allocator) void {
- ip.dumpGenericInstancesFallible(allocator) catch return;
+ var buffer: [4096]u8 = undefined;
+ const stderr = std.debug.lockStderr(&buffer);
+ defer std.debug.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ ip.dumpGenericInstancesFallible(allocator, w) catch return;
}
-pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) anyerror!void {
+pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator, w: *Io.Writer) !void {
var arena_allocator = std.heap.ArenaAllocator.init(allocator);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
@@ -11564,10 +11568,6 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
}
}
- var buffer: [4096]u8 = undefined;
- const stderr_bw, _ = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
-
const SortContext = struct {
values: []std.ArrayList(Index),
pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
@@ -11579,19 +11579,19 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
var it = instances.iterator();
while (it.next()) |entry| {
const generic_fn_owner_nav = ip.getNav(ip.funcDeclInfo(entry.key_ptr.*).owner_nav);
- try stderr_bw.print("{f} ({d}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
+ try w.print("{f} ({d}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
for (entry.value_ptr.items) |index| {
const unwrapped_index = index.unwrap(ip);
const func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), unwrapped_index.getData(ip));
const owner_nav = ip.getNav(func.owner_nav);
- try stderr_bw.print(" {f}: (", .{owner_nav.name.fmt(ip)});
+ try w.print(" {f}: (", .{owner_nav.name.fmt(ip)});
for (func.comptime_args.get(ip)) |arg| {
if (arg != .none) {
const key = ip.indexToKey(arg);
- try stderr_bw.print(" {} ", .{key});
+ try w.print(" {} ", .{key});
}
}
- try stderr_bw.writeAll(")\n");
+ try w.writeAll(")\n");
}
}
}
diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig
index 9b7c83cc39..f8e4b83293 100644
--- a/src/Package/Fetch.zig
+++ b/src/Package/Fetch.zig
@@ -383,14 +383,14 @@ pub fn run(f: *Fetch) RunError!void {
},
.remote => |remote| remote,
.path_or_url => |path_or_url| {
- if (fs.cwd().openDir(path_or_url, .{ .iterate = true })) |dir| {
+ if (Io.Dir.cwd().openDir(io, path_or_url, .{ .iterate = true })) |dir| {
var resource: Resource = .{ .dir = dir };
return f.runResource(path_or_url, &resource, null);
} else |dir_err| {
var server_header_buffer: [init_resource_buffer_size]u8 = undefined;
const file_err = if (dir_err == error.NotDir) e: {
- if (fs.cwd().openFile(path_or_url, .{})) |file| {
+ if (Io.Dir.cwd().openFile(io, path_or_url, .{})) |file| {
var resource: Resource = .{ .file = file.reader(io, &server_header_buffer) };
return f.runResource(path_or_url, &resource, null);
} else |err| break :e err;
@@ -418,7 +418,7 @@ pub fn run(f: *Fetch) RunError!void {
const prefixed_pkg_sub_path = prefixed_pkg_sub_path_buffer[0 .. 2 + hash_slice.len];
const prefix_len: usize = if (f.job_queue.read_only) "p/".len else 0;
const pkg_sub_path = prefixed_pkg_sub_path[prefix_len..];
- if (cache_root.handle.access(pkg_sub_path, .{})) |_| {
+ if (cache_root.handle.access(io, pkg_sub_path, .{})) |_| {
assert(f.lazy_status != .unavailable);
f.package_root = .{
.root_dir = cache_root,
@@ -500,12 +500,12 @@ fn runResource(
var tmp_directory: Cache.Directory = .{
.path = tmp_directory_path,
.handle = handle: {
- const dir = cache_root.handle.makeOpenPath(tmp_dir_sub_path, .{
- .iterate = true,
+ const dir = cache_root.handle.createDirPathOpen(io, tmp_dir_sub_path, .{
+ .open_options = .{ .iterate = true },
}) catch |err| {
try eb.addRootErrorMessage(.{
- .msg = try eb.printString("unable to create temporary directory '{s}': {s}", .{
- tmp_directory_path, @errorName(err),
+ .msg = try eb.printString("unable to create temporary directory '{s}': {t}", .{
+ tmp_directory_path, err,
}),
});
return error.FetchFailed;
@@ -513,7 +513,7 @@ fn runResource(
break :handle dir;
},
};
- defer tmp_directory.handle.close();
+ defer tmp_directory.handle.close(io);
// Fetch and unpack a resource into a temporary directory.
var unpack_result = try unpackResource(f, resource, uri_path, tmp_directory);
@@ -523,9 +523,9 @@ fn runResource(
// Apply btrfs workaround if needed. Reopen tmp_directory.
if (native_os == .linux and f.job_queue.work_around_btrfs_bug) {
// https://github.com/ziglang/zig/issues/17095
- pkg_path.root_dir.handle.close();
- pkg_path.root_dir.handle = cache_root.handle.makeOpenPath(tmp_dir_sub_path, .{
- .iterate = true,
+ pkg_path.root_dir.handle.close(io);
+ pkg_path.root_dir.handle = cache_root.handle.createDirPathOpen(io, tmp_dir_sub_path, .{
+ .open_options = .{ .iterate = true },
}) catch @panic("btrfs workaround failed");
}
@@ -567,7 +567,7 @@ fn runResource(
.root_dir = cache_root,
.sub_path = try std.fmt.allocPrint(arena, "p" ++ s ++ "{s}", .{computed_package_hash.toSlice()}),
};
- renameTmpIntoCache(cache_root.handle, package_sub_path, f.package_root.sub_path) catch |err| {
+ renameTmpIntoCache(io, cache_root.handle, package_sub_path, f.package_root.sub_path) catch |err| {
const src = try cache_root.join(arena, &.{tmp_dir_sub_path});
const dest = try cache_root.join(arena, &.{f.package_root.sub_path});
try eb.addRootErrorMessage(.{ .msg = try eb.printString(
@@ -578,7 +578,7 @@ fn runResource(
};
// Remove temporary directory root if not already renamed to global cache.
if (!std.mem.eql(u8, package_sub_path, tmp_dir_sub_path)) {
- cache_root.handle.deleteDir(tmp_dir_sub_path) catch {};
+ cache_root.handle.deleteDir(io, tmp_dir_sub_path) catch {};
}
// Validate the computed hash against the expected hash. If invalid, this
@@ -637,8 +637,9 @@ pub fn computedPackageHash(f: *const Fetch) Package.Hash {
/// `computeHash` gets a free check for the existence of `build.zig`, but when
/// not computing a hash, we need to do a syscall to check for it.
fn checkBuildFileExistence(f: *Fetch) RunError!void {
+ const io = f.job_queue.io;
const eb = &f.error_bundle;
- if (f.package_root.access(Package.build_zig_basename, .{})) |_| {
+ if (f.package_root.access(io, Package.build_zig_basename, .{})) |_| {
f.has_build_zig = true;
} else |err| switch (err) {
error.FileNotFound => {},
@@ -655,9 +656,11 @@ fn checkBuildFileExistence(f: *Fetch) RunError!void {
/// This function populates `f.manifest` or leaves it `null`.
fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
+ const io = f.job_queue.io;
const eb = &f.error_bundle;
const arena = f.arena.allocator();
const manifest_bytes = pkg_root.root_dir.handle.readFileAllocOptions(
+ io,
try fs.path.join(arena, &.{ pkg_root.sub_path, Manifest.basename }),
arena,
.limited(Manifest.max_bytes),
@@ -882,10 +885,10 @@ fn fail(f: *Fetch, msg_tok: std.zig.Ast.TokenIndex, msg_str: u32) RunError {
}
const Resource = union(enum) {
- file: fs.File.Reader,
+ file: Io.File.Reader,
http_request: HttpRequest,
git: Git,
- dir: fs.Dir,
+ dir: Io.Dir,
const Git = struct {
session: git.Session,
@@ -908,7 +911,7 @@ const Resource = union(enum) {
.git => |*git_resource| {
git_resource.fetch_stream.deinit();
},
- .dir => |*dir| dir.close(),
+ .dir => |*dir| dir.close(io),
}
resource.* = undefined;
}
@@ -995,7 +998,7 @@ fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u
if (ascii.eqlIgnoreCase(uri.scheme, "file")) {
const path = try uri.path.toRawMaybeAlloc(arena);
- const file = f.parent_package_root.openFile(path, .{}) catch |err| {
+ const file = f.parent_package_root.openFile(io, path, .{}) catch |err| {
return f.fail(f.location_tok, try eb.printString("unable to open '{f}{s}': {t}", .{
f.parent_package_root, path, err,
}));
@@ -1247,13 +1250,14 @@ fn unpackResource(
}
}
-fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *Io.Reader) RunError!UnpackResult {
+fn unpackTarball(f: *Fetch, out_dir: Io.Dir, reader: *Io.Reader) RunError!UnpackResult {
const eb = &f.error_bundle;
const arena = f.arena.allocator();
+ const io = f.job_queue.io;
var diagnostics: std.tar.Diagnostics = .{ .allocator = arena };
- std.tar.pipeToFileSystem(out_dir, reader, .{
+ std.tar.pipeToFileSystem(io, out_dir, reader, .{
.diagnostics = &diagnostics,
.strip_components = 0,
.mode_mode = .ignore,
@@ -1280,7 +1284,7 @@ fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *Io.Reader) RunError!Unpack
fn unzip(
f: *Fetch,
- out_dir: fs.Dir,
+ out_dir: Io.Dir,
reader: *Io.Reader,
) error{ ReadFailed, OutOfMemory, Canceled, FetchFailed }!UnpackResult {
// We write the entire contents to a file first because zip files
@@ -1302,7 +1306,7 @@ fn unzip(
const random_integer = std.crypto.random.int(u64);
zip_path[prefix.len..][0..random_len].* = std.fmt.hex(random_integer);
- break cache_root.handle.createFile(&zip_path, .{
+ break cache_root.handle.createFile(io, &zip_path, .{
.exclusive = true,
.read = true,
}) catch |err| switch (err) {
@@ -1314,10 +1318,10 @@ fn unzip(
),
};
};
- defer zip_file.close();
+ defer zip_file.close(io);
var zip_file_buffer: [4096]u8 = undefined;
var zip_file_reader = b: {
- var zip_file_writer = zip_file.writer(&zip_file_buffer);
+ var zip_file_writer = zip_file.writer(io, &zip_file_buffer);
_ = reader.streamRemaining(&zip_file_writer.interface) catch |err| switch (err) {
error.ReadFailed => return error.ReadFailed,
@@ -1330,7 +1334,7 @@ fn unzip(
f.location_tok,
try eb.printString("failed writing temporary zip file: {t}", .{err}),
);
- break :b zip_file_writer.moveToReader(io);
+ break :b zip_file_writer.moveToReader();
};
var diagnostics: std.zip.Diagnostics = .{ .allocator = f.arena.allocator() };
@@ -1343,13 +1347,13 @@ fn unzip(
.diagnostics = &diagnostics,
}) catch |err| return f.fail(f.location_tok, try eb.printString("zip extract failed: {t}", .{err}));
- cache_root.handle.deleteFile(&zip_path) catch |err|
+ cache_root.handle.deleteFile(io, &zip_path) catch |err|
return f.fail(f.location_tok, try eb.printString("delete temporary zip failed: {t}", .{err}));
return .{ .root_dir = diagnostics.root_dir };
}
-fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!UnpackResult {
+fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!UnpackResult {
const io = f.job_queue.io;
const arena = f.arena.allocator();
// TODO don't try to get a gpa from an arena. expose this dependency higher up
@@ -1362,23 +1366,23 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
// we do not attempt to replicate the exact structure of a real .git
// directory, since that isn't relevant for fetching a package.
{
- var pack_dir = try out_dir.makeOpenPath(".git", .{});
- defer pack_dir.close();
- var pack_file = try pack_dir.createFile("pkg.pack", .{ .read = true });
- defer pack_file.close();
+ var pack_dir = try out_dir.createDirPathOpen(io, ".git", .{});
+ defer pack_dir.close(io);
+ var pack_file = try pack_dir.createFile(io, "pkg.pack", .{ .read = true });
+ defer pack_file.close(io);
var pack_file_buffer: [4096]u8 = undefined;
var pack_file_reader = b: {
- var pack_file_writer = pack_file.writer(&pack_file_buffer);
+ var pack_file_writer = pack_file.writer(io, &pack_file_buffer);
const fetch_reader = &resource.fetch_stream.reader;
_ = try fetch_reader.streamRemaining(&pack_file_writer.interface);
try pack_file_writer.interface.flush();
- break :b pack_file_writer.moveToReader(io);
+ break :b pack_file_writer.moveToReader();
};
- var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true });
- defer index_file.close();
+ var index_file = try pack_dir.createFile(io, "pkg.idx", .{ .read = true });
+ defer index_file.close(io);
var index_file_buffer: [2000]u8 = undefined;
- var index_file_writer = index_file.writer(&index_file_buffer);
+ var index_file_writer = index_file.writer(io, &index_file_buffer);
{
const index_prog_node = f.prog_node.start("Index pack", 0);
defer index_prog_node.end();
@@ -1393,7 +1397,7 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
try repository.init(gpa, object_format, &pack_file_reader, &index_file_reader);
defer repository.deinit();
var diagnostics: git.Diagnostics = .{ .allocator = arena };
- try repository.checkout(out_dir, resource.want_oid, &diagnostics);
+ try repository.checkout(io, out_dir, resource.want_oid, &diagnostics);
if (diagnostics.errors.items.len > 0) {
try res.allocErrors(arena, diagnostics.errors.items.len, "unable to unpack packfile");
@@ -1407,41 +1411,37 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
}
}
- try out_dir.deleteTree(".git");
+ try out_dir.deleteTree(io, ".git");
return res;
}
-fn recursiveDirectoryCopy(f: *Fetch, dir: fs.Dir, tmp_dir: fs.Dir) anyerror!void {
+fn recursiveDirectoryCopy(f: *Fetch, dir: Io.Dir, tmp_dir: Io.Dir) anyerror!void {
const gpa = f.arena.child_allocator;
+ const io = f.job_queue.io;
// Recursive directory copy.
var it = try dir.walk(gpa);
defer it.deinit();
- while (try it.next()) |entry| {
+ while (try it.next(io)) |entry| {
switch (entry.kind) {
.directory => {}, // omit empty directories
.file => {
- dir.copyFile(
- entry.path,
- tmp_dir,
- entry.path,
- .{},
- ) catch |err| switch (err) {
+ dir.copyFile(entry.path, tmp_dir, entry.path, io, .{}) catch |err| switch (err) {
error.FileNotFound => {
- if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(dirname);
- try dir.copyFile(entry.path, tmp_dir, entry.path, .{});
+ if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.createDirPath(io, dirname);
+ try dir.copyFile(entry.path, tmp_dir, entry.path, io, .{});
},
else => |e| return e,
};
},
.sym_link => {
var buf: [fs.max_path_bytes]u8 = undefined;
- const link_name = try dir.readLink(entry.path, &buf);
+ const link_name = buf[0..try dir.readLink(io, entry.path, &buf)];
// TODO: if this would create a symlink to outside
// the destination directory, fail with an error instead.
- tmp_dir.symLink(link_name, entry.path, .{}) catch |err| switch (err) {
+ tmp_dir.symLink(io, link_name, entry.path, .{}) catch |err| switch (err) {
error.FileNotFound => {
- if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(dirname);
- try tmp_dir.symLink(link_name, entry.path, .{});
+ if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.createDirPath(io, dirname);
+ try tmp_dir.symLink(io, link_name, entry.path, .{});
},
else => |e| return e,
};
@@ -1451,14 +1451,14 @@ fn recursiveDirectoryCopy(f: *Fetch, dir: fs.Dir, tmp_dir: fs.Dir) anyerror!void
}
}
-pub fn renameTmpIntoCache(cache_dir: fs.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void {
+pub fn renameTmpIntoCache(io: Io, cache_dir: Io.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void {
assert(dest_dir_sub_path[1] == fs.path.sep);
var handled_missing_dir = false;
while (true) {
- cache_dir.rename(tmp_dir_sub_path, dest_dir_sub_path) catch |err| switch (err) {
+ cache_dir.rename(tmp_dir_sub_path, cache_dir, dest_dir_sub_path, io) catch |err| switch (err) {
error.FileNotFound => {
if (handled_missing_dir) return err;
- cache_dir.makeDir(dest_dir_sub_path[0..1]) catch |mkd_err| switch (mkd_err) {
+ cache_dir.createDir(io, dest_dir_sub_path[0..1], .default_dir) catch |mkd_err| switch (mkd_err) {
error.PathAlreadyExists => handled_missing_dir = true,
else => |e| return e,
};
@@ -1466,7 +1466,7 @@ pub fn renameTmpIntoCache(cache_dir: fs.Dir, tmp_dir_sub_path: []const u8, dest_
},
error.PathAlreadyExists, error.AccessDenied => {
// Package has been already downloaded and may already be in use on the system.
- cache_dir.deleteTree(tmp_dir_sub_path) catch {
+ cache_dir.deleteTree(io, tmp_dir_sub_path) catch {
// Garbage files left over in zig-cache/tmp/ are, as they say
// on Star Trek, "operating within normal parameters".
};
@@ -1519,7 +1519,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
var group: Io.Group = .init;
defer group.wait(io);
- while (walker.next() catch |err| {
+ while (walker.next(io) catch |err| {
try eb.addRootErrorMessage(.{ .msg = try eb.printString(
"unable to walk temporary directory '{f}': {s}",
.{ pkg_path, @errorName(err) },
@@ -1542,7 +1542,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
.fs_path = fs_path,
.failure = undefined, // to be populated by the worker
};
- group.async(io, workerDeleteFile, .{ root_dir, deleted_file });
+ group.async(io, workerDeleteFile, .{ io, root_dir, deleted_file });
try deleted_files.append(deleted_file);
continue;
}
@@ -1570,7 +1570,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
.failure = undefined, // to be populated by the worker
.size = undefined, // to be populated by the worker
};
- group.async(io, workerHashFile, .{ root_dir, hashed_file });
+ group.async(io, workerHashFile, .{ io, root_dir, hashed_file });
try all_files.append(hashed_file);
}
}
@@ -1588,7 +1588,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
var i: usize = 0;
while (i < sus_dirs.count()) : (i += 1) {
const sus_dir = sus_dirs.keys()[i];
- root_dir.deleteDir(sus_dir) catch |err| switch (err) {
+ root_dir.deleteDir(io, sus_dir) catch |err| switch (err) {
error.DirNotEmpty => continue,
error.FileNotFound => continue,
else => |e| {
@@ -1638,7 +1638,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
assert(!f.job_queue.recursive);
// Print something to stdout that can be text diffed to figure out why
// the package hash is different.
- dumpHashInfo(all_files.items) catch |err| {
+ dumpHashInfo(io, all_files.items) catch |err| {
std.debug.print("unable to write to stdout: {s}\n", .{@errorName(err)});
std.process.exit(1);
};
@@ -1650,9 +1650,9 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
};
}
-fn dumpHashInfo(all_files: []const *const HashedFile) !void {
+fn dumpHashInfo(io: Io, all_files: []const *const HashedFile) !void {
var stdout_buffer: [1024]u8 = undefined;
- var stdout_writer: fs.File.Writer = .initStreaming(.stdout(), &stdout_buffer);
+ var stdout_writer: Io.File.Writer = .initStreaming(.stdout(), io, &stdout_buffer);
const w = &stdout_writer.interface;
for (all_files) |hashed_file| {
try w.print("{t}: {x}: {s}\n", .{ hashed_file.kind, &hashed_file.hash, hashed_file.normalized_path });
@@ -1660,15 +1660,15 @@ fn dumpHashInfo(all_files: []const *const HashedFile) !void {
try w.flush();
}
-fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile) void {
- hashed_file.failure = hashFileFallible(dir, hashed_file);
+fn workerHashFile(io: Io, dir: Io.Dir, hashed_file: *HashedFile) void {
+ hashed_file.failure = hashFileFallible(io, dir, hashed_file);
}
-fn workerDeleteFile(dir: fs.Dir, deleted_file: *DeletedFile) void {
- deleted_file.failure = deleteFileFallible(dir, deleted_file);
+fn workerDeleteFile(io: Io, dir: Io.Dir, deleted_file: *DeletedFile) void {
+ deleted_file.failure = deleteFileFallible(io, dir, deleted_file);
}
-fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
+fn hashFileFallible(io: Io, dir: Io.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
var buf: [8000]u8 = undefined;
var hasher = Package.Hash.Algo.init(.{});
hasher.update(hashed_file.normalized_path);
@@ -1676,24 +1676,24 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
switch (hashed_file.kind) {
.file => {
- var file = try dir.openFile(hashed_file.fs_path, .{});
- defer file.close();
+ var file = try dir.openFile(io, hashed_file.fs_path, .{});
+ defer file.close(io);
// Hard-coded false executable bit: https://github.com/ziglang/zig/issues/17463
hasher.update(&.{ 0, 0 });
var file_header: FileHeader = .{};
while (true) {
- const bytes_read = try file.read(&buf);
+ const bytes_read = try file.readPositional(io, &.{&buf}, file_size);
if (bytes_read == 0) break;
file_size += bytes_read;
hasher.update(buf[0..bytes_read]);
file_header.update(buf[0..bytes_read]);
}
if (file_header.isExecutable()) {
- try setExecutable(file);
+ try setExecutable(io, file);
}
},
.link => {
- const link_name = try dir.readLink(hashed_file.fs_path, &buf);
+ const link_name = buf[0..try dir.readLink(io, hashed_file.fs_path, &buf)];
if (fs.path.sep != canonical_sep) {
// Package hashes are intended to be consistent across
// platforms which means we must normalize path separators
@@ -1707,16 +1707,13 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
hashed_file.size = file_size;
}
-fn deleteFileFallible(dir: fs.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void {
- try dir.deleteFile(deleted_file.fs_path);
+fn deleteFileFallible(io: Io, dir: Io.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void {
+ try dir.deleteFile(io, deleted_file.fs_path);
}
-fn setExecutable(file: fs.File) !void {
- if (!std.fs.has_executable_bit) return;
-
- const S = std.posix.S;
- const mode = fs.File.default_mode | S.IXUSR | S.IXGRP | S.IXOTH;
- try file.chmod(mode);
+fn setExecutable(io: Io, file: Io.File) !void {
+ if (!Io.File.Permissions.has_executable_bit) return;
+ try file.setPermissions(io, .executable_file);
}
const DeletedFile = struct {
@@ -1724,8 +1721,8 @@ const DeletedFile = struct {
failure: Error!void,
const Error =
- fs.Dir.DeleteFileError ||
- fs.Dir.DeleteDirError;
+ Io.Dir.DeleteFileError ||
+ Io.Dir.DeleteDirError;
};
const HashedFile = struct {
@@ -1737,11 +1734,11 @@ const HashedFile = struct {
size: u64,
const Error =
- fs.File.OpenError ||
- fs.File.ReadError ||
- fs.File.StatError ||
- fs.File.ChmodError ||
- fs.Dir.ReadLinkError;
+ Io.File.OpenError ||
+ Io.File.ReadPositionalError ||
+ Io.File.StatError ||
+ Io.File.SetPermissionsError ||
+ Io.Dir.ReadLinkError;
const Kind = enum { file, link };
@@ -2043,7 +2040,7 @@ const UnpackResult = struct {
defer errors.deinit(gpa);
var aw: Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
- try errors.renderToWriter(.{}, &aw.writer, .no_color);
+ try errors.renderToWriter(.{}, &aw.writer);
try std.testing.expectEqualStrings(
\\error: unable to unpack
\\ note: unable to create symlink from 'dir2/file2' to 'filename': SymlinkError
@@ -2074,7 +2071,7 @@ test "tarball with duplicate paths" {
defer tmp.cleanup();
const tarball_name = "duplicate_paths.tar.gz";
- try saveEmbedFile(tarball_name, tmp.dir);
+ try saveEmbedFile(io, tarball_name, tmp.dir);
const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
defer gpa.free(tarball_path);
@@ -2107,7 +2104,7 @@ test "tarball with excluded duplicate paths" {
defer tmp.cleanup();
const tarball_name = "duplicate_paths_excluded.tar.gz";
- try saveEmbedFile(tarball_name, tmp.dir);
+ try saveEmbedFile(io, tarball_name, tmp.dir);
const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
defer gpa.free(tarball_path);
@@ -2153,7 +2150,7 @@ test "tarball without root folder" {
defer tmp.cleanup();
const tarball_name = "no_root.tar.gz";
- try saveEmbedFile(tarball_name, tmp.dir);
+ try saveEmbedFile(io, tarball_name, tmp.dir);
const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
defer gpa.free(tarball_path);
@@ -2178,7 +2175,7 @@ test "tarball without root folder" {
}
test "set executable bit based on file content" {
- if (!std.fs.has_executable_bit) return error.SkipZigTest;
+ if (!Io.File.Permissions.has_executable_bit) return error.SkipZigTest;
const gpa = std.testing.allocator;
const io = std.testing.io;
@@ -2186,7 +2183,7 @@ test "set executable bit based on file content" {
defer tmp.cleanup();
const tarball_name = "executables.tar.gz";
- try saveEmbedFile(tarball_name, tmp.dir);
+ try saveEmbedFile(io, tarball_name, tmp.dir);
const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
defer gpa.free(tarball_path);
@@ -2210,16 +2207,16 @@ test "set executable bit based on file content" {
);
var out = try fb.packageDir();
- defer out.close();
+ defer out.close(io);
const S = std.posix.S;
// expect executable bit not set
- try std.testing.expect((try out.statFile("file1")).mode & S.IXUSR == 0);
- try std.testing.expect((try out.statFile("script_without_shebang")).mode & S.IXUSR == 0);
+ try std.testing.expect((try out.statFile(io, "file1", .{})).permissions.toMode() & S.IXUSR == 0);
+ try std.testing.expect((try out.statFile(io, "script_without_shebang", .{})).permissions.toMode() & S.IXUSR == 0);
// expect executable bit set
- try std.testing.expect((try out.statFile("hello")).mode & S.IXUSR != 0);
- try std.testing.expect((try out.statFile("script")).mode & S.IXUSR != 0);
- try std.testing.expect((try out.statFile("script_with_shebang_without_exec_bit")).mode & S.IXUSR != 0);
- try std.testing.expect((try out.statFile("hello_ln")).mode & S.IXUSR != 0);
+ try std.testing.expect((try out.statFile(io, "hello", .{})).permissions.toMode() & S.IXUSR != 0);
+ try std.testing.expect((try out.statFile(io, "script", .{})).permissions.toMode() & S.IXUSR != 0);
+ try std.testing.expect((try out.statFile(io, "script_with_shebang_without_exec_bit", .{})).permissions.toMode() & S.IXUSR != 0);
+ try std.testing.expect((try out.statFile(io, "hello_ln", .{})).permissions.toMode() & S.IXUSR != 0);
//
// $ ls -al zig-cache/tmp/OCz9ovUcstDjTC_U/zig-global-cache/p/1220fecb4c06a9da8673c87fe8810e15785f1699212f01728eadce094d21effeeef3
@@ -2231,12 +2228,12 @@ test "set executable bit based on file content" {
// -rwxrwxr-x 1 17 Apr script_with_shebang_without_exec_bit
}
-fn saveEmbedFile(comptime tarball_name: []const u8, dir: fs.Dir) !void {
+fn saveEmbedFile(io: Io, comptime tarball_name: []const u8, dir: Io.Dir) !void {
//const tarball_name = "duplicate_paths_excluded.tar.gz";
const tarball_content = @embedFile("Fetch/testdata/" ++ tarball_name);
- var tmp_file = try dir.createFile(tarball_name, .{});
- defer tmp_file.close();
- try tmp_file.writeAll(tarball_content);
+ var tmp_file = try dir.createFile(io, tarball_name, .{});
+ defer tmp_file.close(io);
+ try tmp_file.writeStreamingAll(io, tarball_content);
}
// Builds Fetch with required dependencies, clears dependencies on deinit().
@@ -2250,10 +2247,10 @@ const TestFetchBuilder = struct {
self: *TestFetchBuilder,
allocator: std.mem.Allocator,
io: Io,
- cache_parent_dir: std.fs.Dir,
+ cache_parent_dir: std.Io.Dir,
path_or_url: []const u8,
) !*Fetch {
- const cache_dir = try cache_parent_dir.makeOpenPath("zig-global-cache", .{});
+ const cache_dir = try cache_parent_dir.createDirPathOpen(io, "zig-global-cache", .{});
self.http_client = .{ .allocator = allocator, .io = io };
self.global_cache_directory = .{ .handle = cache_dir, .path = null };
@@ -2301,35 +2298,40 @@ const TestFetchBuilder = struct {
}
fn deinit(self: *TestFetchBuilder) void {
+ const io = self.job_queue.io;
self.fetch.deinit();
self.job_queue.deinit();
self.fetch.prog_node.end();
- self.global_cache_directory.handle.close();
+ self.global_cache_directory.handle.close(io);
self.http_client.deinit();
}
- fn packageDir(self: *TestFetchBuilder) !fs.Dir {
+ fn packageDir(self: *TestFetchBuilder) !Io.Dir {
+ const io = self.job_queue.io;
const root = self.fetch.package_root;
- return try root.root_dir.handle.openDir(root.sub_path, .{ .iterate = true });
+ return try root.root_dir.handle.openDir(io, root.sub_path, .{ .iterate = true });
}
// Test helper, asserts that package dir contains expected_files.
// expected_files must be sorted.
fn expectPackageFiles(self: *TestFetchBuilder, expected_files: []const []const u8) !void {
+ const io = self.job_queue.io;
+ const gpa = std.testing.allocator;
+
var package_dir = try self.packageDir();
- defer package_dir.close();
+ defer package_dir.close(io);
var actual_files: std.ArrayList([]u8) = .empty;
- defer actual_files.deinit(std.testing.allocator);
- defer for (actual_files.items) |file| std.testing.allocator.free(file);
- var walker = try package_dir.walk(std.testing.allocator);
+ defer actual_files.deinit(gpa);
+ defer for (actual_files.items) |file| gpa.free(file);
+ var walker = try package_dir.walk(gpa);
defer walker.deinit();
- while (try walker.next()) |entry| {
+ while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
- const path = try std.testing.allocator.dupe(u8, entry.path);
- errdefer std.testing.allocator.free(path);
+ const path = try gpa.dupe(u8, entry.path);
+ errdefer gpa.free(path);
std.mem.replaceScalar(u8, path, std.fs.path.sep, '/');
- try actual_files.append(std.testing.allocator, path);
+ try actual_files.append(gpa, path);
}
std.mem.sortUnstable([]u8, actual_files.items, {}, struct {
fn lessThan(_: void, a: []u8, b: []u8) bool {
@@ -2346,17 +2348,19 @@ const TestFetchBuilder = struct {
// Test helper, asserts that fetch has failed with `msg` error message.
fn expectFetchErrors(self: *TestFetchBuilder, notes_len: usize, msg: []const u8) !void {
+ const gpa = std.testing.allocator;
+
var errors = try self.fetch.error_bundle.toOwnedBundle("");
- defer errors.deinit(std.testing.allocator);
+ defer errors.deinit(gpa);
const em = errors.getErrorMessage(errors.getMessages()[0]);
try std.testing.expectEqual(1, em.count);
if (notes_len > 0) {
try std.testing.expectEqual(notes_len, em.notes_len);
}
- var aw: Io.Writer.Allocating = .init(std.testing.allocator);
+ var aw: Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
- try errors.renderToWriter(.{}, &aw.writer, .no_color);
+ try errors.renderToWriter(.{}, &aw.writer);
try std.testing.expectEqualStrings(msg, aw.written());
}
};
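
Most of the Fetch.zig churn above is mechanical: `std.fs.Dir`/`std.fs.File` become `Io.Dir`/`Io.File`, every filesystem call takes the `Io` instance explicitly, and `makeOpenPath` becomes `createDirPathOpen` with nested open options. A condensed before/after of the recurring shape (the `sub_path` name is illustrative):

    // Before: I/O is implicit global state.
    // var dir = try cache_root.handle.makeOpenPath(sub_path, .{ .iterate = true });
    // defer dir.close();

    // After: the Io instance is threaded through every call.
    var dir = try cache_root.handle.createDirPathOpen(io, sub_path, .{
        .open_options = .{ .iterate = true },
    });
    defer dir.close(io);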
diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig
index a2ea870c3f..18e15f457f 100644
--- a/src/Package/Fetch/git.zig
+++ b/src/Package/Fetch/git.zig
@@ -198,8 +198,8 @@ pub const Repository = struct {
repo: *Repository,
allocator: Allocator,
format: Oid.Format,
- pack_file: *std.fs.File.Reader,
- index_file: *std.fs.File.Reader,
+ pack_file: *Io.File.Reader,
+ index_file: *Io.File.Reader,
) !void {
repo.* = .{ .odb = undefined };
try repo.odb.init(allocator, format, pack_file, index_file);
@@ -213,7 +213,8 @@ pub const Repository = struct {
/// Checks out the repository at `commit_oid` to `worktree`.
pub fn checkout(
repository: *Repository,
- worktree: std.fs.Dir,
+ io: Io,
+ worktree: Io.Dir,
commit_oid: Oid,
diagnostics: *Diagnostics,
) !void {
@@ -223,13 +224,14 @@ pub const Repository = struct {
if (commit_object.type != .commit) return error.NotACommit;
break :tree_oid try getCommitTree(repository.odb.format, commit_object.data);
};
- try repository.checkoutTree(worktree, tree_oid, "", diagnostics);
+ try repository.checkoutTree(io, worktree, tree_oid, "", diagnostics);
}
/// Checks out the tree at `tree_oid` to `worktree`.
fn checkoutTree(
repository: *Repository,
- dir: std.fs.Dir,
+ io: Io,
+ dir: Io.Dir,
tree_oid: Oid,
current_path: []const u8,
diagnostics: *Diagnostics,
@@ -251,18 +253,18 @@ pub const Repository = struct {
while (try tree_iter.next()) |entry| {
switch (entry.type) {
.directory => {
- try dir.makeDir(entry.name);
- var subdir = try dir.openDir(entry.name, .{});
- defer subdir.close();
+ try dir.createDir(io, entry.name, .default_dir);
+ var subdir = try dir.openDir(io, entry.name, .{});
+ defer subdir.close(io);
const sub_path = try std.fs.path.join(repository.odb.allocator, &.{ current_path, entry.name });
defer repository.odb.allocator.free(sub_path);
- try repository.checkoutTree(subdir, entry.oid, sub_path, diagnostics);
+ try repository.checkoutTree(io, subdir, entry.oid, sub_path, diagnostics);
},
.file => {
try repository.odb.seekOid(entry.oid);
const file_object = try repository.odb.readObject();
if (file_object.type != .blob) return error.InvalidFile;
- var file = dir.createFile(entry.name, .{ .exclusive = true }) catch |e| {
+ var file = dir.createFile(io, entry.name, .{ .exclusive = true }) catch |e| {
const file_name = try std.fs.path.join(diagnostics.allocator, &.{ current_path, entry.name });
errdefer diagnostics.allocator.free(file_name);
try diagnostics.errors.append(diagnostics.allocator, .{ .unable_to_create_file = .{
@@ -271,15 +273,15 @@ pub const Repository = struct {
} });
continue;
};
- defer file.close();
- try file.writeAll(file_object.data);
+ defer file.close(io);
+ try file.writePositionalAll(io, file_object.data, 0);
},
.symlink => {
try repository.odb.seekOid(entry.oid);
const symlink_object = try repository.odb.readObject();
if (symlink_object.type != .blob) return error.InvalidFile;
const link_name = symlink_object.data;
- dir.symLink(link_name, entry.name, .{}) catch |e| {
+ dir.symLink(io, link_name, entry.name, .{}) catch |e| {
const file_name = try std.fs.path.join(diagnostics.allocator, &.{ current_path, entry.name });
errdefer diagnostics.allocator.free(file_name);
const link_name_dup = try diagnostics.allocator.dupe(u8, link_name);
@@ -294,7 +296,7 @@ pub const Repository = struct {
.gitlink => {
// Consistent with git archive behavior, create the directory but
// do nothing else
- try dir.makeDir(entry.name);
+ try dir.createDir(io, entry.name, .default_dir);
},
}
}
@@ -370,9 +372,9 @@ pub const Repository = struct {
/// [pack-format](https://git-scm.com/docs/pack-format).
const Odb = struct {
format: Oid.Format,
- pack_file: *std.fs.File.Reader,
+ pack_file: *Io.File.Reader,
index_header: IndexHeader,
- index_file: *std.fs.File.Reader,
+ index_file: *Io.File.Reader,
cache: ObjectCache = .{},
allocator: Allocator,
@@ -381,8 +383,8 @@ const Odb = struct {
odb: *Odb,
allocator: Allocator,
format: Oid.Format,
- pack_file: *std.fs.File.Reader,
- index_file: *std.fs.File.Reader,
+ pack_file: *Io.File.Reader,
+ index_file: *Io.File.Reader,
) !void {
try pack_file.seekTo(0);
try index_file.seekTo(0);
@@ -1270,8 +1272,8 @@ const IndexEntry = struct {
pub fn indexPack(
allocator: Allocator,
format: Oid.Format,
- pack: *std.fs.File.Reader,
- index_writer: *std.fs.File.Writer,
+ pack: *Io.File.Reader,
+ index_writer: *Io.File.Writer,
) !void {
try pack.seekTo(0);
@@ -1370,7 +1372,7 @@ pub fn indexPack(
fn indexPackFirstPass(
allocator: Allocator,
format: Oid.Format,
- pack: *std.fs.File.Reader,
+ pack: *Io.File.Reader,
index_entries: *std.AutoHashMapUnmanaged(Oid, IndexEntry),
pending_deltas: *std.ArrayList(IndexEntry),
) !Oid {
@@ -1423,7 +1425,7 @@ fn indexPackFirstPass(
fn indexPackHashDelta(
allocator: Allocator,
format: Oid.Format,
- pack: *std.fs.File.Reader,
+ pack: *Io.File.Reader,
delta: IndexEntry,
index_entries: std.AutoHashMapUnmanaged(Oid, IndexEntry),
cache: *ObjectCache,
@@ -1475,7 +1477,7 @@ fn indexPackHashDelta(
fn resolveDeltaChain(
allocator: Allocator,
format: Oid.Format,
- pack: *std.fs.File.Reader,
+ pack: *Io.File.Reader,
base_object: Object,
delta_offsets: []const u64,
cache: *ObjectCache,
@@ -1582,17 +1584,17 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
var git_dir = testing.tmpDir(.{});
defer git_dir.cleanup();
- var pack_file = try git_dir.dir.createFile("testrepo.pack", .{ .read = true });
- defer pack_file.close();
- try pack_file.writeAll(testrepo_pack);
+ var pack_file = try git_dir.dir.createFile(io, "testrepo.pack", .{ .read = true });
+ defer pack_file.close(io);
+ try pack_file.writeStreamingAll(io, testrepo_pack);
var pack_file_buffer: [2000]u8 = undefined;
var pack_file_reader = pack_file.reader(io, &pack_file_buffer);
- var index_file = try git_dir.dir.createFile("testrepo.idx", .{ .read = true });
- defer index_file.close();
+ var index_file = try git_dir.dir.createFile(io, "testrepo.idx", .{ .read = true });
+ defer index_file.close(io);
var index_file_buffer: [2000]u8 = undefined;
- var index_file_writer = index_file.writer(&index_file_buffer);
+ var index_file_writer = index_file.writer(io, &index_file_buffer);
try indexPack(testing.allocator, format, &pack_file_reader, &index_file_writer);
// Arbitrary size limit on files read while checking the repository contents
@@ -1600,7 +1602,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
const max_file_size = 8192;
if (!skip_checksums) {
- const index_file_data = try git_dir.dir.readFileAlloc("testrepo.idx", testing.allocator, .limited(max_file_size));
+ const index_file_data = try git_dir.dir.readFileAlloc(io, "testrepo.idx", testing.allocator, .limited(max_file_size));
defer testing.allocator.free(index_file_data);
// testrepo.idx is generated by Git. The index created by this file should
// match it exactly. Running `git verify-pack -v testrepo.pack` can verify
@@ -1621,7 +1623,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
- try repository.checkout(worktree.dir, commit_id, &diagnostics);
+ try repository.checkout(io, worktree.dir, commit_id, &diagnostics);
try testing.expect(diagnostics.errors.items.len == 0);
const expected_files: []const []const u8 = &.{
@@ -1646,7 +1648,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
defer for (actual_files.items) |file| testing.allocator.free(file);
var walker = try worktree.dir.walk(testing.allocator);
defer walker.deinit();
- while (try walker.next()) |entry| {
+ while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
const path = try testing.allocator.dupe(u8, entry.path);
errdefer testing.allocator.free(path);
@@ -1676,7 +1678,7 @@ fn runRepositoryTest(io: Io, comptime format: Oid.Format, head_commit: []const u
\\revision 19
\\
;
- const actual_file_contents = try worktree.dir.readFileAlloc("file", testing.allocator, .limited(max_file_size));
+ const actual_file_contents = try worktree.dir.readFileAlloc(io, "file", testing.allocator, .limited(max_file_size));
defer testing.allocator.free(actual_file_contents);
try testing.expectEqualStrings(expected_file_contents, actual_file_contents);
}
@@ -1700,7 +1702,7 @@ test "SHA-256 packfile indexing and checkout" {
pub fn main() !void {
const allocator = std.heap.smp_allocator;
- var threaded: Io.Threaded = .init(allocator);
+ var threaded: Io.Threaded = .init(allocator, .{});
defer threaded.deinit();
const io = threaded.io();
@@ -1712,23 +1714,23 @@ pub fn main() !void {
const format = std.meta.stringToEnum(Oid.Format, args[1]) orelse return error.InvalidFormat;
- var pack_file = try std.fs.cwd().openFile(args[2], .{});
- defer pack_file.close();
+ var pack_file = try Io.Dir.cwd().openFile(io, args[2], .{});
+ defer pack_file.close(io);
var pack_file_buffer: [4096]u8 = undefined;
var pack_file_reader = pack_file.reader(io, &pack_file_buffer);
const commit = try Oid.parse(format, args[3]);
- var worktree = try std.fs.cwd().makeOpenPath(args[4], .{});
- defer worktree.close();
+ var worktree = try Io.Dir.cwd().createDirPathOpen(io, args[4], .{});
+ defer worktree.close(io);
- var git_dir = try worktree.makeOpenPath(".git", .{});
- defer git_dir.close();
+ var git_dir = try worktree.createDirPathOpen(io, ".git", .{});
+ defer git_dir.close(io);
std.debug.print("Starting index...\n", .{});
- var index_file = try git_dir.createFile("idx", .{ .read = true });
- defer index_file.close();
+ var index_file = try git_dir.createFile(io, "idx", .{ .read = true });
+ defer index_file.close(io);
var index_file_buffer: [4096]u8 = undefined;
- var index_file_writer = index_file.writer(&index_file_buffer);
+ var index_file_writer = index_file.writer(io, &index_file_buffer);
try indexPack(allocator, format, &pack_file_reader, &index_file_writer);
std.debug.print("Starting checkout...\n", .{});
@@ -1738,7 +1740,7 @@ pub fn main() !void {
defer repository.deinit();
var diagnostics: Diagnostics = .{ .allocator = allocator };
defer diagnostics.deinit();
- try repository.checkout(worktree, commit, &diagnostics);
+ try repository.checkout(io, worktree, commit, &diagnostics);
for (diagnostics.errors.items) |err| {
std.debug.print("Diagnostic: {}\n", .{err});
diff --git a/src/Sema.zig b/src/Sema.zig
index fec6850c4c..298de783b8 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2668,16 +2668,18 @@ fn failWithTypeMismatch(sema: *Sema, block: *Block, src: LazySrcLoc, expected: T
pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Zcu.ErrorMsg) error{ AnalysisFail, OutOfMemory } {
@branchHint(.cold);
- const gpa = sema.gpa;
const zcu = sema.pt.zcu;
+ const comp = zcu.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
- if (build_options.enable_debug_extensions and zcu.comp.debug_compile_errors) {
+ if (build_options.enable_debug_extensions and comp.debug_compile_errors) {
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
wip_errors.init(gpa) catch @panic("out of memory");
Compilation.addModuleErrorMsg(zcu, &wip_errors, err_msg.*, false) catch @panic("out of memory");
std.debug.print("compile error during Sema:\n", .{});
var error_bundle = wip_errors.toOwnedBundle("") catch @panic("out of memory");
- error_bundle.renderToStdErr(.{}, .auto);
+ error_bundle.renderToStderr(io, .{}, .auto) catch @panic("failed to print to stderr");
std.debug.panicExtra(@returnAddress(), "unexpected compile error occurred", .{});
}
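
Rendering an error bundle now goes through the Io instance and is fallible: `renderToStdErr(.{}, .auto)` becomes `renderToStderr(io, .{}, .auto)`, and callers must decide what failure means. The two idioms in this diff:

    // Debug path (Sema above): a failed stderr write is fatal.
    error_bundle.renderToStderr(io, .{}, .auto) catch @panic("failed to print to stderr");

    // Best-effort path (fmt.zig below): diagnostics are advisory, so drop write errors.
    error_bundle.renderToStderr(io, .{}, color) catch {};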
diff --git a/src/Zcu.zig b/src/Zcu.zig
index 137b4d8b59..07fb1bdc94 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -1076,11 +1076,11 @@ pub const File = struct {
var f = f: {
const dir, const sub_path = file.path.openInfo(zcu.comp.dirs);
- break :f try dir.openFile(sub_path, .{});
+ break :f try dir.openFile(io, sub_path, .{});
};
- defer f.close();
+ defer f.close(io);
- const stat = f.stat() catch |err| switch (err) {
+ const stat = f.stat(io) catch |err| switch (err) {
error.Streaming => {
// Since `file.stat` is populated, this was previously a file stream; since it is
// now not a file stream, it must have changed.
@@ -1200,7 +1200,7 @@ pub const EmbedFile = struct {
/// `.none` means the file was not loaded, so `stat` is undefined.
val: InternPool.Index,
/// If this is `null` and `val` is `.none`, the file has never been loaded.
- err: ?(std.fs.File.OpenError || std.fs.File.StatError || std.fs.File.ReadError || error{UnexpectedEof}),
+ err: ?(Io.File.OpenError || Io.File.StatError || Io.File.Reader.Error || error{UnexpectedEof}),
stat: Cache.File.Stat,
pub const Index = enum(u32) {
@@ -2813,8 +2813,8 @@ pub fn init(zcu: *Zcu, gpa: Allocator, io: Io, thread_count: usize) !void {
pub fn deinit(zcu: *Zcu) void {
const comp = zcu.comp;
- const gpa = comp.gpa;
const io = comp.io;
+ const gpa = zcu.gpa;
{
const pt: Zcu.PerThread = .activate(zcu, .main);
defer pt.deactivate();
@@ -2835,8 +2835,8 @@ pub fn deinit(zcu: *Zcu) void {
}
zcu.embed_table.deinit(gpa);
- zcu.local_zir_cache.handle.close();
- zcu.global_zir_cache.handle.close();
+ zcu.local_zir_cache.handle.close(io);
+ zcu.global_zir_cache.handle.close(io);
for (zcu.failed_analysis.values()) |value| value.destroy(gpa);
for (zcu.failed_codegen.values()) |value| value.destroy(gpa);
@@ -2900,7 +2900,7 @@ pub fn deinit(zcu: *Zcu) void {
if (zcu.resolved_references) |*r| r.deinit(gpa);
- if (zcu.comp.debugIncremental()) {
+ if (comp.debugIncremental()) {
zcu.incremental_debug_state.deinit(gpa);
}
}
@@ -2927,7 +2927,7 @@ comptime {
}
}
-pub fn loadZirCache(gpa: Allocator, io: Io, cache_file: std.fs.File) !Zir {
+pub fn loadZirCache(gpa: Allocator, io: Io, cache_file: Io.File) !Zir {
var buffer: [2000]u8 = undefined;
var file_reader = cache_file.reader(io, &buffer);
return result: {
@@ -2986,7 +2986,12 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *Io.Reader
return zir;
}
-pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.Stat, zir: Zir) (std.fs.File.WriteError || Allocator.Error)!void {
+pub fn saveZirCache(
+ gpa: Allocator,
+ cache_file_writer: *Io.File.Writer,
+ stat: Io.File.Stat,
+ zir: Zir,
+) (Io.File.Writer.Error || Allocator.Error)!void {
const safety_buffer = if (data_has_safety_tag)
try gpa.alloc([8]u8, zir.instructions.len)
else
@@ -3020,13 +3025,12 @@ pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.S
zir.string_bytes,
@ptrCast(zir.extra),
};
- var cache_fw = cache_file.writer(&.{});
- cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
- error.WriteFailed => return cache_fw.err.?,
+ cache_file_writer.interface.writeVecAll(&vecs) catch |err| switch (err) {
+ error.WriteFailed => return cache_file_writer.err.?,
};
}
-pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir) std.fs.File.WriteError!void {
+pub fn saveZoirCache(cache_file_writer: *Io.File.Writer, stat: Io.File.Stat, zoir: Zoir) Io.File.Writer.Error!void {
const header: Zoir.Header = .{
.nodes_len = @intCast(zoir.nodes.len),
.extra_len = @intCast(zoir.extra.len),
@@ -3050,9 +3054,8 @@ pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir
@ptrCast(zoir.compile_errors),
@ptrCast(zoir.error_notes),
};
- var cache_fw = cache_file.writer(&.{});
- cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
- error.WriteFailed => return cache_fw.err.?,
+ cache_file_writer.interface.writeVecAll(&vecs) catch |err| switch (err) {
+ error.WriteFailed => return cache_file_writer.err.?,
};
}
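
`saveZirCache` and `saveZoirCache` no longer build a throwaway writer from the file; the caller constructs the `Io.File.Writer` and finishes it. A sketch of the caller side, mirroring the PerThread.zig hunk below (surrounding error handling abbreviated):

    var cache_file_writer: Io.File.Writer = .init(cache_file, io, &.{});
    try Zcu.saveZirCache(gpa, &cache_file_writer, stat, zir);
    // end() finishes the write; the old up-front setEndPos(0)/seekTo(0)
    // truncation is gone. WriteFailed is a sentinel whose real cause is
    // stored on the writer itself.
    cache_file_writer.end() catch |err| switch (err) {
        error.WriteFailed => return cache_file_writer.err.?,
        else => |e| return e,
    };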
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 2ad5bac01c..103cbaaaae 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -94,11 +94,11 @@ pub fn updateFile(
// In any case we need to examine the stat of the file to determine the course of action.
var source_file = f: {
const dir, const sub_path = file.path.openInfo(comp.dirs);
- break :f try dir.openFile(sub_path, .{});
+ break :f try dir.openFile(io, sub_path, .{});
};
- defer source_file.close();
+ defer source_file.close(io);
- const stat = try source_file.stat();
+ const stat = try source_file.stat(io);
const want_local_cache = switch (file.path.root) {
.none, .local_cache => true,
@@ -118,7 +118,7 @@ pub fn updateFile(
const zir_dir = cache_directory.handle;
// Determine whether we need to reload the file from disk and redo parsing and AstGen.
- var lock: std.fs.File.Lock = switch (file.status) {
+ var lock: Io.File.Lock = switch (file.status) {
.never_loaded, .retryable_failure => lock: {
// First, load the cached ZIR code, if any.
log.debug("AstGen checking cache: {f} (local={}, digest={s})", .{
@@ -170,7 +170,7 @@ pub fn updateFile(
// version. Likewise if we're working on AstGen and another process asks for
// the cached file, they'll get it.
const cache_file = while (true) {
- break zir_dir.createFile(&hex_digest, .{
+ break zir_dir.createFile(io, &hex_digest, .{
.read = true,
.truncate = false,
.lock = lock,
@@ -196,7 +196,7 @@ pub fn updateFile(
cache_directory,
});
}
- break zir_dir.createFile(&hex_digest, .{
+ break zir_dir.createFile(io, &hex_digest, .{
.read = true,
.truncate = false,
.lock = lock,
@@ -215,7 +215,7 @@ pub fn updateFile(
else => |e| return e, // Retryable errors are handled at callsite.
};
};
- defer cache_file.close();
+ defer cache_file.close(io);
// Under `--time-report`, ignore cache hits; do the work anyway for those juicy numbers.
const ignore_hit = comp.time_report != null;
@@ -238,18 +238,13 @@ pub fn updateFile(
if (builtin.os.tag == .wasi or lock == .exclusive) break true;
// Otherwise, unlock to give someone a chance to get the exclusive lock
// and then upgrade to an exclusive lock.
- cache_file.unlock();
+ cache_file.unlock(io);
lock = .exclusive;
- try cache_file.lock(lock);
+ try cache_file.lock(io, lock);
};
if (need_update) {
- // The cache is definitely stale so delete the contents to avoid an underwrite later.
- cache_file.setEndPos(0) catch |err| switch (err) {
- error.FileTooBig => unreachable, // 0 is not too big
- else => |e| return e,
- };
- try cache_file.seekTo(0);
+ var cache_file_writer: Io.File.Writer = .init(cache_file, io, &.{});
if (stat.size > std.math.maxInt(u32))
return error.FileTooBig;
@@ -278,22 +273,28 @@ pub fn updateFile(
switch (file.getMode()) {
.zig => {
file.zir = try AstGen.generate(gpa, file.tree.?);
- Zcu.saveZirCache(gpa, cache_file, stat, file.zir.?) catch |err| switch (err) {
+ Zcu.saveZirCache(gpa, &cache_file_writer, stat, file.zir.?) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
- else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {s}", .{
- file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err),
+ else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {t}", .{
+ file.path.fmt(comp), cache_directory, &hex_digest, err,
}),
};
},
.zon => {
file.zoir = try ZonGen.generate(gpa, file.tree.?, .{});
- Zcu.saveZoirCache(cache_file, stat, file.zoir.?) catch |err| {
- log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {s}", .{
- file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err),
+ Zcu.saveZoirCache(&cache_file_writer, stat, file.zoir.?) catch |err| {
+ log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {t}", .{
+ file.path.fmt(comp), cache_directory, &hex_digest, err,
});
};
},
}
+
+ cache_file_writer.end() catch |err| switch (err) {
+ error.WriteFailed => return cache_file_writer.err.?,
+ else => |e| return e,
+ };
+
if (timer.finish()) |ns_astgen| {
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
@@ -346,8 +347,8 @@ pub fn updateFile(
fn loadZirZoirCache(
zcu: *Zcu,
- cache_file: std.fs.File,
- stat: std.fs.File.Stat,
+ cache_file: Io.File,
+ stat: Io.File.Stat,
file: *Zcu.File,
comptime mode: Ast.Mode,
) !enum { success, invalid, truncated, stale } {
@@ -2466,11 +2467,11 @@ fn updateEmbedFileInner(
var file = f: {
const dir, const sub_path = ef.path.openInfo(zcu.comp.dirs);
- break :f try dir.openFile(sub_path, .{});
+ break :f try dir.openFile(io, sub_path, .{});
};
- defer file.close();
+ defer file.close(io);
- const stat: Cache.File.Stat = .fromFs(try file.stat());
+ const stat: Cache.File.Stat = .fromFs(try file.stat(io));
if (ef.val != .none) {
const old_stat = ef.stat;
@@ -4524,12 +4525,14 @@ pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) Ru
.stage2_llvm,
=> {},
},
+ error.Canceled => |e| return e,
}
return error.AlreadyReported;
};
}
fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) error{
OutOfMemory,
+ Canceled,
CodegenFail,
NoLinkFile,
BackendDoesNotProduceMir,
@@ -4555,12 +4558,16 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
null;
defer if (liveness) |*l| l.deinit(gpa);
- if (build_options.enable_debug_extensions and comp.verbose_air) {
- const stderr, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- stderr.print("# Begin Function AIR: {f}:\n", .{fqn.fmt(ip)}) catch {};
- air.write(stderr, pt, liveness);
- stderr.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)}) catch {};
+ if (build_options.enable_debug_extensions and comp.verbose_air) p: {
+ const io = comp.io;
+ const stderr = try io.lockStderr(&.{}, null);
+ defer io.unlockStderr();
+ printVerboseAir(pt, liveness, fqn, air, &stderr.file_writer.interface) catch |err| switch (err) {
+ error.WriteFailed => switch (stderr.file_writer.err.?) {
+ error.Canceled => |e| return e,
+ else => break :p,
+ },
+ };
}
if (std.debug.runtime_safety) verify_liveness: {
@@ -4575,7 +4582,7 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
verify.verify() catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
- else => return zcu.codegenFail(nav, "invalid liveness: {s}", .{@errorName(err)}),
+ else => return zcu.codegenFail(nav, "invalid liveness: {t}", .{err}),
};
}
@@ -4611,3 +4618,17 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
=> return zcu.codegenFail(nav, "unable to codegen: {s}", .{@errorName(err)}),
};
}
+
+fn printVerboseAir(
+ pt: Zcu.PerThread,
+ liveness: ?Air.Liveness,
+ fqn: InternPool.NullTerminatedString,
+ air: *const Air,
+ w: *Io.Writer,
+) Io.Writer.Error!void {
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ try w.print("# Begin Function AIR: {f}:\n", .{fqn.fmt(ip)});
+ try air.write(w, pt, liveness);
+ try w.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)});
+}
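
The extracted `printVerboseAir` also shows the new cancellation discipline for stderr: lock via `io.lockStderr`, call an `Io.Writer.Error`-returning helper, then consult `file_writer.err` to tell cancellation apart from an ordinary write failure. Distilled from the hunk above:

    const stderr = try io.lockStderr(&.{}, null);
    defer io.unlockStderr();
    printVerboseAir(pt, liveness, fqn, air, &stderr.file_writer.interface) catch |err| switch (err) {
        // WriteFailed is generic; the underlying cause lives on the writer.
        error.WriteFailed => switch (stderr.file_writer.err.?) {
            error.Canceled => |e| return e, // cancellation must propagate
            else => {}, // other stderr failures are tolerable for a debug dump
        },
    };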
diff --git a/src/codegen/aarch64/Select.zig b/src/codegen/aarch64/Select.zig
index f390d83f03..49de055b47 100644
--- a/src/codegen/aarch64/Select.zig
+++ b/src/codegen/aarch64/Select.zig
@@ -11273,15 +11273,17 @@ fn initValueAdvanced(
return @enumFromInt(isel.values.items.len);
}
pub fn dumpValues(isel: *Select, which: enum { only_referenced, all }) void {
- errdefer |err| @panic(@errorName(err));
- const stderr, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
-
const zcu = isel.pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const nav = ip.getNav(isel.nav_index);
+ errdefer |err| @panic(@errorName(err));
+
+ const locked_stderr = std.debug.lockStderr(&.{});
+ defer std.debug.unlockStderr();
+ const stderr = &locked_stderr.file_writer.interface;
+
var reverse_live_values: std.AutoArrayHashMapUnmanaged(Value.Index, std.ArrayList(Air.Inst.Index)) = .empty;
defer {
for (reverse_live_values.values()) |*list| list.deinit(gpa);
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 5dc55b74f6..fca89ea4fc 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1,19 +1,22 @@
-const std = @import("std");
const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.codegen);
const math = std.math;
const DW = std.dwarf;
-
const Builder = std.zig.llvm.Builder;
+
+const build_options = @import("build_options");
const llvm = if (build_options.have_llvm)
@import("llvm/bindings.zig")
else
@compileError("LLVM unavailable");
+
const link = @import("../link.zig");
const Compilation = @import("../Compilation.zig");
-const build_options = @import("build_options");
const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");
const Package = @import("../Package.zig");
@@ -799,6 +802,7 @@ pub const Object = struct {
pub fn emit(o: *Object, pt: Zcu.PerThread, options: EmitOptions) error{ LinkFailure, OutOfMemory }!void {
const zcu = pt.zcu;
const comp = zcu.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
{
@@ -961,10 +965,10 @@ pub const Object = struct {
const context, const module = emit: {
if (options.pre_ir_path) |path| {
if (std.mem.eql(u8, path, "-")) {
- o.builder.dump();
+ o.builder.dump(io);
} else {
- o.builder.printToFilePath(std.fs.cwd(), path) catch |err| {
- log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) });
+ o.builder.printToFilePath(io, Io.Dir.cwd(), path) catch |err| {
+ log.err("failed printing LLVM module to \"{s}\": {t}", .{ path, err });
};
}
}
@@ -977,26 +981,26 @@ pub const Object = struct {
o.builder.clearAndFree();
if (options.pre_bc_path) |path| {
- var file = std.fs.cwd().createFile(path, .{}) catch |err|
- return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) });
- defer file.close();
+ var file = Io.Dir.cwd().createFile(io, path, .{}) catch |err|
+ return diags.fail("failed to create '{s}': {t}", .{ path, err });
+ defer file.close(io);
const ptr: [*]const u8 = @ptrCast(bitcode.ptr);
- file.writeAll(ptr[0..(bitcode.len * 4)]) catch |err|
- return diags.fail("failed to write to '{s}': {s}", .{ path, @errorName(err) });
+ file.writeStreamingAll(io, ptr[0..(bitcode.len * 4)]) catch |err|
+ return diags.fail("failed to write to '{s}': {t}", .{ path, err });
}
if (options.asm_path == null and options.bin_path == null and
options.post_ir_path == null and options.post_bc_path == null) return;
if (options.post_bc_path) |path| {
- var file = std.fs.cwd().createFile(path, .{}) catch |err|
- return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) });
- defer file.close();
+ var file = Io.Dir.cwd().createFile(io, path, .{}) catch |err|
+ return diags.fail("failed to create '{s}': {t}", .{ path, err });
+ defer file.close(io);
const ptr: [*]const u8 = @ptrCast(bitcode.ptr);
- file.writeAll(ptr[0..(bitcode.len * 4)]) catch |err|
- return diags.fail("failed to write to '{s}': {s}", .{ path, @errorName(err) });
+ file.writeStreamingAll(io, ptr[0..(bitcode.len * 4)]) catch |err|
+ return diags.fail("failed to write to '{s}': {t}", .{ path, err });
}
if (!build_options.have_llvm or !comp.config.use_lib_llvm) {
@@ -2710,7 +2714,7 @@ pub const Object = struct {
}
fn allocTypeName(o: *Object, pt: Zcu.PerThread, ty: Type) Allocator.Error![:0]const u8 {
- var aw: std.Io.Writer.Allocating = .init(o.gpa);
+ var aw: Io.Writer.Allocating = .init(o.gpa);
defer aw.deinit();
ty.print(&aw.writer, pt, null) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
diff --git a/src/crash_report.zig b/src/crash_report.zig
index d525d4b3b5..e56bc7cec5 100644
--- a/src/crash_report.zig
+++ b/src/crash_report.zig
@@ -95,19 +95,20 @@ fn dumpCrashContext() Io.Writer.Error!void {
// TODO: this does mean that a different thread could grab the stderr mutex between the context
// and the actual panic printing, which would be quite confusing.
- const stderr, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
+ const stderr = std.debug.lockStderr(&.{});
+ defer std.debug.unlockStderr();
+ const w = &stderr.file_writer.interface;
- try stderr.writeAll("Compiler crash context:\n");
+ try w.writeAll("Compiler crash context:\n");
if (CodegenFunc.current) |*cg| {
const func_nav = cg.zcu.funcInfo(cg.func_index).owner_nav;
const func_fqn = cg.zcu.intern_pool.getNav(func_nav).fqn;
- try stderr.print("Generating function '{f}'\n\n", .{func_fqn.fmt(&cg.zcu.intern_pool)});
+ try w.print("Generating function '{f}'\n\n", .{func_fqn.fmt(&cg.zcu.intern_pool)});
} else if (AnalyzeBody.current) |anal| {
- try dumpCrashContextSema(anal, stderr, &S.crash_heap);
+ try dumpCrashContextSema(anal, w, &S.crash_heap);
} else {
- try stderr.writeAll("(no context)\n\n");
+ try w.writeAll("(no context)\n\n");
}
}
fn dumpCrashContextSema(anal: *AnalyzeBody, stderr: *Io.Writer, crash_heap: []u8) Io.Writer.Error!void {
diff --git a/src/fmt.zig b/src/fmt.zig
index 80925200d6..b1903aad53 100644
--- a/src/fmt.zig
+++ b/src/fmt.zig
@@ -37,9 +37,9 @@ const Fmt = struct {
arena: Allocator,
io: Io,
out_buffer: std.Io.Writer.Allocating,
- stdout_writer: *fs.File.Writer,
+ stdout_writer: *Io.File.Writer,
- const SeenMap = std.AutoHashMap(fs.File.INode, void);
+ const SeenMap = std.AutoHashMap(Io.File.INode, void);
};
pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !void {
@@ -59,8 +59,8 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_fmt);
- return process.cleanExit();
+ try Io.File.stdout().writeStreamingAll(io, usage_fmt);
+ return process.cleanExit(io);
} else if (mem.eql(u8, arg, "--color")) {
if (i + 1 >= args.len) {
fatal("expected [auto|on|off] after --color", .{});
@@ -99,9 +99,9 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
fatal("cannot use --stdin with positional arguments", .{});
}
- const stdin: fs.File = .stdin();
+ const stdin: Io.File = .stdin();
var stdio_buffer: [1024]u8 = undefined;
- var file_reader: fs.File.Reader = stdin.reader(io, &stdio_buffer);
+ var file_reader: Io.File.Reader = stdin.reader(io, &stdio_buffer);
const source_code = std.zig.readSourceFileToEndAlloc(gpa, &file_reader) catch |err| {
fatal("unable to read stdin: {}", .{err});
};
@@ -124,7 +124,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
try wip_errors.addZirErrorMessages(zir, tree, source_code, "<stdin>");
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, color);
+ error_bundle.renderToStderr(io, .{}, color) catch {};
process.exit(2);
}
} else {
@@ -138,12 +138,12 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
try wip_errors.addZoirErrorMessages(zoir, tree, source_code, "<stdin>");
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, color);
+ error_bundle.renderToStderr(io, .{}, color) catch {};
process.exit(2);
}
}
} else if (tree.errors.len != 0) {
- try std.zig.printAstErrorsToStderr(gpa, tree, "<stdin>", color);
+ std.zig.printAstErrorsToStderr(gpa, io, tree, "<stdin>", color) catch {};
process.exit(2);
}
const formatted = try tree.renderAlloc(gpa);
@@ -154,7 +154,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
process.exit(code);
}
- return fs.File.stdout().writeAll(formatted);
+ return Io.File.stdout().writeStreamingAll(io, formatted);
}
if (input_files.items.len == 0) {
@@ -162,7 +162,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
}
var stdout_buffer: [4096]u8 = undefined;
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
var fmt: Fmt = .{
.gpa = gpa,
@@ -182,13 +182,13 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
// Mark any excluded files/directories as already seen,
// so that they are skipped later during actual processing
for (excluded_files.items) |file_path| {
- const stat = fs.cwd().statFile(file_path) catch |err| switch (err) {
+ const stat = Io.Dir.cwd().statFile(io, file_path, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
// On Windows, statFile does not work for directories
error.IsDir => dir: {
- var dir = try fs.cwd().openDir(file_path, .{});
- defer dir.close();
- break :dir try dir.stat();
+ var dir = try Io.Dir.cwd().openDir(io, file_path, .{});
+ defer dir.close(io);
+ break :dir try dir.stat(io);
},
else => |e| return e,
};
@@ -196,7 +196,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
}
for (input_files.items) |file_path| {
- try fmtPath(&fmt, file_path, check_flag, fs.cwd(), file_path);
+ try fmtPath(&fmt, file_path, check_flag, Io.Dir.cwd(), file_path);
}
try fmt.stdout_writer.interface.flush();
if (fmt.any_error) {
@@ -204,7 +204,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
}
}
-fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) !void {
+fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: Io.Dir, sub_path: []const u8) !void {
fmtPathFile(fmt, file_path, check_mode, dir, sub_path) catch |err| switch (err) {
error.IsDir, error.AccessDenied => return fmtPathDir(fmt, file_path, check_mode, dir, sub_path),
else => {
@@ -219,17 +219,19 @@ fn fmtPathDir(
fmt: *Fmt,
file_path: []const u8,
check_mode: bool,
- parent_dir: fs.Dir,
+ parent_dir: Io.Dir,
parent_sub_path: []const u8,
) !void {
- var dir = try parent_dir.openDir(parent_sub_path, .{ .iterate = true });
- defer dir.close();
+ const io = fmt.io;
+
+ var dir = try parent_dir.openDir(io, parent_sub_path, .{ .iterate = true });
+ defer dir.close(io);
- const stat = try dir.stat();
+ const stat = try dir.stat(io);
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
var dir_it = dir.iterate();
- while (try dir_it.next()) |entry| {
+ while (try dir_it.next(io)) |entry| {
const is_dir = entry.kind == .directory;
if (mem.startsWith(u8, entry.name, ".")) continue;
@@ -242,7 +244,7 @@ fn fmtPathDir(
try fmtPathDir(fmt, full_path, check_mode, dir, entry.name);
} else {
fmtPathFile(fmt, full_path, check_mode, dir, entry.name) catch |err| {
- std.log.err("unable to format '{s}': {s}", .{ full_path, @errorName(err) });
+ std.log.err("unable to format '{s}': {t}", .{ full_path, err });
fmt.any_error = true;
return;
};
@@ -255,22 +257,22 @@ fn fmtPathFile(
fmt: *Fmt,
file_path: []const u8,
check_mode: bool,
- dir: fs.Dir,
+ dir: Io.Dir,
sub_path: []const u8,
) !void {
const io = fmt.io;
- const source_file = try dir.openFile(sub_path, .{});
+ const source_file = try dir.openFile(io, sub_path, .{});
var file_closed = false;
- errdefer if (!file_closed) source_file.close();
+ errdefer if (!file_closed) source_file.close(io);
- const stat = try source_file.stat();
+ const stat = try source_file.stat(io);
if (stat.kind == .directory)
return error.IsDir;
var read_buffer: [1024]u8 = undefined;
- var file_reader: fs.File.Reader = source_file.reader(io, &read_buffer);
+ var file_reader: Io.File.Reader = source_file.reader(io, &read_buffer);
file_reader.size = stat.size;
const gpa = fmt.gpa;
@@ -280,7 +282,7 @@ fn fmtPathFile(
};
defer gpa.free(source_code);
- source_file.close();
+ source_file.close(io);
file_closed = true;
// Add to set after no longer possible to get error.IsDir.
@@ -296,7 +298,7 @@ fn fmtPathFile(
defer tree.deinit(gpa);
if (tree.errors.len != 0) {
- try std.zig.printAstErrorsToStderr(gpa, tree, file_path, fmt.color);
+ try std.zig.printAstErrorsToStderr(gpa, io, tree, file_path, fmt.color);
fmt.any_error = true;
return;
}
@@ -317,7 +319,7 @@ fn fmtPathFile(
try wip_errors.addZirErrorMessages(zir, tree, source_code, file_path);
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, fmt.color);
+ try error_bundle.renderToStderr(io, .{}, fmt.color);
fmt.any_error = true;
}
},
@@ -332,7 +334,7 @@ fn fmtPathFile(
try wip_errors.addZoirErrorMessages(zoir, tree, source_code, file_path);
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, fmt.color);
+ try error_bundle.renderToStderr(io, .{}, fmt.color);
fmt.any_error = true;
}
},
@@ -353,7 +355,7 @@ fn fmtPathFile(
try fmt.stdout_writer.interface.print("{s}\n", .{file_path});
fmt.any_error = true;
} else {
- var af = try dir.atomicFile(sub_path, .{ .mode = stat.mode, .write_buffer = &.{} });
+ var af = try dir.atomicFile(io, sub_path, .{ .permissions = stat.permissions, .write_buffer = &.{} });
defer af.deinit();
try af.file_writer.interface.writeAll(fmt.out_buffer.written());
@@ -368,7 +370,7 @@ pub fn main() !void {
var arena_instance = std.heap.ArenaAllocator.init(gpa);
const arena = arena_instance.allocator();
const args = try process.argsAlloc(arena);
- var threaded: std.Io.Threaded = .init(gpa);
+ var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
return run(gpa, arena, io, args[1..]);
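
The reworked `main` shows how a standalone tool bootstraps an `Io` instance: construct a `std.Io.Threaded` backend (whose `init` now also takes an options struct) and hand its `io()` to everything below. A self-contained sketch under that assumption (the allocator choice is illustrative only):

    pub fn main() !void {
        const gpa = std.heap.page_allocator; // illustrative; zig fmt uses its own gpa
        var threaded: std.Io.Threaded = .init(gpa, .{});
        defer threaded.deinit();
        const io = threaded.io();
        // Every blocking call below this point receives `io` explicitly,
        // e.g. run(gpa, arena, io, args[1..]).
        _ = io;
    }
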
diff --git a/src/introspect.zig b/src/introspect.zig
index 8467b566c6..0a57505aeb 100644
--- a/src/introspect.zig
+++ b/src/introspect.zig
@@ -1,53 +1,55 @@
-const std = @import("std");
const builtin = @import("builtin");
+const build_options = @import("build_options");
+
+const std = @import("std");
+const Io = std.Io;
+const Dir = std.Io.Dir;
const mem = std.mem;
-const Allocator = mem.Allocator;
-const os = std.os;
-const fs = std.fs;
+const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
+
const Compilation = @import("Compilation.zig");
const Package = @import("Package.zig");
-const build_options = @import("build_options");
/// Returns the sub_path that worked, or `null` if none did.
/// The path of the returned Directory is relative to `base`.
/// The handle of the returned Directory is open.
-fn testZigInstallPrefix(base_dir: fs.Dir) ?Cache.Directory {
- const test_index_file = "std" ++ fs.path.sep_str ++ "std.zig";
+fn testZigInstallPrefix(io: Io, base_dir: Io.Dir) ?Cache.Directory {
+ const test_index_file = "std" ++ Dir.path.sep_str ++ "std.zig";
zig_dir: {
// Try lib/zig/std/std.zig
- const lib_zig = "lib" ++ fs.path.sep_str ++ "zig";
- var test_zig_dir = base_dir.openDir(lib_zig, .{}) catch break :zig_dir;
- const file = test_zig_dir.openFile(test_index_file, .{}) catch {
- test_zig_dir.close();
+ const lib_zig = "lib" ++ Dir.path.sep_str ++ "zig";
+ var test_zig_dir = base_dir.openDir(io, lib_zig, .{}) catch break :zig_dir;
+ const file = test_zig_dir.openFile(io, test_index_file, .{}) catch {
+ test_zig_dir.close(io);
break :zig_dir;
};
- file.close();
+ file.close(io);
return .{ .handle = test_zig_dir, .path = lib_zig };
}
// Try lib/std/std.zig
- var test_zig_dir = base_dir.openDir("lib", .{}) catch return null;
- const file = test_zig_dir.openFile(test_index_file, .{}) catch {
- test_zig_dir.close();
+ var test_zig_dir = base_dir.openDir(io, "lib", .{}) catch return null;
+ const file = test_zig_dir.openFile(io, test_index_file, .{}) catch {
+ test_zig_dir.close(io);
return null;
};
- file.close();
+ file.close(io);
return .{ .handle = test_zig_dir, .path = "lib" };
}
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
-pub fn findZigLibDir(gpa: Allocator) !Cache.Directory {
+pub fn findZigLibDir(gpa: Allocator, io: Io) !Cache.Directory {
const cwd_path = try getResolvedCwd(gpa);
defer gpa.free(cwd_path);
- const self_exe_path = try fs.selfExePathAlloc(gpa);
+ const self_exe_path = try std.process.executablePathAlloc(io, gpa);
defer gpa.free(self_exe_path);
- return findZigLibDirFromSelfExe(gpa, cwd_path, self_exe_path);
+ return findZigLibDirFromSelfExe(gpa, io, cwd_path, self_exe_path);
}
-/// Like `std.process.getCwdAlloc`, but also resolves the path with `std.fs.path.resolve`. This
+/// Like `std.process.getCwdAlloc`, but also resolves the path with `Dir.path.resolve`. This
/// means the path has no repeated separators, no "." or ".." components, and no trailing separator.
/// On WASI, "" is returned instead of ".".
pub fn getResolvedCwd(gpa: Allocator) error{
@@ -65,27 +67,28 @@ pub fn getResolvedCwd(gpa: Allocator) error{
}
const cwd = try std.process.getCwdAlloc(gpa);
defer gpa.free(cwd);
- const resolved = try fs.path.resolve(gpa, &.{cwd});
- std.debug.assert(fs.path.isAbsolute(resolved));
+ const resolved = try Dir.path.resolve(gpa, &.{cwd});
+ std.debug.assert(Dir.path.isAbsolute(resolved));
return resolved;
}
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
pub fn findZigLibDirFromSelfExe(
allocator: Allocator,
+ io: Io,
/// The return value of `getResolvedCwd`.
/// Passed as an argument to avoid pointlessly repeating the call.
cwd_path: []const u8,
self_exe_path: []const u8,
) error{ OutOfMemory, FileNotFound }!Cache.Directory {
- const cwd = fs.cwd();
+ const cwd = Io.Dir.cwd();
var cur_path: []const u8 = self_exe_path;
- while (fs.path.dirname(cur_path)) |dirname| : (cur_path = dirname) {
- var base_dir = cwd.openDir(dirname, .{}) catch continue;
- defer base_dir.close();
+ while (Dir.path.dirname(cur_path)) |dirname| : (cur_path = dirname) {
+ var base_dir = cwd.openDir(io, dirname, .{}) catch continue;
+ defer base_dir.close(io);
- const sub_directory = testZigInstallPrefix(base_dir) orelse continue;
- const p = try fs.path.join(allocator, &.{ dirname, sub_directory.path.? });
+ const sub_directory = testZigInstallPrefix(io, base_dir) orelse continue;
+ const p = try Dir.path.join(allocator, &.{ dirname, sub_directory.path.? });
defer allocator.free(p);
const resolved = try resolvePath(allocator, cwd_path, &.{p});
@@ -109,18 +112,18 @@ pub fn resolveGlobalCacheDir(allocator: Allocator) ![]u8 {
if (builtin.os.tag != .windows) {
if (std.zig.EnvVar.XDG_CACHE_HOME.getPosix()) |cache_root| {
if (cache_root.len > 0) {
- return fs.path.join(allocator, &.{ cache_root, appname });
+ return Dir.path.join(allocator, &.{ cache_root, appname });
}
}
if (std.zig.EnvVar.HOME.getPosix()) |home| {
- return fs.path.join(allocator, &.{ home, ".cache", appname });
+ return Dir.path.join(allocator, &.{ home, ".cache", appname });
}
}
- return fs.getAppDataDir(allocator, appname);
+ return std.fs.getAppDataDir(allocator, appname);
}
-/// Similar to `fs.path.resolve`, but converts to a cwd-relative path, or, if that would
+/// Similar to `Dir.path.resolve`, but converts to a cwd-relative path, or, if that would
/// start with a relative up-dir (".."), an absolute path based on the cwd. Also, the cwd
/// returns the empty string ("") instead of ".".
pub fn resolvePath(
@@ -132,7 +135,7 @@ pub fn resolvePath(
) Allocator.Error![]u8 {
if (builtin.target.os.tag == .wasi) {
std.debug.assert(mem.eql(u8, cwd_resolved, ""));
- const res = try fs.path.resolve(gpa, paths);
+ const res = try Dir.path.resolve(gpa, paths);
if (mem.eql(u8, res, ".")) {
gpa.free(res);
return "";
@@ -142,16 +145,16 @@ pub fn resolvePath(
// Heuristic for a fast path: if no component is absolute and ".." never appears, we just need to resolve `paths`.
for (paths) |p| {
- if (fs.path.isAbsolute(p)) break; // absolute path
+ if (Dir.path.isAbsolute(p)) break; // absolute path
if (mem.indexOf(u8, p, "..") != null) break; // may contain up-dir
} else {
// no absolute path, no "..".
- const res = try fs.path.resolve(gpa, paths);
+ const res = try Dir.path.resolve(gpa, paths);
if (mem.eql(u8, res, ".")) {
gpa.free(res);
return "";
}
- std.debug.assert(!fs.path.isAbsolute(res));
+ std.debug.assert(!Dir.path.isAbsolute(res));
std.debug.assert(!isUpDir(res));
return res;
}
@@ -160,19 +163,19 @@ pub fn resolvePath(
// Optimization: `paths` often has just one element.
const path_resolved = switch (paths.len) {
0 => unreachable,
- 1 => try fs.path.resolve(gpa, &.{ cwd_resolved, paths[0] }),
+ 1 => try Dir.path.resolve(gpa, &.{ cwd_resolved, paths[0] }),
else => r: {
const all_paths = try gpa.alloc([]const u8, paths.len + 1);
defer gpa.free(all_paths);
all_paths[0] = cwd_resolved;
@memcpy(all_paths[1..], paths);
- break :r try fs.path.resolve(gpa, all_paths);
+ break :r try Dir.path.resolve(gpa, all_paths);
},
};
errdefer gpa.free(path_resolved);
- std.debug.assert(fs.path.isAbsolute(path_resolved));
- std.debug.assert(fs.path.isAbsolute(cwd_resolved));
+ std.debug.assert(Dir.path.isAbsolute(path_resolved));
+ std.debug.assert(Dir.path.isAbsolute(cwd_resolved));
if (!std.mem.startsWith(u8, path_resolved, cwd_resolved)) return path_resolved; // not in cwd
if (path_resolved.len == cwd_resolved.len) {
@@ -180,7 +183,7 @@ pub fn resolvePath(
gpa.free(path_resolved);
return "";
}
- if (path_resolved[cwd_resolved.len] != std.fs.path.sep) return path_resolved; // not in cwd (last component differs)
+ if (path_resolved[cwd_resolved.len] != Dir.path.sep) return path_resolved; // not in cwd (last component differs)
// in cwd; extract sub path
const sub_path = try gpa.dupe(u8, path_resolved[cwd_resolved.len + 1 ..]);
@@ -188,9 +191,8 @@ pub fn resolvePath(
return sub_path;
}
-/// TODO move this to std.fs.path
pub fn isUpDir(p: []const u8) bool {
- return mem.startsWith(u8, p, "..") and (p.len == 2 or p[2] == fs.path.sep);
+ return mem.startsWith(u8, p, "..") and (p.len == 2 or p[2] == Dir.path.sep);
}
pub const default_local_zig_cache_basename = ".zig-cache";
@@ -198,15 +200,15 @@ pub const default_local_zig_cache_basename = ".zig-cache";
/// Searches upwards from `cwd` for a directory containing a `build.zig` file.
/// If such a directory is found, returns the path to it joined to the `.zig_cache` name.
/// Otherwise, returns `null`, indicating no suitable local cache location.
-pub fn resolveSuitableLocalCacheDir(arena: Allocator, cwd: []const u8) Allocator.Error!?[]u8 {
+pub fn resolveSuitableLocalCacheDir(arena: Allocator, io: Io, cwd: []const u8) Allocator.Error!?[]u8 {
var cur_dir = cwd;
while (true) {
- const joined = try fs.path.join(arena, &.{ cur_dir, Package.build_zig_basename });
- if (fs.cwd().access(joined, .{})) |_| {
- return try fs.path.join(arena, &.{ cur_dir, default_local_zig_cache_basename });
+ const joined = try Dir.path.join(arena, &.{ cur_dir, Package.build_zig_basename });
+ if (Io.Dir.cwd().access(io, joined, .{})) |_| {
+ return try Dir.path.join(arena, &.{ cur_dir, default_local_zig_cache_basename });
} else |err| switch (err) {
error.FileNotFound => {
- cur_dir = fs.path.dirname(cur_dir) orelse return null;
+ cur_dir = Dir.path.dirname(cur_dir) orelse return null;
continue;
},
else => return null,
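
`resolvePath` prefers a cwd-relative result, keeps an absolute path when the target lies outside the cwd, and maps an exact cwd match to "". A hedged test-style sketch of the relative case, with hypothetical POSIX paths (in real use `cwd_resolved` comes from `getResolvedCwd`):

    test "resolvePath returns the cwd-relative form" {
        const gpa = std.testing.allocator;
        const sub = try resolvePath(gpa, "/home/u/proj", &.{"/home/u/proj/src"});
        defer gpa.free(sub);
        try std.testing.expectEqualStrings("src", sub);
    }
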
diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig
index afeb5b3282..ba85f45830 100644
--- a/src/libs/freebsd.zig
+++ b/src/libs/freebsd.zig
@@ -1,9 +1,9 @@
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const mem = std.mem;
const log = std.log;
-const fs = std.fs;
-const path = fs.path;
+const path = std.Io.Dir.path;
const assert = std.debug.assert;
const Version = std.SemanticVersion;
const Path = std.Build.Cache.Path;
@@ -401,8 +401,8 @@ pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: Path,
- pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
- self.lock.release();
+ pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator, io: Io) void {
+ self.lock.release(io);
gpa.free(self.dir_path.sub_path);
self.* = undefined;
}
@@ -444,12 +444,12 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var cache: Cache = .{
.gpa = gpa,
.io = io,
- .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, "h", .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() });
cache.addPrefix(comp.dirs.zig_lib);
cache.addPrefix(comp.dirs.global_cache);
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
var man = cache.obtain();
defer man.deinit();
@@ -468,7 +468,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.lock = man.toOwnedLock(),
.dir_path = .{
.root_dir = comp.dirs.global_cache,
- .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
},
});
}
@@ -477,10 +477,10 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });
var o_directory: Cache.Directory = .{
- .handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}),
+ .handle = try comp.dirs.global_cache.handle.createDirPathOpen(io, o_sub_path, .{}),
.path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}),
};
- defer o_directory.handle.close();
+ defer o_directory.handle.close(io);
const abilists_contents = man.files.keys()[abilists_index].contents.?;
const metadata = try loadMetaData(gpa, abilists_contents);
@@ -520,7 +520,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
for (metadata.all_versions[0 .. target_ver_index + 1]) |ver| {
try map_contents.print("FBSD_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
}
- try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items });
+ try o_directory.handle.writeFile(io, .{ .sub_path = all_map_basename, .data = map_contents.items });
map_contents.deinit();
}
@@ -974,7 +974,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "stdthreads", etc.
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
- try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items });
+ try o_directory.handle.writeFile(io, .{ .sub_path = asm_file_basename, .data = stubs_asm.items });
try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node);
}
@@ -986,7 +986,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.lock = man.toOwnedLock(),
.dir_path = .{
.root_dir = comp.dirs.global_cache,
- .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
},
});
}
@@ -1014,7 +1014,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C
const so_path: Path = .{
.root_dir = so_files.dir_path.root_dir,
.sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{
- so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.getSoVersion(&target.os),
+ so_files.dir_path.sub_path, path.sep, lib.name, lib.getSoVersion(&target.os),
}) catch return comp.setAllocFailure(),
};
task_buffer[task_buffer_i] = .{ .load_dso = so_path };
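
The three libc stub builders (freebsd here, glibc and netbsd below) migrate identically: `makeOpenPath` becomes `createDirPathOpen(io, ...)`, directory handles close through `io`, and `writeFile` gains the `io` parameter. A minimal sketch of that trio with hypothetical names (the "o/..." path stands in for the real cache digest):

    fn emitStub(io: Io, cache_root: Io.Dir, asm_text: []const u8) !void {
        var o_dir = try cache_root.createDirPathOpen(io, "o/1234", .{});
        defer o_dir.close(io);
        try o_dir.writeFile(io, .{ .sub_path = "c.s", .data = asm_text });
    }
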
diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig
index 64d0fdbeac..e9b6ce1882 100644
--- a/src/libs/glibc.zig
+++ b/src/libs/glibc.zig
@@ -1,9 +1,9 @@
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const mem = std.mem;
const log = std.log;
-const fs = std.fs;
-const path = fs.path;
+const path = std.Io.Dir.path;
const assert = std.debug.assert;
const Version = std.SemanticVersion;
const Path = std.Build.Cache.Path;
@@ -640,8 +640,8 @@ pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: Path,
- pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
- self.lock.release();
+ pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator, io: Io) void {
+ self.lock.release(io);
gpa.free(self.dir_path.sub_path);
self.* = undefined;
}
@@ -679,12 +679,12 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var cache: Cache = .{
.gpa = gpa,
.io = io,
- .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, "h", .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() });
cache.addPrefix(comp.dirs.zig_lib);
cache.addPrefix(comp.dirs.global_cache);
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
var man = cache.obtain();
defer man.deinit();
@@ -703,7 +703,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.lock = man.toOwnedLock(),
.dir_path = .{
.root_dir = comp.dirs.global_cache,
- .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
},
});
}
@@ -712,10 +712,10 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });
var o_directory: Cache.Directory = .{
- .handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}),
+ .handle = try comp.dirs.global_cache.handle.createDirPathOpen(io, o_sub_path, .{}),
.path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}),
};
- defer o_directory.handle.close();
+ defer o_directory.handle.close(io);
const abilists_contents = man.files.keys()[abilists_index].contents.?;
const metadata = try loadMetaData(gpa, abilists_contents);
@@ -759,7 +759,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
try map_contents.print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch });
}
}
- try o_directory.handle.writeFile(.{ .sub_path = all_map_basename, .data = map_contents.items });
+ try o_directory.handle.writeFile(io, .{ .sub_path = all_map_basename, .data = map_contents.items });
map_contents.deinit(); // The most recent allocation of an arena can be freed :)
}
@@ -775,7 +775,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
try stubs_asm.appendSlice(".text\n");
var sym_i: usize = 0;
- var sym_name_buf: std.Io.Writer.Allocating = .init(arena);
+ var sym_name_buf: Io.Writer.Allocating = .init(arena);
var opt_symbol_name: ?[]const u8 = null;
var versions_buffer: [32]u8 = undefined;
var versions_len: usize = undefined;
@@ -796,7 +796,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
// twice, which causes a "duplicate symbol" assembler error.
var versions_written = std.AutoArrayHashMap(Version, void).init(arena);
- var inc_reader: std.Io.Reader = .fixed(metadata.inclusions);
+ var inc_reader: Io.Reader = .fixed(metadata.inclusions);
const fn_inclusions_len = try inc_reader.takeInt(u16, .little);
@@ -1118,7 +1118,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc.
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
- try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items });
+ try o_directory.handle.writeFile(io, .{ .sub_path = asm_file_basename, .data = stubs_asm.items });
try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node);
}
@@ -1130,7 +1130,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.lock = man.toOwnedLock(),
.dir_path = .{
.root_dir = comp.dirs.global_cache,
- .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
},
});
}
@@ -1156,7 +1156,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C
const so_path: Path = .{
.root_dir = so_files.dir_path.root_dir,
.sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{
- so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover,
+ so_files.dir_path.sub_path, path.sep, lib.name, lib.sover,
}) catch return comp.setAllocFailure(),
};
task_buffer[task_buffer_i] = .{ .load_dso = so_path };
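
One glibc-specific detail above: the abilists inclusion metadata is now decoded through `Io.Reader` in its fixed-buffer form. A small sketch of pulling a little-endian counter out of an in-memory slice this way (the wrapper function is hypothetical):

    fn readFnInclusionsLen(inclusions: []const u8) !u16 {
        var inc_reader: Io.Reader = .fixed(inclusions);
        return inc_reader.takeInt(u16, .little);
    }
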
diff --git a/src/libs/mingw.zig b/src/libs/mingw.zig
index b3c018996a..03ed917c4f 100644
--- a/src/libs/mingw.zig
+++ b/src/libs/mingw.zig
@@ -1,7 +1,8 @@
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const mem = std.mem;
-const path = std.fs.path;
+const path = std.Io.Dir.path;
const assert = std.debug.assert;
const log = std.log.scoped(.mingw);
@@ -241,7 +242,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const def_file_path = findDef(arena, comp.getTarget(), comp.dirs.zig_lib, lib_name) catch |err| switch (err) {
+ const def_file_path = findDef(arena, io, comp.getTarget(), comp.dirs.zig_lib, lib_name) catch |err| switch (err) {
error.FileNotFound => {
log.debug("no {s}.def file available to make a DLL import {s}.lib", .{ lib_name, lib_name });
// In this case we will end up putting foo.lib onto the linker line and letting the linker
@@ -257,12 +258,12 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
var cache: Cache = .{
.gpa = gpa,
.io = io,
- .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, "h", .{}),
};
- cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() });
cache.addPrefix(comp.dirs.zig_lib);
cache.addPrefix(comp.dirs.global_cache);
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
cache.hash.addBytes(build_options.version);
cache.hash.addOptionalBytes(comp.dirs.zig_lib.path);
@@ -296,26 +297,32 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
const digest = man.final();
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
- var o_dir = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{});
- defer o_dir.close();
+ var o_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, o_sub_path, .{});
+ defer o_dir.close(io);
const aro = @import("aro");
var diagnostics: aro.Diagnostics = .{
.output = .{ .to_list = .{ .arena = .init(gpa) } },
};
defer diagnostics.deinit();
- var aro_comp = aro.Compilation.init(gpa, arena, io, &diagnostics, std.fs.cwd());
+ var aro_comp = aro.Compilation.init(gpa, arena, io, &diagnostics, Io.Dir.cwd());
defer aro_comp.deinit();
aro_comp.target = .fromZigTarget(target.*);
const include_dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "mingw", "def-include" });
- if (comp.verbose_cc) print: {
- var stderr, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- nosuspend stderr.print("def file: {s}\n", .{def_file_path}) catch break :print;
- nosuspend stderr.print("include dir: {s}\n", .{include_dir}) catch break :print;
+ if (comp.verbose_cc) {
+ var buffer: [256]u8 = undefined;
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ w.print("def file: {s}\n", .{def_file_path}) catch |err| switch (err) {
+ error.WriteFailed => return stderr.file_writer.err.?,
+ };
+ w.print("include dir: {s}\n", .{include_dir}) catch |err| switch (err) {
+ error.WriteFailed => return stderr.file_writer.err.?,
+ };
}
try aro_comp.search_path.append(gpa, .{ .path = include_dir, .kind = .normal });
@@ -332,18 +339,21 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
if (aro_comp.diagnostics.output.to_list.messages.items.len != 0) {
var buffer: [64]u8 = undefined;
- const w, const ttyconf = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
for (aro_comp.diagnostics.output.to_list.messages.items) |msg| {
if (msg.kind == .@"fatal error" or msg.kind == .@"error") {
- msg.write(w, ttyconf, true) catch {};
+ msg.write(stderr.terminal(), true) catch |err| switch (err) {
+ error.WriteFailed => return stderr.file_writer.err.?,
+ error.Unexpected => |e| return e,
+ };
return error.AroPreprocessorFailed;
}
}
}
const members = members: {
- var aw: std.Io.Writer.Allocating = .init(gpa);
+ var aw: Io.Writer.Allocating = .init(gpa);
errdefer aw.deinit();
try pp.prettyPrintTokens(&aw.writer, .result_only);
@@ -356,8 +366,9 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
error.OutOfMemory => |e| return e,
error.ParseError => {
var buffer: [64]u8 = undefined;
- const w, _ = std.debug.lockStderrWriter(&buffer);
- defer std.debug.unlockStderrWriter();
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
try w.writeAll("error: ");
try def_diagnostics.writeMsg(w, input);
try w.writeByte('\n');
@@ -376,10 +387,10 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
errdefer gpa.free(lib_final_path);
{
- const lib_final_file = try o_dir.createFile(final_lib_basename, .{ .truncate = true });
- defer lib_final_file.close();
+ const lib_final_file = try o_dir.createFile(io, final_lib_basename, .{ .truncate = true });
+ defer lib_final_file.close(io);
var buffer: [1024]u8 = undefined;
- var file_writer = lib_final_file.writer(&buffer);
+ var file_writer = lib_final_file.writer(io, &buffer);
try implib.writeCoffArchive(gpa, &file_writer.interface, members);
try file_writer.interface.flush();
}
@@ -401,11 +412,12 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
pub fn libExists(
allocator: Allocator,
+ io: Io,
target: *const std.Target,
zig_lib_directory: Cache.Directory,
lib_name: []const u8,
) !bool {
- const s = findDef(allocator, target, zig_lib_directory, lib_name) catch |err| switch (err) {
+ const s = findDef(allocator, io, target, zig_lib_directory, lib_name) catch |err| switch (err) {
error.FileNotFound => return false,
else => |e| return e,
};
@@ -417,6 +429,7 @@ pub fn libExists(
/// see if a .def file exists.
fn findDef(
allocator: Allocator,
+ io: Io,
target: *const std.Target,
zig_lib_directory: Cache.Directory,
lib_name: []const u8,
@@ -442,7 +455,7 @@ fn findDef(
} else {
try override_path.print(fmt_path, .{ lib_path, lib_name });
}
- if (std.fs.cwd().access(override_path.items, .{})) |_| {
+ if (Io.Dir.cwd().access(io, override_path.items, .{})) |_| {
return override_path.toOwnedSlice();
} else |err| switch (err) {
error.FileNotFound => {},
@@ -459,7 +472,7 @@ fn findDef(
} else {
try override_path.print(fmt_path, .{lib_name});
}
- if (std.fs.cwd().access(override_path.items, .{})) |_| {
+ if (Io.Dir.cwd().access(io, override_path.items, .{})) |_| {
return override_path.toOwnedSlice();
} else |err| switch (err) {
error.FileNotFound => {},
@@ -476,7 +489,7 @@ fn findDef(
} else {
try override_path.print(fmt_path, .{lib_name});
}
- if (std.fs.cwd().access(override_path.items, .{})) |_| {
+ if (Io.Dir.cwd().access(io, override_path.items, .{})) |_| {
return override_path.toOwnedSlice();
} else |err| switch (err) {
error.FileNotFound => {},
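
Two details recur in the mingw hunks: stderr locking moves onto the Io instance (`io.lockStderr(&buffer, null)` paired with `io.unlockStderr()`), and a generic `error.WriteFailed` is unwrapped back into the concrete error stored on the underlying file writer. A sketch of that unwrap, using only calls the hunks show (the message is hypothetical):

    fn printNote(io: Io) !void {
        var buffer: [256]u8 = undefined;
        const stderr = try io.lockStderr(&buffer, null);
        defer io.unlockStderr();
        const w = &stderr.file_writer.interface;
        w.writeAll("note: ...\n") catch |err| switch (err) {
            // The interface only reports WriteFailed; the real cause is
            // retained on the underlying file writer.
            error.WriteFailed => return stderr.file_writer.err.?,
        };
    }
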
diff --git a/src/libs/mingw/def.zig b/src/libs/mingw/def.zig
index 24dc95c13c..f1c112d16e 100644
--- a/src/libs/mingw/def.zig
+++ b/src/libs/mingw/def.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const Io = std.Io;
pub const ModuleDefinitionType = enum {
mingw,
@@ -663,7 +664,9 @@ test parse {
\\
;
- try testParse(.AMD64, source, "foo.dll", &[_]ModuleDefinition.Export{
+ const io = std.testing.io;
+
+ try testParse(io, .AMD64, source, "foo.dll", &[_]ModuleDefinition.Export{
.{
.name = "foo",
.mangled_symbol_name = null,
@@ -743,7 +746,7 @@ test parse {
},
});
- try testParse(.I386, source, "foo.dll", &[_]ModuleDefinition.Export{
+ try testParse(io, .I386, source, "foo.dll", &[_]ModuleDefinition.Export{
.{
.name = "_foo",
.mangled_symbol_name = null,
@@ -823,7 +826,7 @@ test parse {
},
});
- try testParse(.ARMNT, source, "foo.dll", &[_]ModuleDefinition.Export{
+ try testParse(io, .ARMNT, source, "foo.dll", &[_]ModuleDefinition.Export{
.{
.name = "foo",
.mangled_symbol_name = null,
@@ -903,7 +906,7 @@ test parse {
},
});
- try testParse(.ARM64, source, "foo.dll", &[_]ModuleDefinition.Export{
+ try testParse(io, .ARM64, source, "foo.dll", &[_]ModuleDefinition.Export{
.{
.name = "foo",
.mangled_symbol_name = null,
@@ -997,7 +1000,9 @@ test "ntdll" {
\\RtlActivateActivationContextUnsafeFast@0
;
- try testParse(.AMD64, source, "ntdll.dll", &[_]ModuleDefinition.Export{
+ const io = std.testing.io;
+
+ try testParse(io, .AMD64, source, "ntdll.dll", &[_]ModuleDefinition.Export{
.{
.name = "RtlDispatchAPC@12",
.mangled_symbol_name = null,
@@ -1023,15 +1028,22 @@ test "ntdll" {
});
}
-fn testParse(machine_type: std.coff.IMAGE.FILE.MACHINE, source: [:0]const u8, expected_module_name: []const u8, expected_exports: []const ModuleDefinition.Export) !void {
+fn testParse(
+ io: Io,
+ machine_type: std.coff.IMAGE.FILE.MACHINE,
+ source: [:0]const u8,
+ expected_module_name: []const u8,
+ expected_exports: []const ModuleDefinition.Export,
+) !void {
var diagnostics: Diagnostics = undefined;
const module = parse(std.testing.allocator, source, machine_type, .mingw, &diagnostics) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.ParseError => {
- const stderr, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- try diagnostics.writeMsg(stderr, source);
- try stderr.writeByte('\n');
+ const stderr = try io.lockStderr(&.{}, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ try diagnostics.writeMsg(w, source);
+ try w.writeByte('\n');
return err;
},
};
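
Tests that exercise blocking I/O now obtain their Io instance from `std.testing.io` and thread it through helpers such as `testParse`. A trivially runnable sketch of that acquisition:

    test "tests receive an Io instance" {
        const io = std.testing.io;
        // Passed along to I/O-dependent helpers, e.g. testParse(io, ...).
        _ = io;
    }
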
diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig
index 8d35e3bd71..9e4213d237 100644
--- a/src/libs/netbsd.zig
+++ b/src/libs/netbsd.zig
@@ -1,9 +1,9 @@
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const mem = std.mem;
const log = std.log;
-const fs = std.fs;
-const path = fs.path;
+const path = std.Io.Dir.path;
const assert = std.debug.assert;
const Version = std.SemanticVersion;
const Path = std.Build.Cache.Path;
@@ -346,8 +346,8 @@ pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: Path,
- pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
- self.lock.release();
+ pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator, io: Io) void {
+ self.lock.release(io);
gpa.free(self.dir_path.sub_path);
self.* = undefined;
}
@@ -385,12 +385,12 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var cache: Cache = .{
.gpa = gpa,
.io = io,
- .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try comp.dirs.global_cache.handle.createDirPathOpen(io, "h", .{}),
};
- cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
+ cache.addPrefix(.{ .path = null, .handle = Io.Dir.cwd() });
cache.addPrefix(comp.dirs.zig_lib);
cache.addPrefix(comp.dirs.global_cache);
- defer cache.manifest_dir.close();
+ defer cache.manifest_dir.close(io);
var man = cache.obtain();
defer man.deinit();
@@ -409,7 +409,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.lock = man.toOwnedLock(),
.dir_path = .{
.root_dir = comp.dirs.global_cache,
- .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
},
});
}
@@ -418,10 +418,10 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });
var o_directory: Cache.Directory = .{
- .handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}),
+ .handle = try comp.dirs.global_cache.handle.createDirPathOpen(io, o_sub_path, .{}),
.path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}),
};
- defer o_directory.handle.close();
+ defer o_directory.handle.close(io);
const abilists_contents = man.files.keys()[abilists_index].contents.?;
const metadata = try loadMetaData(gpa, abilists_contents);
@@ -628,7 +628,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc.
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
- try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items });
+ try o_directory.handle.writeFile(io, .{ .sub_path = asm_file_basename, .data = stubs_asm.items });
try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node);
}
@@ -640,7 +640,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
.lock = man.toOwnedLock(),
.dir_path = .{
.root_dir = comp.dirs.global_cache,
- .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
+ .sub_path = try gpa.dupe(u8, "o" ++ path.sep_str ++ digest),
},
});
}
@@ -661,7 +661,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) std.Io.C
const so_path: Path = .{
.root_dir = so_files.dir_path.root_dir,
.sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{
- so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover,
+ so_files.dir_path.sub_path, path.sep, lib.name, lib.sover,
}) catch return comp.setAllocFailure(),
};
task_buffer[task_buffer_i] = .{ .load_dso = so_path };
diff --git a/src/link.zig b/src/link.zig
index 6ac96504c7..13306b90a4 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -393,7 +393,7 @@ pub const File = struct {
comp: *Compilation,
emit: Path,
- file: ?fs.File,
+ file: ?Io.File,
/// When using the LLVM backend, the emitted object is written to a file with this name. This
/// object file then becomes a normal link input to LLD or a self-hosted linker.
///
@@ -620,16 +620,16 @@ pub const File = struct {
emit.sub_path, std.crypto.random.int(u32),
});
defer gpa.free(tmp_sub_path);
- try emit.root_dir.handle.copyFile(emit.sub_path, emit.root_dir.handle, tmp_sub_path, .{});
- try emit.root_dir.handle.rename(tmp_sub_path, emit.sub_path);
+ try emit.root_dir.handle.copyFile(emit.sub_path, emit.root_dir.handle, tmp_sub_path, io, .{});
+ try emit.root_dir.handle.rename(tmp_sub_path, emit.root_dir.handle, emit.sub_path, io);
switch (builtin.os.tag) {
.linux => std.posix.ptrace(std.os.linux.PTRACE.ATTACH, pid, 0, 0) catch |err| {
- log.warn("ptrace failure: {s}", .{@errorName(err)});
+ log.warn("ptrace failure: {t}", .{err});
},
.maccatalyst, .macos => {
const macho_file = base.cast(.macho).?;
macho_file.ptraceAttach(pid) catch |err| {
- log.warn("attaching failed with error: {s}", .{@errorName(err)});
+ log.warn("attaching failed with error: {t}", .{err});
};
},
.windows => unreachable,
@@ -637,7 +637,7 @@ pub const File = struct {
}
}
}
- base.file = try emit.root_dir.handle.openFile(emit.sub_path, .{ .mode = .read_write });
+ base.file = try emit.root_dir.handle.openFile(io, emit.sub_path, .{ .mode = .read_write });
},
.elf2, .coff2 => if (base.file == null) {
const mf = if (base.cast(.elf2)) |elf|
@@ -646,10 +646,10 @@ pub const File = struct {
&coff.mf
else
unreachable;
- mf.file = try base.emit.root_dir.handle.adaptToNewApi().openFile(io, base.emit.sub_path, .{
+ mf.file = try base.emit.root_dir.handle.openFile(io, base.emit.sub_path, .{
.mode = .read_write,
});
- base.file = .adaptFromNewApi(mf.file);
+ base.file = mf.file;
try mf.ensureTotalCapacity(@intCast(mf.nodes.items[0].location().resolve(mf)[1]));
},
.c, .spirv => dev.checkAny(&.{ .c_linker, .spirv_linker }),
@@ -687,7 +687,7 @@ pub const File = struct {
.lld => assert(base.file == null),
.elf => if (base.file) |f| {
dev.check(.elf_linker);
- f.close();
+ f.close(io);
base.file = null;
if (base.child_pid) |pid| {
@@ -701,7 +701,7 @@ pub const File = struct {
},
.macho, .wasm => if (base.file) |f| {
dev.checkAny(&.{ .coff_linker, .macho_linker, .plan9_linker, .wasm_linker });
- f.close();
+ f.close(io);
base.file = null;
if (base.child_pid) |pid| {
@@ -852,10 +852,12 @@ pub const File = struct {
}
}
- pub fn releaseLock(self: *File) void {
- if (self.lock) |*lock| {
- lock.release();
- self.lock = null;
+ pub fn releaseLock(base: *File) void {
+ const comp = base.comp;
+ const io = comp.io;
+ if (base.lock) |*lock| {
+ lock.release(io);
+ base.lock = null;
}
}
@@ -866,8 +868,9 @@ pub const File = struct {
}
pub fn destroy(base: *File) void {
+ const io = base.comp.io;
base.releaseLock();
- if (base.file) |f| f.close();
+ if (base.file) |f| f.close(io);
switch (base.tag) {
.plan9 => unreachable,
inline else => |tag| {
@@ -897,16 +900,16 @@ pub const File = struct {
}
}
- pub const FlushError = error{
+ pub const FlushError = Io.Cancelable || Allocator.Error || error{
/// Indicates an error will be present in `Compilation.link_diags`.
LinkFailure,
- OutOfMemory,
};
/// Commit pending changes and write headers. Takes into account final output mode.
/// `arena` has the lifetime of the call to `Compilation.update`.
pub fn flush(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
const comp = base.comp;
+ const io = comp.io;
if (comp.clang_preprocessor_mode == .yes or comp.clang_preprocessor_mode == .pch) {
dev.check(.clang_command);
const emit = base.emit;
@@ -917,12 +920,19 @@ pub const File = struct {
assert(comp.c_object_table.count() == 1);
const the_key = comp.c_object_table.keys()[0];
const cached_pp_file_path = the_key.status.success.object_path;
- cached_pp_file_path.root_dir.handle.copyFile(cached_pp_file_path.sub_path, emit.root_dir.handle, emit.sub_path, .{}) catch |err| {
+ Io.Dir.copyFile(
+ cached_pp_file_path.root_dir.handle,
+ cached_pp_file_path.sub_path,
+ emit.root_dir.handle,
+ emit.sub_path,
+ io,
+ .{},
+ ) catch |err| {
const diags = &base.comp.link_diags;
- return diags.fail("failed to copy '{f}' to '{f}': {s}", .{
+ return diags.fail("failed to copy '{f}' to '{f}': {t}", .{
std.fmt.alt(@as(Path, cached_pp_file_path), .formatEscapeChar),
std.fmt.alt(@as(Path, emit), .formatEscapeChar),
- @errorName(err),
+ err,
});
};
return;
@@ -1060,9 +1070,10 @@ pub const File = struct {
/// Opens a path as an object file and parses it into the linker.
fn openLoadObject(base: *File, path: Path) anyerror!void {
if (base.tag == .lld) return;
+ const io = base.comp.io;
const diags = &base.comp.link_diags;
- const input = try openObjectInput(diags, path);
- errdefer input.object.file.close();
+ const input = try openObjectInput(io, diags, path);
+ errdefer input.object.file.close(io);
try loadInput(base, input);
}
@@ -1070,21 +1081,22 @@ pub const File = struct {
/// If `query` is non-null, allows GNU ld scripts.
fn openLoadArchive(base: *File, path: Path, opt_query: ?UnresolvedInput.Query) anyerror!void {
if (base.tag == .lld) return;
+ const io = base.comp.io;
if (opt_query) |query| {
- const archive = try openObject(path, query.must_link, query.hidden);
- errdefer archive.file.close();
+ const archive = try openObject(io, path, query.must_link, query.hidden);
+ errdefer archive.file.close(io);
loadInput(base, .{ .archive = archive }) catch |err| switch (err) {
error.BadMagic, error.UnexpectedEndOfFile => {
if (base.tag != .elf and base.tag != .elf2) return err;
try loadGnuLdScript(base, path, query, archive.file);
- archive.file.close();
+ archive.file.close(io);
return;
},
else => return err,
};
} else {
- const archive = try openObject(path, false, false);
- errdefer archive.file.close();
+ const archive = try openObject(io, path, false, false);
+ errdefer archive.file.close(io);
try loadInput(base, .{ .archive = archive });
}
}
@@ -1093,29 +1105,30 @@ pub const File = struct {
/// Handles GNU ld scripts.
fn openLoadDso(base: *File, path: Path, query: UnresolvedInput.Query) anyerror!void {
if (base.tag == .lld) return;
- const dso = try openDso(path, query.needed, query.weak, query.reexport);
- errdefer dso.file.close();
+ const io = base.comp.io;
+ const dso = try openDso(io, path, query.needed, query.weak, query.reexport);
+ errdefer dso.file.close(io);
loadInput(base, .{ .dso = dso }) catch |err| switch (err) {
error.BadMagic, error.UnexpectedEndOfFile => {
if (base.tag != .elf and base.tag != .elf2) return err;
try loadGnuLdScript(base, path, query, dso.file);
- dso.file.close();
+ dso.file.close(io);
return;
},
else => return err,
};
}
- fn loadGnuLdScript(base: *File, path: Path, parent_query: UnresolvedInput.Query, file: fs.File) anyerror!void {
+ fn loadGnuLdScript(base: *File, path: Path, parent_query: UnresolvedInput.Query, file: Io.File) anyerror!void {
const comp = base.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
const gpa = comp.gpa;
- const io = comp.io;
- const stat = try file.stat();
+ const stat = try file.stat(io);
const size = std.math.cast(u32, stat.size) orelse return error.FileTooBig;
const buf = try gpa.alloc(u8, size);
defer gpa.free(buf);
- const n = try file.preadAll(buf, 0);
+ const n = try file.readPositionalAll(io, buf, 0);
if (buf.len != n) return error.UnexpectedEndOfFile;
var ld_script = try LdScript.parse(gpa, diags, path, buf);
defer ld_script.deinit(gpa);
@@ -1180,6 +1193,32 @@ pub const File = struct {
}
}
+ /// Legacy function for old linker code
+ pub fn copyRangeAll(base: *File, old_offset: u64, new_offset: u64, size: u64) !void {
+ const comp = base.comp;
+ const io = comp.io;
+ const file = base.file.?;
+ return copyRangeAll2(io, file, file, old_offset, new_offset, size);
+ }
+
+ /// Legacy function for old linker code
+ pub fn copyRangeAll2(io: Io, src_file: Io.File, dst_file: Io.File, old_offset: u64, new_offset: u64, size: u64) !void {
+ var write_buffer: [2048]u8 = undefined;
+ var file_reader = src_file.reader(io, &.{});
+ file_reader.pos = old_offset;
+ var file_writer = dst_file.writer(io, &write_buffer);
+ file_writer.pos = new_offset;
+ const size_u = std.math.cast(usize, size) orelse return error.Overflow;
+ const n = file_writer.interface.sendFileAll(&file_reader, .limited(size_u)) catch |err| switch (err) {
+ error.ReadFailed => return file_reader.err.?,
+ error.WriteFailed => return file_writer.err.?,
+ };
+ assert(n == size_u);
+ file_writer.interface.flush() catch |err| switch (err) {
+ error.WriteFailed => return file_writer.err.?,
+ };
+ }
+
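
The new `copyRangeAll2` pumps a byte range between two positioned files by pairing a positioned reader with a positioned writer through `sendFileAll`. A hedged usage sketch (the files, offsets, and size are hypothetical):

    fn copyHeader(io: Io, src: Io.File, dst: Io.File, size: u64) !void {
        // Copies `size` bytes starting at offset 0x100 in `src`
        // to offset 0 in `dst`, flushing before returning.
        try File.copyRangeAll2(io, src, dst, 0x100, 0, size);
    }
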
pub const Tag = enum {
coff2,
elf,
@@ -1231,22 +1270,26 @@ pub const File = struct {
ty: InternPool.Index,
};
- pub fn determineMode(
+ pub fn determinePermissions(
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
- ) fs.File.Mode {
+ ) Io.File.Permissions {
// On common systems with a 0o022 umask, 0o777 will still result in a file created
// with 0o755 permissions, but it works appropriately if the system is configured
// more leniently. As another data point, C's fopen seems to open files with the
// 666 mode.
- const executable_mode = if (builtin.target.os.tag == .windows) 0 else 0o777;
+ const executable_mode: Io.File.Permissions = if (builtin.target.os.tag == .windows)
+ .default_file
+ else
+ .fromMode(0o777);
+
switch (output_mode) {
.Lib => return switch (link_mode) {
.dynamic => executable_mode,
- .static => fs.File.default_mode,
+ .static => .default_file,
},
.Exe => return executable_mode,
- .Obj => return fs.File.default_mode,
+ .Obj => return .default_file,
}
}
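
Concretely, the umask arithmetic the comment describes: with the common 0o022 umask, requesting 0o777 yields 0o755 on disk because the group/other write bits are masked off. A one-line comptime check of that claim:

    comptime {
        std.debug.assert(0o777 & ~@as(u9, 0o022) == 0o755);
    }
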
@@ -1656,19 +1699,19 @@ pub const Input = union(enum) {
pub const Object = struct {
path: Path,
- file: fs.File,
+ file: Io.File,
must_link: bool,
hidden: bool,
};
pub const Res = struct {
path: Path,
- file: fs.File,
+ file: Io.File,
};
pub const Dso = struct {
path: Path,
- file: fs.File,
+ file: Io.File,
needed: bool,
weak: bool,
reexport: bool,
@@ -1690,7 +1733,7 @@ pub const Input = union(enum) {
}
/// Returns `null` in the case of `dso_exact`.
- pub fn pathAndFile(input: Input) ?struct { Path, fs.File } {
+ pub fn pathAndFile(input: Input) ?struct { Path, Io.File } {
return switch (input) {
.object, .archive => |obj| .{ obj.path, obj.file },
inline .res, .dso => |x| .{ x.path, x.file },
@@ -1735,6 +1778,7 @@ pub fn hashInputs(man: *Cache.Manifest, link_inputs: []const Input) !void {
pub fn resolveInputs(
gpa: Allocator,
arena: Allocator,
+ io: Io,
target: *const std.Target,
/// This function mutates this array but does not take ownership.
/// Allocated with `gpa`.
@@ -1784,6 +1828,7 @@ pub fn resolveInputs(
for (lib_directories) |lib_directory| switch (try resolveLibInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&checked_paths,
@@ -1810,6 +1855,7 @@ pub fn resolveInputs(
for (lib_directories) |lib_directory| switch (try resolveLibInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&checked_paths,
@@ -1837,6 +1883,7 @@ pub fn resolveInputs(
switch (try resolveLibInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&checked_paths,
@@ -1855,6 +1902,7 @@ pub fn resolveInputs(
switch (try resolveLibInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&checked_paths,
@@ -1886,6 +1934,7 @@ pub fn resolveInputs(
if (try resolvePathInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&ld_script_bytes,
@@ -1903,6 +1952,7 @@ pub fn resolveInputs(
switch ((try resolvePathInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&ld_script_bytes,
@@ -1930,6 +1980,7 @@ pub fn resolveInputs(
if (try resolvePathInput(
gpa,
arena,
+ io,
unresolved_inputs,
resolved_inputs,
&ld_script_bytes,
@@ -1969,6 +2020,7 @@ const fatal = std.process.fatal;
fn resolveLibInput(
gpa: Allocator,
arena: Allocator,
+ io: Io,
/// Allocated via `gpa`.
unresolved_inputs: *std.ArrayList(UnresolvedInput),
/// Allocated via `gpa`.
@@ -1994,11 +2046,11 @@ fn resolveLibInput(
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.tbd", .{lib_name}),
};
try checked_paths.print(gpa, "\n {f}", .{test_path});
- var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :tbd,
else => |e| fatal("unable to search for tbd library '{f}': {s}", .{ test_path, @errorName(e) }),
};
- errdefer file.close();
+ errdefer file.close(io);
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
}
@@ -2013,7 +2065,7 @@ fn resolveLibInput(
}),
};
try checked_paths.print(gpa, "\n {f}", .{test_path});
- switch (try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, .{
+ switch (try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, .{
.path = test_path,
.query = name_query.query,
}, link_mode, color)) {
@@ -2030,13 +2082,13 @@ fn resolveLibInput(
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.so", .{lib_name}),
};
try checked_paths.print(gpa, "\n {f}", .{test_path});
- var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :so,
else => |e| fatal("unable to search for so library '{f}': {s}", .{
test_path, @errorName(e),
}),
};
- errdefer file.close();
+ errdefer file.close(io);
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
}
@@ -2048,11 +2100,11 @@ fn resolveLibInput(
.sub_path = try std.fmt.allocPrint(arena, "lib{s}.a", .{lib_name}),
};
try checked_paths.print(gpa, "\n {f}", .{test_path});
- var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :mingw,
else => |e| fatal("unable to search for static library '{f}': {s}", .{ test_path, @errorName(e) }),
};
- errdefer file.close();
+ errdefer file.close(io);
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, name_query.query);
}
@@ -2062,7 +2114,7 @@ fn resolveLibInput(
fn finishResolveLibInput(
resolved_inputs: *std.ArrayList(Input),
path: Path,
- file: std.fs.File,
+ file: Io.File,
link_mode: std.builtin.LinkMode,
query: UnresolvedInput.Query,
) ResolveLibInputResult {
@@ -2087,6 +2139,7 @@ fn finishResolveLibInput(
fn resolvePathInput(
gpa: Allocator,
arena: Allocator,
+ io: Io,
/// Allocated with `gpa`.
unresolved_inputs: *std.ArrayList(UnresolvedInput),
/// Allocated with `gpa`.
@@ -2098,12 +2151,12 @@ fn resolvePathInput(
color: std.zig.Color,
) Allocator.Error!?ResolveLibInputResult {
switch (Compilation.classifyFileExt(pq.path.sub_path)) {
- .static_library => return try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .static, color),
- .shared_library => return try resolvePathInputLib(gpa, arena, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .dynamic, color),
+ .static_library => return try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .static, color),
+ .shared_library => return try resolvePathInputLib(gpa, arena, io, unresolved_inputs, resolved_inputs, ld_script_bytes, target, pq, .dynamic, color),
.object => {
- var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err|
+ var file = pq.path.root_dir.handle.openFile(io, pq.path.sub_path, .{}) catch |err|
fatal("failed to open object {f}: {s}", .{ pq.path, @errorName(err) });
- errdefer file.close();
+ errdefer file.close(io);
try resolved_inputs.append(gpa, .{ .object = .{
.path = pq.path,
.file = file,
@@ -2113,9 +2166,9 @@ fn resolvePathInput(
return null;
},
.res => {
- var file = pq.path.root_dir.handle.openFile(pq.path.sub_path, .{}) catch |err|
+ var file = pq.path.root_dir.handle.openFile(io, pq.path.sub_path, .{}) catch |err|
fatal("failed to open windows resource {f}: {s}", .{ pq.path, @errorName(err) });
- errdefer file.close();
+ errdefer file.close(io);
try resolved_inputs.append(gpa, .{ .res = .{
.path = pq.path,
.file = file,
@@ -2129,6 +2182,7 @@ fn resolvePathInput(
fn resolvePathInputLib(
gpa: Allocator,
arena: Allocator,
+ io: Io,
/// Allocated with `gpa`.
unresolved_inputs: *std.ArrayList(UnresolvedInput),
/// Allocated with `gpa`.
@@ -2149,30 +2203,29 @@ fn resolvePathInputLib(
.static_library, .shared_library => true,
else => false,
}) {
- var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => return .no_match,
- else => |e| fatal("unable to search for {s} library '{f}': {s}", .{
- @tagName(link_mode), std.fmt.alt(test_path, .formatEscapeChar), @errorName(e),
+ else => |e| fatal("unable to search for {t} library '{f}': {t}", .{
+ link_mode, std.fmt.alt(test_path, .formatEscapeChar), e,
}),
};
- errdefer file.close();
+ errdefer file.close(io);
try ld_script_bytes.resize(gpa, @max(std.elf.MAGIC.len, std.elf.ARMAG.len));
- const n = file.preadAll(ld_script_bytes.items, 0) catch |err| fatal("failed to read '{f}': {s}", .{
- std.fmt.alt(test_path, .formatEscapeChar), @errorName(err),
- });
+ const n = file.readPositionalAll(io, ld_script_bytes.items, 0) catch |err|
+ fatal("failed to read '{f}': {t}", .{ std.fmt.alt(test_path, .formatEscapeChar), err });
const buf = ld_script_bytes.items[0..n];
if (mem.startsWith(u8, buf, std.elf.MAGIC) or mem.startsWith(u8, buf, std.elf.ARMAG)) {
// Appears to be an ELF or archive file.
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, pq.query);
}
- const stat = file.stat() catch |err|
- fatal("failed to stat {f}: {s}", .{ test_path, @errorName(err) });
+ const stat = file.stat(io) catch |err|
+ fatal("failed to stat {f}: {t}", .{ test_path, err });
const size = std.math.cast(u32, stat.size) orelse
fatal("{f}: linker script too big", .{test_path});
try ld_script_bytes.resize(gpa, size);
const buf2 = ld_script_bytes.items[n..];
- const n2 = file.preadAll(buf2, n) catch |err|
- fatal("failed to read {f}: {s}", .{ test_path, @errorName(err) });
+ const n2 = file.readPositionalAll(io, buf2, n) catch |err|
+ fatal("failed to read {f}: {t}", .{ test_path, err });
if (n2 != buf2.len) fatal("failed to read {f}: unexpected end of file", .{test_path});
// This `Io` is only used for a mutex, and we know we aren't doing anything async/concurrent.
@@ -2192,13 +2245,12 @@ fn resolvePathInputLib(
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, color);
-
+ error_bundle.renderToStderr(io, .{}, color) catch {};
std.process.exit(1);
}
var ld_script = ld_script_result catch |err|
- fatal("{f}: failed to parse linker script: {s}", .{ test_path, @errorName(err) });
+ fatal("{f}: failed to parse linker script: {t}", .{ test_path, err });
defer ld_script.deinit(gpa);
try unresolved_inputs.ensureUnusedCapacity(gpa, ld_script.args.len);
@@ -2223,23 +2275,23 @@ fn resolvePathInputLib(
} });
}
}
- file.close();
+ file.close(io);
return .ok;
}
- var file = test_path.root_dir.handle.openFile(test_path.sub_path, .{}) catch |err| switch (err) {
+ var file = test_path.root_dir.handle.openFile(io, test_path.sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => return .no_match,
else => |e| fatal("unable to search for {s} library {f}: {s}", .{
@tagName(link_mode), test_path, @errorName(e),
}),
};
- errdefer file.close();
+ errdefer file.close(io);
return finishResolveLibInput(resolved_inputs, test_path, file, link_mode, pq.query);
}
-pub fn openObject(path: Path, must_link: bool, hidden: bool) !Input.Object {
- var file = try path.root_dir.handle.openFile(path.sub_path, .{});
- errdefer file.close();
+pub fn openObject(io: Io, path: Path, must_link: bool, hidden: bool) !Input.Object {
+ var file = try path.root_dir.handle.openFile(io, path.sub_path, .{});
+ errdefer file.close(io);
return .{
.path = path,
.file = file,
@@ -2248,9 +2300,9 @@ pub fn openObject(path: Path, must_link: bool, hidden: bool) !Input.Object {
};
}
-pub fn openDso(path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso {
- var file = try path.root_dir.handle.openFile(path.sub_path, .{});
- errdefer file.close();
+pub fn openDso(io: Io, path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso {
+ var file = try path.root_dir.handle.openFile(io, path.sub_path, .{});
+ errdefer file.close(io);
return .{
.path = path,
.file = file,
@@ -2260,20 +2312,20 @@ pub fn openDso(path: Path, needed: bool, weak: bool, reexport: bool) !Input.Dso
};
}
-pub fn openObjectInput(diags: *Diags, path: Path) error{LinkFailure}!Input {
- return .{ .object = openObject(path, false, false) catch |err| {
+pub fn openObjectInput(io: Io, diags: *Diags, path: Path) error{LinkFailure}!Input {
+ return .{ .object = openObject(io, path, false, false) catch |err| {
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
} };
}
-pub fn openArchiveInput(diags: *Diags, path: Path, must_link: bool, hidden: bool) error{LinkFailure}!Input {
- return .{ .archive = openObject(path, must_link, hidden) catch |err| {
+pub fn openArchiveInput(io: Io, diags: *Diags, path: Path, must_link: bool, hidden: bool) error{LinkFailure}!Input {
+ return .{ .archive = openObject(io, path, must_link, hidden) catch |err| {
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
} };
}
-pub fn openDsoInput(diags: *Diags, path: Path, needed: bool, weak: bool, reexport: bool) error{LinkFailure}!Input {
- return .{ .dso = openDso(path, needed, weak, reexport) catch |err| {
+pub fn openDsoInput(io: Io, diags: *Diags, path: Path, needed: bool, weak: bool, reexport: bool) error{LinkFailure}!Input {
+ return .{ .dso = openDso(io, path, needed, weak, reexport) catch |err| {
return diags.failParse(path, "failed to open {f}: {s}", .{ path, @errorName(err) });
} };
}
diff --git a/src/link/C.zig b/src/link/C.zig
index ce48e85851..93e771ebfc 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -124,6 +124,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*C {
+ const io = comp.io;
const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .c);
const optimize_mode = comp.root_mod.optimize_mode;
@@ -135,11 +136,11 @@ pub fn createEmpty(
assert(!use_lld);
assert(!use_llvm);
- const file = try emit.root_dir.handle.createFile(emit.sub_path, .{
+ const file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{
// Truncation is done on `flush`.
.truncate = false,
});
- errdefer file.close();
+ errdefer file.close(io);
const c_file = try arena.create(C);
@@ -370,6 +371,7 @@ pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.P
const comp = self.base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
+ const io = comp.io;
const zcu = self.base.comp.zcu.?;
const ip = &zcu.intern_pool;
const pt: Zcu.PerThread = .activate(zcu, tid);
@@ -507,8 +509,8 @@ pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.P
}, self.getString(av_block.code));
const file = self.base.file.?;
- file.setEndPos(f.file_size) catch |err| return diags.fail("failed to allocate file: {s}", .{@errorName(err)});
- var fw = file.writer(&.{});
+ file.setLength(io, f.file_size) catch |err| return diags.fail("failed to allocate file: {t}", .{err});
+ var fw = file.writer(io, &.{});
var w = &fw.interface;
w.writeVecAll(f.all_buffers.items) catch |err| switch (err) {
error.WriteFailed => return diags.fail("failed to write to '{f}': {s}", .{
@@ -763,6 +765,7 @@ pub fn flushEmitH(zcu: *Zcu) !void {
if (true) return; // emit-h is regressed
const emit_h = zcu.emit_h orelse return;
+ const io = zcu.comp.io;
// We collect a list of buffers to write, and write them all at once with pwritev 😎
const num_buffers = emit_h.decl_table.count() + 1;
@@ -790,14 +793,14 @@ pub fn flushEmitH(zcu: *Zcu) !void {
}
const directory = emit_h.loc.directory orelse zcu.comp.local_cache_directory;
- const file = try directory.handle.createFile(emit_h.loc.basename, .{
+ const file = try directory.handle.createFile(io, emit_h.loc.basename, .{
// We set the end position explicitly below; by not truncating the file, we possibly
// make it easier on the file system by doing 1 reallocation instead of two.
.truncate = false,
});
- defer file.close();
+ defer file.close(io);
- try file.setEndPos(file_size);
+ try file.setLength(io, file_size);
try file.pwritevAll(all_buffers.items, 0);
}
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index f33e0ccdea..03b757f5b4 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1,3 +1,23 @@
+const Coff = @This();
+
+const builtin = @import("builtin");
+const native_endian = builtin.cpu.arch.endian();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const log = std.log.scoped(.link);
+
+const codegen = @import("../codegen.zig");
+const Compilation = @import("../Compilation.zig");
+const InternPool = @import("../InternPool.zig");
+const link = @import("../link.zig");
+const MappedFile = @import("MappedFile.zig");
+const target_util = @import("../target.zig");
+const Type = @import("../Type.zig");
+const Value = @import("../Value.zig");
+const Zcu = @import("../Zcu.zig");
+
base: link.File,
mf: MappedFile,
nodes: std.MultiArrayList(Node),
@@ -631,12 +651,14 @@ fn create(
else => return error.UnsupportedCOFFArchitecture,
};
+ const io = comp.io;
+
const coff = try arena.create(Coff);
- const file = try path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{
+ const file = try path.root_dir.handle.createFile(io, path.sub_path, .{
.read = true,
- .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode),
+ .permissions = link.File.determinePermissions(comp.config.output_mode, comp.config.link_mode),
});
- errdefer file.close(comp.io);
+ errdefer file.close(io);
coff.* = .{
.base = .{
.tag = .coff2,
@@ -644,14 +666,14 @@ fn create(
.comp = comp,
.emit = path,
- .file = .adaptFromNewApi(file),
+ .file = file,
.gc_sections = false,
.print_gc_sections = false,
.build_id = .none,
.allow_shlib_undefined = false,
.stack_size = 0,
},
- .mf = try .init(file, comp.gpa),
+ .mf = try .init(file, comp.gpa, io),
.nodes = .empty,
.import_table = .{
.ni = .none,
@@ -1727,22 +1749,20 @@ pub fn flush(
const comp = coff.base.comp;
if (comp.compiler_rt_dyn_lib) |crt_file| {
const gpa = comp.gpa;
+ const io = comp.io;
const compiler_rt_sub_path = try std.fs.path.join(gpa, &.{
std.fs.path.dirname(coff.base.emit.sub_path) orelse "",
std.fs.path.basename(crt_file.full_object_path.sub_path),
});
defer gpa.free(compiler_rt_sub_path);
- crt_file.full_object_path.root_dir.handle.copyFile(
+ std.Io.Dir.copyFile(
+ crt_file.full_object_path.root_dir.handle,
crt_file.full_object_path.sub_path,
coff.base.emit.root_dir.handle,
compiler_rt_sub_path,
+ io,
.{},
- ) catch |err| switch (err) {
- else => |e| return comp.link_diags.fail("Copy '{s}' failed: {s}", .{
- compiler_rt_sub_path,
- @errorName(e),
- }),
- };
+ ) catch |err| return comp.link_diags.fail("copy '{s}' failed: {t}", .{ compiler_rt_sub_path, err });
}
}
@@ -2358,10 +2378,16 @@ pub fn deleteExport(coff: *Coff, exported: Zcu.Exported, name: InternPool.NullTe
_ = name;
}
-pub fn dump(coff: *Coff, tid: Zcu.PerThread.Id) void {
- const w, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- coff.printNode(tid, w, .root, 0) catch {};
+pub fn dump(coff: *Coff, tid: Zcu.PerThread.Id) Io.Cancelable!void {
+ const comp = coff.base.comp;
+ const io = comp.io;
+ var buffer: [512]u8 = undefined;
+ const stderr = try io.lockStderr(&buffer, null);
+ defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ coff.printNode(tid, w, .root, 0) catch |err| switch (err) {
+ error.WriteFailed => return stderr.err.?,
+ };
}
pub fn printNode(
@@ -2459,19 +2485,3 @@ pub fn printNode(
}
}
}
-
-const assert = std.debug.assert;
-const builtin = @import("builtin");
-const codegen = @import("../codegen.zig");
-const Compilation = @import("../Compilation.zig");
-const Coff = @This();
-const InternPool = @import("../InternPool.zig");
-const link = @import("../link.zig");
-const log = std.log.scoped(.link);
-const MappedFile = @import("MappedFile.zig");
-const native_endian = builtin.cpu.arch.endian();
-const std = @import("std");
-const target_util = @import("../target.zig");
-const Type = @import("../Type.zig");
-const Value = @import("../Value.zig");
-const Zcu = @import("../Zcu.zig");
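The rewritten `dump` also shows the new stderr discipline: instead of `std.debug.lockStderrWriter`, the `Io` instance hands out a locked, buffered stderr writer, and a failed write is surfaced through `stderr.err`. A condensed sketch of that locking pattern, assuming `io: Io` (the `printEverything` callee is hypothetical):

    var buffer: [512]u8 = undefined;
    const stderr = try io.lockStderr(&buffer, null);
    defer io.unlockStderr();
    const w = &stderr.file_writer.interface;
    printEverything(w) catch |err| switch (err) {
        // Recover the underlying I/O error recorded by the locked writer.
        error.WriteFailed => return stderr.err.?,
    };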
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 95f4ca8bbd..0fda09e385 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -1,3 +1,24 @@
+const Dwarf = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const Allocator = std.mem.Allocator;
+const DW = std.dwarf;
+const Zir = std.zig.Zir;
+const assert = std.debug.assert;
+const log = std.log.scoped(.dwarf);
+const Writer = std.Io.Writer;
+
+const InternPool = @import("../InternPool.zig");
+const Module = @import("../Package.zig").Module;
+const Type = @import("../Type.zig");
+const Value = @import("../Value.zig");
+const Zcu = @import("../Zcu.zig");
+const codegen = @import("../codegen.zig");
+const dev = @import("../dev.zig");
+const link = @import("../link.zig");
+const target_info = @import("../target.zig");
+
gpa: Allocator,
bin_file: *link.File,
format: DW.Format,
@@ -27,18 +48,18 @@ pub const UpdateError = error{
EndOfStream,
Underflow,
UnexpectedEndOfFile,
+ NonResizable,
} ||
codegen.GenerateSymbolError ||
- std.fs.File.OpenError ||
- std.fs.File.SetEndPosError ||
- std.fs.File.CopyRangeError ||
- std.fs.File.PReadError ||
- std.fs.File.PWriteError;
+ Io.File.OpenError ||
+ Io.File.LengthError ||
+ Io.File.ReadPositionalError ||
+ Io.File.WritePositionalError;
pub const FlushError = UpdateError;
pub const RelocError =
- std.fs.File.PWriteError;
+    Io.File.WritePositionalError;
pub const AddressSize = enum(u8) {
@"32" = 4,
@@ -135,11 +156,14 @@ const DebugInfo = struct {
fn declAbbrevCode(debug_info: *DebugInfo, unit: Unit.Index, entry: Entry.Index) !AbbrevCode {
const dwarf: *Dwarf = @fieldParentPtr("debug_info", debug_info);
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
const unit_ptr = debug_info.section.getUnit(unit);
const entry_ptr = unit_ptr.getEntry(entry);
if (entry_ptr.len < AbbrevCode.decl_bytes) return .null;
var abbrev_code_buf: [AbbrevCode.decl_bytes]u8 = undefined;
- if (try dwarf.getFile().?.preadAll(
+ if (try dwarf.getFile().?.readPositionalAll(
+ io,
&abbrev_code_buf,
debug_info.section.off(dwarf) + unit_ptr.off + unit_ptr.header_len + entry_ptr.off,
) != abbrev_code_buf.len) return error.InputOutput;
@@ -619,13 +643,10 @@ const Unit = struct {
fn move(unit: *Unit, sec: *Section, dwarf: *Dwarf, new_off: u32) UpdateError!void {
if (unit.off == new_off) return;
- const n = try dwarf.getFile().?.copyRangeAll(
- sec.off(dwarf) + unit.off,
- dwarf.getFile().?,
- sec.off(dwarf) + new_off,
- unit.len,
- );
- if (n != unit.len) return error.InputOutput;
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
+ const file = dwarf.getFile().?;
+ try link.File.copyRangeAll2(io, file, file, sec.off(dwarf) + unit.off, sec.off(dwarf) + new_off, unit.len);
unit.off = new_off;
}
@@ -655,10 +676,14 @@ const Unit = struct {
fn replaceHeader(unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
assert(contents.len == unit.header_len);
- try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off);
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
+ try dwarf.getFile().?.writePositionalAll(io, contents, sec.off(dwarf) + unit.off);
}
fn writeTrailer(unit: *Unit, sec: *Section, dwarf: *Dwarf) UpdateError!void {
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
const start = unit.off + unit.header_len + if (unit.last.unwrap()) |last_entry| end: {
const last_entry_ptr = unit.getEntry(last_entry);
break :end last_entry_ptr.off + last_entry_ptr.len;
@@ -688,7 +713,7 @@ const Unit = struct {
assert(fw.end == extended_op_bytes + op_len_bytes);
fw.writeByte(DW.LNE.padding) catch unreachable;
assert(fw.end >= unit.trailer_len and fw.end <= len);
- return dwarf.getFile().?.pwriteAll(fw.buffered(), sec.off(dwarf) + start);
+ return dwarf.getFile().?.writePositionalAll(io, fw.buffered(), sec.off(dwarf) + start);
}
var trailer_aw: Writer.Allocating = try .initCapacity(dwarf.gpa, len);
defer trailer_aw.deinit();
@@ -748,7 +773,7 @@ const Unit = struct {
assert(tw.end == unit.trailer_len);
tw.splatByteAll(fill_byte, len - unit.trailer_len) catch unreachable;
assert(tw.end == len);
- try dwarf.getFile().?.pwriteAll(trailer_aw.written(), sec.off(dwarf) + start);
+ try dwarf.getFile().?.writePositionalAll(io, trailer_aw.written(), sec.off(dwarf) + start);
}
fn resolveRelocs(unit: *Unit, sec: *Section, dwarf: *Dwarf) RelocError!void {
@@ -834,6 +859,8 @@ const Entry = struct {
dwarf: *Dwarf,
) (UpdateError || Writer.Error)!void {
assert(entry.len > 0);
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
const start = entry.off + entry.len;
if (sec == &dwarf.debug_frame.section) {
const len = if (entry.next.unwrap()) |next_entry|
@@ -843,11 +870,11 @@ const Entry = struct {
var unit_len_buf: [8]u8 = undefined;
const unit_len_bytes = unit_len_buf[0..dwarf.sectionOffsetBytes()];
dwarf.writeInt(unit_len_bytes, len - dwarf.unitLengthBytes());
- try dwarf.getFile().?.pwriteAll(unit_len_bytes, sec.off(dwarf) + unit.off + unit.header_len + entry.off);
+ try dwarf.getFile().?.writePositionalAll(io, unit_len_bytes, sec.off(dwarf) + unit.off + unit.header_len + entry.off);
const buf = try dwarf.gpa.alloc(u8, len - entry.len);
defer dwarf.gpa.free(buf);
@memset(buf, DW.CFA.nop);
- try dwarf.getFile().?.pwriteAll(buf, sec.off(dwarf) + unit.off + unit.header_len + start);
+ try dwarf.getFile().?.writePositionalAll(io, buf, sec.off(dwarf) + unit.off + unit.header_len + start);
return;
}
const len = unit.getEntry(entry.next.unwrap() orelse return).off - start;
@@ -906,7 +933,7 @@ const Entry = struct {
},
} else assert(!sec.pad_entries_to_ideal and len == 0);
assert(fw.end <= len);
- try dwarf.getFile().?.pwriteAll(fw.buffered(), sec.off(dwarf) + unit.off + unit.header_len + start);
+ try dwarf.getFile().?.writePositionalAll(io, fw.buffered(), sec.off(dwarf) + unit.off + unit.header_len + start);
}
fn resize(
@@ -949,11 +976,13 @@ const Entry = struct {
fn replace(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
assert(contents.len == entry_ptr.len);
- try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off + unit.header_len + entry_ptr.off);
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
+ try dwarf.getFile().?.writePositionalAll(io, contents, sec.off(dwarf) + unit.off + unit.header_len + entry_ptr.off);
if (false) {
const buf = try dwarf.gpa.alloc(u8, sec.len);
defer dwarf.gpa.free(buf);
- _ = try dwarf.getFile().?.preadAll(buf, sec.off(dwarf));
+ _ = try dwarf.getFile().?.readPositionalAll(io, buf, sec.off(dwarf));
log.info("Section{{ .first = {}, .last = {}, .off = 0x{x}, .len = 0x{x} }}", .{
@intFromEnum(sec.first),
@intFromEnum(sec.last),
@@ -4682,6 +4711,8 @@ fn updateContainerTypeWriterError(
}
pub fn updateLineNumber(dwarf: *Dwarf, zcu: *Zcu, zir_index: InternPool.TrackedInst.Index) UpdateError!void {
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const inst_info = zir_index.resolveFull(ip).?;
@@ -4701,7 +4732,7 @@ pub fn updateLineNumber(dwarf: *Dwarf, zcu: *Zcu, zir_index: InternPool.TrackedI
const unit = dwarf.debug_info.section.getUnit(dwarf.getUnitIfExists(file.mod.?) orelse return);
const entry = unit.getEntry(dwarf.decls.get(zir_index) orelse return);
- try dwarf.getFile().?.pwriteAll(&line_buf, dwarf.debug_info.section.off(dwarf) + unit.off + unit.header_len + entry.off + DebugInfo.declEntryLineOff(dwarf));
+ try dwarf.getFile().?.writePositionalAll(io, &line_buf, dwarf.debug_info.section.off(dwarf) + unit.off + unit.header_len + entry.off + DebugInfo.declEntryLineOff(dwarf));
}
pub fn freeNav(dwarf: *Dwarf, nav_index: InternPool.Nav.Index) void {
@@ -4738,6 +4769,8 @@ pub fn flush(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
fn flushWriterError(dwarf: *Dwarf, pt: Zcu.PerThread) (FlushError || Writer.Error)!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
{
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, .anyerror_type);
@@ -4957,7 +4990,7 @@ fn flushWriterError(dwarf: *Dwarf, pt: Zcu.PerThread) (FlushError || Writer.Erro
if (dwarf.debug_str.section.dirty) {
const contents = dwarf.debug_str.contents.items;
try dwarf.debug_str.section.resize(dwarf, contents.len);
- try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_str.section.off(dwarf));
+ try dwarf.getFile().?.writePositionalAll(io, contents, dwarf.debug_str.section.off(dwarf));
dwarf.debug_str.section.dirty = false;
}
if (dwarf.debug_line.section.dirty) {
@@ -5069,7 +5102,7 @@ fn flushWriterError(dwarf: *Dwarf, pt: Zcu.PerThread) (FlushError || Writer.Erro
if (dwarf.debug_line_str.section.dirty) {
const contents = dwarf.debug_line_str.contents.items;
try dwarf.debug_line_str.section.resize(dwarf, contents.len);
- try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_line_str.section.off(dwarf));
+ try dwarf.getFile().?.writePositionalAll(io, contents, dwarf.debug_line_str.section.off(dwarf));
dwarf.debug_line_str.section.dirty = false;
}
if (dwarf.debug_loclists.section.dirty) {
@@ -6350,7 +6383,7 @@ const AbbrevCode = enum {
});
};
-fn getFile(dwarf: *Dwarf) ?std.fs.File {
+fn getFile(dwarf: *Dwarf) ?Io.File {
if (dwarf.bin_file.cast(.macho)) |macho_file| if (macho_file.d_sym) |*d_sym| return d_sym.file;
return dwarf.bin_file.file;
}
@@ -6391,9 +6424,11 @@ fn writeInt(dwarf: *Dwarf, buf: []u8, int: u64) void {
}
fn resolveReloc(dwarf: *Dwarf, source: u64, target: u64, size: u32) RelocError!void {
+ const comp = dwarf.bin_file.comp;
+ const io = comp.io;
var buf: [8]u8 = undefined;
dwarf.writeInt(buf[0..size], target);
- try dwarf.getFile().?.pwriteAll(buf[0..size], source);
+ try dwarf.getFile().?.writePositionalAll(io, buf[0..size], source);
}
fn unitLengthBytes(dwarf: *Dwarf) u32 {
@@ -6429,21 +6464,3 @@ const force_incremental = false;
inline fn incremental(dwarf: Dwarf) bool {
return force_incremental or dwarf.bin_file.comp.config.incremental;
}
-
-const Allocator = std.mem.Allocator;
-const DW = std.dwarf;
-const Dwarf = @This();
-const InternPool = @import("../InternPool.zig");
-const Module = @import("../Package.zig").Module;
-const Type = @import("../Type.zig");
-const Value = @import("../Value.zig");
-const Zcu = @import("../Zcu.zig");
-const Zir = std.zig.Zir;
-const assert = std.debug.assert;
-const codegen = @import("../codegen.zig");
-const dev = @import("../dev.zig");
-const link = @import("../link.zig");
-const log = std.log.scoped(.dwarf);
-const std = @import("std");
-const target_info = @import("../target.zig");
-const Writer = std.Io.Writer;
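Every Dwarf hunk reduces to the same substitution: `pwriteAll(bytes, off)` becomes `writePositionalAll(io, bytes, off)`, `preadAll` becomes `readPositionalAll(io, ...)`, and each call site first fetches `io` from `dwarf.bin_file.comp`. The reloc path shows the whole shape in a few lines (a sketch; `source`, `target`, and `size` as in `resolveReloc` above):

    const comp = dwarf.bin_file.comp;
    const io = comp.io;
    var buf: [8]u8 = undefined;
    dwarf.writeInt(buf[0..size], target); // format-aware integer encode
    try dwarf.getFile().?.writePositionalAll(io, buf[0..size], source);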
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 69acbe034b..85f37f88ce 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -313,12 +313,14 @@ pub fn createEmpty(
const is_obj = output_mode == .Obj;
const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .static);
+ const io = comp.io;
+
// What path should this ELF linker code output to?
const sub_path = emit.sub_path;
- self.base.file = try emit.root_dir.handle.createFile(sub_path, .{
+ self.base.file = try emit.root_dir.handle.createFile(io, sub_path, .{
.truncate = true,
.read = true,
- .mode = link.File.determineMode(output_mode, link_mode),
+ .permissions = link.File.determinePermissions(output_mode, link_mode),
});
const gpa = comp.gpa;
@@ -406,10 +408,12 @@ pub fn open(
}
pub fn deinit(self: *Elf) void {
- const gpa = self.base.comp.gpa;
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
for (self.file_handles.items) |fh| {
- fh.close();
+ fh.close(io);
}
self.file_handles.deinit(gpa);
@@ -483,6 +487,8 @@ pub fn getUavVAddr(self: *Elf, uav: InternPool.Index, reloc_info: link.File.Relo
/// Returns end pos of collision, if any.
fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 {
+ const comp = self.base.comp;
+ const io = comp.io;
const small_ptr = self.ptr_width == .p32;
const ehdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Ehdr) else @sizeOf(elf.Elf64_Ehdr);
if (start < ehdr_size)
@@ -522,7 +528,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 {
}
}
- if (at_end) try self.base.file.?.setEndPos(end);
+ if (at_end) try self.base.file.?.setLength(io, end);
return null;
}
@@ -552,6 +558,8 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 {
}
pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: u64) !void {
+ const comp = self.base.comp;
+ const io = comp.io;
const shdr = &self.sections.items(.shdr)[shdr_index];
if (shdr.sh_type != elf.SHT_NOBITS) {
@@ -574,18 +582,11 @@ pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment:
new_offset,
});
- const amt = try self.base.file.?.copyRangeAll(
- shdr.sh_offset,
- self.base.file.?,
- new_offset,
- existing_size,
- );
- // TODO figure out what to about this error condition - how to communicate it up.
- if (amt != existing_size) return error.InputOutput;
+ try self.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size);
shdr.sh_offset = new_offset;
} else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
+ try self.base.file.?.setLength(io, shdr.sh_offset + needed_size);
}
}
@@ -737,8 +738,8 @@ pub fn loadInput(self: *Elf, input: link.Input) !void {
.res => unreachable,
.dso_exact => @panic("TODO"),
.object => |obj| try parseObject(self, obj),
- .archive => |obj| try parseArchive(gpa, diags, &self.file_handles, &self.files, target, debug_fmt_strip, default_sym_version, &self.objects, obj, is_static_lib),
- .dso => |dso| try parseDso(gpa, diags, dso, &self.shared_objects, &self.files, target),
+ .archive => |obj| try parseArchive(gpa, io, diags, &self.file_handles, &self.files, target, debug_fmt_strip, default_sym_version, &self.objects, obj, is_static_lib),
+ .dso => |dso| try parseDso(gpa, io, diags, dso, &self.shared_objects, &self.files, target),
}
}
@@ -747,9 +748,10 @@ pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std
defer tracy.end();
const comp = self.base.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
- if (comp.verbose_link) Compilation.dump_argv(self.dump_argv_list.items);
+ if (comp.verbose_link) try Compilation.dumpArgv(io, self.dump_argv_list.items);
const sub_prog_node = prog_node.start("ELF Flush", 0);
defer sub_prog_node.end();
@@ -757,7 +759,7 @@ pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std
return flushInner(self, arena, tid) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
- else => |e| return diags.fail("ELF flush failed: {s}", .{@errorName(e)}),
+ else => |e| return diags.fail("ELF flush failed: {t}", .{e}),
};
}
@@ -1047,9 +1049,11 @@ fn dumpArgvInit(self: *Elf, arena: Allocator) !void {
}
pub fn openParseObjectReportingFailure(self: *Elf, path: Path) void {
- const diags = &self.base.comp.link_diags;
- const obj = link.openObject(path, false, false) catch |err| {
- switch (diags.failParse(path, "failed to open object: {s}", .{@errorName(err)})) {
+ const comp = self.base.comp;
+ const io = comp.io;
+ const diags = &comp.link_diags;
+ const obj = link.openObject(io, path, false, false) catch |err| {
+ switch (diags.failParse(path, "failed to open object: {t}", .{err})) {
error.LinkFailure => return,
}
};
@@ -1057,10 +1061,11 @@ pub fn openParseObjectReportingFailure(self: *Elf, path: Path) void {
}
fn parseObjectReportingFailure(self: *Elf, obj: link.Input.Object) void {
- const diags = &self.base.comp.link_diags;
+ const comp = self.base.comp;
+ const diags = &comp.link_diags;
self.parseObject(obj) catch |err| switch (err) {
error.LinkFailure => return, // already reported
- else => |e| diags.addParseError(obj.path, "failed to parse object: {s}", .{@errorName(e)}),
+ else => |e| diags.addParseError(obj.path, "failed to parse object: {t}", .{e}),
};
}
@@ -1068,10 +1073,12 @@ fn parseObject(self: *Elf, obj: link.Input.Object) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = self.base.comp.gpa;
- const diags = &self.base.comp.link_diags;
- const target = &self.base.comp.root_mod.resolved_target.result;
- const debug_fmt_strip = self.base.comp.config.debug_format == .strip;
+ const comp = self.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
+ const diags = &comp.link_diags;
+ const target = &comp.root_mod.resolved_target.result;
+ const debug_fmt_strip = comp.config.debug_format == .strip;
const default_sym_version = self.default_sym_version;
const file_handles = &self.file_handles;
@@ -1090,14 +1097,15 @@ fn parseObject(self: *Elf, obj: link.Input.Object) !void {
try self.objects.append(gpa, index);
const object = self.file(index).?.object;
- try object.parseCommon(gpa, diags, obj.path, handle, target);
+ try object.parseCommon(gpa, io, diags, obj.path, handle, target);
if (!self.base.isStaticLib()) {
- try object.parse(gpa, diags, obj.path, handle, target, debug_fmt_strip, default_sym_version);
+ try object.parse(gpa, io, diags, obj.path, handle, target, debug_fmt_strip, default_sym_version);
}
}
fn parseArchive(
gpa: Allocator,
+ io: Io,
diags: *Diags,
file_handles: *std.ArrayList(File.Handle),
files: *std.MultiArrayList(File.Entry),
@@ -1112,7 +1120,7 @@ fn parseArchive(
defer tracy.end();
const fh = try addFileHandle(gpa, file_handles, obj.file);
- var archive = try Archive.parse(gpa, diags, file_handles, obj.path, fh);
+ var archive = try Archive.parse(gpa, io, diags, file_handles, obj.path, fh);
defer archive.deinit(gpa);
const init_alive = if (is_static_lib) true else obj.must_link;
@@ -1123,15 +1131,16 @@ fn parseArchive(
const object = &files.items(.data)[index].object;
object.index = index;
object.alive = init_alive;
- try object.parseCommon(gpa, diags, obj.path, obj.file, target);
+ try object.parseCommon(gpa, io, diags, obj.path, obj.file, target);
if (!is_static_lib)
- try object.parse(gpa, diags, obj.path, obj.file, target, debug_fmt_strip, default_sym_version);
+ try object.parse(gpa, io, diags, obj.path, obj.file, target, debug_fmt_strip, default_sym_version);
try objects.append(gpa, index);
}
}
fn parseDso(
gpa: Allocator,
+ io: Io,
diags: *Diags,
dso: link.Input.Dso,
shared_objects: *std.StringArrayHashMapUnmanaged(File.Index),
@@ -1143,8 +1152,8 @@ fn parseDso(
const handle = dso.file;
- const stat = Stat.fromFs(try handle.stat());
- var header = try SharedObject.parseHeader(gpa, diags, dso.path, handle, stat, target);
+ const stat = Stat.fromFs(try handle.stat(io));
+ var header = try SharedObject.parseHeader(gpa, io, diags, dso.path, handle, stat, target);
defer header.deinit(gpa);
const soname = header.soname() orelse dso.path.basename();
@@ -1158,7 +1167,7 @@ fn parseDso(
gop.value_ptr.* = index;
- var parsed = try SharedObject.parse(gpa, &header, handle);
+ var parsed = try SharedObject.parse(gpa, io, &header, handle);
errdefer parsed.deinit(gpa);
const duped_path: Path = .{
@@ -2888,13 +2897,7 @@ pub fn allocateAllocSections(self: *Elf) !void {
if (shdr.sh_offset > 0) {
// Get size actually commited to the output file.
const existing_size = self.sectionSize(shndx);
- const amt = try self.base.file.?.copyRangeAll(
- shdr.sh_offset,
- self.base.file.?,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
+ try self.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size);
}
shdr.sh_offset = new_offset;
@@ -2930,13 +2933,7 @@ pub fn allocateNonAllocSections(self: *Elf) !void {
if (shdr.sh_offset > 0) {
const existing_size = self.sectionSize(@intCast(shndx));
- const amt = try self.base.file.?.copyRangeAll(
- shdr.sh_offset,
- self.base.file.?,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
+ try self.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size);
}
shdr.sh_offset = new_offset;
@@ -3649,7 +3646,7 @@ fn fileLookup(files: std.MultiArrayList(File.Entry), index: File.Index, zig_obje
pub fn addFileHandle(
gpa: Allocator,
file_handles: *std.ArrayList(File.Handle),
- handle: fs.File,
+ handle: Io.File,
) Allocator.Error!File.HandleIndex {
try file_handles.append(gpa, handle);
return @intCast(file_handles.items.len - 1);
@@ -4066,10 +4063,10 @@ fn fmtDumpState(self: *Elf, writer: *std.Io.Writer) std.Io.Writer.Error!void {
}
/// Caller owns the memory.
-pub fn preadAllAlloc(allocator: Allocator, handle: fs.File, offset: u64, size: u64) ![]u8 {
+pub fn preadAllAlloc(allocator: Allocator, io: Io, io_file: Io.File, offset: u64, size: u64) ![]u8 {
const buffer = try allocator.alloc(u8, math.cast(usize, size) orelse return error.Overflow);
errdefer allocator.free(buffer);
- const amt = try handle.preadAll(buffer, offset);
+ const amt = try io_file.readPositionalAll(io, buffer, offset);
if (amt != size) return error.InputOutput;
return buffer;
}
@@ -4435,16 +4432,17 @@ pub fn stringTableLookup(strtab: []const u8, off: u32) [:0]const u8 {
pub fn pwriteAll(elf_file: *Elf, bytes: []const u8, offset: u64) error{LinkFailure}!void {
const comp = elf_file.base.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
- elf_file.base.file.?.pwriteAll(bytes, offset) catch |err| {
- return diags.fail("failed to write: {s}", .{@errorName(err)});
- };
+ elf_file.base.file.?.writePositionalAll(io, bytes, offset) catch |err|
+ return diags.fail("failed to write: {t}", .{err});
}
-pub fn setEndPos(elf_file: *Elf, length: u64) error{LinkFailure}!void {
+pub fn setLength(elf_file: *Elf, length: u64) error{LinkFailure}!void {
const comp = elf_file.base.comp;
+    const io = comp.io;
const diags = &comp.link_diags;
- elf_file.base.file.?.setEndPos(length) catch |err| {
+ elf_file.base.file.?.setLength(io, length) catch |err| {
return diags.fail("failed to set file end pos: {s}", .{@errorName(err)});
};
}
@@ -4458,6 +4456,7 @@ pub fn cast(elf_file: *Elf, comptime T: type, x: anytype) error{LinkFailure}!T {
}
const std = @import("std");
+const Io = std.Io;
const build_options = @import("build_options");
const builtin = @import("builtin");
const assert = std.debug.assert;
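`preadAllAlloc` keeps its contract (caller owns the returned memory, short reads become `error.InputOutput`) but now routes through `Io`. Call sites stay a one-liner; a sketch assuming `gpa`, `io`, an open `Io.File` named `handle`, and an `offset`/`size` pair:

    const bytes = try Elf.preadAllAlloc(gpa, io, handle, offset, size);
    defer gpa.free(bytes); // caller owns the allocation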
diff --git a/src/link/Elf/Archive.zig b/src/link/Elf/Archive.zig
index a9961bf8f9..14f2868956 100644
--- a/src/link/Elf/Archive.zig
+++ b/src/link/Elf/Archive.zig
@@ -1,3 +1,21 @@
+const Archive = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const elf = std.elf;
+const fs = std.fs;
+const log = std.log.scoped(.link);
+const mem = std.mem;
+const Path = std.Build.Cache.Path;
+const Allocator = std.mem.Allocator;
+
+const Diags = @import("../../link.zig").Diags;
+const Elf = @import("../Elf.zig");
+const File = @import("file.zig").File;
+const Object = @import("Object.zig");
+const StringTable = @import("../StringTable.zig");
+
objects: []const Object,
/// '\n'-delimited
strtab: []const u8,
@@ -10,22 +28,23 @@ pub fn deinit(a: *Archive, gpa: Allocator) void {
pub fn parse(
gpa: Allocator,
+ io: Io,
diags: *Diags,
file_handles: *const std.ArrayList(File.Handle),
path: Path,
handle_index: File.HandleIndex,
) !Archive {
- const handle = file_handles.items[handle_index];
+ const file = file_handles.items[handle_index];
var pos: usize = 0;
{
var magic_buffer: [elf.ARMAG.len]u8 = undefined;
- const n = try handle.preadAll(&magic_buffer, pos);
+ const n = try file.readPositionalAll(io, &magic_buffer, pos);
if (n != magic_buffer.len) return error.BadMagic;
if (!mem.eql(u8, &magic_buffer, elf.ARMAG)) return error.BadMagic;
pos += magic_buffer.len;
}
- const size = (try handle.stat()).size;
+ const size = (try file.stat(io)).size;
var objects: std.ArrayList(Object) = .empty;
defer objects.deinit(gpa);
@@ -36,7 +55,7 @@ pub fn parse(
while (pos < size) {
var hdr: elf.ar_hdr = undefined;
{
- const n = try handle.preadAll(mem.asBytes(&hdr), pos);
+ const n = try file.readPositionalAll(io, mem.asBytes(&hdr), pos);
if (n != @sizeOf(elf.ar_hdr)) return error.UnexpectedEndOfFile;
}
pos += @sizeOf(elf.ar_hdr);
@@ -53,7 +72,7 @@ pub fn parse(
if (hdr.isSymtab() or hdr.isSymtab64()) continue;
if (hdr.isStrtab()) {
try strtab.resize(gpa, obj_size);
- const amt = try handle.preadAll(strtab.items, pos);
+ const amt = try file.readPositionalAll(io, strtab.items, pos);
if (amt != obj_size) return error.InputOutput;
continue;
}
@@ -120,7 +139,7 @@ pub fn setArHdr(opts: struct {
@memset(mem.asBytes(&hdr), 0x20);
{
- var writer: std.Io.Writer = .fixed(&hdr.ar_name);
+ var writer: Io.Writer = .fixed(&hdr.ar_name);
switch (opts.name) {
.symtab => writer.print("{s}", .{elf.SYM64NAME}) catch unreachable,
.strtab => writer.print("//", .{}) catch unreachable,
@@ -133,7 +152,7 @@ pub fn setArHdr(opts: struct {
hdr.ar_gid[0] = '0';
hdr.ar_mode[0] = '0';
{
- var writer: std.Io.Writer = .fixed(&hdr.ar_size);
+ var writer: Io.Writer = .fixed(&hdr.ar_size);
writer.print("{d}", .{opts.size}) catch unreachable;
}
hdr.ar_fmag = elf.ARFMAG.*;
@@ -206,7 +225,7 @@ pub const ArSymtab = struct {
ar: ArSymtab,
elf_file: *Elf,
- fn default(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ fn default(f: Format, writer: *Io.Writer) Io.Writer.Error!void {
const ar = f.ar;
const elf_file = f.elf_file;
for (ar.symtab.items, 0..) |entry, i| {
@@ -261,7 +280,7 @@ pub const ArStrtab = struct {
try writer.writeAll(ar.buffer.items);
}
- pub fn format(ar: ArStrtab, writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ pub fn format(ar: ArStrtab, writer: *Io.Writer) Io.Writer.Error!void {
try writer.print("{f}", .{std.ascii.hexEscape(ar.buffer.items, .lower)});
}
};
@@ -277,19 +296,3 @@ pub const ArState = struct {
/// Total size of the contributing object (excludes ar_hdr).
size: u64 = 0,
};
-
-const std = @import("std");
-const assert = std.debug.assert;
-const elf = std.elf;
-const fs = std.fs;
-const log = std.log.scoped(.link);
-const mem = std.mem;
-const Path = std.Build.Cache.Path;
-const Allocator = std.mem.Allocator;
-
-const Diags = @import("../../link.zig").Diags;
-const Archive = @This();
-const Elf = @import("../Elf.zig");
-const File = @import("file.zig").File;
-const Object = @import("Object.zig");
-const StringTable = @import("../StringTable.zig");
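The archive parser demonstrates the short-read idiom that recurs throughout this commit: `readPositionalAll` returns the byte count actually read, and the parser promotes a short read to an explicit error instead of looping. A sketch with `io`, `file: Io.File`, and a running `pos` assumed in scope:

    var hdr: elf.ar_hdr = undefined;
    const n = try file.readPositionalAll(io, mem.asBytes(&hdr), pos);
    // A truncated archive yields a short read rather than an error code.
    if (n != @sizeOf(elf.ar_hdr)) return error.UnexpectedEndOfFile;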
diff --git a/src/link/Elf/AtomList.zig b/src/link/Elf/AtomList.zig
index 8fdf555115..9350f1a276 100644
--- a/src/link/Elf/AtomList.zig
+++ b/src/link/Elf/AtomList.zig
@@ -90,7 +90,9 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
}
pub fn write(list: AtomList, buffer: *std.Io.Writer.Allocating, undefs: anytype, elf_file: *Elf) !void {
- const gpa = elf_file.base.comp.gpa;
+ const comp = elf_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const osec = elf_file.sections.items(.shdr)[list.output_section_index];
assert(osec.sh_type != elf.SHT_NOBITS);
assert(!list.dirty);
@@ -121,12 +123,14 @@ pub fn write(list: AtomList, buffer: *std.Io.Writer.Allocating, undefs: anytype,
try atom_ptr.resolveRelocsAlloc(elf_file, out_code);
}
- try elf_file.base.file.?.pwriteAll(buffer.written(), list.offset(elf_file));
+ try elf_file.base.file.?.writePositionalAll(io, buffer.written(), list.offset(elf_file));
buffer.clearRetainingCapacity();
}
pub fn writeRelocatable(list: AtomList, buffer: *std.array_list.Managed(u8), elf_file: *Elf) !void {
- const gpa = elf_file.base.comp.gpa;
+ const comp = elf_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const osec = elf_file.sections.items(.shdr)[list.output_section_index];
assert(osec.sh_type != elf.SHT_NOBITS);
@@ -152,7 +156,7 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.array_list.Managed(u8), elf
@memcpy(out_code, code);
}
- try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file));
+ try elf_file.base.file.?.writePositionalAll(io, buffer.items, list.offset(elf_file));
buffer.clearRetainingCapacity();
}
diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig
index d51a82b266..ebdd1f2098 100644
--- a/src/link/Elf/Object.zig
+++ b/src/link/Elf/Object.zig
@@ -1,3 +1,30 @@
+const Object = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const eh_frame = @import("eh_frame.zig");
+const elf = std.elf;
+const fs = std.fs;
+const log = std.log.scoped(.link);
+const math = std.math;
+const mem = std.mem;
+const Path = std.Build.Cache.Path;
+const Allocator = std.mem.Allocator;
+
+const Diags = @import("../../link.zig").Diags;
+const Archive = @import("Archive.zig");
+const Atom = @import("Atom.zig");
+const AtomList = @import("AtomList.zig");
+const Cie = eh_frame.Cie;
+const Elf = @import("../Elf.zig");
+const Fde = eh_frame.Fde;
+const File = @import("file.zig").File;
+const Merge = @import("Merge.zig");
+const Symbol = @import("Symbol.zig");
+const Alignment = Atom.Alignment;
+const riscv = @import("../riscv.zig");
+
archive: ?InArchive = null,
/// Archive files cannot contain subdirectories, so only the basename is needed
/// for output. However, the full path is kept for error reporting.
@@ -65,10 +92,11 @@ pub fn deinit(self: *Object, gpa: Allocator) void {
pub fn parse(
self: *Object,
gpa: Allocator,
+ io: Io,
diags: *Diags,
/// For error reporting purposes only.
path: Path,
- handle: fs.File,
+ handle: Io.File,
target: *const std.Target,
debug_fmt_strip: bool,
default_sym_version: elf.Versym,
@@ -78,7 +106,7 @@ pub fn parse(
// Allocate atom index 0 to null atom
try self.atoms.append(gpa, .{ .extra_index = try self.addAtomExtra(gpa, .{}) });
- try self.initAtoms(gpa, diags, path, handle, debug_fmt_strip, target);
+ try self.initAtoms(gpa, io, diags, path, handle, debug_fmt_strip, target);
try self.initSymbols(gpa, default_sym_version);
for (self.shdrs.items, 0..) |shdr, i| {
@@ -87,7 +115,7 @@ pub fn parse(
if ((target.cpu.arch == .x86_64 and shdr.sh_type == elf.SHT_X86_64_UNWIND) or
mem.eql(u8, self.getString(atom_ptr.name_offset), ".eh_frame"))
{
- try self.parseEhFrame(gpa, handle, @intCast(i), target);
+ try self.parseEhFrame(gpa, io, handle, @intCast(i), target);
}
}
}
@@ -95,15 +123,16 @@ pub fn parse(
pub fn parseCommon(
self: *Object,
gpa: Allocator,
+ io: Io,
diags: *Diags,
path: Path,
- handle: fs.File,
+ handle: Io.File,
target: *const std.Target,
) !void {
const offset = if (self.archive) |ar| ar.offset else 0;
- const file_size = (try handle.stat()).size;
+ const file_size = (try handle.stat(io)).size;
- const header_buffer = try Elf.preadAllAlloc(gpa, handle, offset, @sizeOf(elf.Elf64_Ehdr));
+ const header_buffer = try Elf.preadAllAlloc(gpa, io, handle, offset, @sizeOf(elf.Elf64_Ehdr));
defer gpa.free(header_buffer);
self.header = @as(*align(1) const elf.Elf64_Ehdr, @ptrCast(header_buffer)).*;
if (!mem.eql(u8, self.header.?.e_ident[0..4], elf.MAGIC)) {
@@ -127,7 +156,7 @@ pub fn parseCommon(
return diags.failParse(path, "corrupt header: section header table extends past the end of file", .{});
}
- const shdrs_buffer = try Elf.preadAllAlloc(gpa, handle, offset + shoff, shsize);
+ const shdrs_buffer = try Elf.preadAllAlloc(gpa, io, handle, offset + shoff, shsize);
defer gpa.free(shdrs_buffer);
const shdrs = @as([*]align(1) const elf.Elf64_Shdr, @ptrCast(shdrs_buffer.ptr))[0..shnum];
try self.shdrs.appendUnalignedSlice(gpa, shdrs);
@@ -140,7 +169,7 @@ pub fn parseCommon(
}
}
- const shstrtab = try self.preadShdrContentsAlloc(gpa, handle, self.header.?.e_shstrndx);
+ const shstrtab = try self.preadShdrContentsAlloc(gpa, io, handle, self.header.?.e_shstrndx);
defer gpa.free(shstrtab);
for (self.shdrs.items) |shdr| {
if (shdr.sh_name >= shstrtab.len) {
@@ -158,7 +187,7 @@ pub fn parseCommon(
const shdr = self.shdrs.items[index];
self.first_global = shdr.sh_info;
- const raw_symtab = try self.preadShdrContentsAlloc(gpa, handle, index);
+ const raw_symtab = try self.preadShdrContentsAlloc(gpa, io, handle, index);
defer gpa.free(raw_symtab);
const nsyms = math.divExact(usize, raw_symtab.len, @sizeOf(elf.Elf64_Sym)) catch {
return diags.failParse(path, "symbol table not evenly divisible", .{});
@@ -166,7 +195,7 @@ pub fn parseCommon(
const symtab = @as([*]align(1) const elf.Elf64_Sym, @ptrCast(raw_symtab.ptr))[0..nsyms];
const strtab_bias = @as(u32, @intCast(self.strtab.items.len));
- const strtab = try self.preadShdrContentsAlloc(gpa, handle, shdr.sh_link);
+ const strtab = try self.preadShdrContentsAlloc(gpa, io, handle, shdr.sh_link);
defer gpa.free(strtab);
try self.strtab.appendSlice(gpa, strtab);
@@ -262,9 +291,10 @@ pub fn validateEFlags(
fn initAtoms(
self: *Object,
gpa: Allocator,
+ io: Io,
diags: *Diags,
path: Path,
- handle: fs.File,
+ handle: Io.File,
debug_fmt_strip: bool,
target: *const std.Target,
) !void {
@@ -297,7 +327,7 @@ fn initAtoms(
};
const shndx: u32 = @intCast(i);
- const group_raw_data = try self.preadShdrContentsAlloc(gpa, handle, shndx);
+ const group_raw_data = try self.preadShdrContentsAlloc(gpa, io, handle, shndx);
defer gpa.free(group_raw_data);
const group_nmembers = math.divExact(usize, group_raw_data.len, @sizeOf(u32)) catch {
return diags.failParse(path, "corrupt section group: not evenly divisible ", .{});
@@ -338,7 +368,7 @@ fn initAtoms(
const shndx: u32 = @intCast(i);
if (self.skipShdr(shndx, debug_fmt_strip)) continue;
const size, const alignment = if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) blk: {
- const data = try self.preadShdrContentsAlloc(gpa, handle, shndx);
+ const data = try self.preadShdrContentsAlloc(gpa, io, handle, shndx);
defer gpa.free(data);
const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
break :blk .{ chdr.ch_size, Alignment.fromNonzeroByteUnits(chdr.ch_addralign) };
@@ -359,7 +389,7 @@ fn initAtoms(
elf.SHT_REL, elf.SHT_RELA => {
const atom_index = self.atoms_indexes.items[shdr.sh_info];
if (self.atom(atom_index)) |atom_ptr| {
- const relocs = try self.preadRelocsAlloc(gpa, handle, @intCast(i));
+ const relocs = try self.preadRelocsAlloc(gpa, io, handle, @intCast(i));
defer gpa.free(relocs);
atom_ptr.relocs_section_index = @intCast(i);
const rel_index: u32 = @intCast(self.relocs.items.len);
@@ -421,7 +451,8 @@ fn initSymbols(
fn parseEhFrame(
self: *Object,
gpa: Allocator,
- handle: fs.File,
+ io: Io,
+ handle: Io.File,
shndx: u32,
target: *const std.Target,
) !void {
@@ -430,12 +461,12 @@ fn parseEhFrame(
else => {},
} else null;
- const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx);
+ const raw = try self.preadShdrContentsAlloc(gpa, io, handle, shndx);
defer gpa.free(raw);
const data_start: u32 = @intCast(self.eh_frame_data.items.len);
try self.eh_frame_data.appendSlice(gpa, raw);
const relocs = if (relocs_shndx) |index|
- try self.preadRelocsAlloc(gpa, handle, index)
+ try self.preadRelocsAlloc(gpa, io, handle, index)
else
&[0]elf.Elf64_Rela{};
defer gpa.free(relocs);
@@ -1095,13 +1126,18 @@ pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, elf_file: *Elf
}
pub fn updateArSize(self: *Object, elf_file: *Elf) !void {
+ const comp = elf_file.base.comp;
+ const io = comp.io;
self.output_ar_state.size = if (self.archive) |ar| ar.size else size: {
const handle = elf_file.fileHandle(self.file_handle);
- break :size (try handle.stat()).size;
+ break :size (try handle.stat(io)).size;
};
}
pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void {
+ const comp = elf_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const size = std.math.cast(usize, self.output_ar_state.size) orelse return error.Overflow;
const offset: u64 = if (self.archive) |ar| ar.offset else 0;
const name = fs.path.basename(self.path.sub_path);
@@ -1114,10 +1150,9 @@ pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void {
});
try writer.writeAll(mem.asBytes(&hdr));
const handle = elf_file.fileHandle(self.file_handle);
- const gpa = elf_file.base.comp.gpa;
const data = try gpa.alloc(u8, size);
defer gpa.free(data);
- const amt = try handle.preadAll(data, offset);
+ const amt = try handle.readPositionalAll(io, data, offset);
if (amt != size) return error.InputOutput;
try writer.writeAll(data);
}
@@ -1190,11 +1225,12 @@ pub fn writeSymtab(self: *Object, elf_file: *Elf) void {
/// Caller owns the memory.
pub fn codeDecompressAlloc(self: *Object, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
const comp = elf_file.base.comp;
+ const io = comp.io;
const gpa = comp.gpa;
const atom_ptr = self.atom(atom_index).?;
const shdr = atom_ptr.inputShdr(elf_file);
const handle = elf_file.fileHandle(self.file_handle);
- const data = try self.preadShdrContentsAlloc(gpa, handle, atom_ptr.input_section_index);
+ const data = try self.preadShdrContentsAlloc(gpa, io, handle, atom_ptr.input_section_index);
defer if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) gpa.free(data);
if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) {
@@ -1310,18 +1346,18 @@ fn addString(self: *Object, gpa: Allocator, str: []const u8) !u32 {
}
/// Caller owns the memory.
-fn preadShdrContentsAlloc(self: Object, gpa: Allocator, handle: fs.File, index: u32) ![]u8 {
+fn preadShdrContentsAlloc(self: Object, gpa: Allocator, io: Io, handle: Io.File, index: u32) ![]u8 {
assert(index < self.shdrs.items.len);
const offset = if (self.archive) |ar| ar.offset else 0;
const shdr = self.shdrs.items[index];
const sh_offset = math.cast(u64, shdr.sh_offset) orelse return error.Overflow;
const sh_size = math.cast(u64, shdr.sh_size) orelse return error.Overflow;
- return Elf.preadAllAlloc(gpa, handle, offset + sh_offset, sh_size);
+ return Elf.preadAllAlloc(gpa, io, handle, offset + sh_offset, sh_size);
}
/// Caller owns the memory.
-fn preadRelocsAlloc(self: Object, gpa: Allocator, handle: fs.File, shndx: u32) ![]align(1) const elf.Elf64_Rela {
- const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx);
+fn preadRelocsAlloc(self: Object, gpa: Allocator, io: Io, handle: Io.File, shndx: u32) ![]align(1) const elf.Elf64_Rela {
+ const raw = try self.preadShdrContentsAlloc(gpa, io, handle, shndx);
const num = @divExact(raw.len, @sizeOf(elf.Elf64_Rela));
return @as([*]align(1) const elf.Elf64_Rela, @ptrCast(raw.ptr))[0..num];
}
@@ -1552,29 +1588,3 @@ const InArchive = struct {
offset: u64,
size: u32,
};
-
-const Object = @This();
-
-const std = @import("std");
-const assert = std.debug.assert;
-const eh_frame = @import("eh_frame.zig");
-const elf = std.elf;
-const fs = std.fs;
-const log = std.log.scoped(.link);
-const math = std.math;
-const mem = std.mem;
-const Path = std.Build.Cache.Path;
-const Allocator = std.mem.Allocator;
-
-const Diags = @import("../../link.zig").Diags;
-const Archive = @import("Archive.zig");
-const Atom = @import("Atom.zig");
-const AtomList = @import("AtomList.zig");
-const Cie = eh_frame.Cie;
-const Elf = @import("../Elf.zig");
-const Fde = eh_frame.Fde;
-const File = @import("file.zig").File;
-const Merge = @import("Merge.zig");
-const Symbol = @import("Symbol.zig");
-const Alignment = Atom.Alignment;
-const riscv = @import("../riscv.zig");
diff --git a/src/link/Elf/SharedObject.zig b/src/link/Elf/SharedObject.zig
index 1e17aa34a8..c97d53a862 100644
--- a/src/link/Elf/SharedObject.zig
+++ b/src/link/Elf/SharedObject.zig
@@ -1,3 +1,20 @@
+const SharedObject = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const elf = std.elf;
+const log = std.log.scoped(.elf);
+const mem = std.mem;
+const Path = std.Build.Cache.Path;
+const Stat = std.Build.Cache.File.Stat;
+const Allocator = mem.Allocator;
+
+const Elf = @import("../Elf.zig");
+const File = @import("file.zig").File;
+const Symbol = @import("Symbol.zig");
+const Diags = @import("../../link.zig").Diags;
+
path: Path,
index: File.Index,
@@ -92,16 +109,17 @@ pub const Parsed = struct {
pub fn parseHeader(
gpa: Allocator,
+ io: Io,
diags: *Diags,
file_path: Path,
- fs_file: std.fs.File,
+ file: Io.File,
stat: Stat,
target: *const std.Target,
) !Header {
var ehdr: elf.Elf64_Ehdr = undefined;
{
const buf = mem.asBytes(&ehdr);
- const amt = try fs_file.preadAll(buf, 0);
+ const amt = try file.readPositionalAll(io, buf, 0);
if (amt != buf.len) return error.UnexpectedEndOfFile;
}
if (!mem.eql(u8, ehdr.e_ident[0..4], "\x7fELF")) return error.BadMagic;
@@ -118,7 +136,7 @@ pub fn parseHeader(
errdefer gpa.free(sections);
{
const buf = mem.sliceAsBytes(sections);
- const amt = try fs_file.preadAll(buf, shoff);
+ const amt = try file.readPositionalAll(io, buf, shoff);
if (amt != buf.len) return error.UnexpectedEndOfFile;
}
@@ -143,7 +161,7 @@ pub fn parseHeader(
const dynamic_table = try gpa.alloc(elf.Elf64_Dyn, n);
errdefer gpa.free(dynamic_table);
const buf = mem.sliceAsBytes(dynamic_table);
- const amt = try fs_file.preadAll(buf, shdr.sh_offset);
+ const amt = try file.readPositionalAll(io, buf, shdr.sh_offset);
if (amt != buf.len) return error.UnexpectedEndOfFile;
break :dt dynamic_table;
} else &.{};
@@ -158,7 +176,7 @@ pub fn parseHeader(
const strtab_shdr = sections[dynsym_shdr.sh_link];
const n = std.math.cast(usize, strtab_shdr.sh_size) orelse return error.Overflow;
const buf = try strtab.addManyAsSlice(gpa, n);
- const amt = try fs_file.preadAll(buf, strtab_shdr.sh_offset);
+ const amt = try file.readPositionalAll(io, buf, strtab_shdr.sh_offset);
if (amt != buf.len) return error.UnexpectedEndOfFile;
}
@@ -190,9 +208,10 @@ pub fn parseHeader(
pub fn parse(
gpa: Allocator,
+ io: Io,
/// Moves resources from header. Caller may unconditionally deinit.
header: *Header,
- fs_file: std.fs.File,
+ file: Io.File,
) !Parsed {
const symtab = if (header.dynsym_sect_index) |index| st: {
const shdr = header.sections[index];
@@ -200,7 +219,7 @@ pub fn parse(
const symtab = try gpa.alloc(elf.Elf64_Sym, n);
errdefer gpa.free(symtab);
const buf = mem.sliceAsBytes(symtab);
- const amt = try fs_file.preadAll(buf, shdr.sh_offset);
+ const amt = try file.readPositionalAll(io, buf, shdr.sh_offset);
if (amt != buf.len) return error.UnexpectedEndOfFile;
break :st symtab;
} else &.{};
@@ -211,7 +230,7 @@ pub fn parse(
if (header.verdef_sect_index) |shndx| {
const shdr = header.sections[shndx];
- const verdefs = try Elf.preadAllAlloc(gpa, fs_file, shdr.sh_offset, shdr.sh_size);
+ const verdefs = try Elf.preadAllAlloc(gpa, io, file, shdr.sh_offset, shdr.sh_size);
defer gpa.free(verdefs);
var offset: u32 = 0;
@@ -237,7 +256,7 @@ pub fn parse(
const versyms = try gpa.alloc(elf.Versym, symtab.len);
errdefer gpa.free(versyms);
const buf = mem.sliceAsBytes(versyms);
- const amt = try fs_file.preadAll(buf, shdr.sh_offset);
+ const amt = try file.readPositionalAll(io, buf, shdr.sh_offset);
if (amt != buf.len) return error.UnexpectedEndOfFile;
break :vs versyms;
} else &.{};
@@ -534,19 +553,3 @@ const Format = struct {
}
}
};
-
-const SharedObject = @This();
-
-const std = @import("std");
-const assert = std.debug.assert;
-const elf = std.elf;
-const log = std.log.scoped(.elf);
-const mem = std.mem;
-const Path = std.Build.Cache.Path;
-const Stat = std.Build.Cache.File.Stat;
-const Allocator = mem.Allocator;
-
-const Elf = @import("../Elf.zig");
-const File = @import("file.zig").File;
-const Symbol = @import("Symbol.zig");
-const Diags = @import("../../link.zig").Diags;
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index 1450e3ab92..588b4e3fc3 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -740,7 +740,9 @@ pub fn checkDuplicates(self: *ZigObject, dupes: anytype, elf_file: *Elf) error{O
/// We need this so that we can write to an archive.
/// TODO implement writing ZigObject data directly to a buffer instead.
pub fn readFileContents(self: *ZigObject, elf_file: *Elf) !void {
- const gpa = elf_file.base.comp.gpa;
+ const comp = elf_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const shsize: u64 = switch (elf_file.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Shdr),
.p64 => @sizeOf(elf.Elf64_Shdr),
@@ -753,7 +755,7 @@ pub fn readFileContents(self: *ZigObject, elf_file: *Elf) !void {
const size = std.math.cast(usize, end_pos) orelse return error.Overflow;
try self.data.resize(gpa, size);
- const amt = try elf_file.base.file.?.preadAll(self.data.items, 0);
+ const amt = try elf_file.base.file.?.readPositionalAll(io, self.data.items, 0);
if (amt != size) return error.InputOutput;
}
@@ -901,13 +903,15 @@ pub fn writeSymtab(self: ZigObject, elf_file: *Elf) void {
/// Returns atom's code.
/// Caller owns the memory.
pub fn codeAlloc(self: *ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
- const gpa = elf_file.base.comp.gpa;
+ const comp = elf_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const atom_ptr = self.atom(atom_index).?;
const file_offset = atom_ptr.offset(elf_file);
const size = std.math.cast(usize, atom_ptr.size) orelse return error.Overflow;
const code = try gpa.alloc(u8, size);
errdefer gpa.free(code);
- const amt = try elf_file.base.file.?.preadAll(code, file_offset);
+ const amt = try elf_file.base.file.?.readPositionalAll(io, code, file_offset);
if (amt != code.len) {
log.err("fetching code for {s} failed", .{atom_ptr.name(elf_file)});
return error.InputOutput;
@@ -1365,6 +1369,8 @@ fn updateNavCode(
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
+ const comp = elf_file.base.comp;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
@@ -1449,8 +1455,8 @@ fn updateNavCode(
const shdr = elf_file.sections.items(.shdr)[shdr_index];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = atom_ptr.offset(elf_file);
- elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
- return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
+ elf_file.base.file.?.writePositionalAll(io, code, file_offset) catch |err|
+ return elf_file.base.cgFail(nav_index, "failed to write to output file: {t}", .{err});
log.debug("writing {f} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len });
}
}
@@ -1467,6 +1473,8 @@ fn updateTlv(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
+ const comp = elf_file.base.comp;
+ const io = comp.io;
const nav = ip.getNav(nav_index);
log.debug("updateTlv {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
@@ -1503,8 +1511,8 @@ fn updateTlv(
const shdr = elf_file.sections.items(.shdr)[shndx];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = atom_ptr.offset(elf_file);
- elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
- return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
+ elf_file.base.file.?.writePositionalAll(io, code, file_offset) catch |err|
+ return elf_file.base.cgFail(nav_index, "failed to write to output file: {t}", .{err});
log.debug("writing TLV {s} from 0x{x} to 0x{x}", .{
atom_ptr.name(elf_file),
file_offset,
@@ -2003,6 +2011,8 @@ fn trampolineSize(cpu_arch: std.Target.Cpu.Arch) u64 {
}
fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
+ const comp = elf_file.base.comp;
+ const io = comp.io;
const atom_ptr = tr_sym.atom(elf_file).?;
const fileoff = atom_ptr.offset(elf_file);
const source_addr = tr_sym.address(.{}, elf_file);
@@ -2012,7 +2022,7 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
.x86_64 => try x86_64.writeTrampolineCode(source_addr, target_addr, &buf),
else => @panic("TODO implement write trampoline for this CPU arch"),
};
- try elf_file.base.file.?.pwriteAll(out, fileoff);
+ try elf_file.base.file.?.writePositionalAll(io, out, fileoff);
if (elf_file.base.child_pid) |pid| {
switch (builtin.os.tag) {
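
Two changes recur through the ZigObject hunks: positional writes gain the `Io` parameter (`pwriteAll` becomes `writePositionalAll(io, ...)`), and diagnostics drop the `{s}` plus `@errorName(err)` pairing in favor of the `{t}` specifier, which formats an error by its tag name. A small standalone sketch of the formatting change:

const std = @import("std");

pub fn main() !void {
    var buf: [64]u8 = undefined;
    const err: anyerror = error.InputOutput;
    // "{t}" replaces the "{s}" + @errorName(err) pairing seen on the
    // removed lines above.
    const msg = try std.fmt.bufPrint(&buf, "failed to write to output file: {t}", .{err});
    std.debug.print("{s}\n", .{msg});
}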
diff --git a/src/link/Elf/file.zig b/src/link/Elf/file.zig
index 50f5159d18..52d3c6e6f0 100644
--- a/src/link/Elf/file.zig
+++ b/src/link/Elf/file.zig
@@ -1,3 +1,20 @@
+const std = @import("std");
+const Io = std.Io;
+const elf = std.elf;
+const log = std.log.scoped(.link);
+const Path = std.Build.Cache.Path;
+const Allocator = std.mem.Allocator;
+
+const Archive = @import("Archive.zig");
+const Atom = @import("Atom.zig");
+const Cie = @import("eh_frame.zig").Cie;
+const Elf = @import("../Elf.zig");
+const LinkerDefined = @import("LinkerDefined.zig");
+const Object = @import("Object.zig");
+const SharedObject = @import("SharedObject.zig");
+const Symbol = @import("Symbol.zig");
+const ZigObject = @import("ZigObject.zig");
+
pub const File = union(enum) {
zig_object: *ZigObject,
linker_defined: *LinkerDefined,
@@ -279,22 +296,6 @@ pub const File = union(enum) {
shared_object: SharedObject,
};
- pub const Handle = std.fs.File;
+ pub const Handle = Io.File;
pub const HandleIndex = Index;
};
-
-const std = @import("std");
-const elf = std.elf;
-const log = std.log.scoped(.link);
-const Path = std.Build.Cache.Path;
-const Allocator = std.mem.Allocator;
-
-const Archive = @import("Archive.zig");
-const Atom = @import("Atom.zig");
-const Cie = @import("eh_frame.zig").Cie;
-const Elf = @import("../Elf.zig");
-const LinkerDefined = @import("LinkerDefined.zig");
-const Object = @import("Object.zig");
-const SharedObject = @import("SharedObject.zig");
-const Symbol = @import("Symbol.zig");
-const ZigObject = @import("ZigObject.zig");
diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig
index 7adeecdcde..ec3ff252fb 100644
--- a/src/link/Elf/relocatable.zig
+++ b/src/link/Elf/relocatable.zig
@@ -1,5 +1,26 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const elf = std.elf;
+const math = std.math;
+const mem = std.mem;
+const Path = std.Build.Cache.Path;
+const log = std.log.scoped(.link);
+const state_log = std.log.scoped(.link_state);
+
+const build_options = @import("build_options");
+
+const eh_frame = @import("eh_frame.zig");
+const link = @import("../../link.zig");
+const Archive = @import("Archive.zig");
+const Compilation = @import("../../Compilation.zig");
+const Elf = @import("../Elf.zig");
+const File = @import("file.zig").File;
+const Object = @import("Object.zig");
+const Symbol = @import("Symbol.zig");
+
pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
const gpa = comp.gpa;
+ const io = comp.io;
const diags = &comp.link_diags;
if (diags.hasErrors()) return error.LinkFailure;
@@ -125,8 +146,8 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
assert(writer.buffered().len == total_size);
- try elf_file.base.file.?.setEndPos(total_size);
- try elf_file.base.file.?.pwriteAll(writer.buffered(), 0);
+ try elf_file.base.file.?.setLength(io, total_size);
+ try elf_file.base.file.?.writePositionalAll(io, writer.buffered(), 0);
if (diags.hasErrors()) return error.LinkFailure;
}
@@ -330,13 +351,7 @@ fn allocateAllocSections(elf_file: *Elf) !void {
if (shdr.sh_offset > 0) {
const existing_size = elf_file.sectionSize(@intCast(shndx));
- const amt = try elf_file.base.file.?.copyRangeAll(
- shdr.sh_offset,
- elf_file.base.file.?,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
+ try elf_file.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size);
}
shdr.sh_offset = new_offset;
@@ -360,7 +375,9 @@ fn writeAtoms(elf_file: *Elf) !void {
}
fn writeSyntheticSections(elf_file: *Elf) !void {
- const gpa = elf_file.base.comp.gpa;
+ const comp = elf_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const slice = elf_file.sections.slice();
const SortRelocs = struct {
@@ -397,7 +414,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
shdr.sh_offset + shdr.sh_size,
});
- try elf_file.base.file.?.pwriteAll(@ptrCast(relocs.items), shdr.sh_offset);
+ try elf_file.base.file.?.writePositionalAll(io, @ptrCast(relocs.items), shdr.sh_offset);
}
if (elf_file.section_indexes.eh_frame) |shndx| {
@@ -417,7 +434,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
shdr.sh_offset + sh_size,
});
assert(writer.buffered().len == sh_size - existing_size);
- try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset + existing_size);
+ try elf_file.base.file.?.writePositionalAll(io, writer.buffered(), shdr.sh_offset + existing_size);
}
if (elf_file.section_indexes.eh_frame_rela) |shndx| {
const shdr = slice.items(.shdr)[shndx];
@@ -435,7 +452,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
shdr.sh_offset,
shdr.sh_offset + shdr.sh_size,
});
- try elf_file.base.file.?.pwriteAll(@ptrCast(relocs.items), shdr.sh_offset);
+ try elf_file.base.file.?.writePositionalAll(io, @ptrCast(relocs.items), shdr.sh_offset);
}
try writeGroups(elf_file);
@@ -444,7 +461,9 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
}
fn writeGroups(elf_file: *Elf) !void {
- const gpa = elf_file.base.comp.gpa;
+ const comp = elf_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
for (elf_file.group_sections.items) |cgs| {
const shdr = elf_file.sections.items(.shdr)[cgs.shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
@@ -457,25 +476,6 @@ fn writeGroups(elf_file: *Elf) !void {
shdr.sh_offset,
shdr.sh_offset + shdr.sh_size,
});
- try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset);
+ try elf_file.base.file.?.writePositionalAll(io, writer.buffered(), shdr.sh_offset);
}
}
-
-const assert = std.debug.assert;
-const build_options = @import("build_options");
-const eh_frame = @import("eh_frame.zig");
-const elf = std.elf;
-const link = @import("../../link.zig");
-const log = std.log.scoped(.link);
-const math = std.math;
-const mem = std.mem;
-const state_log = std.log.scoped(.link_state);
-const Path = std.Build.Cache.Path;
-const std = @import("std");
-
-const Archive = @import("Archive.zig");
-const Compilation = @import("../../Compilation.zig");
-const Elf = @import("../Elf.zig");
-const File = @import("file.zig").File;
-const Object = @import("Object.zig");
-const Symbol = @import("Symbol.zig");
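
flushStaticLib keeps its build-the-archive-in-memory strategy; only the tail changes, with `setEndPos` renamed to `setLength` and both it and the positional write taking the `Io` context. A minimal sketch of that tail under those assumptions (names are illustrative):

const std = @import("std");
const Io = std.Io;

// Pin the output to exactly the image size, then write it in one positional
// call; truncating first drops stale bytes left over from a previous link.
fn writeArchiveImage(io: Io, file: Io.File, image: []const u8) !void {
    try file.setLength(io, image.len);
    try file.writePositionalAll(io, image, 0);
}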
diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig
index 7d12ccedb2..bbdb439385 100644
--- a/src/link/Elf2.zig
+++ b/src/link/Elf2.zig
@@ -1,3 +1,23 @@
+const Elf = @This();
+
+const builtin = @import("builtin");
+const native_endian = builtin.cpu.arch.endian();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const log = std.log.scoped(.link);
+
+const codegen = @import("../codegen.zig");
+const Compilation = @import("../Compilation.zig");
+const InternPool = @import("../InternPool.zig");
+const link = @import("../link.zig");
+const MappedFile = @import("MappedFile.zig");
+const target_util = @import("../target.zig");
+const Type = @import("../Type.zig");
+const Value = @import("../Value.zig");
+const Zcu = @import("../Zcu.zig");
+
base: link.File,
options: link.File.OpenOptions,
mf: MappedFile,
@@ -908,6 +928,7 @@ fn create(
path: std.Build.Cache.Path,
options: link.File.OpenOptions,
) !*Elf {
+ const io = comp.io;
const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .elf);
const class: std.elf.CLASS = switch (target.ptrBitWidth()) {
@@ -953,11 +974,11 @@ fn create(
};
const elf = try arena.create(Elf);
- const file = try path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{
+ const file = try path.root_dir.handle.createFile(io, path.sub_path, .{
.read = true,
- .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode),
+ .permissions = link.File.determinePermissions(comp.config.output_mode, comp.config.link_mode),
});
- errdefer file.close(comp.io);
+ errdefer file.close(io);
elf.* = .{
.base = .{
.tag = .elf2,
@@ -965,7 +986,7 @@ fn create(
.comp = comp,
.emit = path,
- .file = .adaptFromNewApi(file),
+ .file = file,
.gc_sections = false,
.print_gc_sections = false,
.build_id = .none,
@@ -973,7 +994,7 @@ fn create(
.stack_size = 0,
},
.options = options,
- .mf = try .init(file, comp.gpa),
+ .mf = try .init(file, comp.gpa, io),
.ni = .{
.tls = .none,
},
@@ -1973,8 +1994,8 @@ pub fn lazySymbol(elf: *Elf, lazy: link.File.LazySymbol) !Symbol.Index {
return lazy_gop.value_ptr.*;
}
-pub fn loadInput(elf: *Elf, input: link.Input) (std.fs.File.Reader.SizeError ||
- std.Io.File.Reader.Error || MappedFile.Error || error{ EndOfStream, BadMagic, LinkFailure })!void {
+pub fn loadInput(elf: *Elf, input: link.Input) (Io.File.Reader.SizeError ||
+ Io.File.Reader.Error || MappedFile.Error || error{ EndOfStream, BadMagic, LinkFailure })!void {
const io = elf.base.comp.io;
var buf: [4096]u8 = undefined;
switch (input) {
@@ -2007,7 +2028,7 @@ pub fn loadInput(elf: *Elf, input: link.Input) (std.fs.File.Reader.SizeError ||
.dso_exact => |dso_exact| try elf.loadDsoExact(dso_exact.name),
}
}
-fn loadArchive(elf: *Elf, path: std.Build.Cache.Path, fr: *std.Io.File.Reader) !void {
+fn loadArchive(elf: *Elf, path: std.Build.Cache.Path, fr: *Io.File.Reader) !void {
const comp = elf.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
@@ -2067,7 +2088,7 @@ fn loadObject(
elf: *Elf,
path: std.Build.Cache.Path,
member: ?[]const u8,
- fr: *std.Io.File.Reader,
+ fr: *Io.File.Reader,
fl: MappedFile.Node.FileLocation,
) !void {
const comp = elf.base.comp;
@@ -2310,7 +2331,7 @@ fn loadObject(
},
}
}
-fn loadDso(elf: *Elf, path: std.Build.Cache.Path, fr: *std.Io.File.Reader) !void {
+fn loadDso(elf: *Elf, path: std.Build.Cache.Path, fr: *Io.File.Reader) !void {
const comp = elf.base.comp;
const diags = &comp.link_diags;
const r = &fr.interface;
@@ -3305,12 +3326,13 @@ fn flushInputSection(elf: *Elf, isi: Node.InputSectionIndex) !void {
const file_loc = isi.fileLocation(elf);
if (file_loc.size == 0) return;
const comp = elf.base.comp;
+ const io = comp.io;
const gpa = comp.gpa;
const ii = isi.input(elf);
const path = ii.path(elf);
- const file = try path.root_dir.handle.adaptToNewApi().openFile(comp.io, path.sub_path, .{});
- defer file.close(comp.io);
- var fr = file.reader(comp.io, &.{});
+ const file = try path.root_dir.handle.openFile(io, path.sub_path, .{});
+ defer file.close(io);
+ var fr = file.reader(io, &.{});
try fr.seekTo(file_loc.offset);
var nw: MappedFile.Node.Writer = undefined;
const si = isi.symbol(elf);
@@ -3707,10 +3729,16 @@ pub fn deleteExport(elf: *Elf, exported: Zcu.Exported, name: InternPool.NullTerm
_ = name;
}
-pub fn dump(elf: *Elf, tid: Zcu.PerThread.Id) void {
- const w, _ = std.debug.lockStderrWriter(&.{});
- defer std.debug.unlockStderrWriter();
- elf.printNode(tid, w, .root, 0) catch {};
+pub fn dump(elf: *Elf, tid: Zcu.PerThread.Id) Io.Cancelable!void {
+ const comp = elf.base.comp;
+ const io = comp.io;
+ var buffer: [512]u8 = undefined;
+ const stderr = try io.lockStderr(&buffer, null);
+    defer io.unlockStderr();
+ const w = &stderr.file_writer.interface;
+ elf.printNode(tid, w, .root, 0) catch |err| switch (err) {
+ error.WriteFailed => return stderr.err.?,
+ };
}
pub fn printNode(
@@ -3822,19 +3850,3 @@ pub fn printNode(
try w.writeByte('\n');
}
}
-
-const assert = std.debug.assert;
-const builtin = @import("builtin");
-const codegen = @import("../codegen.zig");
-const Compilation = @import("../Compilation.zig");
-const Elf = @This();
-const InternPool = @import("../InternPool.zig");
-const link = @import("../link.zig");
-const log = std.log.scoped(.link);
-const MappedFile = @import("MappedFile.zig");
-const native_endian = builtin.cpu.arch.endian();
-const std = @import("std");
-const target_util = @import("../target.zig");
-const Type = @import("../Type.zig");
-const Value = @import("../Value.zig");
-const Zcu = @import("../Zcu.zig");
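
The `dump` rework above trades `std.debug.lockStderrWriter` for a lock owned by the `Io` context: writer failures surface as the generic `error.WriteFailed`, and the concrete cause is recovered from the locked handle's `err` field. A hedged standalone sketch reusing only the calls from that hunk (the helper name is illustrative):

const std = @import("std");
const Io = std.Io;

fn dumpLine(io: Io, line: []const u8) Io.Cancelable!void {
    var buffer: [512]u8 = undefined;
    // Acquire an exclusive, buffered stderr writer from the Io context.
    const stderr = try io.lockStderr(&buffer, null);
    defer io.unlockStderr();
    const w = &stderr.file_writer.interface;
    w.writeAll(line) catch |err| switch (err) {
        // The generic writer error carries no detail; the real error was
        // stashed on the locked handle.
        error.WriteFailed => return stderr.err.?,
    };
}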
diff --git a/src/link/Lld.zig b/src/link/Lld.zig
index 2345090482..b2a0f6e396 100644
--- a/src/link/Lld.zig
+++ b/src/link/Lld.zig
@@ -359,6 +359,7 @@ fn linkAsArchive(lld: *Lld, arena: Allocator) !void {
fn coffLink(lld: *Lld, arena: Allocator) !void {
const comp = lld.base.comp;
const gpa = comp.gpa;
+ const io = comp.io;
const base = &lld.base;
const coff = &lld.ofmt.coff;
@@ -400,11 +401,12 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
// regarding eliding redundant object -> object transformations.
return error.NoObjectsToLink;
};
- try std.fs.Dir.copyFile(
+ try Io.Dir.copyFile(
the_object_path.root_dir.handle,
the_object_path.sub_path,
directory.handle,
base.emit.sub_path,
+ io,
.{},
);
} else {
@@ -718,13 +720,13 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
argv.appendAssumeCapacity(try crt_file.full_object_path.toString(arena));
continue;
}
- if (try findLib(arena, lib_basename, coff.lib_directories)) |full_path| {
+ if (try findLib(arena, io, lib_basename, coff.lib_directories)) |full_path| {
argv.appendAssumeCapacity(full_path);
continue;
}
if (target.abi.isGnu()) {
const fallback_name = try allocPrint(arena, "lib{s}.dll.a", .{key});
- if (try findLib(arena, fallback_name, coff.lib_directories)) |full_path| {
+ if (try findLib(arena, io, fallback_name, coff.lib_directories)) |full_path| {
argv.appendAssumeCapacity(full_path);
continue;
}
@@ -741,9 +743,9 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
try spawnLld(comp, arena, argv.items);
}
}
-fn findLib(arena: Allocator, name: []const u8, lib_directories: []const Cache.Directory) !?[]const u8 {
+fn findLib(arena: Allocator, io: Io, name: []const u8, lib_directories: []const Cache.Directory) !?[]const u8 {
for (lib_directories) |lib_directory| {
- lib_directory.handle.access(name, .{}) catch |err| switch (err) {
+ lib_directory.handle.access(io, name, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| return e,
};
@@ -755,6 +757,7 @@ fn findLib(arena: Allocator, name: []const u8, lib_directories: []const Cache.Di
fn elfLink(lld: *Lld, arena: Allocator) !void {
const comp = lld.base.comp;
const gpa = comp.gpa;
+ const io = comp.io;
const diags = &comp.link_diags;
const base = &lld.base;
const elf = &lld.ofmt.elf;
@@ -816,11 +819,12 @@ fn elfLink(lld: *Lld, arena: Allocator) !void {
// regarding eliding redundant object -> object transformations.
return error.NoObjectsToLink;
};
- try std.fs.Dir.copyFile(
+ try Io.Dir.copyFile(
the_object_path.root_dir.handle,
the_object_path.sub_path,
directory.handle,
base.emit.sub_path,
+ io,
.{},
);
} else {
@@ -1326,6 +1330,7 @@ fn getLDMOption(target: *const std.Target) ?[]const u8 {
}
fn wasmLink(lld: *Lld, arena: Allocator) !void {
const comp = lld.base.comp;
+ const diags = &comp.link_diags;
const shared_memory = comp.config.shared_memory;
const export_memory = comp.config.export_memory;
const import_memory = comp.config.import_memory;
@@ -1334,6 +1339,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
const wasm = &lld.ofmt.wasm;
const gpa = comp.gpa;
+ const io = comp.io;
const directory = base.emit.root_dir; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{base.emit.sub_path});
@@ -1371,11 +1377,12 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
// regarding eliding redundant object -> object transformations.
return error.NoObjectsToLink;
};
- try fs.Dir.copyFile(
+ try Io.Dir.copyFile(
the_object_path.root_dir.handle,
the_object_path.sub_path,
directory.handle,
base.emit.sub_path,
+ io,
.{},
);
} else {
@@ -1565,27 +1572,23 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
// is not the case, it means we will get "exec format error" when trying to run
// it, and then can react to that in the same way as trying to run an ELF file
// from a foreign CPU architecture.
- if (fs.has_executable_bit and target.os.tag == .wasi and
+ if (Io.File.Permissions.has_executable_bit and target.os.tag == .wasi and
comp.config.output_mode == .Exe)
{
- // TODO: what's our strategy for reporting linker errors from this function?
- // report a nice error here with the file path if it fails instead of
- // just returning the error code.
// chmod does not interact with umask, so we use a conservative -rwxr--r-- here.
- std.posix.fchmodat(fs.cwd().fd, full_out_path, 0o744, 0) catch |err| switch (err) {
- error.OperationNotSupported => unreachable, // Not a symlink.
- else => |e| return e,
- };
+ Io.Dir.cwd().setFilePermissions(io, full_out_path, .fromMode(0o744), .{}) catch |err|
+ return diags.fail("{s}: failed to enable executable permissions: {t}", .{ full_out_path, err });
}
}
}
fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !void {
const io = comp.io;
+ const gpa = comp.gpa;
if (comp.verbose_link) {
// Skip over our own name so that the LLD linker name is the first argv item.
- Compilation.dump_argv(argv[1..]);
+ try Compilation.dumpArgv(io, argv[1..]);
}
// If possible, we run LLD as a child process because it does not always
@@ -1599,7 +1602,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
}
var stderr: []u8 = &.{};
- defer comp.gpa.free(stderr);
+ defer gpa.free(stderr);
var child = std.process.Child.init(argv, arena);
const term = (if (comp.clang_passthrough_mode) term: {
@@ -1607,16 +1610,16 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
- break :term child.spawnAndWait();
+ break :term child.spawnAndWait(io);
} else term: {
child.stdin_behavior = .Ignore;
child.stdout_behavior = .Ignore;
child.stderr_behavior = .Pipe;
- child.spawn() catch |err| break :term err;
+ child.spawn(io) catch |err| break :term err;
var stderr_reader = child.stderr.?.readerStreaming(io, &.{});
- stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited);
- break :term child.wait();
+ stderr = try stderr_reader.interface.allocRemaining(gpa, .unlimited);
+ break :term child.wait(io);
}) catch |first_err| term: {
const err = switch (first_err) {
error.NameTooLong => err: {
@@ -1624,13 +1627,13 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
const rand_int = std.crypto.random.int(u64);
const rsp_path = "tmp" ++ s ++ std.fmt.hex(rand_int) ++ ".rsp";
- const rsp_file = try comp.dirs.local_cache.handle.createFile(rsp_path, .{});
- defer comp.dirs.local_cache.handle.deleteFileZ(rsp_path) catch |err|
- log.warn("failed to delete response file {s}: {s}", .{ rsp_path, @errorName(err) });
+ const rsp_file = try comp.dirs.local_cache.handle.createFile(io, rsp_path, .{});
+ defer comp.dirs.local_cache.handle.deleteFile(io, rsp_path) catch |err|
+ log.warn("failed to delete response file {s}: {t}", .{ rsp_path, err });
{
- defer rsp_file.close();
+ defer rsp_file.close(io);
var rsp_file_buffer: [1024]u8 = undefined;
- var rsp_file_writer = rsp_file.writer(&rsp_file_buffer);
+ var rsp_file_writer = rsp_file.writer(io, &rsp_file_buffer);
const rsp_writer = &rsp_file_writer.interface;
for (argv[2..]) |arg| {
try rsp_writer.writeByte('"');
@@ -1657,16 +1660,16 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
rsp_child.stdout_behavior = .Inherit;
rsp_child.stderr_behavior = .Inherit;
- break :term rsp_child.spawnAndWait() catch |err| break :err err;
+ break :term rsp_child.spawnAndWait(io) catch |err| break :err err;
} else {
rsp_child.stdin_behavior = .Ignore;
rsp_child.stdout_behavior = .Ignore;
rsp_child.stderr_behavior = .Pipe;
- rsp_child.spawn() catch |err| break :err err;
+ rsp_child.spawn(io) catch |err| break :err err;
var stderr_reader = rsp_child.stderr.?.readerStreaming(io, &.{});
- stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited);
- break :term rsp_child.wait() catch |err| break :err err;
+ stderr = try stderr_reader.interface.allocRemaining(gpa, .unlimited);
+ break :term rsp_child.wait(io) catch |err| break :err err;
}
},
else => first_err,
@@ -1692,6 +1695,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
}
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
const allocPrint = std.fmt.allocPrint;
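
spawnLld shows the process side of the migration: `spawn`, `wait`, and `spawnAndWait` all take the `Io` context, and child stderr is drained through a streaming reader. A condensed sketch of the capture path used above (the helper name is illustrative):

const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;

fn runCapturingStderr(gpa: Allocator, io: Io, arena: Allocator, argv: []const []const u8) ![]u8 {
    var child = std.process.Child.init(argv, arena);
    child.stdin_behavior = .Ignore;
    child.stdout_behavior = .Ignore;
    child.stderr_behavior = .Pipe;
    try child.spawn(io);
    // Unbuffered streaming reader over the pipe; allocRemaining slurps
    // everything until the child closes its end.
    var stderr_reader = child.stderr.?.readerStreaming(io, &.{});
    const stderr = try stderr_reader.interface.allocRemaining(gpa, .unlimited);
    errdefer gpa.free(stderr);
    _ = try child.wait(io);
    return stderr; // Caller owns the slice.
}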
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 2c4ffd6632..b747b3de56 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -219,10 +219,12 @@ pub fn createEmpty(
};
errdefer self.base.destroy();
- self.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
+ const io = comp.io;
+
+ self.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{
.truncate = true,
.read = true,
- .mode = link.File.determineMode(output_mode, link_mode),
+ .permissions = link.File.determinePermissions(output_mode, link_mode),
});
// Append null file
@@ -267,14 +269,16 @@ pub fn open(
}
pub fn deinit(self: *MachO) void {
- const gpa = self.base.comp.gpa;
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
if (self.d_sym) |*d_sym| {
d_sym.deinit();
}
for (self.file_handles.items) |handle| {
- handle.close();
+ handle.close(io);
}
self.file_handles.deinit(gpa);
@@ -343,7 +347,8 @@ pub fn flush(
const comp = self.base.comp;
const gpa = comp.gpa;
- const diags = &self.base.comp.link_diags;
+ const io = comp.io;
+ const diags = &comp.link_diags;
const sub_prog_node = prog_node.start("MachO Flush", 0);
defer sub_prog_node.end();
@@ -376,26 +381,26 @@ pub fn flush(
// in this set.
try positionals.ensureUnusedCapacity(comp.c_object_table.keys().len);
for (comp.c_object_table.keys()) |key| {
- positionals.appendAssumeCapacity(try link.openObjectInput(diags, key.status.success.object_path));
+ positionals.appendAssumeCapacity(try link.openObjectInput(io, diags, key.status.success.object_path));
}
- if (zcu_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
+ if (zcu_obj_path) |path| try positionals.append(try link.openObjectInput(io, diags, path));
if (comp.config.any_sanitize_thread) {
- try positionals.append(try link.openObjectInput(diags, comp.tsan_lib.?.full_object_path));
+ try positionals.append(try link.openObjectInput(io, diags, comp.tsan_lib.?.full_object_path));
}
if (comp.config.any_fuzz) {
- try positionals.append(try link.openArchiveInput(diags, comp.fuzzer_lib.?.full_object_path, false, false));
+ try positionals.append(try link.openArchiveInput(io, diags, comp.fuzzer_lib.?.full_object_path, false, false));
}
if (comp.ubsan_rt_lib) |crt_file| {
const path = crt_file.full_object_path;
- self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
+ self.classifyInputFile(try link.openArchiveInput(io, diags, path, false, false)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
} else if (comp.ubsan_rt_obj) |crt_file| {
const path = crt_file.full_object_path;
- self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err|
+ self.classifyInputFile(try link.openObjectInput(io, diags, path)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
}
@@ -430,7 +435,7 @@ pub fn flush(
if (comp.config.link_libc and is_exe_or_dyn_lib) {
if (comp.zigc_static_lib) |zigc| {
const path = zigc.full_object_path;
- self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
+ self.classifyInputFile(try link.openArchiveInput(io, diags, path, false, false)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
}
}
@@ -453,12 +458,12 @@ pub fn flush(
for (system_libs.items) |lib| {
switch (Compilation.classifyFileExt(lib.path.sub_path)) {
.shared_library => {
- const dso_input = try link.openDsoInput(diags, lib.path, lib.needed, lib.weak, lib.reexport);
+ const dso_input = try link.openDsoInput(io, diags, lib.path, lib.needed, lib.weak, lib.reexport);
self.classifyInputFile(dso_input) catch |err|
diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
},
.static_library => {
- const archive_input = try link.openArchiveInput(diags, lib.path, lib.must_link, lib.hidden);
+ const archive_input = try link.openArchiveInput(io, diags, lib.path, lib.must_link, lib.hidden);
self.classifyInputFile(archive_input) catch |err|
diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
},
@@ -469,11 +474,11 @@ pub fn flush(
// Finally, link against compiler_rt.
if (comp.compiler_rt_lib) |crt_file| {
const path = crt_file.full_object_path;
- self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
+ self.classifyInputFile(try link.openArchiveInput(io, diags, path, false, false)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
} else if (comp.compiler_rt_obj) |crt_file| {
const path = crt_file.full_object_path;
- self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err|
+ self.classifyInputFile(try link.openObjectInput(io, diags, path)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
}
@@ -564,7 +569,7 @@ pub fn flush(
self.writeLinkeditSectionsToFile() catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
- else => |e| return diags.fail("failed to write linkedit sections to file: {s}", .{@errorName(e)}),
+ else => |e| return diags.fail("failed to write linkedit sections to file: {t}", .{e}),
};
var codesig: ?CodeSignature = if (self.requiresCodeSig()) blk: {
@@ -575,8 +580,8 @@ pub fn flush(
// where the code signature goes into.
var codesig = CodeSignature.init(self.getPageSize());
codesig.code_directory.ident = fs.path.basename(self.base.emit.sub_path);
- if (self.entitlements) |path| codesig.addEntitlements(gpa, path) catch |err|
- return diags.fail("failed to add entitlements from {s}: {s}", .{ path, @errorName(err) });
+ if (self.entitlements) |path| codesig.addEntitlements(gpa, io, path) catch |err|
+ return diags.fail("failed to add entitlements from {s}: {t}", .{ path, err });
try self.writeCodeSignaturePadding(&codesig);
break :blk codesig;
} else null;
@@ -612,15 +617,17 @@ pub fn flush(
else => |e| return diags.fail("failed to write code signature: {s}", .{@errorName(e)}),
};
const emit = self.base.emit;
- invalidateKernelCache(emit.root_dir.handle, emit.sub_path) catch |err| switch (err) {
- else => |e| return diags.fail("failed to invalidate kernel cache: {s}", .{@errorName(e)}),
+ invalidateKernelCache(io, emit.root_dir.handle, emit.sub_path) catch |err| switch (err) {
+ else => |e| return diags.fail("failed to invalidate kernel cache: {t}", .{e}),
};
}
}
/// --verbose-link output
fn dumpArgv(self: *MachO, comp: *Compilation) !void {
- const gpa = self.base.comp.gpa;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
@@ -815,7 +822,7 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
if (comp.ubsan_rt_obj) |obj| try argv.append(try obj.full_object_path.toString(arena));
}
- Compilation.dump_argv(argv.items);
+ try Compilation.dumpArgv(io, argv.items);
}
/// TODO delete this, libsystem must be resolved when setting up the compilation pipeline
@@ -825,7 +832,8 @@ pub fn resolveLibSystem(
comp: *Compilation,
out_libs: anytype,
) !void {
- const diags = &self.base.comp.link_diags;
+ const io = comp.io;
+ const diags = &comp.link_diags;
var test_path = std.array_list.Managed(u8).init(arena);
var checked_paths = std.array_list.Managed([]const u8).init(arena);
@@ -834,16 +842,16 @@ pub fn resolveLibSystem(
if (self.sdk_layout) |sdk_layout| switch (sdk_layout) {
.sdk => {
const dir = try fs.path.join(arena, &.{ comp.sysroot.?, "usr", "lib" });
- if (try accessLibPath(arena, &test_path, &checked_paths, dir, "System")) break :success;
+ if (try accessLibPath(arena, io, &test_path, &checked_paths, dir, "System")) break :success;
},
.vendored => {
const dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "darwin" });
- if (try accessLibPath(arena, &test_path, &checked_paths, dir, "System")) break :success;
+ if (try accessLibPath(arena, io, &test_path, &checked_paths, dir, "System")) break :success;
},
};
for (self.lib_directories) |directory| {
- if (try accessLibPath(arena, &test_path, &checked_paths, directory.path orelse ".", "System")) break :success;
+ if (try accessLibPath(arena, io, &test_path, &checked_paths, directory.path orelse ".", "System")) break :success;
}
diags.addMissingLibraryError(checked_paths.items, "unable to find libSystem system library", .{});
@@ -861,6 +869,9 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
const tracy = trace(@src());
defer tracy.end();
+ const comp = self.base.comp;
+ const io = comp.io;
+
const path, const file = input.pathAndFile().?;
// TODO don't classify now, it's too late. The input file has already been classified
log.debug("classifying input file {f}", .{path});
@@ -871,7 +882,7 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
const fat_arch: ?fat.Arch = try self.parseFatFile(file, path);
const offset = if (fat_arch) |fa| fa.offset else 0;
- if (readMachHeader(file, offset) catch null) |h| blk: {
+ if (readMachHeader(io, file, offset) catch null) |h| blk: {
if (h.magic != macho.MH_MAGIC_64) break :blk;
switch (h.filetype) {
macho.MH_OBJECT => try self.addObject(path, fh, offset),
@@ -880,7 +891,7 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
}
return;
}
- if (readArMagic(file, offset, &buffer) catch null) |ar_magic| blk: {
+ if (readArMagic(io, file, offset, &buffer) catch null) |ar_magic| blk: {
if (!mem.eql(u8, ar_magic, Archive.ARMAG)) break :blk;
try self.addArchive(input.archive, fh, fat_arch);
return;
@@ -888,12 +899,14 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
_ = try self.addTbd(.fromLinkInput(input), true, fh);
}
-fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch {
- const diags = &self.base.comp.link_diags;
- const fat_h = fat.readFatHeader(file) catch return null;
+fn parseFatFile(self: *MachO, file: Io.File, path: Path) !?fat.Arch {
+ const comp = self.base.comp;
+ const io = comp.io;
+ const diags = &comp.link_diags;
+ const fat_h = fat.readFatHeader(io, file) catch return null;
if (fat_h.magic != macho.FAT_MAGIC and fat_h.magic != macho.FAT_MAGIC_64) return null;
var fat_archs_buffer: [2]fat.Arch = undefined;
- const fat_archs = try fat.parseArchs(file, fat_h, &fat_archs_buffer);
+ const fat_archs = try fat.parseArchs(io, file, fat_h, &fat_archs_buffer);
const cpu_arch = self.getTarget().cpu.arch;
for (fat_archs) |arch| {
if (arch.tag == cpu_arch) return arch;
@@ -901,16 +914,16 @@ fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch {
return diags.failParse(path, "missing arch in universal file: expected {s}", .{@tagName(cpu_arch)});
}
-pub fn readMachHeader(file: std.fs.File, offset: usize) !macho.mach_header_64 {
+pub fn readMachHeader(io: Io, file: Io.File, offset: usize) !macho.mach_header_64 {
var buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
- const nread = try file.preadAll(&buffer, offset);
+ const nread = try file.readPositionalAll(io, &buffer, offset);
if (nread != buffer.len) return error.InputOutput;
const hdr = @as(*align(1) const macho.mach_header_64, @ptrCast(&buffer)).*;
return hdr;
}
-pub fn readArMagic(file: std.fs.File, offset: usize, buffer: *[Archive.SARMAG]u8) ![]const u8 {
- const nread = try file.preadAll(buffer, offset);
+pub fn readArMagic(io: Io, file: Io.File, offset: usize, buffer: *[Archive.SARMAG]u8) ![]const u8 {
+ const nread = try file.readPositionalAll(io, buffer, offset);
if (nread != buffer.len) return error.InputOutput;
return buffer[0..Archive.SARMAG];
}
@@ -921,6 +934,7 @@ fn addObject(self: *MachO, path: Path, handle_index: File.HandleIndex, offset: u
const comp = self.base.comp;
const gpa = comp.gpa;
+ const io = comp.io;
const abs_path = try std.fs.path.resolvePosix(gpa, &.{
comp.dirs.cwd,
@@ -930,7 +944,7 @@ fn addObject(self: *MachO, path: Path, handle_index: File.HandleIndex, offset: u
errdefer gpa.free(abs_path);
const file = self.getFileHandle(handle_index);
- const stat = try file.stat();
+ const stat = try file.stat(io);
const mtime = stat.mtime.toSeconds();
const index: File.Index = @intCast(try self.files.addOne(gpa));
self.files.set(index, .{ .object = .{
@@ -1069,6 +1083,7 @@ fn isHoisted(self: *MachO, install_name: []const u8) bool {
/// TODO delete this, libraries must be instead resolved when instantiating the compilation pipeline
fn accessLibPath(
arena: Allocator,
+ io: Io,
test_path: *std.array_list.Managed(u8),
checked_paths: *std.array_list.Managed([]const u8),
search_dir: []const u8,
@@ -1080,7 +1095,7 @@ fn accessLibPath(
test_path.clearRetainingCapacity();
try test_path.print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext });
try checked_paths.append(try arena.dupe(u8, test_path.items));
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
+ Io.Dir.cwd().access(io, test_path.items, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| return e,
};
@@ -1092,6 +1107,7 @@ fn accessLibPath(
fn accessFrameworkPath(
arena: Allocator,
+ io: Io,
test_path: *std.array_list.Managed(u8),
checked_paths: *std.array_list.Managed([]const u8),
search_dir: []const u8,
@@ -1108,7 +1124,7 @@ fn accessFrameworkPath(
ext,
});
try checked_paths.append(try arena.dupe(u8, test_path.items));
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
+ Io.Dir.cwd().access(io, test_path.items, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| return e,
};
@@ -1124,7 +1140,9 @@ fn parseDependentDylibs(self: *MachO) !void {
if (self.dylibs.items.len == 0) return;
- const gpa = self.base.comp.gpa;
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
const framework_dirs = self.framework_dirs;
// TODO delete this, directories must instead be resolved by the frontend
@@ -1165,14 +1183,14 @@ fn parseDependentDylibs(self: *MachO) !void {
// Framework
for (framework_dirs) |dir| {
test_path.clearRetainingCapacity();
- if (try accessFrameworkPath(arena, &test_path, &checked_paths, dir, stem)) break :full_path test_path.items;
+ if (try accessFrameworkPath(arena, io, &test_path, &checked_paths, dir, stem)) break :full_path test_path.items;
}
// Library
const lib_name = eatPrefix(stem, "lib") orelse stem;
for (lib_directories) |lib_directory| {
test_path.clearRetainingCapacity();
- if (try accessLibPath(arena, &test_path, &checked_paths, lib_directory.path orelse ".", lib_name)) break :full_path test_path.items;
+ if (try accessLibPath(arena, io, &test_path, &checked_paths, lib_directory.path orelse ".", lib_name)) break :full_path test_path.items;
}
}
@@ -1181,13 +1199,13 @@ fn parseDependentDylibs(self: *MachO) !void {
const path = if (existing_ext.len > 0) id.name[0 .. id.name.len - existing_ext.len] else id.name;
for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
test_path.clearRetainingCapacity();
- if (self.base.comp.sysroot) |root| {
+ if (comp.sysroot) |root| {
try test_path.print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext });
} else {
try test_path.print("{s}{s}", .{ path, ext });
}
try checked_paths.append(try arena.dupe(u8, test_path.items));
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
+ Io.Dir.cwd().access(io, test_path.items, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| return e,
};
@@ -1202,7 +1220,8 @@ fn parseDependentDylibs(self: *MachO) !void {
const rel_path = try fs.path.join(arena, &.{ prefix, path });
try checked_paths.append(rel_path);
var buffer: [fs.max_path_bytes]u8 = undefined;
- const full_path = fs.realpath(rel_path, &buffer) catch continue;
+ // TODO don't use realpath
+ const full_path = buffer[0 .. Io.Dir.realPathFileAbsolute(io, rel_path, &buffer) catch continue];
break :full_path try arena.dupe(u8, full_path);
}
} else if (eatPrefix(id.name, "@loader_path/")) |_| {
@@ -1215,8 +1234,9 @@ fn parseDependentDylibs(self: *MachO) !void {
try checked_paths.append(try arena.dupe(u8, id.name));
var buffer: [fs.max_path_bytes]u8 = undefined;
- if (fs.realpath(id.name, &buffer)) |full_path| {
- break :full_path try arena.dupe(u8, full_path);
+ // TODO don't use realpath
+ if (Io.Dir.realPathFileAbsolute(io, id.name, &buffer)) |full_path_n| {
+ break :full_path try arena.dupe(u8, buffer[0..full_path_n]);
} else |_| {
try self.reportMissingDependencyError(
self.getFile(dylib_index).?.dylib.getUmbrella(self).index,
@@ -1233,12 +1253,12 @@ fn parseDependentDylibs(self: *MachO) !void {
.path = Path.initCwd(full_path),
.weak = is_weak,
};
- const file = try lib.path.root_dir.handle.openFile(lib.path.sub_path, .{});
+ const file = try lib.path.root_dir.handle.openFile(io, lib.path.sub_path, .{});
const fh = try self.addFileHandle(file);
const fat_arch = try self.parseFatFile(file, lib.path);
const offset = if (fat_arch) |fa| fa.offset else 0;
const file_index = file_index: {
- if (readMachHeader(file, offset) catch null) |h| blk: {
+ if (readMachHeader(io, file, offset) catch null) |h| blk: {
if (h.magic != macho.MH_MAGIC_64) break :blk;
switch (h.filetype) {
macho.MH_DYLIB => break :file_index try self.addDylib(lib, false, fh, offset),
@@ -3147,7 +3167,9 @@ fn detectAllocCollision(self: *MachO, start: u64, size: u64) !?u64 {
}
}
- if (at_end) try self.base.file.?.setEndPos(end);
+ const comp = self.base.comp;
+ const io = comp.io;
+ if (at_end) try self.base.file.?.setLength(io, end);
return null;
}
@@ -3232,21 +3254,36 @@ pub fn findFreeSpaceVirtual(self: *MachO, object_size: u64, min_alignment: u32)
}
pub fn copyRangeAll(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
- const file = self.base.file.?;
- const amt = try file.copyRangeAll(old_offset, file, new_offset, size);
- if (amt != size) return error.InputOutput;
+ return self.base.copyRangeAll(old_offset, new_offset, size);
}
-/// Like File.copyRangeAll but also ensures the source region is zeroed out after copy.
+/// Like copyRangeAll but also ensures the source region is zeroed out after copy.
/// This is so that we guarantee zeroed out regions for mapping of zerofill sections by the loader.
fn copyRangeAllZeroOut(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
- const gpa = self.base.comp.gpa;
- try self.copyRangeAll(old_offset, new_offset, size);
+ const comp = self.base.comp;
+ const io = comp.io;
+ const file = self.base.file.?;
+ var write_buffer: [2048]u8 = undefined;
+ var file_reader = file.reader(io, &.{});
+ file_reader.pos = old_offset;
+ var file_writer = file.writer(io, &write_buffer);
+ file_writer.pos = new_offset;
const size_u = math.cast(usize, size) orelse return error.Overflow;
- const zeroes = try gpa.alloc(u8, size_u); // TODO no need to allocate here.
- defer gpa.free(zeroes);
- @memset(zeroes, 0);
- try self.base.file.?.pwriteAll(zeroes, old_offset);
+ const n = file_writer.interface.sendFileAll(&file_reader, .limited(size_u)) catch |err| switch (err) {
+ error.ReadFailed => return file_reader.err.?,
+ error.WriteFailed => return file_writer.err.?,
+ };
+ assert(n == size_u);
+ file_writer.seekTo(old_offset) catch |err| switch (err) {
+ error.WriteFailed => return file_writer.err.?,
+ else => |e| return e,
+ };
+ file_writer.interface.splatByteAll(0, size_u) catch |err| switch (err) {
+ error.WriteFailed => return file_writer.err.?,
+ };
+ file_writer.interface.flush() catch |err| switch (err) {
+ error.WriteFailed => return file_writer.err.?,
+ };
}
const InitMetadataOptions = struct {
@@ -3257,8 +3294,10 @@ const InitMetadataOptions = struct {
};
pub fn closeDebugInfo(self: *MachO) bool {
+ const comp = self.base.comp;
+ const io = comp.io;
const d_sym = &(self.d_sym orelse return false);
- d_sym.file.?.close();
+ d_sym.file.?.close(io);
d_sym.file = null;
return true;
}
@@ -3269,7 +3308,9 @@ pub fn reopenDebugInfo(self: *MachO) !void {
assert(!self.base.comp.config.use_llvm);
assert(self.base.comp.config.debug_format == .dwarf);
- const gpa = self.base.comp.gpa;
+ const comp = self.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const sep = fs.path.sep_str;
const d_sym_path = try std.fmt.allocPrint(
gpa,
@@ -3278,10 +3319,10 @@ pub fn reopenDebugInfo(self: *MachO) !void {
);
defer gpa.free(d_sym_path);
- var d_sym_bundle = try self.base.emit.root_dir.handle.makeOpenPath(d_sym_path, .{});
- defer d_sym_bundle.close();
+ var d_sym_bundle = try self.base.emit.root_dir.handle.createDirPathOpen(io, d_sym_path, .{});
+ defer d_sym_bundle.close(io);
- self.d_sym.?.file = try d_sym_bundle.createFile(fs.path.basename(self.base.emit.sub_path), .{
+ self.d_sym.?.file = try d_sym_bundle.createFile(io, fs.path.basename(self.base.emit.sub_path), .{
.truncate = false,
.read = true,
});
@@ -3289,6 +3330,10 @@ pub fn reopenDebugInfo(self: *MachO) !void {
// TODO: move to ZigObject
fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+
if (!self.base.isRelocatable()) {
const base_vmaddr = blk: {
const pagezero_size = self.pagezero_size orelse default_pagezero_size;
@@ -3343,7 +3388,11 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
if (options.zo.dwarf) |*dwarf| {
// Create dSYM bundle.
log.debug("creating {s}.dSYM bundle", .{options.emit.sub_path});
- self.d_sym = .{ .allocator = self.base.comp.gpa, .file = null };
+ self.d_sym = .{
+ .io = io,
+ .allocator = gpa,
+ .file = null,
+ };
try self.reopenDebugInfo();
try self.d_sym.?.initMetadata(self);
try dwarf.initMetadata();
@@ -3463,6 +3512,9 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
const seg_id = self.sections.items(.segment_id)[sect_index];
const seg = &self.segments.items[seg_id];
+ const comp = self.base.comp;
+ const io = comp.io;
+
if (!sect.isZerofill()) {
const allocated_size = self.allocatedSize(sect.offset);
if (needed_size > allocated_size) {
@@ -3484,7 +3536,7 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
sect.offset = @intCast(new_offset);
} else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(sect.offset + needed_size);
+ try self.base.file.?.setLength(io, sect.offset + needed_size);
}
seg.filesize = needed_size;
}
@@ -3506,6 +3558,8 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
}
fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void {
+ const comp = self.base.comp;
+ const io = comp.io;
const sect = &self.sections.items(.header)[sect_index];
if (!sect.isZerofill()) {
@@ -3533,7 +3587,7 @@ fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void
sect.offset = @intCast(new_offset);
sect.addr = new_addr;
} else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(sect.offset + needed_size);
+ try self.base.file.?.setLength(io, sect.offset + needed_size);
}
}
sect.size = needed_size;
@@ -3567,11 +3621,11 @@ pub fn getTarget(self: *const MachO) *const std.Target {
/// into a new inode, remove the original file, and rename the copy to match
/// the original file. This is super messy, but there doesn't seem any other
/// way to please the XNU.
-pub fn invalidateKernelCache(dir: fs.Dir, sub_path: []const u8) !void {
+pub fn invalidateKernelCache(io: Io, dir: Io.Dir, sub_path: []const u8) !void {
const tracy = trace(@src());
defer tracy.end();
if (builtin.target.os.tag.isDarwin() and builtin.target.cpu.arch == .aarch64) {
- try dir.copyFile(sub_path, dir, sub_path, .{});
+ try dir.copyFile(sub_path, dir, sub_path, io, .{});
}
}
@@ -3762,7 +3816,7 @@ pub fn getInternalObject(self: *MachO) ?*InternalObject {
return self.getFile(index).?.internal;
}
-pub fn addFileHandle(self: *MachO, file: fs.File) !File.HandleIndex {
+pub fn addFileHandle(self: *MachO, file: Io.File) !File.HandleIndex {
const gpa = self.base.comp.gpa;
const index: File.HandleIndex = @intCast(self.file_handles.items.len);
const fh = try self.file_handles.addOne(gpa);
@@ -4333,11 +4387,13 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
+ const io = comp.io;
+
const sdk_dir = switch (sdk_layout) {
.sdk => comp.sysroot.?,
.vendored => fs.path.join(arena, &.{ comp.dirs.zig_lib.path.?, "libc", "darwin" }) catch return null,
};
- if (readSdkVersionFromSettings(arena, sdk_dir)) |ver| {
+ if (readSdkVersionFromSettings(arena, io, sdk_dir)) |ver| {
return parseSdkVersion(ver);
} else |_| {
// Read from settings should always succeed when vendored.
@@ -4360,9 +4416,9 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi
// Official Apple SDKs ship with a `SDKSettings.json` located at the top of SDK fs layout.
// Use property `MinimalDisplayName` to determine version.
// The file/property is also available with vendored libc.
-fn readSdkVersionFromSettings(arena: Allocator, dir: []const u8) ![]const u8 {
+fn readSdkVersionFromSettings(arena: Allocator, io: Io, dir: []const u8) ![]const u8 {
const sdk_path = try fs.path.join(arena, &.{ dir, "SDKSettings.json" });
- const contents = try fs.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16)));
+ const contents = try Io.Dir.cwd().readFileAlloc(io, sdk_path, arena, .limited(std.math.maxInt(u16)));
const parsed = try std.json.parseFromSlice(std.json.Value, arena, contents, .{});
if (parsed.value.object.get("MinimalDisplayName")) |ver| return ver.string;
return error.SdkVersionFailure;
@@ -5324,18 +5380,18 @@ fn isReachable(atom: *const Atom, rel: Relocation, macho_file: *MachO) bool {
pub fn pwriteAll(macho_file: *MachO, bytes: []const u8, offset: u64) error{LinkFailure}!void {
const comp = macho_file.base.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
- macho_file.base.file.?.pwriteAll(bytes, offset) catch |err| {
- return diags.fail("failed to write: {s}", .{@errorName(err)});
- };
+ macho_file.base.file.?.writePositionalAll(io, bytes, offset) catch |err|
+ return diags.fail("failed to write: {t}", .{err});
}
-pub fn setEndPos(macho_file: *MachO, length: u64) error{LinkFailure}!void {
+pub fn setLength(macho_file: *MachO, length: u64) error{LinkFailure}!void {
const comp = macho_file.base.comp;
+ const io = comp.io;
const diags = &comp.link_diags;
- macho_file.base.file.?.setEndPos(length) catch |err| {
- return diags.fail("failed to set file end pos: {s}", .{@errorName(err)});
- };
+ macho_file.base.file.?.setLength(io, length) catch |err|
+ return diags.fail("failed to set file end pos: {t}", .{err});
}
pub fn cast(macho_file: *MachO, comptime T: type, x: anytype) error{LinkFailure}!T {
@@ -5367,10 +5423,11 @@ const max_distance = (1 << (jump_bits - 1));
const max_allowed_distance = max_distance - 0x500_000;
const MachO = @This();
-
-const std = @import("std");
const build_options = @import("build_options");
const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const fs = std.fs;
const log = std.log.scoped(.link);
@@ -5380,6 +5437,11 @@ const math = std.math;
const mem = std.mem;
const meta = std.meta;
const Writer = std.Io.Writer;
+const AtomicBool = std.atomic.Value(bool);
+const Cache = std.Build.Cache;
+const Hash = std.hash.Wyhash;
+const Md5 = std.crypto.hash.Md5;
+const Allocator = std.mem.Allocator;
const aarch64 = codegen.aarch64.encoding;
const bind = @import("MachO/dyld_info/bind.zig");
@@ -5397,11 +5459,8 @@ const trace = @import("../tracy.zig").trace;
const synthetic = @import("MachO/synthetic.zig");
const Alignment = Atom.Alignment;
-const Allocator = mem.Allocator;
const Archive = @import("MachO/Archive.zig");
-const AtomicBool = std.atomic.Value(bool);
const Bind = bind.Bind;
-const Cache = std.Build.Cache;
const CodeSignature = @import("MachO/CodeSignature.zig");
const Compilation = @import("../Compilation.zig");
const DataInCode = synthetic.DataInCode;
@@ -5411,14 +5470,12 @@ const ExportTrie = @import("MachO/dyld_info/Trie.zig");
const Path = Cache.Path;
const File = @import("MachO/file.zig").File;
const GotSection = synthetic.GotSection;
-const Hash = std.hash.Wyhash;
const Indsymtab = synthetic.Indsymtab;
const InternalObject = @import("MachO/InternalObject.zig");
const ObjcStubsSection = synthetic.ObjcStubsSection;
const Object = @import("MachO/Object.zig");
const LazyBind = bind.LazyBind;
const LaSymbolPtrSection = synthetic.LaSymbolPtrSection;
-const Md5 = std.crypto.hash.Md5;
const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");
const Rebase = @import("MachO/dyld_info/Rebase.zig");
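
The most substantive MachO change is `copyRangeAllZeroOut`: instead of allocating a `size`-byte buffer of zeroes, it pipes a positional reader into a positional writer with `sendFileAll`, then zero-fills the source range with `splatByteAll`. A standalone restatement of that hunk, with illustrative names:

const std = @import("std");
const Io = std.Io;

fn copyRangeThenZeroSource(io: Io, file: Io.File, old_offset: u64, new_offset: u64, size: usize) !void {
    var write_buffer: [2048]u8 = undefined;
    var file_reader = file.reader(io, &.{});
    file_reader.pos = old_offset;
    var file_writer = file.writer(io, &write_buffer);
    file_writer.pos = new_offset;
    // Stream exactly `size` bytes from the old range to the new one.
    const n = file_writer.interface.sendFileAll(&file_reader, .limited(size)) catch |err| switch (err) {
        error.ReadFailed => return file_reader.err.?,
        error.WriteFailed => return file_writer.err.?,
    };
    std.debug.assert(n == size);
    // Rewind to the source range and zero it without a scratch allocation,
    // so the loader can map it as zerofill.
    file_writer.seekTo(old_offset) catch |err| switch (err) {
        error.WriteFailed => return file_writer.err.?,
        else => |e| return e,
    };
    file_writer.interface.splatByteAll(0, size) catch |err| switch (err) {
        error.WriteFailed => return file_writer.err.?,
    };
    file_writer.interface.flush() catch |err| switch (err) {
        error.WriteFailed => return file_writer.err.?,
    };
}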
diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig
index d1962412c4..54c00e33ee 100644
--- a/src/link/MachO/Archive.zig
+++ b/src/link/MachO/Archive.zig
@@ -6,6 +6,7 @@ pub fn deinit(self: *Archive, allocator: Allocator) void {
pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File.HandleIndex, fat_arch: ?fat.Arch) !void {
const comp = macho_file.base.comp;
+ const io = comp.io;
const gpa = comp.gpa;
const diags = &comp.link_diags;
@@ -14,7 +15,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File
const handle = macho_file.getFileHandle(handle_index);
const offset = if (fat_arch) |ar| ar.offset else 0;
- const end_pos = if (fat_arch) |ar| offset + ar.size else (try handle.stat()).size;
+ const end_pos = if (fat_arch) |ar| offset + ar.size else (try handle.stat(io)).size;
var pos: usize = offset + SARMAG;
while (true) {
@@ -23,7 +24,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File
var hdr_buffer: [@sizeOf(ar_hdr)]u8 = undefined;
{
- const amt = try handle.preadAll(&hdr_buffer, pos);
+ const amt = try handle.readPositionalAll(io, &hdr_buffer, pos);
if (amt != @sizeOf(ar_hdr)) return error.InputOutput;
}
const hdr = @as(*align(1) const ar_hdr, @ptrCast(&hdr_buffer)).*;
@@ -41,7 +42,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File
if (try hdr.nameLength()) |len| {
hdr_size -= len;
const buf = try arena.allocator().alloc(u8, len);
- const amt = try handle.preadAll(buf, pos);
+ const amt = try handle.readPositionalAll(io, buf, pos);
if (amt != len) return error.InputOutput;
pos += len;
const actual_len = mem.indexOfScalar(u8, buf, @as(u8, 0)) orelse len;
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 5bded3b9e3..0955c823b8 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -1,20 +1,28 @@
const CodeSignature = @This();
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const fs = std.fs;
const log = std.log.scoped(.link);
const macho = std.macho;
const mem = std.mem;
const testing = std.testing;
+const Sha256 = std.crypto.hash.sha2.Sha256;
+const Allocator = std.mem.Allocator;
+
const trace = @import("../../tracy.zig").trace;
-const Allocator = mem.Allocator;
-const Hasher = @import("hasher.zig").ParallelHasher;
+const ParallelHasher = @import("hasher.zig").ParallelHasher;
const MachO = @import("../MachO.zig");
-const Sha256 = std.crypto.hash.sha2.Sha256;
const hash_size = Sha256.digest_length;
+page_size: u16,
+code_directory: CodeDirectory,
+requirements: ?Requirements = null,
+entitlements: ?Entitlements = null,
+signature: ?Signature = null,
+
const Blob = union(enum) {
code_directory: *CodeDirectory,
requirements: *Requirements,
@@ -218,12 +226,6 @@ const Signature = struct {
}
};
-page_size: u16,
-code_directory: CodeDirectory,
-requirements: ?Requirements = null,
-entitlements: ?Entitlements = null,
-signature: ?Signature = null,
-
pub fn init(page_size: u16) CodeSignature {
return .{
.page_size = page_size,
@@ -244,13 +246,13 @@ pub fn deinit(self: *CodeSignature, allocator: Allocator) void {
}
}
-pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, path: []const u8) !void {
- const inner = try fs.cwd().readFileAlloc(path, allocator, .limited(std.math.maxInt(u32)));
+pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, io: Io, path: []const u8) !void {
+ const inner = try Io.Dir.cwd().readFileAlloc(io, path, allocator, .limited(std.math.maxInt(u32)));
self.entitlements = .{ .inner = inner };
}
pub const WriteOpts = struct {
- file: fs.File,
+ file: Io.File,
exec_seg_base: u64,
exec_seg_limit: u64,
file_size: u32,
@@ -266,7 +268,9 @@ pub fn writeAdhocSignature(
const tracy = trace(@src());
defer tracy.end();
- const allocator = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
var header: macho.SuperBlob = .{
.magic = macho.CSMAGIC_EMBEDDED_SIGNATURE,
@@ -274,7 +278,7 @@ pub fn writeAdhocSignature(
.count = 0,
};
- var blobs = std.array_list.Managed(Blob).init(allocator);
+ var blobs = std.array_list.Managed(Blob).init(gpa);
defer blobs.deinit();
self.code_directory.inner.execSegBase = opts.exec_seg_base;
@@ -284,13 +288,12 @@ pub fn writeAdhocSignature(
const total_pages = @as(u32, @intCast(mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size));
- try self.code_directory.code_slots.ensureTotalCapacityPrecise(allocator, total_pages);
+ try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages);
self.code_directory.code_slots.items.len = total_pages;
self.code_directory.inner.nCodeSlots = total_pages;
// Calculate hash for each page (in file) and write it to the buffer
- var hasher = Hasher(Sha256){ .allocator = allocator, .io = macho_file.base.comp.io };
- try hasher.hash(opts.file, self.code_directory.code_slots.items, .{
+ try ParallelHasher(Sha256).hash(gpa, io, opts.file, self.code_directory.code_slots.items, .{
.chunk_size = self.page_size,
.max_file_size = opts.file_size,
});
@@ -302,7 +305,7 @@ pub fn writeAdhocSignature(
var hash: [hash_size]u8 = undefined;
if (self.requirements) |*req| {
- var a: std.Io.Writer.Allocating = .init(allocator);
+ var a: std.Io.Writer.Allocating = .init(gpa);
defer a.deinit();
try req.write(&a.writer);
Sha256.hash(a.written(), &hash, .{});
@@ -314,7 +317,7 @@ pub fn writeAdhocSignature(
}
if (self.entitlements) |*ents| {
- var a: std.Io.Writer.Allocating = .init(allocator);
+ var a: std.Io.Writer.Allocating = .init(gpa);
defer a.deinit();
try ents.write(&a.writer);
Sha256.hash(a.written(), &hash, .{});
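
CodeSignature keeps its blob-hashing shape under the renamed `ParallelHasher`, and the requirements/entitlements digests use `std.Io.Writer.Allocating` as serialization scratch. A compact sketch of that serialize-then-hash pattern; the `blob.write(writer)` interface is assumed from the hunks above:

const std = @import("std");
const Sha256 = std.crypto.hash.sha2.Sha256;

fn hashBlob(gpa: std.mem.Allocator, blob: anytype) ![Sha256.digest_length]u8 {
    var a: std.Io.Writer.Allocating = .init(gpa);
    defer a.deinit();
    // Serialize the blob into memory, then hash exactly what was written.
    try blob.write(&a.writer);
    var hash: [Sha256.digest_length]u8 = undefined;
    Sha256.hash(a.written(), &hash, .{});
    return hash;
}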
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 5d7b9b88c3..3e723bd9d7 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -1,5 +1,28 @@
+const DebugSymbols = @This();
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const fs = std.fs;
+const log = std.log.scoped(.link_dsym);
+const macho = std.macho;
+const makeStaticString = MachO.makeStaticString;
+const math = std.math;
+const mem = std.mem;
+const Writer = std.Io.Writer;
+const Allocator = std.mem.Allocator;
+
+const link = @import("../../link.zig");
+const MachO = @import("../MachO.zig");
+const StringTable = @import("../StringTable.zig");
+const Type = @import("../../Type.zig");
+const trace = @import("../../tracy.zig").trace;
+const load_commands = @import("load_commands.zig");
+const padToIdeal = MachO.padToIdeal;
+
+io: Io,
allocator: Allocator,
-file: ?fs.File,
+file: ?Io.File,
symtab_cmd: macho.symtab_command = .{},
uuid_cmd: macho.uuid_command = .{ .uuid = [_]u8{0} ** 16 },
@@ -102,6 +125,7 @@ pub fn growSection(
requires_file_copy: bool,
macho_file: *MachO,
) !void {
+ const io = self.io;
const sect = self.getSectionPtr(sect_index);
const allocated_size = self.allocatedSize(sect.offset);
@@ -111,25 +135,17 @@ pub fn growSection(
const new_offset = try self.findFreeSpace(needed_size, 1);
log.debug("moving {s} section: {} bytes from 0x{x} to 0x{x}", .{
- sect.sectName(),
- existing_size,
- sect.offset,
- new_offset,
+ sect.sectName(), existing_size, sect.offset, new_offset,
});
if (requires_file_copy) {
- const amt = try self.file.?.copyRangeAll(
- sect.offset,
- self.file.?,
- new_offset,
- existing_size,
- );
- if (amt != existing_size) return error.InputOutput;
+ const file = self.file.?;
+ try link.File.copyRangeAll2(io, file, file, sect.offset, new_offset, existing_size);
}
sect.offset = @intCast(new_offset);
} else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
- try self.file.?.setEndPos(sect.offset + needed_size);
+ try self.file.?.setLength(io, sect.offset + needed_size);
}
sect.size = needed_size;
@@ -153,6 +169,7 @@ pub fn markDirty(self: *DebugSymbols, sect_index: u8, macho_file: *MachO) void {
}
fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) !?u64 {
+ const io = self.io;
var at_end = true;
const end = start + padToIdeal(size);
@@ -165,7 +182,7 @@ fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) !?u64 {
}
}
- if (at_end) try self.file.?.setEndPos(end);
+ if (at_end) try self.file.?.setLength(io, end);
return null;
}
@@ -179,6 +196,7 @@ fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) !u64
}
pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void {
+ const io = self.io;
const zo = macho_file.getZigObject().?;
for (self.relocs.items) |*reloc| {
const sym = zo.symbols.items[reloc.target];
@@ -190,12 +208,9 @@ pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void {
const sect = &self.sections.items[self.debug_info_section_index.?];
const file_offset = sect.offset + reloc.offset;
log.debug("resolving relocation: {d}@{x} ('{s}') at offset {x}", .{
- reloc.target,
- addr,
- sym_name,
- file_offset,
+ reloc.target, addr, sym_name, file_offset,
});
- try self.file.?.pwriteAll(mem.asBytes(&addr), file_offset);
+ try self.file.?.writePositionalAll(io, mem.asBytes(&addr), file_offset);
}
self.finalizeDwarfSegment(macho_file);
@@ -208,7 +223,8 @@ pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void {
pub fn deinit(self: *DebugSymbols) void {
const gpa = self.allocator;
- if (self.file) |file| file.close();
+ const io = self.io;
+ if (self.file) |file| file.close(io);
self.segments.deinit(gpa);
self.sections.deinit(gpa);
self.relocs.deinit(gpa);
@@ -268,6 +284,7 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void {
}
fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, usize } {
+ const io = self.io;
const gpa = self.allocator;
const needed_size = load_commands.calcLoadCommandsSizeDsym(macho_file, self);
const buffer = try gpa.alloc(u8, needed_size);
@@ -319,12 +336,13 @@ fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, u
assert(writer.end == needed_size);
- try self.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
+ try self.file.?.writePositionalAll(io, buffer, @sizeOf(macho.mach_header_64));
return .{ ncmds, buffer.len };
}
fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: usize, sizeofcmds: usize) !void {
+ const io = self.io;
var header: macho.mach_header_64 = .{};
header.filetype = macho.MH_DSYM;
@@ -345,7 +363,7 @@ fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: usize, sizeofcmds
log.debug("writing Mach-O header {}", .{header});
- try self.file.?.pwriteAll(mem.asBytes(&header), 0);
+ try self.file.?.writePositionalAll(io, mem.asBytes(&header), 0);
}
fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
@@ -380,6 +398,8 @@ fn writeLinkeditSegmentData(self: *DebugSymbols, macho_file: *MachO) !void {
pub fn writeSymtab(self: *DebugSymbols, off: u32, macho_file: *MachO) !u32 {
const tracy = trace(@src());
defer tracy.end();
+
+ const io = self.io;
const gpa = self.allocator;
const cmd = &self.symtab_cmd;
cmd.nsyms = macho_file.symtab_cmd.nsyms;
@@ -403,15 +423,16 @@ pub fn writeSymtab(self: *DebugSymbols, off: u32, macho_file: *MachO) !u32 {
internal.writeSymtab(macho_file, self);
}
- try self.file.?.pwriteAll(@ptrCast(self.symtab.items), cmd.symoff);
+ try self.file.?.writePositionalAll(io, @ptrCast(self.symtab.items), cmd.symoff);
return off + cmd.nsyms * @sizeOf(macho.nlist_64);
}
pub fn writeStrtab(self: *DebugSymbols, off: u32) !u32 {
+ const io = self.io;
const cmd = &self.symtab_cmd;
cmd.stroff = off;
- try self.file.?.pwriteAll(self.strtab.items, cmd.stroff);
+ try self.file.?.writePositionalAll(io, self.strtab.items, cmd.stroff);
return off + cmd.strsize;
}
@@ -443,25 +464,3 @@ pub fn getSection(self: DebugSymbols, sect: u8) macho.section_64 {
assert(sect < self.sections.items.len);
return self.sections.items[sect];
}
-
-const DebugSymbols = @This();
-
-const std = @import("std");
-const build_options = @import("build_options");
-const assert = std.debug.assert;
-const fs = std.fs;
-const link = @import("../../link.zig");
-const load_commands = @import("load_commands.zig");
-const log = std.log.scoped(.link_dsym);
-const macho = std.macho;
-const makeStaticString = MachO.makeStaticString;
-const math = std.math;
-const mem = std.mem;
-const padToIdeal = MachO.padToIdeal;
-const trace = @import("../../tracy.zig").trace;
-const Writer = std.Io.Writer;
-
-const Allocator = mem.Allocator;
-const MachO = @import("../MachO.zig");
-const StringTable = @import("../StringTable.zig");
-const Type = @import("../../Type.zig");
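Note on the pattern in this file (and repeated below): imports are hoisted from the bottom of the file to the top, and the write side of the I/O migration is mechanical — `pwriteAll(bytes, offset)` becomes `writePositionalAll(io, bytes, offset)` and `setEndPos(n)` becomes `setLength(io, n)`, with the `Io` implementation passed explicitly instead of reached through ambient OS state. A minimal sketch, assuming `io` and `file` come from the compilation context as in the hunks above:

    // Positional write with an explicit Io implementation; replaces the old
    // fs.File.pwriteAll call.
    fn writeHeaderAt(io: std.Io, file: std.Io.File, bytes: []const u8, offset: u64) !void {
        try file.writePositionalAll(io, bytes, offset);
    }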
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index 69c64b6717..638630b608 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -57,7 +57,9 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const file = macho_file.getFileHandle(self.file_handle);
const offset = self.offset;
@@ -65,7 +67,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
{
- const amt = try file.preadAll(&header_buffer, offset);
+ const amt = try file.readPositionalAll(io, &header_buffer, offset);
if (amt != @sizeOf(macho.mach_header_64)) return error.InputOutput;
}
const header = @as(*align(1) const macho.mach_header_64, @ptrCast(&header_buffer)).*;
@@ -86,7 +88,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
const lc_buffer = try gpa.alloc(u8, header.sizeofcmds);
defer gpa.free(lc_buffer);
{
- const amt = try file.preadAll(lc_buffer, offset + @sizeOf(macho.mach_header_64));
+ const amt = try file.readPositionalAll(io, lc_buffer, offset + @sizeOf(macho.mach_header_64));
if (amt != lc_buffer.len) return error.InputOutput;
}
@@ -103,7 +105,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
const dyld_cmd = cmd.cast(macho.dyld_info_command).?;
const data = try gpa.alloc(u8, dyld_cmd.export_size);
defer gpa.free(data);
- const amt = try file.preadAll(data, dyld_cmd.export_off + offset);
+ const amt = try file.readPositionalAll(io, data, dyld_cmd.export_off + offset);
if (amt != data.len) return error.InputOutput;
try self.parseTrie(data, macho_file);
},
@@ -111,7 +113,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
const ld_cmd = cmd.cast(macho.linkedit_data_command).?;
const data = try gpa.alloc(u8, ld_cmd.datasize);
defer gpa.free(data);
- const amt = try file.preadAll(data, ld_cmd.dataoff + offset);
+ const amt = try file.readPositionalAll(io, data, ld_cmd.dataoff + offset);
if (amt != data.len) return error.InputOutput;
try self.parseTrie(data, macho_file);
},
@@ -238,13 +240,15 @@ fn parseTbd(self: *Dylib, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
log.debug("parsing dylib from stub: {f}", .{self.path});
const file = macho_file.getFileHandle(self.file_handle);
- var lib_stub = LibStub.loadFromFile(gpa, file) catch |err| {
- try macho_file.reportParseError2(self.index, "failed to parse TBD file: {s}", .{@errorName(err)});
+ var lib_stub = LibStub.loadFromFile(gpa, io, file) catch |err| {
+ try macho_file.reportParseError2(self.index, "failed to parse TBD file: {t}", .{err});
return error.MalformedTbd;
};
defer lib_stub.deinit();
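The read side pairs `readPositionalAll` with an explicit short-read check, since the positional API reports how many bytes were actually read rather than guaranteeing the full length. A sketch of the recurring idiom, built only from the signatures visible above:

    // Read exactly buf.len bytes at offset, treating a short read as I/O failure.
    fn readExact(io: std.Io, file: std.Io.File, buf: []u8, offset: u64) !void {
        const amt = try file.readPositionalAll(io, buf, offset);
        if (amt != buf.len) return error.InputOutput;
    }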
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index 5fc77fe763..b9def4568d 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -1,3 +1,30 @@
+const Object = @This();
+
+const trace = @import("../../tracy.zig").trace;
+const Archive = @import("Archive.zig");
+const Atom = @import("Atom.zig");
+const Dwarf = @import("Dwarf.zig");
+const File = @import("file.zig").File;
+const MachO = @import("../MachO.zig");
+const Relocation = @import("Relocation.zig");
+const Symbol = @import("Symbol.zig");
+const UnwindInfo = @import("UnwindInfo.zig");
+
+const std = @import("std");
+const Io = std.Io;
+const Writer = std.Io.Writer;
+const assert = std.debug.assert;
+const log = std.log.scoped(.link);
+const macho = std.macho;
+const LoadCommandIterator = macho.LoadCommandIterator;
+const math = std.math;
+const mem = std.mem;
+const Allocator = std.mem.Allocator;
+
+const eh_frame = @import("eh_frame.zig");
+const Cie = eh_frame.Cie;
+const Fde = eh_frame.Fde;
+
/// Non-zero for fat object files or archives
offset: u64,
/// If `in_archive` is not `null`, this is the basename of the object in the archive. Otherwise,
@@ -75,7 +102,9 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
log.debug("parsing {f}", .{self.fmtPath()});
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const handle = macho_file.getFileHandle(self.file_handle);
const cpu_arch = macho_file.getTarget().cpu.arch;
@@ -84,7 +113,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
{
- const amt = try handle.preadAll(&header_buffer, self.offset);
+ const amt = try handle.readPositionalAll(io, &header_buffer, self.offset);
if (amt != @sizeOf(macho.mach_header_64)) return error.InputOutput;
}
self.header = @as(*align(1) const macho.mach_header_64, @ptrCast(&header_buffer)).*;
@@ -105,7 +134,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
const lc_buffer = try gpa.alloc(u8, self.header.?.sizeofcmds);
defer gpa.free(lc_buffer);
{
- const amt = try handle.preadAll(lc_buffer, self.offset + @sizeOf(macho.mach_header_64));
+ const amt = try handle.readPositionalAll(io, lc_buffer, self.offset + @sizeOf(macho.mach_header_64));
if (amt != self.header.?.sizeofcmds) return error.InputOutput;
}
@@ -129,14 +158,14 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
const cmd = lc.cast(macho.symtab_command).?;
try self.strtab.resize(gpa, cmd.strsize);
{
- const amt = try handle.preadAll(self.strtab.items, cmd.stroff + self.offset);
+ const amt = try handle.readPositionalAll(io, self.strtab.items, cmd.stroff + self.offset);
if (amt != self.strtab.items.len) return error.InputOutput;
}
const symtab_buffer = try gpa.alloc(u8, cmd.nsyms * @sizeOf(macho.nlist_64));
defer gpa.free(symtab_buffer);
{
- const amt = try handle.preadAll(symtab_buffer, cmd.symoff + self.offset);
+ const amt = try handle.readPositionalAll(io, symtab_buffer, cmd.symoff + self.offset);
if (amt != symtab_buffer.len) return error.InputOutput;
}
const symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(symtab_buffer.ptr))[0..cmd.nsyms];
@@ -154,7 +183,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
const buffer = try gpa.alloc(u8, cmd.datasize);
defer gpa.free(buffer);
{
- const amt = try handle.preadAll(buffer, self.offset + cmd.dataoff);
+ const amt = try handle.readPositionalAll(io, buffer, self.offset + cmd.dataoff);
if (amt != buffer.len) return error.InputOutput;
}
const ndice = @divExact(cmd.datasize, @sizeOf(macho.data_in_code_entry));
@@ -440,12 +469,14 @@ fn initCstringLiterals(self: *Object, allocator: Allocator, file: File.Handle, m
const tracy = trace(@src());
defer tracy.end();
+ const comp = macho_file.base.comp;
+ const io = comp.io;
const slice = self.sections.slice();
for (slice.items(.header), 0..) |sect, n_sect| {
if (!isCstringLiteral(sect)) continue;
- const data = try self.readSectionData(allocator, file, @intCast(n_sect));
+ const data = try self.readSectionData(allocator, io, file, @intCast(n_sect));
defer allocator.free(data);
var count: u32 = 0;
@@ -628,7 +659,9 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const file = macho_file.getFileHandle(self.file_handle);
var buffer = std.array_list.Managed(u8).init(gpa);
@@ -647,7 +680,7 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
const slice = self.sections.slice();
for (slice.items(.header), slice.items(.subsections), 0..) |header, subs, n_sect| {
if (isCstringLiteral(header) or isFixedSizeLiteral(header)) {
- const data = try self.readSectionData(gpa, file, @intCast(n_sect));
+ const data = try self.readSectionData(gpa, io, file, @intCast(n_sect));
defer gpa.free(data);
for (subs.items) |sub| {
@@ -682,7 +715,7 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
buffer.resize(target_size) catch unreachable;
const gop = try sections_data.getOrPut(target.n_sect);
if (!gop.found_existing) {
- gop.value_ptr.* = try self.readSectionData(gpa, file, @intCast(target.n_sect));
+ gop.value_ptr.* = try self.readSectionData(gpa, io, file, @intCast(target.n_sect));
}
const data = gop.value_ptr.*;
const target_off = try macho_file.cast(usize, target.off);
@@ -1037,9 +1070,11 @@ fn initEhFrameRecords(self: *Object, allocator: Allocator, sect_id: u8, file: Fi
const sect = slice.items(.header)[sect_id];
const relocs = slice.items(.relocs)[sect_id];
+ const comp = macho_file.base.comp;
+ const io = comp.io;
const size = try macho_file.cast(usize, sect.size);
try self.eh_frame_data.resize(allocator, size);
- const amt = try file.preadAll(self.eh_frame_data.items, sect.offset + self.offset);
+ const amt = try file.readPositionalAll(io, self.eh_frame_data.items, sect.offset + self.offset);
if (amt != self.eh_frame_data.items.len) return error.InputOutput;
// Check for non-personality relocs in FDEs and apply them
@@ -1138,8 +1173,10 @@ fn initUnwindRecords(self: *Object, allocator: Allocator, sect_id: u8, file: Fil
}
};
+ const comp = macho_file.base.comp;
+ const io = comp.io;
const header = self.sections.items(.header)[sect_id];
- const data = try self.readSectionData(allocator, file, sect_id);
+ const data = try self.readSectionData(allocator, io, file, sect_id);
defer allocator.free(data);
const nrecs = @divExact(data.len, @sizeOf(macho.compact_unwind_entry));
@@ -1348,7 +1385,9 @@ fn parseDebugInfo(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const file = macho_file.getFileHandle(self.file_handle);
var dwarf: Dwarf = .{};
@@ -1358,18 +1397,18 @@ fn parseDebugInfo(self: *Object, macho_file: *MachO) !void {
const n_sect: u8 = @intCast(index);
if (sect.attrs() & macho.S_ATTR_DEBUG == 0) continue;
if (mem.eql(u8, sect.sectName(), "__debug_info")) {
- dwarf.debug_info = try self.readSectionData(gpa, file, n_sect);
+ dwarf.debug_info = try self.readSectionData(gpa, io, file, n_sect);
}
if (mem.eql(u8, sect.sectName(), "__debug_abbrev")) {
- dwarf.debug_abbrev = try self.readSectionData(gpa, file, n_sect);
+ dwarf.debug_abbrev = try self.readSectionData(gpa, io, file, n_sect);
}
if (mem.eql(u8, sect.sectName(), "__debug_str")) {
- dwarf.debug_str = try self.readSectionData(gpa, file, n_sect);
+ dwarf.debug_str = try self.readSectionData(gpa, io, file, n_sect);
}
// __debug_str_offs[ets] section is a new addition in DWARFv5 and is generally
// required in order to correctly parse strings.
if (mem.eql(u8, sect.sectName(), "__debug_str_offs")) {
- dwarf.debug_str_offsets = try self.readSectionData(gpa, file, n_sect);
+ dwarf.debug_str_offsets = try self.readSectionData(gpa, io, file, n_sect);
}
}
@@ -1611,12 +1650,14 @@ pub fn parseAr(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const handle = macho_file.getFileHandle(self.file_handle);
var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
{
- const amt = try handle.preadAll(&header_buffer, self.offset);
+ const amt = try handle.readPositionalAll(io, &header_buffer, self.offset);
if (amt != @sizeOf(macho.mach_header_64)) return error.InputOutput;
}
self.header = @as(*align(1) const macho.mach_header_64, @ptrCast(&header_buffer)).*;
@@ -1637,7 +1678,7 @@ pub fn parseAr(self: *Object, macho_file: *MachO) !void {
const lc_buffer = try gpa.alloc(u8, self.header.?.sizeofcmds);
defer gpa.free(lc_buffer);
{
- const amt = try handle.preadAll(lc_buffer, self.offset + @sizeOf(macho.mach_header_64));
+ const amt = try handle.readPositionalAll(io, lc_buffer, self.offset + @sizeOf(macho.mach_header_64));
if (amt != self.header.?.sizeofcmds) return error.InputOutput;
}
@@ -1647,14 +1688,14 @@ pub fn parseAr(self: *Object, macho_file: *MachO) !void {
const cmd = lc.cast(macho.symtab_command).?;
try self.strtab.resize(gpa, cmd.strsize);
{
- const amt = try handle.preadAll(self.strtab.items, cmd.stroff + self.offset);
+ const amt = try handle.readPositionalAll(io, self.strtab.items, cmd.stroff + self.offset);
if (amt != self.strtab.items.len) return error.InputOutput;
}
const symtab_buffer = try gpa.alloc(u8, cmd.nsyms * @sizeOf(macho.nlist_64));
defer gpa.free(symtab_buffer);
{
- const amt = try handle.preadAll(symtab_buffer, cmd.symoff + self.offset);
+ const amt = try handle.readPositionalAll(io, symtab_buffer, cmd.symoff + self.offset);
if (amt != symtab_buffer.len) return error.InputOutput;
}
const symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(symtab_buffer.ptr))[0..cmd.nsyms];
@@ -1689,13 +1730,15 @@ pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, macho_file: *M
}
pub fn updateArSize(self: *Object, macho_file: *MachO) !void {
+ const comp = macho_file.base.comp;
+ const io = comp.io;
self.output_ar_state.size = if (self.in_archive) |ar| ar.size else size: {
const file = macho_file.getFileHandle(self.file_handle);
- break :size (try file.stat()).size;
+ break :size (try file.stat(io)).size;
};
}
-pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: anytype) !void {
+pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: *Writer) !void {
// Header
const size = try macho_file.cast(usize, self.output_ar_state.size);
const basename = std.fs.path.basename(self.path);
@@ -1703,10 +1746,12 @@ pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writ
// Data
const file = macho_file.getFileHandle(self.file_handle);
// TODO try using copyRangeAll
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const data = try gpa.alloc(u8, size);
defer gpa.free(data);
- const amt = try file.preadAll(data, self.offset);
+ const amt = try file.readPositionalAll(io, data, self.offset);
if (amt != size) return error.InputOutput;
try writer.writeAll(data);
}
@@ -1811,7 +1856,9 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const headers = self.sections.items(.header);
const sections_data = try gpa.alloc([]const u8, headers.len);
defer {
@@ -1827,7 +1874,7 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void {
if (header.isZerofill()) continue;
const size = try macho_file.cast(usize, header.size);
const data = try gpa.alloc(u8, size);
- const amt = try file.preadAll(data, header.offset + self.offset);
+ const amt = try file.readPositionalAll(io, data, header.offset + self.offset);
if (amt != data.len) return error.InputOutput;
sections_data[n_sect] = data;
}
@@ -1850,7 +1897,9 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const headers = self.sections.items(.header);
const sections_data = try gpa.alloc([]const u8, headers.len);
defer {
@@ -1866,7 +1915,7 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void {
if (header.isZerofill()) continue;
const size = try macho_file.cast(usize, header.size);
const data = try gpa.alloc(u8, size);
- const amt = try file.preadAll(data, header.offset + self.offset);
+ const amt = try file.readPositionalAll(io, data, header.offset + self.offset);
if (amt != data.len) return error.InputOutput;
sections_data[n_sect] = data;
}
@@ -2482,11 +2531,11 @@ pub fn getUnwindRecord(self: *Object, index: UnwindInfo.Record.Index) *UnwindInf
}
/// Caller owns the memory.
-pub fn readSectionData(self: Object, allocator: Allocator, file: File.Handle, n_sect: u8) ![]u8 {
+pub fn readSectionData(self: Object, allocator: Allocator, io: Io, file: File.Handle, n_sect: u8) ![]u8 {
const header = self.sections.items(.header)[n_sect];
const size = math.cast(usize, header.size) orelse return error.Overflow;
const data = try allocator.alloc(u8, size);
- const amt = try file.preadAll(data, header.offset + self.offset);
+ const amt = try file.readPositionalAll(io, data, header.offset + self.offset);
errdefer allocator.free(data);
if (amt != data.len) return error.InputOutput;
return data;
@@ -2710,15 +2759,17 @@ const x86_64 = struct {
handle: File.Handle,
macho_file: *MachO,
) !void {
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const relocs_buffer = try gpa.alloc(u8, sect.nreloc * @sizeOf(macho.relocation_info));
defer gpa.free(relocs_buffer);
- const amt = try handle.preadAll(relocs_buffer, sect.reloff + self.offset);
+ const amt = try handle.readPositionalAll(io, relocs_buffer, sect.reloff + self.offset);
if (amt != relocs_buffer.len) return error.InputOutput;
const relocs = @as([*]align(1) const macho.relocation_info, @ptrCast(relocs_buffer.ptr))[0..sect.nreloc];
- const code = try self.readSectionData(gpa, handle, n_sect);
+ const code = try self.readSectionData(gpa, io, handle, n_sect);
defer gpa.free(code);
try out.ensureTotalCapacityPrecise(gpa, relocs.len);
@@ -2877,15 +2928,17 @@ const aarch64 = struct {
handle: File.Handle,
macho_file: *MachO,
) !void {
- const gpa = macho_file.base.comp.gpa;
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+ const gpa = comp.gpa;
const relocs_buffer = try gpa.alloc(u8, sect.nreloc * @sizeOf(macho.relocation_info));
defer gpa.free(relocs_buffer);
- const amt = try handle.preadAll(relocs_buffer, sect.reloff + self.offset);
+ const amt = try handle.readPositionalAll(io, relocs_buffer, sect.reloff + self.offset);
if (amt != relocs_buffer.len) return error.InputOutput;
const relocs = @as([*]align(1) const macho.relocation_info, @ptrCast(relocs_buffer.ptr))[0..sect.nreloc];
- const code = try self.readSectionData(gpa, handle, n_sect);
+ const code = try self.readSectionData(gpa, io, handle, n_sect);
defer gpa.free(code);
try out.ensureTotalCapacityPrecise(gpa, relocs.len);
@@ -3061,27 +3114,3 @@ const aarch64 = struct {
}
}
};
-
-const std = @import("std");
-const assert = std.debug.assert;
-const log = std.log.scoped(.link);
-const macho = std.macho;
-const math = std.math;
-const mem = std.mem;
-const Allocator = std.mem.Allocator;
-const Writer = std.Io.Writer;
-
-const eh_frame = @import("eh_frame.zig");
-const trace = @import("../../tracy.zig").trace;
-const Archive = @import("Archive.zig");
-const Atom = @import("Atom.zig");
-const Cie = eh_frame.Cie;
-const Dwarf = @import("Dwarf.zig");
-const Fde = eh_frame.Fde;
-const File = @import("file.zig").File;
-const LoadCommandIterator = macho.LoadCommandIterator;
-const MachO = @import("../MachO.zig");
-const Object = @This();
-const Relocation = @import("Relocation.zig");
-const Symbol = @import("Symbol.zig");
-const UnwindInfo = @import("UnwindInfo.zig");
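`writeAr` also drops its `anytype` writer for the concrete `*std.Io.Writer` interface, so the function is no longer generically instantiated per writer type. An illustrative sketch (the `emit` helper is hypothetical):

    // A concrete *std.Io.Writer parameter replaces duck-typed anytype writers.
    fn emit(writer: *std.Io.Writer, data: []const u8) !void {
        try writer.writeAll(data);
    }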
diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig
index 5a4ea65790..49555c2746 100644
--- a/src/link/MachO/ZigObject.zig
+++ b/src/link/MachO/ZigObject.zig
@@ -171,6 +171,9 @@ pub fn getAtomData(self: ZigObject, macho_file: *MachO, atom: Atom, buffer: []u8
const isec = atom.getInputSection(macho_file);
assert(!isec.isZerofill());
+ const comp = macho_file.base.comp;
+ const io = comp.io;
+
switch (isec.type()) {
macho.S_THREAD_LOCAL_REGULAR => {
const tlv = self.tlv_initializers.get(atom.atom_index).?;
@@ -182,7 +185,7 @@ pub fn getAtomData(self: ZigObject, macho_file: *MachO, atom: Atom, buffer: []u8
else => {
const sect = macho_file.sections.items(.header)[atom.out_n_sect];
const file_offset = sect.offset + atom.value;
- const amt = try macho_file.base.file.?.preadAll(buffer, file_offset);
+ const amt = try macho_file.base.file.?.readPositionalAll(io, buffer, file_offset);
if (amt != buffer.len) return error.InputOutput;
},
}
@@ -290,12 +293,14 @@ pub fn dedupLiterals(self: *ZigObject, lp: MachO.LiteralPool, macho_file: *MachO
/// We need this so that we can write to an archive.
/// TODO implement writing ZigObject data directly to a buffer instead.
pub fn readFileContents(self: *ZigObject, macho_file: *MachO) !void {
- const diags = &macho_file.base.comp.link_diags;
+ const comp = macho_file.base.comp;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const diags = &comp.link_diags;
// Size of the output object file is always the offset + size of the strtab
const size = macho_file.symtab_cmd.stroff + macho_file.symtab_cmd.strsize;
- const gpa = macho_file.base.comp.gpa;
try self.data.resize(gpa, size);
- const amt = macho_file.base.file.?.preadAll(self.data.items, 0) catch |err|
+ const amt = macho_file.base.file.?.readPositionalAll(io, self.data.items, 0) catch |err|
return diags.fail("failed to read output file: {s}", .{@errorName(err)});
if (amt != size)
return diags.fail("unexpected EOF reading from output file", .{});
@@ -945,6 +950,8 @@ fn updateNavCode(
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
+ const comp = zcu.comp;
+ const io = comp.io;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
@@ -1012,8 +1019,8 @@ fn updateNavCode(
if (!sect.isZerofill()) {
const file_offset = sect.offset + atom.value;
- macho_file.base.file.?.pwriteAll(code, file_offset) catch |err|
- return macho_file.base.cgFail(nav_index, "failed to write output file: {s}", .{@errorName(err)});
+ macho_file.base.file.?.writePositionalAll(io, code, file_offset) catch |err|
+ return macho_file.base.cgFail(nav_index, "failed to write output file: {t}", .{err});
}
}
@@ -1493,7 +1500,7 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, macho_file: *MachO) !void {
.x86_64 => try x86_64.writeTrampolineCode(source_addr, target_addr, &buf),
else => @panic("TODO implement write trampoline for this CPU arch"),
};
- try macho_file.base.file.?.pwriteAll(out, fileoff);
+ return macho_file.pwriteAll(out, fileoff);
}
pub fn getOrCreateMetadataForNav(
diff --git a/src/link/MachO/fat.zig b/src/link/MachO/fat.zig
index 7772f7a4de..73b9c626e8 100644
--- a/src/link/MachO/fat.zig
+++ b/src/link/MachO/fat.zig
@@ -1,20 +1,22 @@
+const builtin = @import("builtin");
+const native_endian = builtin.target.cpu.arch.endian();
+
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
-const builtin = @import("builtin");
const log = std.log.scoped(.macho);
const macho = std.macho;
const mem = std.mem;
-const native_endian = builtin.target.cpu.arch.endian();
const MachO = @import("../MachO.zig");
-pub fn readFatHeader(file: std.fs.File) !macho.fat_header {
- return readFatHeaderGeneric(macho.fat_header, file, 0);
+pub fn readFatHeader(io: Io, file: Io.File) !macho.fat_header {
+ return readFatHeaderGeneric(io, macho.fat_header, file, 0);
}
-fn readFatHeaderGeneric(comptime Hdr: type, file: std.fs.File, offset: usize) !Hdr {
+fn readFatHeaderGeneric(io: Io, comptime Hdr: type, file: Io.File, offset: usize) !Hdr {
var buffer: [@sizeOf(Hdr)]u8 = undefined;
- const nread = try file.preadAll(&buffer, offset);
+ const nread = try file.readPositionalAll(io, &buffer, offset);
if (nread != buffer.len) return error.InputOutput;
var hdr = @as(*align(1) const Hdr, @ptrCast(&buffer)).*;
mem.byteSwapAllFields(Hdr, &hdr);
@@ -27,12 +29,12 @@ pub const Arch = struct {
size: u32,
};
-pub fn parseArchs(file: std.fs.File, fat_header: macho.fat_header, out: *[2]Arch) ![]const Arch {
+pub fn parseArchs(io: Io, file: Io.File, fat_header: macho.fat_header, out: *[2]Arch) ![]const Arch {
var count: usize = 0;
var fat_arch_index: u32 = 0;
while (fat_arch_index < fat_header.nfat_arch and count < out.len) : (fat_arch_index += 1) {
const offset = @sizeOf(macho.fat_header) + @sizeOf(macho.fat_arch) * fat_arch_index;
- const fat_arch = try readFatHeaderGeneric(macho.fat_arch, file, offset);
+ const fat_arch = try readFatHeaderGeneric(io, macho.fat_arch, file, offset);
// If we come across an architecture that we do not know how to handle, that's
// fine because we can keep looking for one that might match.
const arch: std.Target.Cpu.Arch = switch (fat_arch.cputype) {
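Both fat-file entry points now take the `Io` implementation first. A usage sketch matching the new signatures, with `io` and `file` assumed from the caller:

    // Read the big-endian fat header, then enumerate up to two known archs.
    const fat_header = try fat.readFatHeader(io, file);
    var archs_buf: [2]fat.Arch = undefined;
    const archs = try fat.parseArchs(io, file, fat_header, &archs_buf);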
diff --git a/src/link/MachO/file.zig b/src/link/MachO/file.zig
index 05b43de181..cd687a4941 100644
--- a/src/link/MachO/file.zig
+++ b/src/link/MachO/file.zig
@@ -355,11 +355,12 @@ pub const File = union(enum) {
dylib: Dylib,
};
- pub const Handle = std.fs.File;
+ pub const Handle = Io.File;
pub const HandleIndex = Index;
};
const std = @import("std");
+const Io = std.Io;
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const macho = std.macho;
diff --git a/src/link/MachO/hasher.zig b/src/link/MachO/hasher.zig
index 78cd847c40..822474e3e1 100644
--- a/src/link/MachO/hasher.zig
+++ b/src/link/MachO/hasher.zig
@@ -1,34 +1,36 @@
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+
+const trace = @import("../../tracy.zig").trace;
+
pub fn ParallelHasher(comptime Hasher: type) type {
const hash_size = Hasher.digest_length;
return struct {
- allocator: Allocator,
- io: std.Io,
-
- pub fn hash(self: Self, file: fs.File, out: [][hash_size]u8, opts: struct {
+ pub fn hash(gpa: Allocator, io: Io, file: Io.File, out: [][hash_size]u8, opts: struct {
chunk_size: u64 = 0x4000,
max_file_size: ?u64 = null,
}) !void {
const tracy = trace(@src());
defer tracy.end();
- const io = self.io;
-
const file_size = blk: {
- const file_size = opts.max_file_size orelse try file.getEndPos();
+ const file_size = opts.max_file_size orelse try file.length(io);
break :blk std.math.cast(usize, file_size) orelse return error.Overflow;
};
const chunk_size = std.math.cast(usize, opts.chunk_size) orelse return error.Overflow;
- const buffer = try self.allocator.alloc(u8, chunk_size * out.len);
- defer self.allocator.free(buffer);
+ const buffer = try gpa.alloc(u8, chunk_size * out.len);
+ defer gpa.free(buffer);
- const results = try self.allocator.alloc(fs.File.PReadError!usize, out.len);
- defer self.allocator.free(results);
+ const results = try gpa.alloc(Io.File.ReadPositionalError!usize, out.len);
+ defer gpa.free(results);
{
- var group: std.Io.Group = .init;
- errdefer group.cancel(io);
+ var group: Io.Group = .init;
+ defer group.cancel(io);
for (out, results, 0..) |*out_buf, *result, i| {
const fstart = i * chunk_size;
@@ -37,6 +39,7 @@ pub fn ParallelHasher(comptime Hasher: type) type {
else
chunk_size;
group.async(io, worker, .{
+ io,
file,
fstart,
buffer[fstart..][0..fsize],
@@ -51,26 +54,15 @@ pub fn ParallelHasher(comptime Hasher: type) type {
}
fn worker(
- file: fs.File,
+ io: Io,
+ file: Io.File,
fstart: usize,
buffer: []u8,
out: *[hash_size]u8,
- err: *fs.File.PReadError!usize,
+ err: *Io.File.ReadPositionalError!usize,
) void {
- const tracy = trace(@src());
- defer tracy.end();
- err.* = file.preadAll(buffer, fstart);
+ err.* = file.readPositionalAll(io, buffer, fstart);
Hasher.hash(buffer, out, .{});
}
-
- const Self = @This();
};
}
-
-const assert = std.debug.assert;
-const fs = std.fs;
-const mem = std.mem;
-const std = @import("std");
-const trace = @import("../../tracy.zig").trace;
-
-const Allocator = mem.Allocator;
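`ParallelHasher` loses its `allocator`/`io` fields and becomes a plain namespace: state that was stored per instance is now passed per call, and the worker group is torn down with `defer group.cancel(io)` rather than only on the error path. The resulting call shape, as used by CodeSignature.zig and uuid.zig elsewhere in this diff:

    // Hash the file in fixed-size chunks in parallel; one digest per out slot.
    try ParallelHasher(Sha256).hash(gpa, io, file, out_slots, .{
        .chunk_size = page_size,
        .max_file_size = file_size,
    });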
diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig
index d2a6c2a3ab..13dd35a558 100644
--- a/src/link/MachO/relocatable.zig
+++ b/src/link/MachO/relocatable.zig
@@ -1,6 +1,7 @@
pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
- const gpa = macho_file.base.comp.gpa;
- const diags = &macho_file.base.comp.link_diags;
+ const gpa = comp.gpa;
+ const io = comp.io;
+ const diags = &comp.link_diags;
// TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list.
var positionals = std.array_list.Managed(link.Input).init(gpa);
@@ -9,24 +10,22 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
positionals.appendSliceAssumeCapacity(comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
- try positionals.append(try link.openObjectInput(diags, key.status.success.object_path));
+ try positionals.append(try link.openObjectInput(io, diags, key.status.success.object_path));
}
- if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
+ if (module_obj_path) |path| try positionals.append(try link.openObjectInput(io, diags, path));
if (macho_file.getZigObject() == null and positionals.items.len == 1) {
// Instead of invoking a full-blown `-r` mode on the input which sadly will strip all
// debug info segments/sections (this is apparently by design by Apple), we copy
// the *only* input file over.
const path = positionals.items[0].path().?;
- const in_file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err|
+ const in_file = path.root_dir.handle.openFile(io, path.sub_path, .{}) catch |err|
return diags.fail("failed to open {f}: {s}", .{ path, @errorName(err) });
- const stat = in_file.stat() catch |err|
+ const stat = in_file.stat(io) catch |err|
return diags.fail("failed to stat {f}: {s}", .{ path, @errorName(err) });
- const amt = in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size) catch |err|
- return diags.fail("failed to copy range of file {f}: {s}", .{ path, @errorName(err) });
- if (amt != stat.size)
- return diags.fail("unexpected short write in copy range of file {f}", .{path});
+ link.File.copyRangeAll2(io, in_file, macho_file.base.file.?, 0, 0, stat.size) catch |err|
+ return diags.fail("failed to copy range of file {f}: {t}", .{ path, err });
return;
}
@@ -79,6 +78,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
const gpa = comp.gpa;
+ const io = comp.io;
const diags = &macho_file.base.comp.link_diags;
var positionals = std.array_list.Managed(link.Input).init(gpa);
@@ -88,17 +88,17 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
positionals.appendSliceAssumeCapacity(comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
- try positionals.append(try link.openObjectInput(diags, key.status.success.object_path));
+ try positionals.append(try link.openObjectInput(io, diags, key.status.success.object_path));
}
- if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
+ if (module_obj_path) |path| try positionals.append(try link.openObjectInput(io, diags, path));
if (comp.compiler_rt_strat == .obj) {
- try positionals.append(try link.openObjectInput(diags, comp.compiler_rt_obj.?.full_object_path));
+ try positionals.append(try link.openObjectInput(io, diags, comp.compiler_rt_obj.?.full_object_path));
}
if (comp.ubsan_rt_strat == .obj) {
- try positionals.append(try link.openObjectInput(diags, comp.ubsan_rt_obj.?.full_object_path));
+ try positionals.append(try link.openObjectInput(io, diags, comp.ubsan_rt_obj.?.full_object_path));
}
for (positionals.items) |link_input| {
@@ -229,7 +229,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
assert(writer.end == total_size);
- try macho_file.setEndPos(total_size);
+ try macho_file.setLength(total_size);
try macho_file.pwriteAll(writer.buffered(), 0);
if (diags.hasErrors()) return error.LinkFailure;
diff --git a/src/link/MachO/uuid.zig b/src/link/MachO/uuid.zig
index d08ac0c5b8..a75799d01e 100644
--- a/src/link/MachO/uuid.zig
+++ b/src/link/MachO/uuid.zig
@@ -1,28 +1,38 @@
+const std = @import("std");
+const Io = std.Io;
+const Md5 = std.crypto.hash.Md5;
+
+const trace = @import("../../tracy.zig").trace;
+const Compilation = @import("../../Compilation.zig");
+const ParallelHasher = @import("hasher.zig").ParallelHasher;
+
/// Calculates Md5 hash of each chunk in parallel and then hashes all Md5 hashes to produce
/// the final digest.
/// While this is NOT a correct MD5 hash of the contents, this methodology is used by LLVM/LLD
/// and we will use it too as it seems accepted by Apple OSes.
/// TODO LLD also hashes the output filename to disambiguate between same builds with different
/// output files. Should we also do that?
-pub fn calcUuid(comp: *const Compilation, file: fs.File, file_size: u64, out: *[Md5.digest_length]u8) !void {
+pub fn calcUuid(comp: *const Compilation, file: Io.File, file_size: u64, out: *[Md5.digest_length]u8) !void {
const tracy = trace(@src());
defer tracy.end();
+ const gpa = comp.gpa;
+ const io = comp.io;
+
const chunk_size: usize = 1024 * 1024;
const num_chunks: usize = std.math.cast(usize, @divTrunc(file_size, chunk_size)) orelse return error.Overflow;
const actual_num_chunks = if (@rem(file_size, chunk_size) > 0) num_chunks + 1 else num_chunks;
- const hashes = try comp.gpa.alloc([Md5.digest_length]u8, actual_num_chunks);
- defer comp.gpa.free(hashes);
+ const hashes = try gpa.alloc([Md5.digest_length]u8, actual_num_chunks);
+ defer gpa.free(hashes);
- var hasher = Hasher(Md5){ .allocator = comp.gpa, .io = comp.io };
- try hasher.hash(file, hashes, .{
+ try ParallelHasher(Md5).hash(gpa, io, file, hashes, .{
.chunk_size = chunk_size,
.max_file_size = file_size,
});
- const final_buffer = try comp.gpa.alloc(u8, actual_num_chunks * Md5.digest_length);
- defer comp.gpa.free(final_buffer);
+ const final_buffer = try gpa.alloc(u8, actual_num_chunks * Md5.digest_length);
+ defer gpa.free(final_buffer);
for (hashes, 0..) |hash, i| {
@memcpy(final_buffer[i * Md5.digest_length ..][0..Md5.digest_length], &hash);
@@ -37,12 +47,3 @@ inline fn conform(out: *[Md5.digest_length]u8) void {
out[6] = (out[6] & 0x0F) | (3 << 4);
out[8] = (out[8] & 0x3F) | 0x80;
}
-
-const fs = std.fs;
-const mem = std.mem;
-const std = @import("std");
-const trace = @import("../../tracy.zig").trace;
-
-const Compilation = @import("../../Compilation.zig");
-const Md5 = std.crypto.hash.Md5;
-const Hasher = @import("hasher.zig").ParallelHasher;
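The `conform` step is what keeps the digest presentable as a UUID: it overwrites the RFC 4122 metadata bits so consumers see a version-3 (MD5-based) UUID with the standard variant:

    out[6] = (out[6] & 0x0F) | (3 << 4); // version nibble = 3 (MD5-based)
    out[8] = (out[8] & 0x3F) | 0x80;     // variant bits = 10xxxxxx (RFC 4122)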
diff --git a/src/link/MappedFile.zig b/src/link/MappedFile.zig
index 975b94578b..2986e27e24 100644
--- a/src/link/MappedFile.zig
+++ b/src/link/MappedFile.zig
@@ -1,3 +1,17 @@
+/// TODO add a mapped file abstraction to std.Io
+const MappedFile = @This();
+
+const builtin = @import("builtin");
+const is_linux = builtin.os.tag == .linux;
+const is_windows = builtin.os.tag == .windows;
+
+const std = @import("std");
+const Io = std.Io;
+const assert = std.debug.assert;
+const linux = std.os.linux;
+const windows = std.os.windows;
+
+io: Io,
file: std.Io.File,
flags: packed struct {
block_size: std.mem.Alignment,
@@ -16,16 +30,22 @@ writers: std.SinglyLinkedList,
pub const growth_factor = 4;
-pub const Error = std.posix.MMapError || std.posix.MRemapError || std.fs.File.SetEndPosError || error{
+pub const Error = std.posix.MMapError || std.posix.MRemapError || Io.File.LengthError || error{
NotFile,
SystemResources,
IsDir,
Unseekable,
NoSpaceLeft,
+
+ InputOutput,
+ FileTooBig,
+ FileBusy,
+ NonResizable,
};
-pub fn init(file: std.Io.File, gpa: std.mem.Allocator) !MappedFile {
+pub fn init(file: std.Io.File, gpa: std.mem.Allocator, io: Io) !MappedFile {
var mf: MappedFile = .{
+ .io = io,
.file = file,
.flags = undefined,
.section = if (is_windows) windows.INVALID_HANDLE_VALUE else {},
@@ -55,18 +75,41 @@ pub fn init(file: std.Io.File, gpa: std.mem.Allocator) !MappedFile {
};
}
if (is_linux) {
- const statx = try linux.wrapped.statx(
- mf.file.handle,
- "",
- std.posix.AT.EMPTY_PATH,
- .{ .TYPE = true, .SIZE = true, .BLOCKS = true },
- );
- assert(statx.mask.TYPE);
- assert(statx.mask.SIZE);
- assert(statx.mask.BLOCKS);
-
- if (!std.posix.S.ISREG(statx.mode)) return error.PathAlreadyExists;
- break :stat .{ statx.size, @max(std.heap.pageSize(), statx.blksize) };
+ const use_c = std.c.versionCheck(if (builtin.abi.isAndroid())
+ .{ .major = 30, .minor = 0, .patch = 0 }
+ else
+ .{ .major = 2, .minor = 28, .patch = 0 });
+ const sys = if (use_c) std.c else std.os.linux;
+ while (true) {
+ var statx = std.mem.zeroes(linux.Statx);
+ const rc = sys.statx(
+ mf.file.handle,
+ "",
+ std.posix.AT.EMPTY_PATH,
+ .{ .TYPE = true, .SIZE = true, .BLOCKS = true },
+ &statx,
+ );
+ switch (sys.errno(rc)) {
+ .SUCCESS => {
+ assert(statx.mask.TYPE);
+ assert(statx.mask.SIZE);
+ assert(statx.mask.BLOCKS);
+ if (!std.posix.S.ISREG(statx.mode)) return error.PathAlreadyExists;
+ break :stat .{ statx.size, @max(std.heap.pageSize(), statx.blksize) };
+ },
+ .INTR => continue,
+ .ACCES => return error.AccessDenied,
+ .BADF => if (std.debug.runtime_safety) unreachable else return error.Unexpected,
+ .FAULT => if (std.debug.runtime_safety) unreachable else return error.Unexpected,
+ .INVAL => if (std.debug.runtime_safety) unreachable else return error.Unexpected,
+ .LOOP => return error.SymLinkLoop,
+ .NAMETOOLONG => return error.NameTooLong,
+ .NOENT => return error.FileNotFound,
+ .NOTDIR => return error.FileNotFound,
+ .NOMEM => return error.SystemResources,
+ else => |err| return std.posix.unexpectedErrno(err),
+ }
+ }
}
const stat = try std.posix.fstat(mf.file.handle);
if (!std.posix.S.ISREG(stat.mode)) return error.PathAlreadyExists;
@@ -433,8 +476,8 @@ pub const Node = extern struct {
return n;
},
.streaming,
- .streaming_reading,
- .positional_reading,
+ .streaming_simple,
+ .positional_simple,
.failure,
=> {
const dest = limit.slice(interface.unusedCapacitySlice());
@@ -612,13 +655,14 @@ pub fn addNodeAfter(
}
fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested_size: u64) !void {
+ const io = mf.io;
const node = ni.get(mf);
const old_offset, const old_size = node.location().resolve(mf);
const new_size = node.flags.alignment.forward(@intCast(requested_size));
// Resize the entire file
if (ni == Node.Index.root) {
try mf.ensureCapacityForSetLocation(gpa);
- try std.fs.File.adaptFromNewApi(mf.file).setEndPos(new_size);
+ try mf.file.setLength(io, new_size);
try mf.ensureTotalCapacity(@intCast(new_size));
ni.setLocationAssumeCapacity(mf, old_offset, new_size);
return;
@@ -1059,12 +1103,3 @@ fn verifyNode(mf: *MappedFile, parent_ni: Node.Index) void {
ni = node.next;
}
}
-
-const assert = std.debug.assert;
-const builtin = @import("builtin");
-const is_linux = builtin.os.tag == .linux;
-const is_windows = builtin.os.tag == .windows;
-const linux = std.os.linux;
-const MappedFile = @This();
-const std = @import("std");
-const windows = std.os.windows;
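The new `init` path replaces the `linux.wrapped.statx` helper with a direct call that uses libc `statx` when the runtime glibc (or bionic) is new enough and falls back to the raw syscall otherwise, retrying on `EINTR`. The retry idiom in isolation, assuming `sys`, `fd`, `mask`, and `stx` as set up above:

    // Restart the syscall when interrupted by a signal; map other errnos.
    while (true) {
        const rc = sys.statx(fd, "", std.posix.AT.EMPTY_PATH, mask, &stx);
        switch (sys.errno(rc)) {
            .SUCCESS => break,
            .INTR => continue,
            else => |err| return std.posix.unexpectedErrno(err),
        }
    }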
diff --git a/src/link/Queue.zig b/src/link/Queue.zig
index e8e7700695..b716800bae 100644
--- a/src/link/Queue.zig
+++ b/src/link/Queue.zig
@@ -121,7 +121,7 @@ pub fn enqueueZcu(
link.doZcuTask(comp, tid, task);
}
-pub fn finishPrelinkQueue(q: *Queue, comp: *Compilation) void {
+pub fn finishPrelinkQueue(q: *Queue, comp: *Compilation) Io.Cancelable!void {
if (q.future != null) {
q.prelink_queue.close(comp.io);
return;
@@ -136,6 +136,7 @@ pub fn finishPrelinkQueue(q: *Queue, comp: *Compilation) void {
} else |err| switch (err) {
error.OutOfMemory => comp.link_diags.setAllocFailure(),
error.LinkFailure => {},
+ error.Canceled => |e| return e,
}
}
}
@@ -175,6 +176,7 @@ fn runLinkTasks(q: *Queue, comp: *Compilation) void {
lf.post_prelink = true;
} else |err| switch (err) {
error.OutOfMemory => comp.link_diags.setAllocFailure(),
+ error.Canceled => @panic("TODO"),
error.LinkFailure => {},
}
}
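`finishPrelinkQueue` now returns `Io.Cancelable!void` so cooperative cancellation propagates to the caller instead of being swallowed along with link errors; `runLinkTasks` has no good answer yet and panics on it. A sketch of the propagation pattern, with a hypothetical `drain` standing in for the queue work:

    fn finish(io: std.Io, q: *Queue) std.Io.Cancelable!void {
        q.drain(io) catch |err| switch (err) {
            error.Canceled => |e| return e, // rethrow cancellation
            else => {}, // other failures are recorded in link diagnostics
        };
    }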
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index 7e28dc0a8b..4dbdd5c089 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -33,6 +33,7 @@ pub fn createEmpty(
options: link.File.OpenOptions,
) !*Linker {
const gpa = comp.gpa;
+ const io = comp.io;
const target = &comp.root_mod.resolved_target.result;
assert(!comp.config.use_lld); // Caught by Compilation.Config.resolve
@@ -78,7 +79,7 @@ pub fn createEmpty(
};
errdefer linker.deinit();
- linker.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
+ linker.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{
.truncate = true,
.read = true,
});
@@ -245,6 +246,7 @@ pub fn flush(
const comp = linker.base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
+ const io = comp.io;
// We need to export the list of error names somewhere so that we can pretty-print them in the
// executor. This is not really an important thing though, so we can just dump it in any old
@@ -286,8 +288,8 @@ pub fn flush(
};
// TODO endianness bug. use file writer and call writeSliceEndian instead
- linker.base.file.?.writeAll(@ptrCast(linked_module)) catch |err|
- return diags.fail("failed to write: {s}", .{@errorName(err)});
+ linker.base.file.?.writeStreamingAll(io, @ptrCast(linked_module)) catch |err|
+ return diags.fail("failed to write: {t}", .{err});
}
fn linkModule(arena: Allocator, module: []Word, progress: std.Progress.Node) ![]Word {
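The flush hunk above also shows the format-specifier cleanup applied across the commit: `{t}` formats an error (or enum) value by its tag name, replacing the older `"{s}", .{@errorName(err)}` and `@tagName(...)` spellings:

    // Before: diags.fail("failed to write: {s}", .{@errorName(err)})
    // After:
    return diags.fail("failed to write: {t}", .{err});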
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 92307ec40c..af800d77d2 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -20,6 +20,7 @@ const native_endian = builtin.cpu.arch.endian();
const build_options = @import("build_options");
const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
const Path = Cache.Path;
@@ -428,7 +429,11 @@ pub const OutputFunctionIndex = enum(u32) {
pub fn fromSymbolName(wasm: *const Wasm, name: String) OutputFunctionIndex {
if (wasm.flush_buffer.function_imports.getIndex(name)) |i| return @enumFromInt(i);
- return fromFunctionIndex(wasm, FunctionIndex.fromSymbolName(wasm, name).?);
+ return fromFunctionIndex(wasm, FunctionIndex.fromSymbolName(wasm, name) orelse {
+ if (std.debug.runtime_safety) {
+ std.debug.panic("function index for symbol not found: {s}", .{name.slice(wasm)});
+ } else unreachable;
+ });
}
};
@@ -2996,16 +3001,18 @@ pub fn createEmpty(
.named => |name| (try wasm.internString(name)).toOptional(),
};
- wasm.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
+ const io = comp.io;
+
+ wasm.base.file = try emit.root_dir.handle.createFile(io, emit.sub_path, .{
.truncate = true,
.read = true,
- .mode = if (fs.has_executable_bit)
+ .permissions = if (Io.File.Permissions.has_executable_bit)
if (target.os.tag == .wasi and output_mode == .Exe)
- fs.File.default_mode | 0b001_000_000
+ .executable_file
else
- fs.File.default_mode
+ .default_file
else
- 0,
+ .default_file,
});
wasm.name = emit.sub_path;
@@ -3013,14 +3020,16 @@ pub fn createEmpty(
}
fn openParseObjectReportingFailure(wasm: *Wasm, path: Path) void {
- const diags = &wasm.base.comp.link_diags;
- const obj = link.openObject(path, false, false) catch |err| {
- switch (diags.failParse(path, "failed to open object: {s}", .{@errorName(err)})) {
+ const comp = wasm.base.comp;
+ const io = comp.io;
+ const diags = &comp.link_diags;
+ const obj = link.openObject(io, path, false, false) catch |err| {
+ switch (diags.failParse(path, "failed to open object: {t}", .{err})) {
error.LinkFailure => return,
}
};
wasm.parseObject(obj) catch |err| {
- switch (diags.failParse(path, "failed to parse object: {s}", .{@errorName(err)})) {
+ switch (diags.failParse(path, "failed to parse object: {t}", .{err})) {
error.LinkFailure => return,
}
};
@@ -3032,7 +3041,7 @@ fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void {
const io = wasm.base.comp.io;
const gc_sections = wasm.base.gc_sections;
- defer obj.file.close();
+ defer obj.file.close(io);
var file_reader = obj.file.reader(io, &.{});
@@ -3060,7 +3069,7 @@ fn parseArchive(wasm: *Wasm, obj: link.Input.Object) !void {
const io = wasm.base.comp.io;
const gc_sections = wasm.base.gc_sections;
- defer obj.file.close();
+ defer obj.file.close(io);
var file_reader = obj.file.reader(io, &.{});
@@ -3529,7 +3538,10 @@ pub fn markFunctionImport(
import: *FunctionImport,
func_index: FunctionImport.Index,
) link.File.FlushError!void {
- if (import.flags.alive) return;
+ // import.flags.alive might already be true from a previous update. In that

+ // case, we must still run the logic in this function, in case the item
+ // being marked was reverted by the `flush` logic that resets the hash
+ // table watermarks.
import.flags.alive = true;
const comp = wasm.base.comp;
@@ -3549,8 +3561,9 @@ pub fn markFunctionImport(
} else {
try wasm.function_imports.put(gpa, name, .fromObject(func_index, wasm));
}
- } else {
- try markFunction(wasm, import.resolution.unpack(wasm).object_function, import.flags.exported);
+ } else switch (import.resolution.unpack(wasm)) {
+ .object_function => try markFunction(wasm, import.resolution.unpack(wasm).object_function, import.flags.exported),
+ else => return,
}
}
@@ -3589,7 +3602,10 @@ fn markGlobalImport(
import: *GlobalImport,
global_index: GlobalImport.Index,
) link.File.FlushError!void {
- if (import.flags.alive) return;
+ // import.flags.alive might already be true from a previous update. In that
+ // case, we must still run the logic in this function, in case the item
+ // being marked was reverted by the `flush` logic that resets the hash
+ // table watermarks.
import.flags.alive = true;
const comp = wasm.base.comp;
@@ -3619,8 +3635,9 @@ fn markGlobalImport(
} else {
try wasm.global_imports.put(gpa, name, .fromObject(global_index, wasm));
}
- } else {
- try markGlobal(wasm, import.resolution.unpack(wasm).object_global, import.flags.exported);
+ } else switch (import.resolution.unpack(wasm)) {
+ .object_global => try markGlobal(wasm, import.resolution.unpack(wasm).object_global, import.flags.exported),
+ else => return,
}
}
@@ -3823,8 +3840,9 @@ pub fn flush(
const comp = wasm.base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
+ const io = comp.io;
- if (comp.verbose_link) Compilation.dump_argv(wasm.dump_argv_list.items);
+ if (comp.verbose_link) try Compilation.dumpArgv(io, wasm.dump_argv_list.items);
if (wasm.base.zcu_object_basename) |raw| {
const zcu_obj_path: Path = try comp.resolveEmitPathFlush(arena, .temp, raw);
@@ -4037,7 +4055,7 @@ pub fn tagNameSymbolIndex(wasm: *Wasm, ip_index: InternPool.Index) Allocator.Err
const comp = wasm.base.comp;
assert(comp.config.output_mode == .Obj);
const gpa = comp.gpa;
- const name = try wasm.internStringFmt("__zig_tag_name_{d}", .{@intFromEnum(ip_index)});
+ const name = try wasm.internStringFmt("__zig_tag_name_{d}", .{ip_index});
const gop = try wasm.symbol_table.getOrPut(gpa, name);
gop.value_ptr.* = {};
return @enumFromInt(gop.index);
diff --git a/src/link/Wasm/Flush.zig b/src/link/Wasm/Flush.zig
index 6f7792f473..5bd18a1936 100644
--- a/src/link/Wasm/Flush.zig
+++ b/src/link/Wasm/Flush.zig
@@ -108,6 +108,7 @@ pub fn deinit(f: *Flush, gpa: Allocator) void {
pub fn finish(f: *Flush, wasm: *Wasm) !void {
const comp = wasm.base.comp;
+ const io = comp.io;
const shared_memory = comp.config.shared_memory;
const diags = &comp.link_diags;
const gpa = comp.gpa;
@@ -127,17 +128,20 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
if (comp.zcu) |zcu| {
const ip: *const InternPool = &zcu.intern_pool; // No mutations allowed!
+ log.debug("total MIR instructions: {d}", .{wasm.mir_instructions.len});
+
// Detect any intrinsics that were called; they need to have dependencies on the symbols marked.
// Likewise detect `@tagName` calls so those functions can be included in the output and synthesized.
for (wasm.mir_instructions.items(.tag), wasm.mir_instructions.items(.data)) |tag, *data| switch (tag) {
.call_intrinsic => {
const symbol_name = try wasm.internString(@tagName(data.intrinsic));
const i: Wasm.FunctionImport.Index = @enumFromInt(wasm.object_function_imports.getIndex(symbol_name) orelse {
- return diags.fail("missing compiler runtime intrinsic '{s}' (undefined linker symbol)", .{
- @tagName(data.intrinsic),
+ return diags.fail("missing compiler runtime intrinsic '{t}' (undefined linker symbol)", .{
+ data.intrinsic,
});
});
try wasm.markFunctionImport(symbol_name, i.value(wasm), i);
+ log.debug("markFunctionImport intrinsic {d}={t}", .{ i, data.intrinsic });
},
.call_tag_name => {
assert(ip.indexToKey(data.ip_index) == .enum_type);
@@ -146,11 +150,10 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
wasm.tag_name_table_ref_count += 1;
const int_tag_ty = Zcu.Type.fromInterned(data.ip_index).intTagType(zcu);
gop.value_ptr.* = .{ .tag_name = .{
- .symbol_name = try wasm.internStringFmt("__zig_tag_name_{d}", .{@intFromEnum(data.ip_index)}),
+ .symbol_name = try wasm.internStringFmt("__zig_tag_name_{d}", .{data.ip_index}),
.type_index = try wasm.internFunctionType(.auto, &.{int_tag_ty.ip_index}, .slice_const_u8_sentinel_0, target),
.table_index = @intCast(wasm.tag_name_offs.items.len),
} };
- try wasm.functions.put(gpa, .fromZcuFunc(wasm, @enumFromInt(gop.index)), {});
const tag_names = ip.loadEnumType(data.ip_index).names;
for (tag_names.get(ip)) |tag_name| {
const slice = tag_name.toSlice(ip);
@@ -158,6 +161,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
try wasm.tag_name_bytes.appendSlice(gpa, slice[0 .. slice.len + 1]);
}
}
+ try wasm.functions.put(gpa, .fromZcuFunc(wasm, @enumFromInt(gop.index)), {});
},
else => continue,
};
@@ -1067,7 +1071,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
}
// Finally, write the entire binary into the file.
- var file_writer = wasm.base.file.?.writer(&.{});
+ var file_writer = wasm.base.file.?.writer(io, &.{});
file_writer.interface.writeAll(binary_bytes.items) catch |err| switch (err) {
error.WriteFailed => return file_writer.err.?,
};
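The final write goes through the new writer interface: `file.writer(io, &.{})` produces a writer whose generic interface only reports `error.WriteFailed`, with the concrete OS error recovered from the writer's `err` field. The shape in isolation:

    var file_writer = file.writer(io, &.{});
    file_writer.interface.writeAll(bytes) catch |err| switch (err) {
        // The interface collapses failures to WriteFailed; the underlying
        // error is stored on the concrete writer.
        error.WriteFailed => return file_writer.err.?,
    };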
diff --git a/src/link/tapi.zig b/src/link/tapi.zig
index 4c1471a6b4..33c31a8415 100644
--- a/src/link/tapi.zig
+++ b/src/link/tapi.zig
@@ -1,10 +1,10 @@
const std = @import("std");
-const fs = std.fs;
+const Io = std.Io;
const mem = std.mem;
const log = std.log.scoped(.tapi);
-const yaml = @import("tapi/yaml.zig");
+const Allocator = std.mem.Allocator;
-const Allocator = mem.Allocator;
+const yaml = @import("tapi/yaml.zig");
const Yaml = yaml.Yaml;
const VersionField = union(enum) {
@@ -130,7 +130,7 @@ pub const Tbd = union(enum) {
pub const TapiError = error{
NotLibStub,
InputOutput,
-} || yaml.YamlError || std.fs.File.PReadError;
+} || yaml.YamlError || Io.File.ReadPositionalError;
pub const LibStub = struct {
/// Underlying memory for stub's contents.
@@ -139,14 +139,14 @@ pub const LibStub = struct {
/// Typed contents of the tbd file.
inner: []Tbd,
- pub fn loadFromFile(allocator: Allocator, file: fs.File) TapiError!LibStub {
+ pub fn loadFromFile(allocator: Allocator, io: Io, file: Io.File) TapiError!LibStub {
const filesize = blk: {
- const stat = file.stat() catch break :blk std.math.maxInt(u32);
+ const stat = file.stat(io) catch break :blk std.math.maxInt(u32);
break :blk @min(stat.size, std.math.maxInt(u32));
};
const source = try allocator.alloc(u8, filesize);
defer allocator.free(source);
- const amt = try file.preadAll(source, 0);
+ const amt = try file.readPositionalAll(io, source, 0);
if (amt != filesize) return error.InputOutput;
var lib_stub = LibStub{
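
For context, a reduced sketch of the positional-read pattern adopted in loadFromFile, assuming the std.Io calls exactly as shown above (the maxInt(u32) fallback on a failed stat is omitted for brevity):

    const std = @import("std");
    const Io = std.Io;
    const Allocator = std.mem.Allocator;

    fn readWhole(allocator: Allocator, io: Io, file: Io.File) ![]u8 {
        const stat = try file.stat(io);
        const size: usize = @intCast(stat.size);
        const buf = try allocator.alloc(u8, size);
        errdefer allocator.free(buf);
        // Positional reads take an absolute offset and leave any stream
        // cursor untouched; a short read means the file shrank underneath us.
        const amt = try file.readPositionalAll(io, buf, 0);
        if (amt != size) return error.InputOutput;
        return buf;
    }
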
diff --git a/src/main.zig b/src/main.zig
index a897f2a847..628d91017a 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -162,17 +162,20 @@ var debug_allocator: std.heap.DebugAllocator(.{
.stack_trace_frames = build_options.mem_leak_frames,
}) = .init;
+const use_debug_allocator = build_options.debug_gpa or
+ (native_os != .wasi and !builtin.link_libc and switch (builtin.mode) {
+ .Debug, .ReleaseSafe => true,
+ .ReleaseFast, .ReleaseSmall => false,
+ });
+
pub fn main() anyerror!void {
- const gpa, const is_debug = gpa: {
- if (build_options.debug_gpa) break :gpa .{ debug_allocator.allocator(), true };
- if (native_os == .wasi) break :gpa .{ std.heap.wasm_allocator, false };
- if (builtin.link_libc) break :gpa .{ std.heap.c_allocator, false };
- break :gpa switch (builtin.mode) {
- .Debug, .ReleaseSafe => .{ debug_allocator.allocator(), true },
- .ReleaseFast, .ReleaseSmall => .{ std.heap.smp_allocator, false },
- };
+ const gpa = gpa: {
+ if (use_debug_allocator) break :gpa debug_allocator.allocator();
+ if (native_os == .wasi) break :gpa std.heap.wasm_allocator;
+ if (builtin.link_libc) break :gpa std.heap.c_allocator;
+ break :gpa std.heap.smp_allocator;
};
- defer if (is_debug) {
+ defer if (use_debug_allocator) {
_ = debug_allocator.deinit();
};
var arena_instance = std.heap.ArenaAllocator.init(gpa);
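
The refactor above hoists the allocator decision into the comptime-known use_debug_allocator, so the deferred leak check compiles away entirely when another allocator is selected. A reduced sketch of the same pattern (names here are illustrative):

    const builtin = @import("builtin");
    const std = @import("std");

    // Comptime-known, so the false branches below are eliminated at compile time.
    const use_debug = switch (builtin.mode) {
        .Debug, .ReleaseSafe => true,
        .ReleaseFast, .ReleaseSmall => false,
    };

    var debug_state: std.heap.DebugAllocator(.{}) = .init;

    pub fn main() void {
        const gpa = if (use_debug) debug_state.allocator() else std.heap.smp_allocator;
        defer if (use_debug) {
            _ = debug_state.deinit(); // reports leaks in Debug/ReleaseSafe only
        };
        const buf = gpa.alloc(u8, 16) catch return;
        gpa.free(buf);
    }
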
@@ -238,7 +241,7 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
}
}
- var threaded: Io.Threaded = .init(gpa);
+ var threaded: Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
threaded_impl_ptr = &threaded;
threaded.stack_size = thread_stack_size;
@@ -328,23 +331,24 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.prepend_global_cache_path = true,
});
} else if (mem.eql(u8, cmd, "init")) {
- return cmdInit(gpa, arena, cmd_args);
+ return cmdInit(gpa, arena, io, cmd_args);
} else if (mem.eql(u8, cmd, "targets")) {
dev.check(.targets_command);
const host = std.zig.resolveTargetQueryOrFatal(io, .{});
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
- try @import("print_targets.zig").cmdTargets(arena, cmd_args, &stdout_writer.interface, &host);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
+ try @import("print_targets.zig").cmdTargets(arena, io, cmd_args, &stdout_writer.interface, &host);
return stdout_writer.interface.flush();
} else if (mem.eql(u8, cmd, "version")) {
dev.check(.version_command);
- try fs.File.stdout().writeAll(build_options.version ++ "\n");
+ try Io.File.stdout().writeStreamingAll(io, build_options.version ++ "\n");
return;
} else if (mem.eql(u8, cmd, "env")) {
dev.check(.env_command);
const host = std.zig.resolveTargetQueryOrFatal(io, .{});
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
try @import("print_env.zig").cmdEnv(
arena,
+ io,
&stdout_writer.interface,
args,
if (native_os == .wasi) wasi_preopens,
@@ -358,10 +362,10 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
});
} else if (mem.eql(u8, cmd, "zen")) {
dev.check(.zen_command);
- return fs.File.stdout().writeAll(info_zen);
+ return Io.File.stdout().writeStreamingAll(io, info_zen);
} else if (mem.eql(u8, cmd, "help") or mem.eql(u8, cmd, "-h") or mem.eql(u8, cmd, "--help")) {
dev.check(.help_command);
- return fs.File.stdout().writeAll(usage);
+ return Io.File.stdout().writeStreamingAll(io, usage);
} else if (mem.eql(u8, cmd, "ast-check")) {
return cmdAstCheck(arena, io, cmd_args);
} else if (mem.eql(u8, cmd, "detect-cpu")) {
@@ -371,7 +375,7 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
} else if (build_options.enable_debug_extensions and mem.eql(u8, cmd, "dump-zir")) {
return cmdDumpZir(arena, io, cmd_args);
} else if (build_options.enable_debug_extensions and mem.eql(u8, cmd, "llvm-ints")) {
- return cmdDumpLlvmInts(gpa, arena, cmd_args);
+ return cmdDumpLlvmInts(gpa, arena, io, cmd_args);
} else {
std.log.info("{s}", .{usage});
fatal("unknown command: {s}", .{args[1]});
@@ -698,7 +702,7 @@ const Emit = union(enum) {
yes: []const u8,
const OutputToCacheReason = enum { listen, @"zig run", @"zig test" };
- fn resolve(emit: Emit, default_basename: []const u8, output_to_cache: ?OutputToCacheReason) Compilation.CreateOptions.Emit {
+ fn resolve(emit: Emit, io: Io, default_basename: []const u8, output_to_cache: ?OutputToCacheReason) Compilation.CreateOptions.Emit {
return switch (emit) {
.no => .no,
.yes_default_path => if (output_to_cache != null) .yes_cache else .{ .yes_path = default_basename },
@@ -713,10 +717,10 @@ const Emit = union(enum) {
} else e: {
// If there's a dirname, check that dir exists. This will give a more descriptive error than `Compilation` otherwise would.
if (fs.path.dirname(path)) |dir_path| {
- var dir = fs.cwd().openDir(dir_path, .{}) catch |err| {
+ var dir = Io.Dir.cwd().openDir(io, dir_path, .{}) catch |err| {
fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) });
};
- dir.close();
+ dir.close(io);
}
break :e .{ .yes_path = path };
},
@@ -1029,13 +1033,12 @@ fn buildOutputType(
if (mem.cutPrefix(u8, arg, "@")) |resp_file_path| {
// This is a "compiler response file". We must parse the file and treat its
// contents as command line parameters.
- args_iter.resp_file = initArgIteratorResponseFile(arena, resp_file_path) catch |err| {
- fatal("unable to read response file '{s}': {s}", .{ resp_file_path, @errorName(err) });
- };
+ args_iter.resp_file = initArgIteratorResponseFile(arena, io, resp_file_path) catch |err|
+ fatal("unable to read response file '{s}': {t}", .{ resp_file_path, err });
} else if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_build_generic);
- return cleanExit();
+ try Io.File.stdout().writeStreamingAll(io, usage_build_generic);
+ return cleanExit(io);
} else if (mem.eql(u8, arg, "--")) {
if (arg_mode == .run) {
// args_iter.i is 1, referring the next arg after "--" in ["--", ...]
@@ -1856,9 +1859,7 @@ fn buildOutputType(
var must_link = false;
var file_ext: ?Compilation.FileExt = null;
while (it.has_next) {
- it.next() catch |err| {
- fatal("unable to parse command line parameters: {s}", .{@errorName(err)});
- };
+ it.next(io) catch |err| fatal("unable to parse command line parameters: {t}", .{err});
switch (it.zig_equivalent) {
.target => target_arch_os_abi = it.only_arg, // example: -target riscv64-linux-unknown
.o => {
@@ -2834,9 +2835,9 @@ fn buildOutputType(
} else if (mem.eql(u8, arg, "-V")) {
warn("ignoring request for supported emulations: unimplemented", .{});
} else if (mem.eql(u8, arg, "-v")) {
- try fs.File.stdout().writeAll("zig ld " ++ build_options.version ++ "\n");
+ try Io.File.stdout().writeStreamingAll(io, "zig ld " ++ build_options.version ++ "\n");
} else if (mem.eql(u8, arg, "--version")) {
- try fs.File.stdout().writeAll("zig ld " ++ build_options.version ++ "\n");
+ try Io.File.stdout().writeStreamingAll(io, "zig ld " ++ build_options.version ++ "\n");
process.exit(0);
} else {
fatal("unsupported linker arg: {s}", .{arg});
@@ -3075,14 +3076,13 @@ fn buildOutputType(
const self_exe_path = switch (native_os) {
.wasi => {},
- else => fs.selfExePathAlloc(arena) catch |err| {
- fatal("unable to find zig self exe path: {s}", .{@errorName(err)});
- },
+ else => process.executablePathAlloc(io, arena) catch |err| fatal("unable to find zig self exe path: {t}", .{err}),
};
// This `init` calls `fatal` on error.
var dirs: Compilation.Directories = .init(
arena,
+ io,
override_lib_dir,
override_global_cache_dir,
s: {
@@ -3095,11 +3095,9 @@ fn buildOutputType(
if (native_os == .wasi) wasi_preopens,
self_exe_path,
);
- defer dirs.deinit();
+ defer dirs.deinit(io);
- if (linker_optimization) |o| {
- warn("ignoring deprecated linker optimization setting '{s}'", .{o});
- }
+ if (linker_optimization) |o| warn("ignoring deprecated linker optimization setting '{s}'", .{o});
create_module.dirs = dirs;
create_module.opts.emit_llvm_ir = emit_llvm_ir != .no;
@@ -3208,6 +3206,7 @@ fn buildOutputType(
for (create_module.framework_dirs.items) |framework_dir_path| {
if (try accessFrameworkPath(
+ io,
&test_path,
&checked_paths,
framework_dir_path,
@@ -3251,8 +3250,8 @@ fn buildOutputType(
}
}
- var cleanup_emit_bin_dir: ?fs.Dir = null;
- defer if (cleanup_emit_bin_dir) |*dir| dir.close();
+ var cleanup_emit_bin_dir: ?Io.Dir = null;
+ defer if (cleanup_emit_bin_dir) |*dir| dir.close(io);
// For `zig run` and `zig test`, we don't want to put the binary in the cwd by default. So, if
// the binary is requested with no explicit path (as is the default), we emit to the cache.
@@ -3304,10 +3303,10 @@ fn buildOutputType(
} else emit: {
// If there's a dirname, check that dir exists. This will give a more descriptive error than `Compilation` otherwise would.
if (fs.path.dirname(path)) |dir_path| {
- var dir = fs.cwd().openDir(dir_path, .{}) catch |err| {
+ var dir = Io.Dir.cwd().openDir(io, dir_path, .{}) catch |err| {
fatal("unable to open output directory '{s}': {s}", .{ dir_path, @errorName(err) });
};
- dir.close();
+ dir.close(io);
}
break :emit .{ .yes_path = path };
},
@@ -3321,18 +3320,18 @@ fn buildOutputType(
};
const default_h_basename = try std.fmt.allocPrint(arena, "{s}.h", .{root_name});
- const emit_h_resolved = emit_h.resolve(default_h_basename, output_to_cache);
+ const emit_h_resolved = emit_h.resolve(io, default_h_basename, output_to_cache);
const default_asm_basename = try std.fmt.allocPrint(arena, "{s}.s", .{root_name});
- const emit_asm_resolved = emit_asm.resolve(default_asm_basename, output_to_cache);
+ const emit_asm_resolved = emit_asm.resolve(io, default_asm_basename, output_to_cache);
const default_llvm_ir_basename = try std.fmt.allocPrint(arena, "{s}.ll", .{root_name});
- const emit_llvm_ir_resolved = emit_llvm_ir.resolve(default_llvm_ir_basename, output_to_cache);
+ const emit_llvm_ir_resolved = emit_llvm_ir.resolve(io, default_llvm_ir_basename, output_to_cache);
const default_llvm_bc_basename = try std.fmt.allocPrint(arena, "{s}.bc", .{root_name});
- const emit_llvm_bc_resolved = emit_llvm_bc.resolve(default_llvm_bc_basename, output_to_cache);
+ const emit_llvm_bc_resolved = emit_llvm_bc.resolve(io, default_llvm_bc_basename, output_to_cache);
- const emit_docs_resolved = emit_docs.resolve("docs", output_to_cache);
+ const emit_docs_resolved = emit_docs.resolve(io, "docs", output_to_cache);
const is_exe_or_dyn_lib = switch (create_module.resolved_options.output_mode) {
.Obj => false,
@@ -3353,7 +3352,7 @@ fn buildOutputType(
const default_implib_basename = try std.fmt.allocPrint(arena, "{s}.lib", .{root_name});
const emit_implib_resolved: Compilation.CreateOptions.Emit = switch (emit_implib) {
.no => .no,
- .yes => emit_implib.resolve(default_implib_basename, output_to_cache),
+ .yes => emit_implib.resolve(io, default_implib_basename, output_to_cache),
.yes_default_path => emit: {
if (output_to_cache != null) break :emit .yes_cache;
const p = try fs.path.join(arena, &.{
@@ -3382,24 +3381,24 @@ fn buildOutputType(
const dump_path = try std.fmt.allocPrint(arena, "tmp" ++ sep ++ "{x}-dump-stdin{s}", .{
std.crypto.random.int(u64), ext.canonicalName(target),
});
- try dirs.local_cache.handle.makePath("tmp");
+ try dirs.local_cache.handle.createDirPath(io, "tmp");
        // Note that in one of the happy paths, execve() is used to switch to
        // clang, in which case any cleanup logic that exists for this temporary
        // file will not run, and this temp file will be leaked. The filename
// will be a hash of its contents — so multiple invocations of
// `zig cc -` will result in the same temp file name.
- var f = try dirs.local_cache.handle.createFile(dump_path, .{});
- defer f.close();
+ var f = try dirs.local_cache.handle.createFile(io, dump_path, .{});
+ defer f.close(io);
// Re-using the hasher from Cache, since the functional requirements
// for the hashing algorithm here and in the cache are the same.
// We are providing our own cache key, because this file has nothing
// to do with the cache manifest.
- var file_writer = f.writer(&.{});
+ var file_writer = f.writer(io, &.{});
var buffer: [1000]u8 = undefined;
var hasher = file_writer.interface.hashed(Cache.Hasher.init("0123456789abcdef"), &buffer);
- var stdin_reader = fs.File.stdin().readerStreaming(io, &.{});
+ var stdin_reader = Io.File.stdin().readerStreaming(io, &.{});
_ = hasher.writer.sendFileAll(&stdin_reader, .unlimited) catch |err| switch (err) {
error.WriteFailed => fatal("failed to write {s}: {t}", .{ dump_path, file_writer.err.? }),
else => fatal("failed to pipe stdin to {s}: {t}", .{ dump_path, err }),
@@ -3411,7 +3410,7 @@ fn buildOutputType(
const sub_path = try std.fmt.allocPrint(arena, "tmp" ++ sep ++ "{x}-stdin{s}", .{
&bin_digest, ext.canonicalName(target),
});
- try dirs.local_cache.handle.rename(dump_path, sub_path);
+ try dirs.local_cache.handle.rename(dump_path, dirs.local_cache.handle, sub_path, io);
// Convert `sub_path` to be relative to current working directory.
src.src_path = try dirs.local_cache.join(arena, &.{sub_path});
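
The hashing setup above is worth unpacking: the file writer is wrapped so that every byte streamed from stdin is simultaneously written to the dump file and folded into a digest, which then names the permanent cache entry. Restated with comments, using only the calls shown in this hunk (digest finalization happens outside the shown lines):

    var file_writer = f.writer(io, &.{});
    var buffer: [1000]u8 = undefined;
    // Wrap the file writer: writes pass through to the file while updating the hash.
    var hasher = file_writer.interface.hashed(Cache.Hasher.init("0123456789abcdef"), &buffer);
    var stdin_reader = Io.File.stdin().readerStreaming(io, &.{});
    // Stream all of stdin through the hashing writer in one call.
    _ = try hasher.writer.sendFileAll(&stdin_reader, .unlimited);
    // The resulting digest (`bin_digest` above) makes the final
    // "tmp/<hash>-stdin<ext>" name content-addressed, so repeated
    // `zig cc -` runs with identical input reuse the same file.
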
@@ -3630,13 +3629,13 @@ fn buildOutputType(
if (show_builtin) {
const builtin_opts = comp.root_mod.getBuiltinOptions(comp.config);
const source = try builtin_opts.generate(arena);
- return fs.File.stdout().writeAll(source);
+ return Io.File.stdout().writeStreamingAll(io, source);
}
switch (listen) {
.none => {},
.stdio => {
- var stdin_reader = fs.File.stdin().reader(io, &stdin_buffer);
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var stdin_reader = Io.File.stdin().reader(io, &stdin_buffer);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
try serve(
comp,
&stdin_reader.interface,
@@ -3647,7 +3646,7 @@ fn buildOutputType(
all_args,
runtime_args_start,
);
- return cleanExit();
+ return cleanExit(io);
},
.ip4 => |ip4_addr| {
const addr: Io.net.IpAddress = .{ .ip4 = ip4_addr };
@@ -3673,12 +3672,12 @@ fn buildOutputType(
all_args,
runtime_args_start,
);
- return cleanExit();
+ return cleanExit(io);
},
}
{
- const root_prog_node = std.Progress.start(.{
+ const root_prog_node = std.Progress.start(io, .{
.disable_printing = (color == .off),
});
defer root_prog_node.end();
@@ -3756,7 +3755,7 @@ fn buildOutputType(
}
// Skip resource deallocation in release builds; let the OS do it.
- return cleanExit();
+ return cleanExit(io);
}
const CreateModule = struct {
@@ -3927,11 +3926,8 @@ fn createModule(
}
if (target.isMinGW()) {
- const exists = mingw.libExists(arena, target, create_module.dirs.zig_lib, lib_name) catch |err| {
- fatal("failed to check zig installation for DLL import libs: {s}", .{
- @errorName(err),
- });
- };
+ const exists = mingw.libExists(arena, io, target, create_module.dirs.zig_lib, lib_name) catch |err|
+ fatal("failed to check zig installation for DLL import libs: {t}", .{err});
if (exists) {
try create_module.windows_libs.put(arena, lib_name, {});
continue;
@@ -3959,14 +3955,14 @@ fn createModule(
if (fs.path.isAbsolute(lib_dir_arg)) {
const stripped_dir = lib_dir_arg[fs.path.parsePath(lib_dir_arg).root.len..];
const full_path = try fs.path.join(arena, &[_][]const u8{ root, stripped_dir });
- addLibDirectoryWarn(&create_module.lib_directories, full_path);
+ addLibDirectoryWarn(io, &create_module.lib_directories, full_path);
} else {
- addLibDirectoryWarn(&create_module.lib_directories, lib_dir_arg);
+ addLibDirectoryWarn(io, &create_module.lib_directories, lib_dir_arg);
}
}
} else {
for (create_module.lib_dir_args.items) |lib_dir_arg| {
- addLibDirectoryWarn(&create_module.lib_directories, lib_dir_arg);
+ addLibDirectoryWarn(io, &create_module.lib_directories, lib_dir_arg);
}
}
create_module.lib_dir_args = undefined; // From here we use lib_directories instead.
@@ -3989,9 +3985,8 @@ fn createModule(
resolved_target.is_native_os and resolved_target.is_native_abi and
create_module.want_native_include_dirs)
{
- var paths = std.zig.system.NativePaths.detect(arena, target) catch |err| {
- fatal("unable to detect native system paths: {s}", .{@errorName(err)});
- };
+ var paths = std.zig.system.NativePaths.detect(arena, io, target) catch |err|
+ fatal("unable to detect native system paths: {t}", .{err});
for (paths.warnings.items) |warning| {
warn("{s}", .{warning});
}
@@ -4002,38 +3997,35 @@ fn createModule(
try create_module.rpath_list.appendSlice(arena, paths.rpaths.items);
try create_module.lib_directories.ensureUnusedCapacity(arena, paths.lib_dirs.items.len);
- for (paths.lib_dirs.items) |path| addLibDirectoryWarn2(&create_module.lib_directories, path, true);
+ for (paths.lib_dirs.items) |path| addLibDirectoryWarn2(io, &create_module.lib_directories, path, true);
}
if (create_module.libc_paths_file) |paths_file| {
- create_module.libc_installation = LibCInstallation.parse(arena, paths_file, target) catch |err| {
- fatal("unable to parse libc paths file at path {s}: {s}", .{
- paths_file, @errorName(err),
- });
- };
+ create_module.libc_installation = LibCInstallation.parse(arena, io, paths_file, target) catch |err|
+ fatal("unable to parse libc paths file at path {s}: {t}", .{ paths_file, err });
}
if (target.os.tag == .windows and (target.abi == .msvc or target.abi == .itanium) and
any_name_queries_remaining)
{
if (create_module.libc_installation == null) {
- create_module.libc_installation = LibCInstallation.findNative(.{
- .allocator = arena,
+ create_module.libc_installation = LibCInstallation.findNative(arena, io, .{
.verbose = true,
.target = target,
}) catch |err| {
- fatal("unable to find native libc installation: {s}", .{@errorName(err)});
+ fatal("unable to find native libc installation: {t}", .{err});
};
}
try create_module.lib_directories.ensureUnusedCapacity(arena, 2);
- addLibDirectoryWarn(&create_module.lib_directories, create_module.libc_installation.?.msvc_lib_dir.?);
- addLibDirectoryWarn(&create_module.lib_directories, create_module.libc_installation.?.kernel32_lib_dir.?);
+ addLibDirectoryWarn(io, &create_module.lib_directories, create_module.libc_installation.?.msvc_lib_dir.?);
+ addLibDirectoryWarn(io, &create_module.lib_directories, create_module.libc_installation.?.kernel32_lib_dir.?);
}
// Destructively mutates but does not transfer ownership of `unresolved_link_inputs`.
link.resolveInputs(
gpa,
arena,
+ io,
target,
&unresolved_link_inputs,
&create_module.link_inputs,
@@ -4160,7 +4152,9 @@ fn serve(
var child_pid: ?std.process.Child.Id = null;
- const main_progress_node = std.Progress.start(.{});
+ const main_progress_node = std.Progress.start(io, .{});
+ defer main_progress_node.end();
+
const file_system_inputs = comp.file_system_inputs.?;
const IncrementalDebugServer = if (build_options.enable_debug_extensions and !builtin.single_threaded)
@@ -4183,7 +4177,7 @@ fn serve(
defer if (comp.debugIncremental()) ids.mutex.unlock(io);
switch (hdr.tag) {
- .exit => return cleanExit(),
+ .exit => return cleanExit(io),
.update => {
tracy.frameMark();
file_system_inputs.clearRetainingCapacity();
@@ -4436,12 +4430,12 @@ fn runOrTest(
// the error message and invocation below.
if (process.can_execv and arg_mode == .run) {
// execv releases the locks; no need to destroy the Compilation here.
- std.debug.lockStdErr();
+ _ = try io.lockStderr(&.{}, .no_color);
const err = process.execve(gpa, argv.items, &env_map);
- std.debug.unlockStdErr();
+ io.unlockStderr();
try warnAboutForeignBinaries(io, arena, arg_mode, target, link_libc);
const cmd = try std.mem.join(arena, " ", argv.items);
- fatal("the following command failed to execve with '{s}':\n{s}", .{ @errorName(err), cmd });
+ fatal("the following command failed to execve with '{t}':\n{s}", .{ err, cmd });
} else if (process.can_spawn) {
var child = std.process.Child.init(argv.items, gpa);
child.env_map = &env_map;
@@ -4455,9 +4449,9 @@ fn runOrTest(
comp_destroyed.* = true;
const term_result = t: {
- std.debug.lockStdErr();
- defer std.debug.unlockStdErr();
- break :t child.spawnAndWait();
+ _ = try io.lockStderr(&.{}, .no_color);
+ defer io.unlockStderr();
+ break :t child.spawnAndWait(io);
};
const term = term_result catch |err| {
try warnAboutForeignBinaries(io, arena, arg_mode, target, link_libc);
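
The io.lockStderr(&.{}, .no_color) / io.unlockStderr() pair replaces std.debug.lockStdErr(); holding the lock across the spawn presumably keeps progress rendering from interleaving with the child's inherited stderr (an inference; the diff does not state the rationale). The pattern as used above:

    const term_result = t: {
        // The returned stderr handle is discarded; the call is for mutual exclusion.
        _ = try io.lockStderr(&.{}, .no_color);
        defer io.unlockStderr();
        break :t child.spawnAndWait(io);
    };
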
@@ -4469,7 +4463,7 @@ fn runOrTest(
switch (term) {
.Exited => |code| {
if (code == 0) {
- return cleanExit();
+ return cleanExit(io);
} else {
process.exit(code);
}
@@ -4483,7 +4477,7 @@ fn runOrTest(
switch (term) {
.Exited => |code| {
if (code == 0) {
- return cleanExit();
+ return cleanExit(io);
} else {
const cmd = try std.mem.join(arena, " ", argv.items);
fatal("the following test command failed with exit code {d}:\n{s}", .{ code, cmd });
@@ -4512,6 +4506,7 @@ fn runOrTestHotSwap(
all_args: []const []const u8,
runtime_args_start: ?usize,
) !std.process.Child.Id {
+ const io = comp.io;
const lf = comp.bin_file.?;
const exe_path = switch (builtin.target.os.tag) {
@@ -4520,7 +4515,7 @@ fn runOrTestHotSwap(
// tmp zig-cache and use it to spawn the child process. This way we are free to update
// the binary with each requested hot update.
.windows => blk: {
- try lf.emit.root_dir.handle.copyFile(lf.emit.sub_path, comp.dirs.local_cache.handle, lf.emit.sub_path, .{});
+ try lf.emit.root_dir.handle.copyFile(lf.emit.sub_path, comp.dirs.local_cache.handle, lf.emit.sub_path, io, .{});
break :blk try fs.path.join(gpa, &.{ comp.dirs.local_cache.path orelse ".", lf.emit.sub_path });
},
@@ -4593,7 +4588,7 @@ fn runOrTestHotSwap(
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
- try child.spawn();
+ try child.spawn(io);
return child.id;
},
@@ -4604,6 +4599,8 @@ const UpdateModuleError = Compilation.UpdateError || error{
/// The update caused compile errors. The error bundle has already been
/// reported to the user by being rendered to stderr.
CompileErrorsReported,
+ /// Error occurred printing compilation errors to stderr.
+ PrintingErrorsFailed,
};
fn updateModule(comp: *Compilation, color: Color, prog_node: std.Progress.Node) UpdateModuleError!void {
try comp.update(prog_node);
@@ -4612,7 +4609,11 @@ fn updateModule(comp: *Compilation, color: Color, prog_node: std.Progress.Node)
defer errors.deinit(comp.gpa);
if (errors.errorMessageCount() > 0) {
- errors.renderToStdErr(.{}, color);
+ const io = comp.io;
+ errors.renderToStderr(io, .{}, color) catch |err| switch (err) {
+ error.Canceled => |e| return e,
+ else => return error.PrintingErrorsFailed,
+ };
return error.CompileErrorsReported;
}
}
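
A note on the error mapping above, restated with comments (the rationale for forwarding Canceled is inferred, not stated in the diff):

    errors.renderToStderr(io, .{}, color) catch |err| switch (err) {
        // Cancellation propagates unchanged via the |e| capture.
        error.Canceled => |e| return e,
        // Every other render failure collapses into one caller-facing error,
        // the newly added PrintingErrorsFailed.
        else => return error.PrintingErrorsFailed,
    };
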
@@ -4665,7 +4666,7 @@ fn cmdTranslateC(
return;
} else {
const color: Color = .auto;
- result.errors.renderToStdErr(.{}, color);
+ result.errors.renderToStderr(io, .{}, color) catch {};
process.exit(1);
}
}
@@ -4680,7 +4681,7 @@ fn cmdTranslateC(
} else {
const hex_digest = Cache.binToHex(result.digest);
const out_zig_path = try fs.path.join(arena, &.{ "o", &hex_digest, translated_basename });
- const zig_file = comp.dirs.local_cache.handle.openFile(out_zig_path, .{}) catch |err| {
+ const zig_file = comp.dirs.local_cache.handle.openFile(io, out_zig_path, .{}) catch |err| {
const path = comp.dirs.local_cache.path orelse ".";
fatal("unable to open cached translated zig file '{s}{s}{s}': {s}", .{
path,
@@ -4689,12 +4690,12 @@ fn cmdTranslateC(
@errorName(err),
});
};
- defer zig_file.close();
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ defer zig_file.close(io);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
var file_reader = zig_file.reader(io, &.{});
_ = try stdout_writer.interface.sendFileAll(&file_reader, .unlimited);
try stdout_writer.interface.flush();
- return cleanExit();
+ return cleanExit(io);
}
}
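
The cached-output path above streams the translated file to stdout without loading it into memory; sendFileAll drains the reader into the writer, presumably via an OS fast path where one exists (an assumption, not something this diff states). The same calls in isolation:

    var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
    var file_reader = zig_file.reader(io, &.{});
    // Copy until EOF; `.unlimited` places no cap on the bytes moved.
    _ = try stdout_writer.interface.sendFileAll(&file_reader, .unlimited);
    try stdout_writer.interface.flush();
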
@@ -4728,7 +4729,7 @@ const usage_init =
\\
;
-fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
+fn cmdInit(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !void {
dev.check(.init_command);
var template: enum { example, minimal } = .example;
@@ -4740,8 +4741,8 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
if (mem.eql(u8, arg, "-m") or mem.eql(u8, arg, "--minimal")) {
template = .minimal;
} else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_init);
- return cleanExit();
+ try Io.File.stdout().writeStreamingAll(io, usage_init);
+ return cleanExit(io);
} else {
fatal("unrecognized parameter: '{s}'", .{arg});
}
@@ -4759,8 +4760,8 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
switch (template) {
.example => {
- var templates = findTemplates(gpa, arena);
- defer templates.deinit();
+ var templates = findTemplates(gpa, arena, io);
+ defer templates.deinit(io);
const s = fs.path.sep_str;
const template_paths = [_][]const u8{
@@ -4772,7 +4773,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var ok_count: usize = 0;
for (template_paths) |template_path| {
- if (templates.write(arena, fs.cwd(), sanitized_root_name, template_path, fingerprint)) |_| {
+ if (templates.write(arena, io, Io.Dir.cwd(), sanitized_root_name, template_path, fingerprint)) |_| {
std.log.info("created {s}", .{template_path});
ok_count += 1;
} else |err| switch (err) {
@@ -4786,10 +4787,10 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
if (ok_count == template_paths.len) {
std.log.info("see `zig build --help` for a menu of options", .{});
}
- return cleanExit();
+ return cleanExit(io);
},
.minimal => {
- writeSimpleTemplateFile(Package.Manifest.basename,
+ writeSimpleTemplateFile(io, Package.Manifest.basename,
\\.{{
\\ .name = .{s},
\\ .version = "0.0.1",
@@ -4806,7 +4807,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
else => fatal("failed to create '{s}': {s}", .{ Package.Manifest.basename, @errorName(err) }),
error.PathAlreadyExists => fatal("refusing to overwrite '{s}'", .{Package.Manifest.basename}),
};
- writeSimpleTemplateFile(Package.build_zig_basename,
+ writeSimpleTemplateFile(io, Package.build_zig_basename,
\\const std = @import("std");
\\
\\pub fn build(b: *std.Build) void {{
@@ -4819,11 +4820,11 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
// their `build.zig.zon` *after* writing their `build.zig`. So this one isn't fatal.
error.PathAlreadyExists => {
std.log.info("successfully populated '{s}', preserving existing '{s}'", .{ Package.Manifest.basename, Package.build_zig_basename });
- return cleanExit();
+ return cleanExit(io);
},
};
std.log.info("successfully populated '{s}' and '{s}'", .{ Package.Manifest.basename, Package.build_zig_basename });
- return cleanExit();
+ return cleanExit(io);
},
}
}
@@ -4894,7 +4895,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
const argv_index_exe = child_argv.items.len;
_ = try child_argv.addOne();
- const self_exe_path = try fs.selfExePathAlloc(arena);
+ const self_exe_path = try process.executablePathAlloc(io, arena);
try child_argv.append(self_exe_path);
const argv_index_zig_lib_dir = child_argv.items.len;
@@ -5075,7 +5076,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
const work_around_btrfs_bug = native_os == .linux and
EnvVar.ZIG_BTRFS_WORKAROUND.isSet();
- const root_prog_node = std.Progress.start(.{
+ const root_prog_node = std.Progress.start(io, .{
.disable_printing = (color == .off),
.root_name = "Compile Build Script",
});
@@ -5110,14 +5111,14 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
const paths_file = debug_libc_paths_file orelse break :lci null;
if (!build_options.enable_debug_extensions) unreachable;
const lci = try arena.create(LibCInstallation);
- lci.* = try .parse(arena, paths_file, &resolved_target.result);
+ lci.* = try .parse(arena, io, paths_file, &resolved_target.result);
break :lci lci;
};
process.raiseFileDescriptorLimit();
const cwd_path = try introspect.getResolvedCwd(arena);
- const build_root = try findBuildRoot(arena, .{
+ const build_root = try findBuildRoot(arena, io, .{
.cwd_path = cwd_path,
.build_file = build_file,
});
@@ -5125,6 +5126,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
// This `init` calls `fatal` on error.
var dirs: Compilation.Directories = .init(
arena,
+ io,
override_lib_dir,
override_global_cache_dir,
.{ .override = path: {
@@ -5134,7 +5136,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
{},
self_exe_path,
);
- defer dirs.deinit();
+ defer dirs.deinit(io);
child_argv.items[argv_index_zig_lib_dir] = dirs.zig_lib.path orelse cwd_path;
child_argv.items[argv_index_build_file] = build_root.directory.path orelse cwd_path;
@@ -5203,8 +5205,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
.parent = root_mod,
});
- var cleanup_build_dir: ?fs.Dir = null;
- defer if (cleanup_build_dir) |*dir| dir.close();
+ var cleanup_build_dir: ?Io.Dir = null;
+ defer if (cleanup_build_dir) |*dir| dir.close(io);
if (dev.env.supports(.fetch_command)) {
const fetch_prog_node = root_prog_node.start("Fetch Packages", 0);
@@ -5226,7 +5228,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
if (system_pkg_dir_path) |p| {
job_queue.global_cache = .{
.path = p,
- .handle = fs.cwd().openDir(p, .{}) catch |err| {
+ .handle = Io.Dir.cwd().openDir(io, p, .{}) catch |err| {
fatal("unable to open system package directory '{s}': {s}", .{
p, @errorName(err),
});
@@ -5285,17 +5287,18 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
if (fetch.error_bundle.root_list.items.len > 0) {
var errors = try fetch.error_bundle.toOwnedBundle("");
- errors.renderToStdErr(.{}, color);
+ errors.renderToStderr(io, .{}, color) catch {};
process.exit(1);
}
- if (fetch_only) return cleanExit();
+ if (fetch_only) return cleanExit(io);
var source_buf = std.array_list.Managed(u8).init(gpa);
defer source_buf.deinit();
try job_queue.createDependenciesSource(&source_buf);
const deps_mod = try createDependenciesModule(
arena,
+ io,
source_buf.items,
root_mod,
dirs,
@@ -5357,6 +5360,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
}
} else try createEmptyDependenciesModule(
arena,
+ io,
root_mod,
dirs,
config,
@@ -5415,16 +5419,15 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
child.stderr_behavior = .Inherit;
const term = t: {
- std.debug.lockStdErr();
- defer std.debug.unlockStdErr();
- break :t child.spawnAndWait() catch |err| {
- fatal("failed to spawn build runner {s}: {s}", .{ child_argv.items[0], @errorName(err) });
- };
+ _ = try io.lockStderr(&.{}, .no_color);
+ defer io.unlockStderr();
+ break :t child.spawnAndWait(io) catch |err|
+ fatal("failed to spawn build runner {s}: {t}", .{ child_argv.items[0], err });
};
switch (term) {
.Exited => |code| {
- if (code == 0) return cleanExit();
+ if (code == 0) return cleanExit(io);
// Indicates that the build runner has reported compile errors
// and this parent process does not need to report any further
// diagnostics.
@@ -5437,12 +5440,12 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
// that are missing.
const s = fs.path.sep_str;
const tmp_sub_path = "tmp" ++ s ++ results_tmp_file_nonce;
- const stdout = dirs.local_cache.handle.readFileAlloc(tmp_sub_path, arena, .limited(50 * 1024 * 1024)) catch |err| {
+ const stdout = dirs.local_cache.handle.readFileAlloc(io, tmp_sub_path, arena, .limited(50 * 1024 * 1024)) catch |err| {
fatal("unable to read results of configure phase from '{f}{s}': {s}", .{
dirs.local_cache, tmp_sub_path, @errorName(err),
});
};
- dirs.local_cache.handle.deleteFile(tmp_sub_path) catch {};
+ dirs.local_cache.handle.deleteFile(io, tmp_sub_path) catch {};
var it = mem.splitScalar(u8, stdout, '\n');
var any_errors = false;
@@ -5511,9 +5514,10 @@ fn jitCmd(
dev.check(.jit_command);
const color: Color = .auto;
- const root_prog_node = if (options.progress_node) |node| node else std.Progress.start(.{
+ const root_prog_node = if (options.progress_node) |node| node else std.Progress.start(io, .{
.disable_printing = (color == .off),
});
+ defer root_prog_node.end();
const target_query: std.Target.Query = .{};
const resolved_target: Package.Module.ResolvedTarget = .{
@@ -5523,9 +5527,8 @@ fn jitCmd(
.is_explicit_dynamic_linker = false,
};
- const self_exe_path = fs.selfExePathAlloc(arena) catch |err| {
- fatal("unable to find self exe path: {s}", .{@errorName(err)});
- };
+ const self_exe_path = process.executablePathAlloc(io, arena) catch |err|
+ fatal("unable to find self exe path: {t}", .{err});
const optimize_mode: std.builtin.OptimizeMode = if (EnvVar.ZIG_DEBUG_CMD.isSet())
.Debug
@@ -5538,13 +5541,14 @@ fn jitCmd(
// This `init` calls `fatal` on error.
var dirs: Compilation.Directories = .init(
arena,
+ io,
override_lib_dir,
override_global_cache_dir,
.global,
if (native_os == .wasi) wasi_preopens,
self_exe_path,
);
- defer dirs.deinit();
+ defer dirs.deinit(io);
const thread_limit = @min(
@max(std.Thread.getCpuCount() catch 1, 1),
@@ -5623,7 +5627,7 @@ fn jitCmd(
defer comp.destroy();
if (options.server) {
- var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writer(io, &stdout_buffer);
var server: std.zig.Server = .{
.out = &stdout_writer.interface,
.in = undefined, // won't be receiving messages
@@ -5683,19 +5687,23 @@ fn jitCmd(
child.stdout_behavior = if (options.capture == null) .Inherit else .Pipe;
child.stderr_behavior = .Inherit;
- try child.spawn();
+ const term = t: {
+ _ = try io.lockStderr(&.{}, .no_color);
+ defer io.unlockStderr();
+ try child.spawn(io);
- if (options.capture) |ptr| {
- var stdout_reader = child.stdout.?.readerStreaming(io, &.{});
- ptr.* = try stdout_reader.interface.allocRemaining(arena, .limited(std.math.maxInt(u32)));
- }
+ if (options.capture) |ptr| {
+ var stdout_reader = child.stdout.?.readerStreaming(io, &.{});
+ ptr.* = try stdout_reader.interface.allocRemaining(arena, .limited(std.math.maxInt(u32)));
+ }
- const term = try child.wait();
+ break :t try child.wait(io);
+ };
switch (term) {
.Exited => |code| {
if (code == 0) {
if (options.capture != null) return;
- return cleanExit();
+ return cleanExit(io);
}
const cmd = try std.mem.join(arena, " ", child_argv.items);
fatal("the following build command failed with exit code {d}:\n{s}", .{ code, cmd });
@@ -5818,9 +5826,9 @@ pub fn lldMain(
const ArgIteratorResponseFile = process.ArgIteratorGeneral(.{ .comments = true, .single_quotes = true });
/// Initialize the arguments from a response file ("*.rsp").
-fn initArgIteratorResponseFile(allocator: Allocator, resp_file_path: []const u8) !ArgIteratorResponseFile {
+fn initArgIteratorResponseFile(allocator: Allocator, io: Io, resp_file_path: []const u8) !ArgIteratorResponseFile {
const max_bytes = 10 * 1024 * 1024; // 10 MiB of command line arguments is a reasonable limit
- const cmd_line = try fs.cwd().readFileAlloc(resp_file_path, allocator, .limited(max_bytes));
+ const cmd_line = try Io.Dir.cwd().readFileAlloc(io, resp_file_path, allocator, .limited(max_bytes));
errdefer allocator.free(cmd_line);
return ArgIteratorResponseFile.initTakeOwnership(allocator, cmd_line);
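
A hedged usage sketch ("args.rsp" and the loop body are illustrative; next() and deinit() are assumed to follow the usual std.process iterator shape, which this hunk does not itself show):

    var it = initArgIteratorResponseFile(arena, io, "args.rsp") catch |err|
        fatal("unable to read response file: {t}", .{err});
    defer it.deinit();
    while (it.next()) |arg| {
        // Tokens are slices into the iterator's internal, arena-owned buffer
        // (see the NOTE in ClangArgIterator.next below).
        std.log.debug("resp arg: {s}", .{arg});
    }
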
@@ -5948,7 +5956,7 @@ pub const ClangArgIterator = struct {
};
}
- fn next(self: *ClangArgIterator) !void {
+ fn next(self: *ClangArgIterator, io: Io) !void {
assert(self.has_next);
assert(self.next_index < self.argv.len);
// In this state we know that the parameter we are looking at is a root parameter
@@ -5966,10 +5974,8 @@ pub const ClangArgIterator = struct {
const arena = self.arena;
const resp_file_path = arg[1..];
- self.arg_iterator_response_file =
- initArgIteratorResponseFile(arena, resp_file_path) catch |err| {
- fatal("unable to read response file '{s}': {s}", .{ resp_file_path, @errorName(err) });
- };
+ self.arg_iterator_response_file = initArgIteratorResponseFile(arena, io, resp_file_path) catch |err|
+ fatal("unable to read response file '{s}': {t}", .{ resp_file_path, err });
// NOTE: The ArgIteratorResponseFile returns tokens from next() that are slices of an
// internal buffer. This internal buffer is arena allocated, so it is not cleaned up here.
@@ -6156,8 +6162,8 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_ast_check);
- return cleanExit();
+ try Io.File.stdout().writeStreamingAll(io, usage_ast_check);
+ return cleanExit(io);
} else if (mem.eql(u8, arg, "-t")) {
want_output_text = true;
} else if (mem.eql(u8, arg, "--zon")) {
@@ -6184,12 +6190,12 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
const display_path = zig_source_path orelse "<stdin>";
const source: [:0]const u8 = s: {
var f = if (zig_source_path) |p| file: {
- break :file fs.cwd().openFile(p, .{}) catch |err| {
+ break :file Io.Dir.cwd().openFile(io, p, .{}) catch |err| {
fatal("unable to open file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) });
};
- } else fs.File.stdin();
- defer if (zig_source_path != null) f.close();
- var file_reader: fs.File.Reader = f.reader(io, &stdin_buffer);
+ } else Io.File.stdin();
+ defer if (zig_source_path != null) f.close(io);
+ var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer);
break :s std.zig.readSourceFileToEndAlloc(arena, &file_reader) catch |err| {
fatal("unable to load file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) });
};
@@ -6207,7 +6213,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
const tree = try Ast.parse(arena, source, mode);
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout_bw = &stdout_writer.interface;
switch (mode) {
.zig => {
@@ -6218,7 +6224,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
try wip_errors.init(arena);
try wip_errors.addZirErrorMessages(zir, tree, source, display_path);
var error_bundle = try wip_errors.toOwnedBundle("");
- error_bundle.renderToStdErr(.{}, color);
+ try error_bundle.renderToStderr(io, .{}, color);
if (zir.loweringFailed()) {
process.exit(1);
}
@@ -6228,7 +6234,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
if (zir.hasCompileErrors()) {
process.exit(1);
} else {
- return cleanExit();
+ return cleanExit(io);
}
}
if (!build_options.enable_debug_extensions) {
@@ -6279,7 +6285,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
if (zir.hasCompileErrors()) {
process.exit(1);
} else {
- return cleanExit();
+ return cleanExit(io);
}
},
.zon => {
@@ -6289,12 +6295,12 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
try wip_errors.init(arena);
try wip_errors.addZoirErrorMessages(zoir, tree, source, display_path);
var error_bundle = try wip_errors.toOwnedBundle("");
- error_bundle.renderToStdErr(.{}, color);
+ error_bundle.renderToStderr(io, .{}, color) catch {};
process.exit(1);
}
if (!want_output_text) {
- return cleanExit();
+ return cleanExit(io);
}
if (!build_options.enable_debug_extensions) {
@@ -6303,7 +6309,7 @@ fn cmdAstCheck(arena: Allocator, io: Io, args: []const []const u8) !void {
try @import("print_zoir.zig").renderToWriter(zoir, arena, stdout_bw);
try stdout_bw.flush();
- return cleanExit();
+ return cleanExit(io);
},
}
}
@@ -6330,8 +6336,8 @@ fn cmdDetectCpu(io: Io, args: []const []const u8) !void {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(detect_cpu_usage);
- return cleanExit();
+ try Io.File.stdout().writeStreamingAll(io, detect_cpu_usage);
+ return cleanExit(io);
} else if (mem.eql(u8, arg, "--llvm")) {
use_llvm = true;
} else {
@@ -6351,10 +6357,10 @@ fn cmdDetectCpu(io: Io, args: []const []const u8) !void {
const name = llvm.GetHostCPUName() orelse fatal("LLVM could not figure out the host cpu name", .{});
const features = llvm.GetHostCPUFeatures() orelse fatal("LLVM could not figure out the host cpu feature set", .{});
const cpu = try detectNativeCpuWithLLVM(builtin.cpu.arch, name, features);
- try printCpu(cpu);
+ try printCpu(io, cpu);
} else {
const host_target = std.zig.resolveTargetQueryOrFatal(io, .{});
- try printCpu(host_target.cpu);
+ try printCpu(io, host_target.cpu);
}
}
@@ -6421,8 +6427,8 @@ fn detectNativeCpuWithLLVM(
return result;
}
-fn printCpu(cpu: std.Target.Cpu) !void {
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+fn printCpu(io: Io, cpu: std.Target.Cpu) !void {
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout_bw = &stdout_writer.interface;
if (cpu.model.llvm_name) |llvm_name| {
@@ -6444,6 +6450,7 @@ fn printCpu(cpu: std.Target.Cpu) !void {
fn cmdDumpLlvmInts(
gpa: Allocator,
arena: Allocator,
+ io: Io,
args: []const []const u8,
) !void {
dev.check(.llvm_ints_command);
@@ -6471,7 +6478,7 @@ fn cmdDumpLlvmInts(
const dl = tm.createTargetDataLayout();
const context = llvm.Context.create();
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout_bw = &stdout_writer.interface;
for ([_]u16{ 1, 8, 16, 32, 64, 128, 256 }) |bits| {
const int_type = context.intType(bits);
@@ -6480,7 +6487,7 @@ fn cmdDumpLlvmInts(
}
try stdout_bw.flush();
- return cleanExit();
+ return cleanExit(io);
}
/// This is only enabled for debug builds.
@@ -6491,13 +6498,13 @@ fn cmdDumpZir(arena: Allocator, io: Io, args: []const []const u8) !void {
const cache_file = args[0];
- var f = fs.cwd().openFile(cache_file, .{}) catch |err| {
+ var f = Io.Dir.cwd().openFile(io, cache_file, .{}) catch |err| {
fatal("unable to open zir cache file for dumping '{s}': {s}", .{ cache_file, @errorName(err) });
};
- defer f.close();
+ defer f.close(io);
const zir = try Zcu.loadZirCache(arena, io, f);
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout_bw = &stdout_writer.interface;
{
const instruction_bytes = zir.instructions.len *
@@ -6538,18 +6545,18 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void {
const new_source_path = args[1];
const old_source = source: {
- var f = fs.cwd().openFile(old_source_path, .{}) catch |err|
+ var f = Io.Dir.cwd().openFile(io, old_source_path, .{}) catch |err|
fatal("unable to open old source file '{s}': {s}", .{ old_source_path, @errorName(err) });
- defer f.close();
- var file_reader: fs.File.Reader = f.reader(io, &stdin_buffer);
+ defer f.close(io);
+ var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer);
break :source std.zig.readSourceFileToEndAlloc(arena, &file_reader) catch |err|
fatal("unable to read old source file '{s}': {s}", .{ old_source_path, @errorName(err) });
};
const new_source = source: {
- var f = fs.cwd().openFile(new_source_path, .{}) catch |err|
+ var f = Io.Dir.cwd().openFile(io, new_source_path, .{}) catch |err|
fatal("unable to open new source file '{s}': {s}", .{ new_source_path, @errorName(err) });
- defer f.close();
- var file_reader: fs.File.Reader = f.reader(io, &stdin_buffer);
+ defer f.close(io);
+ var file_reader: Io.File.Reader = f.reader(io, &stdin_buffer);
break :source std.zig.readSourceFileToEndAlloc(arena, &file_reader) catch |err|
fatal("unable to read new source file '{s}': {s}", .{ new_source_path, @errorName(err) });
};
@@ -6562,7 +6569,7 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void {
try wip_errors.init(arena);
try wip_errors.addZirErrorMessages(old_zir, old_tree, old_source, old_source_path);
var error_bundle = try wip_errors.toOwnedBundle("");
- error_bundle.renderToStdErr(.{}, color);
+ error_bundle.renderToStderr(io, .{}, color) catch {};
process.exit(1);
}
@@ -6574,14 +6581,14 @@ fn cmdChangelist(arena: Allocator, io: Io, args: []const []const u8) !void {
try wip_errors.init(arena);
try wip_errors.addZirErrorMessages(new_zir, new_tree, new_source, new_source_path);
var error_bundle = try wip_errors.toOwnedBundle("");
- error_bundle.renderToStdErr(.{}, color);
+ error_bundle.renderToStderr(io, .{}, color) catch {};
process.exit(1);
}
var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .empty;
try Zcu.mapOldZirToNew(arena, old_zir, new_zir, &inst_map);
- var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout_bw = &stdout_writer.interface;
{
try stdout_bw.print("Instruction mappings:\n", .{});
@@ -6623,7 +6630,7 @@ fn warnAboutForeignBinaries(
const host_query: std.Target.Query = .{};
const host_target = std.zig.resolveTargetQueryOrFatal(io, host_query);
- switch (std.zig.system.getExternalExecutor(&host_target, target, .{ .link_libc = link_libc })) {
+ switch (std.zig.system.getExternalExecutor(io, &host_target, target, .{ .link_libc = link_libc })) {
.native => return,
.rosetta => {
const host_name = try host_target.zigTriple(arena);
@@ -6829,6 +6836,7 @@ const ClangSearchSanitizer = struct {
};
fn accessFrameworkPath(
+ io: Io,
test_path: *std.array_list.Managed(u8),
checked_paths: *std.array_list.Managed(u8),
framework_dir_path: []const u8,
@@ -6842,7 +6850,7 @@ fn accessFrameworkPath(
framework_dir_path, framework_name, framework_name, ext,
});
try checked_paths.print("\n {s}", .{test_path.items});
- fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
+ Io.Dir.cwd().access(io, test_path.items, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| fatal("unable to search for {s} framework '{s}': {s}", .{
ext, test_path.items, @errorName(e),
@@ -6912,8 +6920,8 @@ fn cmdFetch(
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
- try fs.File.stdout().writeAll(usage_fetch);
- return cleanExit();
+ try Io.File.stdout().writeStreamingAll(io, usage_fetch);
+ return cleanExit(io);
} else if (mem.eql(u8, arg, "--global-cache-dir")) {
if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
i += 1;
@@ -6946,7 +6954,7 @@ fn cmdFetch(
try http_client.initDefaultProxies(arena);
- var root_prog_node = std.Progress.start(.{
+ var root_prog_node = std.Progress.start(io, .{
.root_name = "Fetch",
});
defer root_prog_node.end();
@@ -6954,11 +6962,11 @@ fn cmdFetch(
var global_cache_directory: Directory = l: {
const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena);
break :l .{
- .handle = try fs.cwd().makeOpenPath(p, .{}),
+ .handle = try Io.Dir.cwd().createDirPathOpen(io, p, .{}),
.path = p,
};
};
- defer global_cache_directory.handle.close();
+ defer global_cache_directory.handle.close(io);
var job_queue: Package.Fetch.JobQueue = .{
.io = io,
@@ -7009,7 +7017,7 @@ fn cmdFetch(
if (fetch.error_bundle.root_list.items.len > 0) {
var errors = try fetch.error_bundle.toOwnedBundle("");
- errors.renderToStdErr(.{}, color);
+ errors.renderToStderr(io, .{}, color) catch {};
process.exit(1);
}
@@ -7021,10 +7029,10 @@ fn cmdFetch(
const name = switch (save) {
.no => {
- var stdout = fs.File.stdout().writerStreaming(&stdout_buffer);
+ var stdout = Io.File.stdout().writerStreaming(io, &stdout_buffer);
try stdout.interface.print("{s}\n", .{package_hash_slice});
try stdout.interface.flush();
- return cleanExit();
+ return cleanExit(io);
},
.yes, .exact => |name| name: {
if (name) |n| break :name n;
@@ -7036,14 +7044,14 @@ fn cmdFetch(
const cwd_path = try introspect.getResolvedCwd(arena);
- var build_root = try findBuildRoot(arena, .{
+ var build_root = try findBuildRoot(arena, io, .{
.cwd_path = cwd_path,
});
- defer build_root.deinit();
+ defer build_root.deinit(io);
// The name to use in case the manifest file needs to be created now.
const init_root_name = fs.path.basename(build_root.directory.path orelse cwd_path);
- var manifest, var ast = try loadManifest(gpa, arena, .{
+ var manifest, var ast = try loadManifest(gpa, arena, io, .{
.root_name = try sanitizeExampleName(arena, init_root_name),
.dir = build_root.directory.handle,
.color = color,
@@ -7159,15 +7167,16 @@ fn cmdFetch(
try ast.render(gpa, &aw.writer, fixups);
const rendered = aw.written();
- build_root.directory.handle.writeFile(.{ .sub_path = Package.Manifest.basename, .data = rendered }) catch |err| {
+ build_root.directory.handle.writeFile(io, .{ .sub_path = Package.Manifest.basename, .data = rendered }) catch |err| {
fatal("unable to write {s} file: {t}", .{ Package.Manifest.basename, err });
};
- return cleanExit();
+ return cleanExit(io);
}
fn createEmptyDependenciesModule(
arena: Allocator,
+ io: Io,
main_mod: *Package.Module,
dirs: Compilation.Directories,
global_options: Compilation.Config,
@@ -7176,6 +7185,7 @@ fn createEmptyDependenciesModule(
try Package.Fetch.JobQueue.createEmptyDependenciesSource(&source);
_ = try createDependenciesModule(
arena,
+ io,
source.items,
main_mod,
dirs,
@@ -7187,6 +7197,7 @@ fn createEmptyDependenciesModule(
/// build runner to obtain via `@import("@dependencies")`.
fn createDependenciesModule(
arena: Allocator,
+ io: Io,
source: []const u8,
main_mod: *Package.Module,
dirs: Compilation.Directories,
@@ -7197,9 +7208,9 @@ fn createDependenciesModule(
const rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
{
- var tmp_dir = try dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{});
- defer tmp_dir.close();
- try tmp_dir.writeFile(.{ .sub_path = basename, .data = source });
+ var tmp_dir = try dirs.local_cache.handle.createDirPathOpen(io, tmp_dir_sub_path, .{});
+ defer tmp_dir.close(io);
+ try tmp_dir.writeFile(io, .{ .sub_path = basename, .data = source });
}
var hh: Cache.HashHelper = .{};
@@ -7208,11 +7219,7 @@ fn createDependenciesModule(
const hex_digest = hh.final();
const o_dir_sub_path = try arena.dupe(u8, "o" ++ fs.path.sep_str ++ hex_digest);
- try Package.Fetch.renameTmpIntoCache(
- dirs.local_cache.handle,
- tmp_dir_sub_path,
- o_dir_sub_path,
- );
+ try Package.Fetch.renameTmpIntoCache(io, dirs.local_cache.handle, tmp_dir_sub_path, o_dir_sub_path);
const deps_mod = try Package.Module.create(arena, .{
.paths = .{
@@ -7232,10 +7239,10 @@ fn createDependenciesModule(
const BuildRoot = struct {
directory: Cache.Directory,
build_zig_basename: []const u8,
- cleanup_build_dir: ?fs.Dir,
+ cleanup_build_dir: ?Io.Dir,
- fn deinit(br: *BuildRoot) void {
- if (br.cleanup_build_dir) |*dir| dir.close();
+ fn deinit(br: *BuildRoot, io: Io) void {
+ if (br.cleanup_build_dir) |*dir| dir.close(io);
br.* = undefined;
}
};
@@ -7245,7 +7252,7 @@ const FindBuildRootOptions = struct {
cwd_path: ?[]const u8 = null,
};
-fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
+fn findBuildRoot(arena: Allocator, io: Io, options: FindBuildRootOptions) !BuildRoot {
const cwd_path = options.cwd_path orelse try introspect.getResolvedCwd(arena);
const build_zig_basename = if (options.build_file) |bf|
fs.path.basename(bf)
@@ -7254,7 +7261,7 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
if (options.build_file) |bf| {
if (fs.path.dirname(bf)) |dirname| {
- const dir = fs.cwd().openDir(dirname, .{}) catch |err| {
+ const dir = Io.Dir.cwd().openDir(io, dirname, .{}) catch |err| {
fatal("unable to open directory to build file from argument 'build-file', '{s}': {s}", .{ dirname, @errorName(err) });
};
return .{
@@ -7266,7 +7273,7 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
return .{
.build_zig_basename = build_zig_basename,
- .directory = .{ .path = null, .handle = fs.cwd() },
+ .directory = .{ .path = null, .handle = Io.Dir.cwd() },
.cleanup_build_dir = null,
};
}
@@ -7274,8 +7281,8 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
var dirname: []const u8 = cwd_path;
while (true) {
const joined_path = try fs.path.join(arena, &[_][]const u8{ dirname, build_zig_basename });
- if (fs.cwd().access(joined_path, .{})) |_| {
- const dir = fs.cwd().openDir(dirname, .{}) catch |err| {
+ if (Io.Dir.cwd().access(io, joined_path, .{})) |_| {
+ const dir = Io.Dir.cwd().openDir(io, dirname, .{}) catch |err| {
fatal("unable to open directory while searching for build.zig file, '{s}': {s}", .{ dirname, @errorName(err) });
};
return .{
@@ -7304,17 +7311,19 @@ fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
const LoadManifestOptions = struct {
root_name: []const u8,
- dir: fs.Dir,
+ dir: Io.Dir,
color: Color,
};
fn loadManifest(
gpa: Allocator,
arena: Allocator,
+ io: Io,
options: LoadManifestOptions,
) !struct { Package.Manifest, Ast } {
const manifest_bytes = while (true) {
break options.dir.readFileAllocOptions(
+ io,
Package.Manifest.basename,
arena,
.limited(Package.Manifest.max_bytes),
@@ -7322,7 +7331,7 @@ fn loadManifest(
0,
) catch |err| switch (err) {
error.FileNotFound => {
- writeSimpleTemplateFile(Package.Manifest.basename,
+ writeSimpleTemplateFile(io, Package.Manifest.basename,
\\.{{
\\ .name = .{s},
\\ .version = "{s}",
@@ -7348,7 +7357,7 @@ fn loadManifest(
errdefer ast.deinit(gpa);
if (ast.errors.len > 0) {
- try std.zig.printAstErrorsToStderr(gpa, ast, Package.Manifest.basename, options.color);
+ try std.zig.printAstErrorsToStderr(gpa, io, ast, Package.Manifest.basename, options.color);
process.exit(2);
}
@@ -7365,7 +7374,7 @@ fn loadManifest(
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
- error_bundle.renderToStdErr(.{}, options.color);
+ error_bundle.renderToStderr(io, .{}, options.color) catch {};
process.exit(2);
}
@@ -7374,12 +7383,12 @@ fn loadManifest(
const Templates = struct {
zig_lib_directory: Cache.Directory,
- dir: fs.Dir,
+ dir: Io.Dir,
buffer: std.array_list.Managed(u8),
- fn deinit(templates: *Templates) void {
- templates.zig_lib_directory.handle.close();
- templates.dir.close();
+ fn deinit(templates: *Templates, io: Io) void {
+ templates.zig_lib_directory.handle.close(io);
+ templates.dir.close(io);
templates.buffer.deinit();
templates.* = undefined;
}
@@ -7387,20 +7396,21 @@ const Templates = struct {
fn write(
templates: *Templates,
arena: Allocator,
- out_dir: fs.Dir,
+ io: Io,
+ out_dir: Io.Dir,
root_name: []const u8,
template_path: []const u8,
fingerprint: Package.Fingerprint,
) !void {
if (fs.path.dirname(template_path)) |dirname| {
- out_dir.makePath(dirname) catch |err| {
- fatal("unable to make path '{s}': {s}", .{ dirname, @errorName(err) });
+ out_dir.createDirPath(io, dirname) catch |err| {
+ fatal("unable to make path '{s}': {t}", .{ dirname, err });
};
}
const max_bytes = 10 * 1024 * 1024;
- const contents = templates.dir.readFileAlloc(template_path, arena, .limited(max_bytes)) catch |err| {
- fatal("unable to read template file '{s}': {s}", .{ template_path, @errorName(err) });
+ const contents = templates.dir.readFileAlloc(io, template_path, arena, .limited(max_bytes)) catch |err| {
+ fatal("unable to read template file '{s}': {t}", .{ template_path, err });
};
templates.buffer.clearRetainingCapacity();
try templates.buffer.ensureUnusedCapacity(contents.len);
@@ -7428,39 +7438,39 @@ const Templates = struct {
i += 1;
}
- return out_dir.writeFile(.{
+ return out_dir.writeFile(io, .{
.sub_path = template_path,
.data = templates.buffer.items,
.flags = .{ .exclusive = true },
});
}
};
-fn writeSimpleTemplateFile(file_name: []const u8, comptime fmt: []const u8, args: anytype) !void {
- const f = try fs.cwd().createFile(file_name, .{ .exclusive = true });
- defer f.close();
+fn writeSimpleTemplateFile(io: Io, file_name: []const u8, comptime fmt: []const u8, args: anytype) !void {
+ const f = try Io.Dir.cwd().createFile(io, file_name, .{ .exclusive = true });
+ defer f.close(io);
var buf: [4096]u8 = undefined;
- var fw = f.writer(&buf);
+ var fw = f.writer(io, &buf);
try fw.interface.print(fmt, args);
try fw.interface.flush();
}
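
An illustrative call, mirroring the cmdInit sites above (the file name and contents are hypothetical): fmt is comptime, so the {s} placeholder is checked against the argument tuple at compile time, and .exclusive = true turns an existing file into error.PathAlreadyExists rather than truncating it.

    writeSimpleTemplateFile(io, "build.zig.zon",
        \\.{{
        \\    .name = .{s},
        \\    .version = "0.0.1",
        \\}}
    , .{"demo"}) catch |err| switch (err) {
        error.PathAlreadyExists => fatal("refusing to overwrite build.zig.zon", .{}),
        else => fatal("failed to create build.zig.zon: {t}", .{err}),
    };
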
-fn findTemplates(gpa: Allocator, arena: Allocator) Templates {
+fn findTemplates(gpa: Allocator, arena: Allocator, io: Io) Templates {
const cwd_path = introspect.getResolvedCwd(arena) catch |err| {
- fatal("unable to get cwd: {s}", .{@errorName(err)});
+ fatal("unable to get cwd: {t}", .{err});
};
- const self_exe_path = fs.selfExePathAlloc(arena) catch |err| {
- fatal("unable to find self exe path: {s}", .{@errorName(err)});
+ const self_exe_path = process.executablePathAlloc(io, arena) catch |err| {
+ fatal("unable to find self exe path: {t}", .{err});
};
- var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, cwd_path, self_exe_path) catch |err| {
- fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) });
+ var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, io, cwd_path, self_exe_path) catch |err| {
+ fatal("unable to find zig installation directory '{s}': {t}", .{ self_exe_path, err });
};
const s = fs.path.sep_str;
const template_sub_path = "init";
- const template_dir = zig_lib_directory.handle.openDir(template_sub_path, .{}) catch |err| {
+ const template_dir = zig_lib_directory.handle.openDir(io, template_sub_path, .{}) catch |err| {
const path = zig_lib_directory.path orelse ".";
- fatal("unable to open zig project template directory '{s}{s}{s}': {s}", .{
- path, s, template_sub_path, @errorName(err),
+ fatal("unable to open zig project template directory '{s}{s}{s}': {t}", .{
+ path, s, template_sub_path, err,
});
};
@@ -7574,17 +7584,18 @@ fn anyObjectLinkInputs(link_inputs: []const link.UnresolvedInput) bool {
return false;
}
-fn addLibDirectoryWarn(lib_directories: *std.ArrayList(Directory), path: []const u8) void {
- return addLibDirectoryWarn2(lib_directories, path, false);
+fn addLibDirectoryWarn(io: Io, lib_directories: *std.ArrayList(Directory), path: []const u8) void {
+ return addLibDirectoryWarn2(io, lib_directories, path, false);
}
fn addLibDirectoryWarn2(
+ io: Io,
lib_directories: *std.ArrayList(Directory),
path: []const u8,
ignore_not_found: bool,
) void {
lib_directories.appendAssumeCapacity(.{
- .handle = fs.cwd().openDir(path, .{}) catch |err| {
+ .handle = Io.Dir.cwd().openDir(io, path, .{}) catch |err| {
if (err == error.FileNotFound and ignore_not_found) return;
warn("unable to open library directory '{s}': {s}", .{ path, @errorName(err) });
return;
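
[annotation] The two recurring changes in this file, combined in one hedged sketch (openLibDir is a hypothetical helper, not in the patch): filesystem access moves from std.fs to Io.Dir with an explicit io argument, and error formatting moves from "{s}" plus @errorName to the "{t}" specifier, which prints the error name directly.

    fn openLibDir(io: Io, path: []const u8) Io.Dir {
        return Io.Dir.cwd().openDir(io, path, .{}) catch |err| {
            // before: fatal("...: {s}", .{ path, @errorName(err) });
            fatal("unable to open library directory '{s}': {t}", .{ path, err });
        };
    }
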
diff --git a/src/print_env.zig b/src/print_env.zig
index e1847688ad..3540a58d18 100644
--- a/src/print_env.zig
+++ b/src/print_env.zig
@@ -1,13 +1,17 @@
-const std = @import("std");
const builtin = @import("builtin");
-const build_options = @import("build_options");
-const Compilation = @import("Compilation.zig");
+
+const std = @import("std");
+const Io = std.Io;
const Allocator = std.mem.Allocator;
const EnvVar = std.zig.EnvVar;
const fatal = std.process.fatal;
+const build_options = @import("build_options");
+const Compilation = @import("Compilation.zig");
+
pub fn cmdEnv(
arena: Allocator,
+ io: Io,
out: *std.Io.Writer,
args: []const []const u8,
wasi_preopens: switch (builtin.target.os.tag) {
@@ -21,20 +25,21 @@ pub fn cmdEnv(
const self_exe_path = switch (builtin.target.os.tag) {
.wasi => args[0],
- else => std.fs.selfExePathAlloc(arena) catch |err| {
- fatal("unable to find zig self exe path: {s}", .{@errorName(err)});
+ else => std.process.executablePathAlloc(io, arena) catch |err| {
+ fatal("unable to find zig self exe path: {t}", .{err});
},
};
var dirs: Compilation.Directories = .init(
arena,
+ io,
override_lib_dir,
override_global_cache_dir,
.global,
if (builtin.target.os.tag == .wasi) wasi_preopens,
if (builtin.target.os.tag != .wasi) self_exe_path,
);
- defer dirs.deinit();
+ defer dirs.deinit(io);
const zig_lib_dir = dirs.zig_lib.path orelse "";
const zig_std_dir = try dirs.zig_lib.join(arena, &.{"std"});
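
[annotation] For reference, a hedged sketch of the self-exe lookup migration seen in both main.zig and print_env.zig: std.fs.selfExePathAlloc(arena) becomes std.process.executablePathAlloc(io, arena), with the Io interface passed explicitly. The wrapper function is illustrative only.

    fn selfExePath(io: Io, arena: Allocator) []const u8 {
        return std.process.executablePathAlloc(io, arena) catch |err|
            fatal("unable to find self exe path: {t}", .{err});
    }
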
diff --git a/src/print_targets.zig b/src/print_targets.zig
index d9118b901b..a5e89506ad 100644
--- a/src/print_targets.zig
+++ b/src/print_targets.zig
@@ -1,35 +1,38 @@
const std = @import("std");
+const Io = std.Io;
const fs = std.fs;
const mem = std.mem;
const meta = std.meta;
const fatal = std.process.fatal;
const Allocator = std.mem.Allocator;
const Target = std.Target;
-const target = @import("target.zig");
const assert = std.debug.assert;
+
const glibc = @import("libs/glibc.zig");
const introspect = @import("introspect.zig");
+const target = @import("target.zig");
pub fn cmdTargets(
allocator: Allocator,
+ io: Io,
args: []const []const u8,
out: *std.Io.Writer,
native_target: *const Target,
) !void {
_ = args;
- var zig_lib_directory = introspect.findZigLibDir(allocator) catch |err| {
- fatal("unable to find zig installation directory: {s}\n", .{@errorName(err)});
- };
- defer zig_lib_directory.handle.close();
+ var zig_lib_directory = introspect.findZigLibDir(allocator, io) catch |err|
+ fatal("unable to find zig installation directory: {t}", .{err});
+ defer zig_lib_directory.handle.close(io);
defer allocator.free(zig_lib_directory.path.?);
const abilists_contents = zig_lib_directory.handle.readFileAlloc(
+ io,
glibc.abilists_path,
allocator,
.limited(glibc.abilists_max_size),
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
- else => fatal("unable to read " ++ glibc.abilists_path ++ ": {s}", .{@errorName(err)}),
+ else => fatal("unable to read " ++ glibc.abilists_path ++ ": {t}", .{err}),
};
defer allocator.free(abilists_contents);
@@ -48,9 +51,7 @@ pub fn cmdTargets(
{
var libc_obj = try root_obj.beginTupleField("libc", .{});
for (std.zig.target.available_libcs) |libc| {
- const tmp = try std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{
- @tagName(libc.arch), @tagName(libc.os), @tagName(libc.abi),
- });
+ const tmp = try std.fmt.allocPrint(allocator, "{t}-{t}-{t}", .{ libc.arch, libc.os, libc.abi });
defer allocator.free(tmp);
try libc_obj.field(tmp, .{});
}
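
[annotation] Finally, a small illustrative test (not part of the patch) of why the allocPrint change above is behavior-preserving: "{t}" formats an enum value by its tag name, which is exactly what the removed @tagName calls produced. The sample arch/os/abi values are assumptions chosen for the expected string.

    test "{t} prints enum tag names" {
        const gpa = std.testing.allocator;
        const triple = try std.fmt.allocPrint(gpa, "{t}-{t}-{t}", .{
            std.Target.Cpu.Arch.x86_64,
            std.Target.Os.Tag.linux,
            std.Target.Abi.gnu,
        });
        defer gpa.free(triple);
        try std.testing.expectEqualStrings("x86_64-linux-gnu", triple);
    }
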