| author | Andrew Kelley <andrew@ziglang.org> | 2025-12-14 23:35:33 -0800 |
|---|---|---|
| committer | Andrew Kelley <andrew@ziglang.org> | 2025-12-23 22:15:09 -0800 |
| commit | 16bd2e137e56f842a9ff5e015425f9e08eeb97fd (patch) | |
| tree | 4a4b46fd3eeeb52fa964f8753515e8c0ca4a6781 /src | |
| parent | 4458e423bf2d2cf485031d1f527e407bfc9113df (diff) | |
| download | zig-16bd2e137e56f842a9ff5e015425f9e08eeb97fd.tar.gz zig-16bd2e137e56f842a9ff5e015425f9e08eeb97fd.zip | |
compiler: fix most compilation errors from std.fs changes
Diffstat (limited to 'src')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | src/Builtin.zig | 2 |
| -rw-r--r-- | src/Compilation.zig | 138 |
| -rw-r--r-- | src/Package/Fetch.zig | 37 |
| -rw-r--r-- | src/Package/Fetch/git.zig | 2 |
| -rw-r--r-- | src/Sema.zig | 2 |
| -rw-r--r-- | src/Zcu.zig | 17 |
| -rw-r--r-- | src/Zcu/PerThread.zig | 50 |
| -rw-r--r-- | src/fmt.zig | 12 |
| -rw-r--r-- | src/libs/freebsd.zig | 4 |
| -rw-r--r-- | src/libs/glibc.zig | 4 |
| -rw-r--r-- | src/libs/netbsd.zig | 4 |
| -rw-r--r-- | src/link.zig | 5 |
| -rw-r--r-- | src/main.zig | 7 |
13 files changed, 149 insertions, 135 deletions
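The pattern repeated across most hunks below is mechanical: code that used to reach the filesystem or a cache lock implicitly now receives an explicit `Io` handle and threads it through (`lock.release(io)`, `atomicFile(io, ...)`, `deinit(gpa, io)`, `walker.next(io)`, and so on). The following is a minimal, self-contained sketch of that calling-convention change; the `Io`, `Lock`, and `CrtFile` types here are stand-ins for illustration only, not the real `std.Io` or `Cache.Lock` APIs.

```zig
const std = @import("std");

// Stand-in for the I/O handle that the compiler now passes explicitly.
const Io = struct {};

const Lock = struct {
    held: bool = true,

    // After the std.fs changes, releasing a lock needs the `Io` handle.
    fn release(lock: *Lock, io: Io) void {
        _ = io; // the real lock would forward this to the Io implementation
        lock.held = false;
    }
};

const CrtFile = struct {
    lock: Lock,
    sub_path: []u8,

    // Before: deinit(self, gpa). After: deinit(self, gpa, io), so the lock
    // release can be routed through the explicit I/O handle.
    fn deinit(self: *CrtFile, gpa: std.mem.Allocator, io: Io) void {
        self.lock.release(io);
        gpa.free(self.sub_path);
        self.* = undefined;
    }
};

pub fn main() !void {
    const gpa = std.heap.page_allocator;
    const io: Io = .{};

    var crt_file: CrtFile = .{
        .lock = .{},
        .sub_path = try gpa.dupe(u8, "o/example.o"),
    };
    // Call sites updated throughout the diff follow this shape.
    crt_file.deinit(gpa, io);
}
```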
diff --git a/src/Builtin.zig b/src/Builtin.zig
index 9e4fae8e6a..a097e88734 100644
--- a/src/Builtin.zig
+++ b/src/Builtin.zig
@@ -343,7 +343,7 @@ pub fn updateFileOnDisk(file: *File, comp: *Compilation) !void {
     }

     // `make_path` matters because the dir hasn't actually been created yet.
-    var af = try root_dir.atomicFile(sub_path, .{ .make_path = true, .write_buffer = &.{} });
+    var af = try root_dir.atomicFile(io, sub_path, .{ .make_path = true, .write_buffer = &.{} });
     defer af.deinit();
     try af.file_writer.interface.writeAll(file.source.?);
     af.finish() catch |err| switch (err) {
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 47098317b5..37e15ab171 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -771,8 +771,8 @@ pub const Directories = struct {
         const zig_lib: Cache.Directory = d: {
             if (override_zig_lib) |path| break :d openUnresolved(arena, io, cwd, path, .@"zig lib");
             if (wasi) break :d openWasiPreopen(wasi_preopens, "/lib");
-            break :d introspect.findZigLibDirFromSelfExe(arena, cwd, self_exe_path) catch |err| {
-                fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) });
+            break :d introspect.findZigLibDirFromSelfExe(arena, io, cwd, self_exe_path) catch |err| {
+                fatal("unable to find zig installation directory '{s}': {t}", .{ self_exe_path, err });
             };
         };
@@ -780,7 +780,7 @@
             if (override_global_cache) |path| break :d openUnresolved(arena, io, cwd, path, .@"global cache");
             if (wasi) break :d openWasiPreopen(wasi_preopens, "/cache");
             const path = introspect.resolveGlobalCacheDir(arena) catch |err| {
-                fatal("unable to resolve zig cache directory: {s}", .{@errorName(err)});
+                fatal("unable to resolve zig cache directory: {t}", .{err});
             };
             break :d openUnresolved(arena, io, cwd, path, .@"global cache");
         };
@@ -789,7 +789,7 @@
             .override => |path| openUnresolved(arena, io, cwd, path, .@"local cache"),
             .search => d: {
                 const maybe_path = introspect.resolveSuitableLocalCacheDir(arena, io, cwd) catch |err| {
-                    fatal("unable to resolve zig cache directory: {s}", .{@errorName(err)});
+                    fatal("unable to resolve zig cache directory: {t}", .{err});
                 };
                 const path = maybe_path orelse break :d global_cache;
                 break :d openUnresolved(arena, io, cwd, path, .@"local cache");
@@ -919,8 +919,8 @@ pub const CrtFile = struct {
     lock: Cache.Lock,
     full_object_path: Cache.Path,

-    pub fn deinit(self: *CrtFile, gpa: Allocator) void {
-        self.lock.release();
+    pub fn deinit(self: *CrtFile, gpa: Allocator, io: Io) void {
+        self.lock.release(io);
         gpa.free(self.full_object_path.sub_path);
         self.* = undefined;
     }
@@ -1317,7 +1317,7 @@ pub const CObject = struct {
     };

    /// Returns if there was failure.
-    pub fn clearStatus(self: *CObject, gpa: Allocator) bool {
+    pub fn clearStatus(self: *CObject, gpa: Allocator, io: Io) bool {
        switch (self.status) {
            .new => return false,
            .failure, .failure_retryable => {
@@ -1326,15 +1326,15 @@
            },
            .success => |*success| {
                gpa.free(success.object_path.sub_path);
-                success.lock.release();
+                success.lock.release(io);
                self.status = .new;
                return false;
            },
        }
    }

-    pub fn destroy(self: *CObject, gpa: Allocator) void {
-        _ = self.clearStatus(gpa);
+    pub fn destroy(self: *CObject, gpa: Allocator, io: Io) void {
+        _ = self.clearStatus(gpa, io);
        gpa.destroy(self);
    }
};
@@ -1364,7 +1364,7 @@ pub const Win32Resource = struct {
    },

    /// Returns true if there was failure.
-    pub fn clearStatus(self: *Win32Resource, gpa: Allocator) bool {
+    pub fn clearStatus(self: *Win32Resource, gpa: Allocator, io: Io) bool {
        switch (self.status) {
            .new => return false,
            .failure, .failure_retryable => {
@@ -1373,15 +1373,15 @@
            },
            .success => |*success| {
                gpa.free(success.res_path);
-                success.lock.release();
+                success.lock.release(io);
                self.status = .new;
                return false;
            },
        }
    }

-    pub fn destroy(self: *Win32Resource, gpa: Allocator) void {
-        _ = self.clearStatus(gpa);
+    pub fn destroy(self: *Win32Resource, gpa: Allocator, io: Io) void {
+        _ = self.clearStatus(gpa, io);
        gpa.destroy(self);
    }
};
@@ -1610,9 +1610,9 @@ const CacheUse = union(CacheMode) {
        /// Prevents other processes from clobbering files in the output directory.
        lock: ?Cache.Lock,

-        fn releaseLock(whole: *Whole) void {
+        fn releaseLock(whole: *Whole, io: Io) void {
            if (whole.lock) |*lock| {
-                lock.release();
+                lock.release(io);
                whole.lock = null;
            }
        }
@@ -1634,7 +1634,7 @@ const CacheUse = union(CacheMode) {
            },
            .whole => |whole| {
                assert(whole.tmp_artifact_directory == null);
-                whole.releaseLock();
+                whole.releaseLock(io);
            },
        }
    }
@@ -1903,13 +1903,17 @@ pub const CreateDiagnostic = union(enum) {
        return error.CreateFail;
    }
};
-pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, options: CreateOptions) error{
+
+pub const CreateError = error{
    OutOfMemory,
+    Canceled,
    Unexpected,
    CurrentWorkingDirectoryUnlinked,
    /// An error has been stored to `diag`.
    CreateFail,
-}!*Compilation {
+};
+
+pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, options: CreateOptions) CreateError!*Compilation {
    const output_mode = options.config.output_mode;
    const is_dyn_lib = switch (output_mode) {
        .Obj, .Exe => false,
@@ -1957,6 +1961,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
    const libc_dirs = std.zig.LibCDirs.detect(
        arena,
+        io,
        options.dirs.zig_lib.path.?,
        target,
        options.root_mod.resolved_target.is_native_abi,
@@ -2701,7 +2706,7 @@ pub fn destroy(comp: *Compilation) void {
    if (comp.bin_file) |lf| lf.destroy();
    if (comp.zcu) |zcu| zcu.deinit();
-    comp.cache_use.deinit();
+    comp.cache_use.deinit(io);

    for (&comp.work_queues) |*work_queue| work_queue.deinit(gpa);
    comp.c_object_work_queue.deinit(gpa);
@@ -2714,36 +2719,36 @@ pub fn destroy(comp: *Compilation) void {
        var it = comp.crt_files.iterator();
        while (it.next()) |entry| {
            gpa.free(entry.key_ptr.*);
-            entry.value_ptr.deinit(gpa);
+            entry.value_ptr.deinit(gpa, io);
        }
        comp.crt_files.deinit(gpa);
    }

-    if (comp.libcxx_static_lib) |*crt_file| crt_file.deinit(gpa);
-    if (comp.libcxxabi_static_lib) |*crt_file| crt_file.deinit(gpa);
-    if (comp.libunwind_static_lib) |*crt_file| crt_file.deinit(gpa);
-    if (comp.tsan_lib) |*crt_file| crt_file.deinit(gpa);
-    if (comp.ubsan_rt_lib) |*crt_file| crt_file.deinit(gpa);
-    if (comp.ubsan_rt_obj) |*crt_file| crt_file.deinit(gpa);
-    if (comp.zigc_static_lib) |*crt_file| crt_file.deinit(gpa);
-    if (comp.compiler_rt_lib) |*crt_file| crt_file.deinit(gpa);
-    if (comp.compiler_rt_obj) |*crt_file| crt_file.deinit(gpa);
-    if (comp.compiler_rt_dyn_lib) |*crt_file| crt_file.deinit(gpa);
-    if (comp.fuzzer_lib) |*crt_file| crt_file.deinit(gpa);
+    if (comp.libcxx_static_lib) |*crt_file| crt_file.deinit(gpa, io);
+    if (comp.libcxxabi_static_lib) |*crt_file| crt_file.deinit(gpa, io);
+    if (comp.libunwind_static_lib) |*crt_file| crt_file.deinit(gpa, io);
+    if (comp.tsan_lib) |*crt_file| crt_file.deinit(gpa, io);
+    if (comp.ubsan_rt_lib) |*crt_file| crt_file.deinit(gpa, io);
+    if (comp.ubsan_rt_obj) |*crt_file| crt_file.deinit(gpa, io);
+    if (comp.zigc_static_lib) |*crt_file| crt_file.deinit(gpa, io);
+    if (comp.compiler_rt_lib) |*crt_file| crt_file.deinit(gpa, io);
+    if (comp.compiler_rt_obj) |*crt_file| crt_file.deinit(gpa, io);
+    if (comp.compiler_rt_dyn_lib) |*crt_file| crt_file.deinit(gpa, io);
+    if (comp.fuzzer_lib) |*crt_file| crt_file.deinit(gpa, io);

    if (comp.glibc_so_files) |*glibc_file| {
-        glibc_file.deinit(gpa);
+        glibc_file.deinit(gpa, io);
    }

    if (comp.freebsd_so_files) |*freebsd_file| {
-        freebsd_file.deinit(gpa);
+        freebsd_file.deinit(gpa, io);
    }

    if (comp.netbsd_so_files) |*netbsd_file| {
-        netbsd_file.deinit(gpa);
+        netbsd_file.deinit(gpa, io);
    }

    for (comp.c_object_table.keys()) |key| {
-        key.destroy(gpa);
+        key.destroy(gpa, io);
    }
    comp.c_object_table.deinit(gpa);
@@ -2753,7 +2758,7 @@ pub fn destroy(comp: *Compilation) void {
    comp.failed_c_objects.deinit(gpa);

    for (comp.win32_resource_table.keys()) |key| {
-        key.destroy(gpa);
+        key.destroy(gpa, io);
    }
    comp.win32_resource_table.deinit(gpa);
@@ -2906,7 +2911,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
        .whole => |whole| {
            assert(comp.bin_file == null);
            // We are about to obtain this lock, so here we give other processes a chance first.
-            whole.releaseLock();
+            whole.releaseLock(io);

            man = comp.cache_parent.obtain();
            whole.cache_manifest = &man;
@@ -3092,17 +3097,12 @@
        }

        if (build_options.enable_debug_extensions and comp.verbose_intern_pool) {
-            std.debug.print("intern pool stats for '{s}':\n", .{
-                comp.root_name,
-            });
-            zcu.intern_pool.dump();
+            std.debug.print("intern pool stats for '{s}':\n", .{comp.root_name});
+            zcu.intern_pool.dump(io);
        }

        if (build_options.enable_debug_extensions and comp.verbose_generic_instances) {
-            std.debug.print("generic instances for '{s}:0x{x}':\n", .{
-                comp.root_name,
-                @intFromPtr(zcu),
-            });
+            std.debug.print("generic instances for '{s}:0x{x}':\n", .{ comp.root_name, @intFromPtr(zcu) });
            zcu.intern_pool.dumpGenericInstances(gpa);
        }
    }
@@ -3680,6 +3680,7 @@ pub fn saveState(comp: *Compilation) !void {
    const lf = comp.bin_file orelse return;

    const gpa = comp.gpa;
+    const io = comp.io;

    var bufs = std.array_list.Managed([]const u8).init(gpa);
    defer bufs.deinit();
@@ -3900,7 +3901,7 @@ pub fn saveState(comp: *Compilation) !void {
    // Using an atomic file prevents a crash or power failure from corrupting
    // the previous incremental compilation state.
    var write_buffer: [1024]u8 = undefined;
-    var af = try lf.emit.root_dir.handle.atomicFile(basename, .{ .write_buffer = &write_buffer });
+    var af = try lf.emit.root_dir.handle.atomicFile(io, basename, .{ .write_buffer = &write_buffer });
    defer af.deinit();
    try af.file_writer.interface.writeVecAll(bufs.items);
    try af.finish();
@@ -4258,8 +4259,8 @@ pub fn getAllErrorsAlloc(comp: *Compilation) error{OutOfMemory}!ErrorBundle {
            // However, we haven't reported any such error.
            // This is a compiler bug.
            print_ctx: {
-                const stderr = try io.lockStderrWriter(&.{});
-                defer io.unlockStderrWriter();
+                const stderr = std.debug.lockStderrWriter(&.{});
+                defer std.debug.unlockStderrWriter();
                const w = &stderr.interface;
                w.writeAll("referenced transitive analysis errors, but none actually emitted\n") catch break :print_ctx;
                w.print("{f} [transitive failure]\n", .{zcu.fmtAnalUnit(failed_unit)}) catch break :print_ctx;
@@ -5219,13 +5220,10 @@ fn processOneJob(
    }
}

-fn createDepFile(
-    comp: *Compilation,
-    depfile: []const u8,
-    binfile: Cache.Path,
-) anyerror!void {
+fn createDepFile(comp: *Compilation, depfile: []const u8, binfile: Cache.Path) anyerror!void {
+    const io = comp.io;
    var buf: [4096]u8 = undefined;
-    var af = try Io.Dir.cwd().atomicFile(depfile, .{ .write_buffer = &buf });
+    var af = try Io.Dir.cwd().atomicFile(io, depfile, .{ .write_buffer = &buf });
    defer af.deinit();

    comp.writeDepFile(binfile, &af.file_writer.interface) catch return af.file_writer.err.?;
@@ -5280,13 +5278,8 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
    for (&[_][]const u8{ "docs/main.js", "docs/index.html" }) |sub_path| {
        const basename = fs.path.basename(sub_path);

-        comp.dirs.zig_lib.handle.copyFile(sub_path, out_dir, basename, .{}) catch |err| {
-            comp.lockAndSetMiscFailure(.docs_copy, "unable to copy {s}: {s}", .{
-                sub_path,
-                @errorName(err),
-            });
-            return;
-        };
+        comp.dirs.zig_lib.handle.copyFile(sub_path, out_dir, basename, io, .{}) catch |err|
+            return comp.lockAndSetMiscFailure(.docs_copy, "unable to copy {s}: {t}", .{ sub_path, err });
    }

    var tar_file = out_dir.createFile(io, "sources.tar", .{}) catch |err| {
@@ -5350,7 +5343,7 @@ fn docsCopyModule(
    var buffer: [1024]u8 = undefined;

-    while (try walker.next()) |entry| {
+    while (try walker.next(io)) |entry| {
        switch (entry.kind) {
            .file => {
                if (!std.mem.endsWith(u8, entry.basename, ".zig")) continue;
@@ -5505,7 +5498,7 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
    try comp.updateSubCompilation(sub_compilation, .docs_wasm, prog_node);

    var crt_file = try sub_compilation.toCrtFile();
-    defer crt_file.deinit(gpa);
+    defer crt_file.deinit(gpa, io);
    const docs_bin_file = crt_file.full_object_path;
    assert(docs_bin_file.sub_path.len > 0); // emitted binary is not a directory
@@ -5521,10 +5514,12 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
    };
    defer out_dir.close(io);

-    crt_file.full_object_path.root_dir.handle.copyFile(
+    Io.Dir.copyFile(
+        crt_file.full_object_path.root_dir.handle,
        crt_file.full_object_path.sub_path,
        out_dir,
        "main.wasm",
+        io,
        .{},
    ) catch |err| {
        comp.lockAndSetMiscFailure(.docs_copy, "unable to copy '{f}' to '{f}': {t}", .{
@@ -5758,7 +5753,7 @@ pub fn translateC(
        try argv.appendSlice(comp.global_cc_argv);
        try argv.appendSlice(owner_mod.cc_argv);
        try argv.appendSlice(&.{ source_path, "-o", translated_path });
-        if (comp.verbose_cimport) dumpArgv(io, argv.items);
+        if (comp.verbose_cimport) try dumpArgv(io, argv.items);
    }

    var stdout: []u8 = undefined;
@@ -6153,7 +6148,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
    const gpa = comp.gpa;
    const io = comp.io;

-    if (c_object.clearStatus(gpa)) {
+    if (c_object.clearStatus(gpa, io)) {
        // There was previous failure.
        comp.mutex.lockUncancelable(io);
        defer comp.mutex.unlock(io);
@@ -6500,7 +6495,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
    defer arena_allocator.deinit();
    const arena = arena_allocator.allocator();

-    if (win32_resource.clearStatus(comp.gpa)) {
+    if (win32_resource.clearStatus(comp.gpa, io)) {
        // There was previous failure.
        comp.mutex.lockUncancelable(io);
        defer comp.mutex.unlock(io);
@@ -6768,7 +6763,7 @@ fn spawnZigRc(
    // Just in case there's a failure that didn't send an ErrorBundle (e.g. an error return trace)
    const stderr = poller.reader(.stderr);

-    const term = child.wait() catch |err| {
+    const term = child.wait(io) catch |err| {
        return comp.failWin32Resource(win32_resource, "unable to wait for {s} rc: {s}", .{ argv[0], @errorName(err) });
    };
@@ -7781,7 +7776,10 @@ pub fn dumpArgv(io: Io, argv: []const []const u8) Io.Cancelable!void {
    defer io.unlockStderrWriter();
    const w = &stderr.interface;
    return dumpArgvWriter(w, argv) catch |err| switch (err) {
-        error.WriteFailed => return stderr.err.?,
+        error.WriteFailed => switch (stderr.err.?) {
+            error.Canceled => return error.Canceled,
+            else => return,
+        },
    };
}
diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig
index 33214a47db..823015574b 100644
--- a/src/Package/Fetch.zig
+++ b/src/Package/Fetch.zig
@@ -501,7 +501,7 @@ fn runResource(
        .path = tmp_directory_path,
        .handle = handle: {
            const dir = cache_root.handle.makeOpenPath(io, tmp_dir_sub_path, .{
-                .iterate = true,
+                .open_options = .{ .iterate = true },
            }) catch |err| {
                try eb.addRootErrorMessage(.{
                    .msg = try eb.printString("unable to create temporary directory '{s}': {t}", .{
@@ -525,7 +525,7 @@ fn runResource(
        // https://github.com/ziglang/zig/issues/17095
        pkg_path.root_dir.handle.close(io);
        pkg_path.root_dir.handle = cache_root.handle.makeOpenPath(io, tmp_dir_sub_path, .{
-            .iterate = true,
+            .open_options = .{ .iterate = true },
        }) catch @panic("btrfs workaround failed");
    }
@@ -1334,7 +1334,7 @@ fn unzip(
            f.location_tok,
            try eb.printString("failed writing temporary zip file: {t}", .{err}),
        );
-        break :b zip_file_writer.moveToReader(io);
+        break :b zip_file_writer.moveToReader();
    };

    var diagnostics: std.zip.Diagnostics = .{ .allocator = f.arena.allocator() };
@@ -1376,7 +1376,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U
        const fetch_reader = &resource.fetch_stream.reader;
        _ = try fetch_reader.streamRemaining(&pack_file_writer.interface);
        try pack_file_writer.interface.flush();
-        break :b pack_file_writer.moveToReader(io);
+        break :b pack_file_writer.moveToReader();
    };

    var index_file = try pack_dir.createFile(io, "pkg.idx", .{ .read = true });
@@ -1421,26 +1421,21 @@ fn recursiveDirectoryCopy(f: *Fetch, dir: Io.Dir, tmp_dir: Io.Dir) anyerror!void
    // Recursive directory copy.
    var it = try dir.walk(gpa);
    defer it.deinit();
-    while (try it.next()) |entry| {
+    while (try it.next(io)) |entry| {
        switch (entry.kind) {
            .directory => {}, // omit empty directories
            .file => {
-                dir.copyFile(
-                    entry.path,
-                    tmp_dir,
-                    entry.path,
-                    .{},
-                ) catch |err| switch (err) {
+                dir.copyFile(entry.path, tmp_dir, entry.path, io, .{}) catch |err| switch (err) {
                    error.FileNotFound => {
                        if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(io, dirname);
-                        try dir.copyFile(entry.path, tmp_dir, entry.path, .{});
+                        try dir.copyFile(entry.path, tmp_dir, entry.path, io, .{});
                    },
                    else => |e| return e,
                };
            },
            .sym_link => {
                var buf: [fs.max_path_bytes]u8 = undefined;
-                const link_name = try dir.readLink(io, entry.path, &buf);
+                const link_name = buf[0..try dir.readLink(io, entry.path, &buf)];
                // TODO: if this would create a symlink to outside
                // the destination directory, fail with an error instead.
                tmp_dir.symLink(io, link_name, entry.path, .{}) catch |err| switch (err) {
@@ -1524,7 +1519,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
    var group: Io.Group = .init;
    defer group.wait(io);

-    while (walker.next() catch |err| {
+    while (walker.next(io) catch |err| {
        try eb.addRootErrorMessage(.{ .msg = try eb.printString(
            "unable to walk temporary directory '{f}': {s}",
            .{ pkg_path, @errorName(err) },
@@ -1575,7 +1570,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
                .failure = undefined, // to be populated by the worker
                .size = undefined, // to be populated by the worker
            };
-            group.async(io, workerHashFile, .{ root_dir, hashed_file });
+            group.async(io, workerHashFile, .{ io, root_dir, hashed_file });
            try all_files.append(hashed_file);
        }
    }
@@ -1643,7 +1638,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
        assert(!f.job_queue.recursive);
        // Print something to stdout that can be text diffed to figure out why
        // the package hash is different.
-        dumpHashInfo(all_files.items) catch |err| {
+        dumpHashInfo(io, all_files.items) catch |err| {
            std.debug.print("unable to write to stdout: {s}\n", .{@errorName(err)});
            std.process.exit(1);
        };
@@ -1655,9 +1650,9 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
    };
}

-fn dumpHashInfo(all_files: []const *const HashedFile) !void {
+fn dumpHashInfo(io: Io, all_files: []const *const HashedFile) !void {
    var stdout_buffer: [1024]u8 = undefined;
-    var stdout_writer: Io.File.Writer = .initStreaming(.stdout(), &stdout_buffer);
+    var stdout_writer: Io.File.Writer = .initStreaming(.stdout(), io, &stdout_buffer);
    const w = &stdout_writer.interface;
    for (all_files) |hashed_file| {
        try w.print("{t}: {x}: {s}\n", .{ hashed_file.kind, &hashed_file.hash, hashed_file.normalized_path });
@@ -1665,8 +1660,8 @@ fn dumpHashInfo(all_files: []const *const HashedFile) !void {
    try w.flush();
}

-fn workerHashFile(dir: Io.Dir, hashed_file: *HashedFile) void {
-    hashed_file.failure = hashFileFallible(dir, hashed_file);
+fn workerHashFile(io: Io, dir: Io.Dir, hashed_file: *HashedFile) void {
+    hashed_file.failure = hashFileFallible(io, dir, hashed_file);
}

fn workerDeleteFile(io: Io, dir: Io.Dir, deleted_file: *DeletedFile) void {
@@ -1745,7 +1740,7 @@ const HashedFile = struct {
        Io.File.OpenError ||
        Io.File.Reader.Error ||
        Io.File.StatError ||
-        Io.File.ChmodError ||
+        Io.File.SetPermissionsError ||
        Io.Dir.ReadLinkError;

    const Kind = enum { file, link };
diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig
index f53c96cc3d..fd48b0a9b1 100644
--- a/src/Package/Fetch/git.zig
+++ b/src/Package/Fetch/git.zig
@@ -274,7 +274,7 @@ pub const Repository = struct {
                    continue;
                };
                defer file.close(io);
-                try file.writeAll(file_object.data);
+                try file.writePositionalAll(io, file_object.data, 0);
            },
            .symlink => {
                try repository.odb.seekOid(entry.oid);
diff --git a/src/Sema.zig b/src/Sema.zig
index 1f6a577f60..298de783b8 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2679,7 +2679,7 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Zcu.ErrorMsg
        Compilation.addModuleErrorMsg(zcu, &wip_errors, err_msg.*, false) catch @panic("out of memory");
        std.debug.print("compile error during Sema:\n", .{});
        var error_bundle = wip_errors.toOwnedBundle("") catch @panic("out of memory");
-        error_bundle.renderToStderr(io, .{}, .auto);
+        error_bundle.renderToStderr(io, .{}, .auto) catch @panic("failed to print to stderr");
        std.debug.panicExtra(@returnAddress(), "unexpected compile error occurred", .{});
    }
diff --git a/src/Zcu.zig b/src/Zcu.zig
index e0e254d807..07fb1bdc94 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -2988,11 +2988,10 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *Io.Reader

pub fn saveZirCache(
    gpa: Allocator,
-    io: Io,
-    cache_file: Io.File,
+    cache_file_writer: *Io.File.Writer,
    stat: Io.File.Stat,
    zir: Zir,
-) (Io.File.WriteError || Allocator.Error)!void {
+) (Io.File.Writer.Error || Allocator.Error)!void {
    const safety_buffer = if (data_has_safety_tag)
        try gpa.alloc([8]u8, zir.instructions.len)
    else
@@ -3026,13 +3025,12 @@ pub fn saveZirCache(
        zir.string_bytes,
        @ptrCast(zir.extra),
    };
-    var cache_fw = cache_file.writer(io, &.{});
-    cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
-        error.WriteFailed => return cache_fw.err.?,
+    cache_file_writer.interface.writeVecAll(&vecs) catch |err| switch (err) {
+        error.WriteFailed => return cache_file_writer.err.?,
    };
}

-pub fn saveZoirCache(io: Io, cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir) Io.File.WriteError!void {
+pub fn saveZoirCache(cache_file_writer: *Io.File.Writer, stat: Io.File.Stat, zoir: Zoir) Io.File.Writer.Error!void {
    const header: Zoir.Header = .{
        .nodes_len = @intCast(zoir.nodes.len),
        .extra_len = @intCast(zoir.extra.len),
@@ -3056,9 +3054,8 @@ pub fn saveZoirCache(io: Io, cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir
        @ptrCast(zoir.compile_errors),
        @ptrCast(zoir.error_notes),
    };
-    var cache_fw = cache_file.writer(io, &.{});
-    cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
-        error.WriteFailed => return cache_fw.err.?,
+    cache_file_writer.interface.writeVecAll(&vecs) catch |err| switch (err) {
+        error.WriteFailed => return cache_file_writer.err.?,
    };
}
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 408f16bd74..76ab3e229c 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -238,18 +238,13 @@ pub fn updateFile(
        if (builtin.os.tag == .wasi or lock == .exclusive) break true;
        // Otherwise, unlock to give someone a chance to get the exclusive lock
        // and then upgrade to an exclusive lock.
-        cache_file.unlock();
+        cache_file.unlock(io);
        lock = .exclusive;
-        try cache_file.lock(lock);
+        try cache_file.lock(io, lock);
    };

    if (need_update) {
-        // The cache is definitely stale so delete the contents to avoid an underwrite later.
-        cache_file.setLength(io, 0) catch |err| switch (err) {
-            error.FileTooBig => unreachable, // 0 is not too big
-            else => |e| return e,
-        };
-        try cache_file.seekTo(0);
+        var cache_file_writer: Io.File.Writer = .init(cache_file, io, &.{});

        if (stat.size > std.math.maxInt(u32))
            return error.FileTooBig;
@@ -278,7 +273,7 @@ pub fn updateFile(
        switch (file.getMode()) {
            .zig => {
                file.zir = try AstGen.generate(gpa, file.tree.?);
-                Zcu.saveZirCache(gpa, io, cache_file, stat, file.zir.?) catch |err| switch (err) {
+                Zcu.saveZirCache(gpa, &cache_file_writer, stat, file.zir.?) catch |err| switch (err) {
                    error.OutOfMemory => |e| return e,
                    else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {t}", .{
                        file.path.fmt(comp), cache_directory, &hex_digest, err,
@@ -287,13 +282,19 @@ pub fn updateFile(
            },
            .zon => {
                file.zoir = try ZonGen.generate(gpa, file.tree.?, .{});
-                Zcu.saveZoirCache(io, cache_file, stat, file.zoir.?) catch |err| {
+                Zcu.saveZoirCache(&cache_file_writer, stat, file.zoir.?) catch |err| {
                    log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {t}", .{
                        file.path.fmt(comp), cache_directory, &hex_digest, err,
                    });
                };
            },
        }
+
+        cache_file_writer.end() catch |err| switch (err) {
+            error.WriteFailed => return cache_file_writer.err.?,
+            else => |e| return e,
+        };
+
        if (timer.finish()) |ns_astgen| {
            comp.mutex.lockUncancelable(io);
            defer comp.mutex.unlock(io);
@@ -4524,12 +4525,14 @@ pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) Ru
                .stage2_llvm,
                => {},
            },
+            error.Canceled => |e| return e,
        }
        return error.AlreadyReported;
    };
}

fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) error{
    OutOfMemory,
+    Canceled,
    CodegenFail,
    NoLinkFile,
    BackendDoesNotProduceMir,
@@ -4555,13 +4558,16 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
        null;
    defer if (liveness) |*l| l.deinit(gpa);

-    if (build_options.enable_debug_extensions and comp.verbose_air) {
+    if (build_options.enable_debug_extensions and comp.verbose_air) p: {
        const io = comp.io;
        const stderr = try io.lockStderrWriter(&.{});
        defer io.unlockStderrWriter();
-        stderr.print("# Begin Function AIR: {f}:\n", .{fqn.fmt(ip)}) catch {};
-        air.write(stderr, pt, liveness);
-        stderr.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)}) catch {};
+        printVerboseAir(pt, liveness, fqn, air, &stderr.interface) catch |err| switch (err) {
+            error.WriteFailed => switch (stderr.err.?) {
+                error.Canceled => |e| return e,
+                else => break :p,
+            },
+        };
    }

    if (std.debug.runtime_safety) verify_liveness: {
@@ -4576,7 +4582,7 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
        verify.verify() catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
-            else => return zcu.codegenFail(nav, "invalid liveness: {s}", .{@errorName(err)}),
+            else => return zcu.codegenFail(nav, "invalid liveness: {t}", .{err}),
        };
    }
@@ -4612,3 +4618,17 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
        => return zcu.codegenFail(nav, "unable to codegen: {s}", .{@errorName(err)}),
    };
}
+
+fn printVerboseAir(
+    pt: Zcu.PerThread,
+    liveness: ?Air.Liveness,
+    fqn: InternPool.NullTerminatedString,
+    air: *const Air,
+    w: *Io.Writer,
+) Io.Writer.Error!void {
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    try w.print("# Begin Function AIR: {f}:\n", .{fqn.fmt(ip)});
+    try air.write(w, pt, liveness);
+    try w.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)});
+}
diff --git a/src/fmt.zig b/src/fmt.zig
index b9b1cc1363..ca6885d24e 100644
--- a/src/fmt.zig
+++ b/src/fmt.zig
@@ -124,7 +124,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
                try wip_errors.addZirErrorMessages(zir, tree, source_code, "<stdin>");
                var error_bundle = try wip_errors.toOwnedBundle("");
                defer error_bundle.deinit(gpa);
-                error_bundle.renderToStderr(io, .{}, color);
+                error_bundle.renderToStderr(io, .{}, color) catch {};
                process.exit(2);
            }
        } else {
@@ -138,12 +138,12 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
                try wip_errors.addZoirErrorMessages(zoir, tree, source_code, "<stdin>");
                var error_bundle = try wip_errors.toOwnedBundle("");
                defer error_bundle.deinit(gpa);
-                error_bundle.renderToStderr(io, .{}, color);
+                error_bundle.renderToStderr(io, .{}, color) catch {};
                process.exit(2);
            }
        }
    } else if (tree.errors.len != 0) {
-        try std.zig.printAstErrorsToStderr(gpa, tree, "<stdin>", color);
+        std.zig.printAstErrorsToStderr(gpa, io, tree, "<stdin>", color) catch {};
        process.exit(2);
    }
    const formatted = try tree.renderAlloc(gpa);
@@ -298,7 +298,7 @@ fn fmtPathFile(
    defer tree.deinit(gpa);

    if (tree.errors.len != 0) {
-        try std.zig.printAstErrorsToStderr(gpa, tree, file_path, fmt.color);
+        try std.zig.printAstErrorsToStderr(gpa, io, tree, file_path, fmt.color);
        fmt.any_error = true;
        return;
    }
@@ -319,7 +319,7 @@ fn fmtPathFile(
                try wip_errors.addZirErrorMessages(zir, tree, source_code, file_path);
                var error_bundle = try wip_errors.toOwnedBundle("");
                defer error_bundle.deinit(gpa);
-                error_bundle.renderToStderr(io, .{}, fmt.color);
+                try error_bundle.renderToStderr(io, .{}, fmt.color);
                fmt.any_error = true;
            }
        },
@@ -334,7 +334,7 @@ fn fmtPathFile(
                try wip_errors.addZoirErrorMessages(zoir, tree, source_code, file_path);
                var error_bundle = try wip_errors.toOwnedBundle("");
                defer error_bundle.deinit(gpa);
-                error_bundle.renderToStderr(io, .{}, fmt.color);
+                try error_bundle.renderToStderr(io, .{}, fmt.color);
                fmt.any_error = true;
            }
        },
diff --git a/src/libs/freebsd.zig b/src/libs/freebsd.zig
index ee638718b7..44676007e2 100644
--- a/src/libs/freebsd.zig
+++ b/src/libs/freebsd.zig
@@ -401,8 +401,8 @@ pub const BuiltSharedObjects = struct {
    lock: Cache.Lock,
    dir_path: Path,

-    pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
-        self.lock.release();
+    pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator, io: Io) void {
+        self.lock.release(io);
        gpa.free(self.dir_path.sub_path);
        self.* = undefined;
    }
diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig
index de86413cfd..8371b3288d 100644
--- a/src/libs/glibc.zig
+++ b/src/libs/glibc.zig
@@ -640,8 +640,8 @@ pub const BuiltSharedObjects = struct {
    lock: Cache.Lock,
    dir_path: Path,

-    pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
-        self.lock.release();
+    pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator, io: Io) void {
+        self.lock.release(io);
        gpa.free(self.dir_path.sub_path);
        self.* = undefined;
    }
diff --git a/src/libs/netbsd.zig b/src/libs/netbsd.zig
index 33dd62d851..9c09c35b0a 100644
--- a/src/libs/netbsd.zig
+++ b/src/libs/netbsd.zig
@@ -346,8 +346,8 @@ pub const BuiltSharedObjects = struct {
    lock: Cache.Lock,
    dir_path: Path,

-    pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
-        self.lock.release();
+    pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator, io: Io) void {
+        self.lock.release(io);
        gpa.free(self.dir_path.sub_path);
        self.* = undefined;
    }
diff --git a/src/link.zig b/src/link.zig
index a4729f296c..3b822b2e69 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -2246,13 +2246,12 @@ fn resolvePathInputLib(
        var error_bundle = try wip_errors.toOwnedBundle("");
        defer error_bundle.deinit(gpa);

-        error_bundle.renderToStderr(io, .{}, color);
-
+        error_bundle.renderToStderr(io, .{}, color) catch {};
        std.process.exit(1);
    }

    var ld_script = ld_script_result catch |err|
-        fatal("{f}: failed to parse linker script: {s}", .{ test_path, @errorName(err) });
+        fatal("{f}: failed to parse linker script: {t}", .{ test_path, err });
    defer ld_script.deinit(gpa);

    try unresolved_inputs.ensureUnusedCapacity(gpa, ld_script.args.len);
diff --git a/src/main.zig b/src/main.zig
index 56a014bb92..c5ed278921 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -4598,6 +4598,8 @@ const UpdateModuleError = Compilation.UpdateError || error{
    /// The update caused compile errors. The error bundle has already been
    /// reported to the user by being rendered to stderr.
    CompileErrorsReported,
+    /// Error occurred printing compilation errors to stderr.
+    PrintingErrorsFailed,
};
fn updateModule(comp: *Compilation, color: Color, prog_node: std.Progress.Node) UpdateModuleError!void {
    try comp.update(prog_node);
@@ -4607,7 +4609,10 @@ fn updateModule(comp: *Compilation, color: Color, prog_node: std.Progress.Node)

    if (errors.errorMessageCount() > 0) {
        const io = comp.io;
-        try errors.renderToStderr(io, .{}, color);
+        errors.renderToStderr(io, .{}, color) catch |err| switch (err) {
+            error.Canceled => |e| return e,
+            else => return error.PrintingErrorsFailed,
+        };
        return error.CompileErrorsReported;
    }
}

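A second recurring change in the diff above is error formatting: `fatal`/`log` messages drop `@errorName(err)` with `{s}` in favor of the `{t}` specifier, which prints an error (or enum) by its tag name. A standalone comparison, assuming a Zig toolchain recent enough to support `{t}`:

```zig
const std = @import("std");

pub fn main() void {
    const err: anyerror = error.FileNotFound;

    // Old style, as removed throughout the commit:
    std.debug.print("unable to copy: {s}\n", .{@errorName(err)});

    // New style, as added in its place ({t} formats the error by tag name):
    std.debug.print("unable to copy: {t}\n", .{err});
}
```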