about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/Compilation.zig18
-rw-r--r--src/Package/Fetch.zig18
-rw-r--r--src/introspect.zig48
-rw-r--r--src/link/MachO/hasher.zig2
-rw-r--r--src/main.zig3
5 files changed, 55 insertions, 34 deletions
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 8e005992ec..a11519d07a 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -4698,7 +4698,7 @@ fn performAllTheWork(
});
}
- astgen_group.wait(io);
+ try astgen_group.await(io);
}
if (comp.zcu) |zcu| {
@@ -4761,7 +4761,7 @@ fn performAllTheWork(
// Since we're skipping analysis, there are no ZCU link tasks.
comp.link_queue.finishZcuQueue(comp);
// Let other compilation work finish to collect as many errors as possible.
- misc_group.wait(io);
+ try misc_group.await(io);
comp.link_queue.wait(io);
return;
}
@@ -4850,18 +4850,22 @@ fn performAllTheWork(
comp.link_queue.finishZcuQueue(comp);
// Main thread work is all done, now just wait for all async work.
- misc_group.wait(io);
+ try misc_group.await(io);
comp.link_queue.wait(io);
}
fn dispatchPrelinkWork(comp: *Compilation, main_progress_node: std.Progress.Node) void {
const io = comp.io;
+ // TODO should this function be cancelable?
+ const prev_cancel_prot = io.swapCancelProtection(.blocked);
+ defer _ = io.swapCancelProtection(prev_cancel_prot);
+
var prelink_group: Io.Group = .init;
defer prelink_group.cancel(io);
comp.queuePrelinkTasks(comp.oneshot_prelink_tasks.items) catch |err| switch (err) {
- error.Canceled => return,
+ error.Canceled => unreachable, // see swapCancelProtection above
};
comp.oneshot_prelink_tasks.clearRetainingCapacity();
@@ -5055,9 +5059,11 @@ fn dispatchPrelinkWork(comp: *Compilation, main_progress_node: std.Progress.Node
});
}
- prelink_group.wait(io);
+ prelink_group.await(io) catch |err| switch (err) {
+ error.Canceled => unreachable, // see swapCancelProtection above
+ };
comp.link_queue.finishPrelinkQueue(comp) catch |err| switch (err) {
- error.Canceled => return,
+ error.Canceled => unreachable, // see swapCancelProtection above
};
}
diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig
index f8e4b83293..d595465db5 100644
--- a/src/Package/Fetch.zig
+++ b/src/Package/Fetch.zig
@@ -146,6 +146,8 @@ pub const JobQueue = struct {
pub const UnlazySet = std.AutoArrayHashMapUnmanaged(Package.Hash, void);
pub fn deinit(jq: *JobQueue) void {
+ const io = jq.io;
+ jq.group.cancel(io);
if (jq.all_fetches.items.len == 0) return;
const gpa = jq.all_fetches.items[0].arena.child_allocator;
jq.table.deinit(gpa);
@@ -841,13 +843,13 @@ pub fn relativePathDigest(pkg_root: Cache.Path, cache_root: Cache.Directory) Pac
return .initPath(pkg_root.sub_path, pkg_root.root_dir.eql(cache_root));
}
-pub fn workerRun(f: *Fetch, prog_name: []const u8) void {
+pub fn workerRun(f: *Fetch, prog_name: []const u8) Io.Cancelable!void {
const prog_node = f.prog_node.start(prog_name, 0);
defer prog_node.end();
run(f) catch |err| switch (err) {
error.OutOfMemory => f.oom_flag = true,
- error.Canceled => {},
+ error.Canceled => |e| return e,
error.FetchFailed => {
// Nothing to do because the errors are already reported in `error_bundle`,
// and a reference is kept to the `Fetch` task inside `all_fetches`.
@@ -1517,12 +1519,12 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
// The final hash will be a hash of each file hashed independently. This
// allows hashing in parallel.
var group: Io.Group = .init;
- defer group.wait(io);
+ defer group.cancel(io);
while (walker.next(io) catch |err| {
try eb.addRootErrorMessage(.{ .msg = try eb.printString(
- "unable to walk temporary directory '{f}': {s}",
- .{ pkg_path, @errorName(err) },
+ "unable to walk temporary directory '{f}': {t}",
+ .{ pkg_path, err },
) });
return error.FetchFailed;
}) |entry| {
@@ -1552,8 +1554,8 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
.file => .file,
.sym_link => .link,
else => return f.fail(f.location_tok, try eb.printString(
- "package contains '{s}' which has illegal file type '{s}'",
- .{ entry.path, @tagName(entry.kind) },
+ "package contains '{s}' which has illegal file type '{t}'",
+ .{ entry.path, entry.kind },
)),
};
@@ -1573,6 +1575,8 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
group.async(io, workerHashFile, .{ io, root_dir, hashed_file });
try all_files.append(hashed_file);
}
+
+ try group.await(io);
}
{
diff --git a/src/introspect.zig b/src/introspect.zig
index 0a57505aeb..9481201c3b 100644
--- a/src/introspect.zig
+++ b/src/introspect.zig
@@ -1,5 +1,4 @@
const builtin = @import("builtin");
-const build_options = @import("build_options");
const std = @import("std");
const Io = std.Io;
@@ -8,6 +7,8 @@ const mem = std.mem;
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
+const build_options = @import("build_options");
+
const Compilation = @import("Compilation.zig");
const Package = @import("Package.zig");
@@ -101,26 +102,35 @@ pub fn findZigLibDirFromSelfExe(
}
/// Caller owns returned memory.
-pub fn resolveGlobalCacheDir(allocator: Allocator) ![]u8 {
- if (builtin.os.tag == .wasi)
- @compileError("on WASI the global cache dir must be resolved with preopens");
-
- if (try std.zig.EnvVar.ZIG_GLOBAL_CACHE_DIR.get(allocator)) |value| return value;
-
- const appname = "zig";
-
- if (builtin.os.tag != .windows) {
- if (std.zig.EnvVar.XDG_CACHE_HOME.getPosix()) |cache_root| {
- if (cache_root.len > 0) {
- return Dir.path.join(allocator, &.{ cache_root, appname });
+pub fn resolveGlobalCacheDir(gpa: Allocator) ![]u8 {
+ if (try std.zig.EnvVar.ZIG_GLOBAL_CACHE_DIR.get(gpa)) |value| return value;
+
+ const app_name = "zig";
+
+ switch (builtin.os.tag) {
+ .wasi => @compileError("on WASI the global cache dir must be resolved with preopens"),
+ .windows => {
+ const local_app_data_dir = (std.zig.EnvVar.LOCALAPPDATA.get(gpa) catch |err| switch (err) {
+ error.OutOfMemory => |e| return e,
+ error.InvalidWtf8 => return error.AppDataDirUnavailable,
+ }) orelse return error.AppDataDirUnavailable;
+ defer gpa.free(local_app_data_dir);
+ return Dir.path.join(gpa, &.{ local_app_data_dir, app_name });
+ },
+ else => {
+ if (std.zig.EnvVar.XDG_CACHE_HOME.getPosix()) |cache_root| {
+ if (cache_root.len > 0) {
+ return Dir.path.join(gpa, &.{ cache_root, app_name });
+ }
}
- }
- if (std.zig.EnvVar.HOME.getPosix()) |home| {
- return Dir.path.join(allocator, &.{ home, ".cache", appname });
- }
+ if (std.zig.EnvVar.HOME.getPosix()) |home| {
+ if (home.len > 0) {
+ return Dir.path.join(gpa, &.{ home, ".cache", app_name });
+ }
+ }
+ return error.AppDataDirUnavailable;
+ },
}
-
- return std.fs.getAppDataDir(allocator, appname);
}
/// Similar to `Dir.path.resolve`, but converts to a cwd-relative path, or, if that would
diff --git a/src/link/MachO/hasher.zig b/src/link/MachO/hasher.zig
index 822474e3e1..209f9495e8 100644
--- a/src/link/MachO/hasher.zig
+++ b/src/link/MachO/hasher.zig
@@ -48,7 +48,7 @@ pub fn ParallelHasher(comptime Hasher: type) type {
});
}
- group.wait(io);
+ try group.await(io);
}
for (results) |result| _ = try result;
}
diff --git a/src/main.zig b/src/main.zig
index d07580636c..f71cf5d144 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -1503,6 +1503,7 @@ fn buildOutputType(
} else if (mem.eql(u8, arg, "-fno-emit-bin")) {
emit_bin = .no;
} else if (mem.eql(u8, arg, "-femit-h")) {
+ fatal("-femit-h is currently broken, see https://github.com/ziglang/zig/issues/9698", .{});
emit_h = .yes_default_path;
} else if (mem.cutPrefix(u8, arg, "-femit-h=")) |rest| {
emit_h = .{ .yes = rest };
@@ -5283,7 +5284,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
);
job_queue.group.async(io, Package.Fetch.workerRun, .{ &fetch, "root" });
- job_queue.group.wait(io);
+ try job_queue.group.await(io);
try job_queue.consolidateErrors();