author     Andrew Kelley <andrew@ziglang.org>  2024-07-12 16:56:17 -0700
committer  GitHub <noreply@github.com>         2024-07-12 16:56:17 -0700
commit     1d20ff11d7efc1f07de9a300abc15ad6ae9d6d6f (patch)
tree       e72a2989fddfa91571999ade7ef68c87c7c2610f /src/Compilation.zig
parent     0d79aa01768d600a19f7a7493afce417da7e3810 (diff)
parent     5efcc2e9e7c84893b9e418ca82d8d2d4366dde7c (diff)
Merge pull request #20580 from ziglang/watch
introduce file system watching features to the zig build system
Diffstat (limited to 'src/Compilation.zig')
-rw-r--r--  src/Compilation.zig  72
1 file changed, 71 insertions(+), 1 deletion(-)
diff --git a/src/Compilation.zig b/src/Compilation.zig
index cc5fd1a9eb..94ce0ee267 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -235,6 +235,8 @@ astgen_wait_group: WaitGroup = .{},
llvm_opt_bisect_limit: c_int,
+file_system_inputs: ?*std.ArrayListUnmanaged(u8),
+
pub const Emit = struct {
/// Where the output will go.
directory: Directory,
@@ -1157,6 +1159,9 @@ pub const CreateOptions = struct {
error_limit: ?Zcu.ErrorInt = null,
global_cc_argv: []const []const u8 = &.{},
+ /// Tracks all files that can cause the Compilation to be invalidated and need a rebuild.
+ file_system_inputs: ?*std.ArrayListUnmanaged(u8) = null,
+
pub const Entry = link.File.OpenOptions.Entry;
};
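
For context, a minimal sketch of how a caller could opt in to this tracking. Only the file_system_inputs field is part of this commit; the buffer, comp, gpa, and main_progress_node names stand in for the caller's existing setup and are assumptions, not code from this change.

// Illustrative only: the caller owns the buffer and passes a pointer to it
// through CreateOptions.file_system_inputs (all other options elided).
var file_system_inputs: std.ArrayListUnmanaged(u8) = .{};
defer file_system_inputs.deinit(gpa);
// ... create the Compilation with .file_system_inputs = &file_system_inputs ...
try comp.update(main_progress_node);
// After update() returns, the buffer holds the serialized list of every file
// that can invalidate this Compilation, ready to hand to a file watcher.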
@@ -1332,6 +1337,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.gpa = gpa,
.manifest_dir = try options.local_cache_directory.handle.makeOpenPath("h", .{}),
};
+ // These correspond to std.zig.Server.Message.PathPrefix.
cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
cache.addPrefix(options.zig_lib_directory);
cache.addPrefix(options.local_cache_directory);
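
As an aside on the comment above: the helper appendFileSystemInput added later in this diff records each input's root directory by its 1-based position in this prefix list. The sketch below shows the mapping implied by the three addPrefix calls visible here; it is an illustration derived from this diff, not the actual std.zig.Server.Message.PathPrefix declaration in the standard library.

// Hypothetical mapping for illustration; indexes follow the 1-based iteration
// in appendFileSystemInput further down in this diff.
const PrefixIndex = enum(u8) {
    cwd = 1, // .{ .path = null, .handle = std.fs.cwd() }
    zig_lib = 2, // options.zig_lib_directory
    local_cache = 3, // options.local_cache_directory
    // any prefixes added outside the shown context continue the sequence
};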
@@ -1508,6 +1514,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.force_undefined_symbols = options.force_undefined_symbols,
.link_eh_frame_hdr = link_eh_frame_hdr,
.global_cc_argv = options.global_cc_argv,
+ .file_system_inputs = options.file_system_inputs,
};
// Prevent some footguns by making the "any" fields of config reflect
@@ -2044,6 +2051,9 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
);
};
if (is_hit) {
+ // In this case the cache hit contains the full set of file system inputs. Nice!
+ if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
+
comp.last_update_was_cache_hit = true;
log.debug("CacheMode.whole cache hit for {s}", .{comp.root_name});
const digest = man.final();
@@ -2103,12 +2113,24 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
.incremental => {},
}
+ // From this point we add a preliminary set of file system inputs that
+ // affects both incremental and whole cache mode. For incremental cache
+ // mode, the long-lived compiler state will track additional file system
+ // inputs discovered after this point. For whole cache mode, we rely on
+ // these inputs to make it past AstGen, and once there, we can rely on
+ // learning file system inputs from the Cache object.
+
// For compiling C objects, we rely on the cache hash system to avoid duplicating work.
// Add a Job for each C object.
try comp.c_object_work_queue.ensureUnusedCapacity(comp.c_object_table.count());
for (comp.c_object_table.keys()) |key| {
comp.c_object_work_queue.writeItemAssumeCapacity(key);
}
+ if (comp.file_system_inputs) |fsi| {
+ for (comp.c_object_table.keys()) |c_object| {
+ try comp.appendFileSystemInput(fsi, c_object.src.owner.root, c_object.src.src_path);
+ }
+ }
// For compiling Win32 resources, we rely on the cache hash system to avoid duplicating work.
// Add a Job for each Win32 resource file.
@@ -2117,6 +2139,12 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
for (comp.win32_resource_table.keys()) |key| {
comp.win32_resource_work_queue.writeItemAssumeCapacity(key);
}
+ if (comp.file_system_inputs) |fsi| {
+ for (comp.win32_resource_table.keys()) |win32_resource| switch (win32_resource.src) {
+ .rc => |f| try comp.appendFileSystemInput(fsi, f.owner.root, f.src_path),
+ .manifest => continue,
+ };
+ }
}
if (comp.module) |zcu| {
@@ -2151,12 +2179,25 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
if (zcu.fileByIndex(file_index).mod.isBuiltin()) continue;
comp.astgen_work_queue.writeItemAssumeCapacity(file_index);
}
+ if (comp.file_system_inputs) |fsi| {
+ for (zcu.import_table.values()) |file_index| {
+ const file = zcu.fileByIndex(file_index);
+ try comp.appendFileSystemInput(fsi, file.mod.root, file.sub_file_path);
+ }
+ }
// Put a work item in for checking if any files used with `@embedFile` changed.
try comp.embed_file_work_queue.ensureUnusedCapacity(zcu.embed_table.count());
for (zcu.embed_table.values()) |embed_file| {
comp.embed_file_work_queue.writeItemAssumeCapacity(embed_file);
}
+ if (comp.file_system_inputs) |fsi| {
+ const ip = &zcu.intern_pool;
+ for (zcu.embed_table.values()) |embed_file| {
+ const sub_file_path = embed_file.sub_file_path.toSlice(ip);
+ try comp.appendFileSystemInput(fsi, embed_file.owner.root, sub_file_path);
+ }
+ }
try comp.work_queue.writeItem(.{ .analyze_mod = std_mod });
if (comp.config.is_test) {
@@ -2210,6 +2251,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
switch (comp.cache_use) {
.whole => |whole| {
+ if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
+
const digest = man.final();
// Rename the temporary directory into place.
@@ -2297,6 +2340,30 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
}
+fn appendFileSystemInput(
+ comp: *Compilation,
+ file_system_inputs: *std.ArrayListUnmanaged(u8),
+ root: Cache.Path,
+ sub_file_path: []const u8,
+) Allocator.Error!void {
+ const gpa = comp.gpa;
+ const prefixes = comp.cache_parent.prefixes();
+ try file_system_inputs.ensureUnusedCapacity(gpa, root.sub_path.len + sub_file_path.len + 3);
+ if (file_system_inputs.items.len > 0) file_system_inputs.appendAssumeCapacity(0);
+ for (prefixes, 1..) |prefix_directory, i| {
+ if (prefix_directory.eql(root.root_dir)) {
+ file_system_inputs.appendAssumeCapacity(@intCast(i));
+ if (root.sub_path.len > 0) {
+ file_system_inputs.appendSliceAssumeCapacity(root.sub_path);
+ file_system_inputs.appendAssumeCapacity(std.fs.path.sep);
+ }
+ file_system_inputs.appendSliceAssumeCapacity(sub_file_path);
+ return;
+ }
+ }
+ std.debug.panic("missing prefix directory: {}, {s}", .{ root, sub_file_path });
+}
+
fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
if (comp.bin_file) |lf| {
// This is needed before reading the error flags.
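
Based only on the encoding visible in appendFileSystemInput above (entries separated by 0 bytes, each entry beginning with a 1-based prefix index byte followed by a prefix-relative path), a consumer could split the buffer back apart roughly as follows. This is a minimal sketch; the real consumer lives on the build-system side and is not part of this file.

const std = @import("std");

// Decodes the buffer built by appendFileSystemInput: 0-byte-separated entries,
// each starting with a 1-based prefix index byte followed by a sub path.
fn iterateFileSystemInputs(bytes: []const u8) void {
    var it = std.mem.splitScalar(u8, bytes, 0);
    while (it.next()) |entry| {
        if (entry.len == 0) continue; // guards the empty-buffer case
        const prefix_index: u8 = entry[0]; // index into the cache prefix list
        const sub_path = entry[1..]; // path relative to that prefix directory
        std.debug.print("prefix {d}: {s}\n", .{ prefix_index, sub_path });
    }
}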
@@ -4204,6 +4271,9 @@ fn workerAstGenFile(
.token = item.data.token,
} }) catch continue;
}
+ if (res.is_new) if (comp.file_system_inputs) |fsi| {
+ comp.appendFileSystemInput(fsi, res.file.mod.root, res.file.sub_file_path) catch continue;
+ };
const imported_path_digest = pt.zcu.filePathDigest(res.file_index);
const imported_root_decl = pt.zcu.fileRootDecl(res.file_index);
break :blk .{ res, imported_path_digest, imported_root_decl };
@@ -4574,7 +4644,7 @@ fn reportRetryableEmbedFileError(
const gpa = mod.gpa;
const src_loc = embed_file.src_loc;
const ip = &mod.intern_pool;
- const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{
+ const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}/{s}': {s}", .{
embed_file.owner.root,
embed_file.sub_file_path.toSlice(ip),
@errorName(err),