author    Andrew Kelley <andrew@ziglang.org>    2024-07-11 18:28:05 -0700
committer Andrew Kelley <andrew@ziglang.org>    2024-07-12 00:15:11 -0700
commit    5a34e6c3e608e1f526bababd3a2a146f6216d045 (patch)
tree      154864ee4b7841598494d5c4a5449b38de09ed3c /src
parent    a3c20dffaed77727494d34f7b4b03c0d10771270 (diff)
frontend: add file system inputs for incremental cache mode
These are also used for whole cache mode in the case that any compile errors are emitted.
Diffstat (limited to 'src')
-rw-r--r--  src/Compilation.zig    67
-rw-r--r--  src/Zcu.zig             2
-rw-r--r--  src/Zcu/PerThread.zig   2
3 files changed, 63 insertions(+), 8 deletions(-)
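
For orientation before the diff itself: the new `appendFileSystemInput` helper (added below in src/Compilation.zig) writes entries into `comp.file_system_inputs` separated by 0 bytes, where each entry begins with a one-byte, 1-based index into the cache's prefix directories followed by the path relative to that prefix. The following is a minimal sketch of that layout; `appendInput`, the caller-supplied prefix index, and the test paths are hypothetical stand-ins for illustration, not part of the commit.

const std = @import("std");

// Sketch of the buffer layout produced by appendFileSystemInput below,
// with the prefix-directory lookup replaced by a caller-supplied index.
fn appendInput(
    gpa: std.mem.Allocator,
    buf: *std.ArrayListUnmanaged(u8),
    prefix_index: u8,
    root_sub_path: []const u8,
    sub_file_path: []const u8,
) std.mem.Allocator.Error!void {
    try buf.ensureUnusedCapacity(gpa, root_sub_path.len + sub_file_path.len + 3);
    if (buf.items.len > 0) buf.appendAssumeCapacity(0); // 0 byte separates entries
    buf.appendAssumeCapacity(prefix_index); // 1-based cache prefix directory index
    if (root_sub_path.len > 0) {
        buf.appendSliceAssumeCapacity(root_sub_path);
        buf.appendAssumeCapacity(std.fs.path.sep);
    }
    buf.appendSliceAssumeCapacity(sub_file_path);
}

test "file system input buffer layout" {
    const gpa = std.testing.allocator;
    var buf: std.ArrayListUnmanaged(u8) = .{};
    defer buf.deinit(gpa);
    try appendInput(gpa, &buf, 2, "", "src/main.zig");
    try appendInput(gpa, &buf, 1, "", "std/std.zig");
    try std.testing.expectEqualSlices(u8, "\x02src/main.zig\x00\x01std/std.zig", buf.items);
}
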
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 49d4b041ae..a0cf2378da 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -2051,6 +2051,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
);
};
if (is_hit) {
+ // In this case the cache hit contains the full set of file system inputs. Nice!
if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
comp.last_update_was_cache_hit = true;
@@ -2112,12 +2113,24 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
.incremental => {},
}
+ // From this point we add a preliminary set of file system inputs that
+ // affects both incremental and whole cache mode. For incremental cache
+ // mode, the long-lived compiler state will track additional file system
+ // inputs discovered after this point. For whole cache mode, we rely on
+ // these inputs to make it past AstGen, and once there, we can rely on
+ // learning file system inputs from the Cache object.
+
// For compiling C objects, we rely on the cache hash system to avoid duplicating work.
// Add a Job for each C object.
try comp.c_object_work_queue.ensureUnusedCapacity(comp.c_object_table.count());
for (comp.c_object_table.keys()) |key| {
comp.c_object_work_queue.writeItemAssumeCapacity(key);
}
+ if (comp.file_system_inputs) |fsi| {
+ for (comp.c_object_table.keys()) |c_object| {
+ try comp.appendFileSystemInput(fsi, c_object.src.owner.root, c_object.src.src_path);
+ }
+ }
// For compiling Win32 resources, we rely on the cache hash system to avoid duplicating work.
// Add a Job for each Win32 resource file.
@@ -2126,6 +2139,12 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
for (comp.win32_resource_table.keys()) |key| {
comp.win32_resource_work_queue.writeItemAssumeCapacity(key);
}
+ if (comp.file_system_inputs) |fsi| {
+ for (comp.win32_resource_table.keys()) |win32_resource| switch (win32_resource.src) {
+ .rc => |f| try comp.appendFileSystemInput(fsi, f.owner.root, f.src_path),
+ .manifest => continue,
+ };
+ }
}
if (comp.module) |zcu| {
@@ -2160,12 +2179,24 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
if (zcu.fileByIndex(file_index).mod.isBuiltin()) continue;
comp.astgen_work_queue.writeItemAssumeCapacity(file_index);
}
+ if (comp.file_system_inputs) |fsi| {
+ for (zcu.import_table.values()) |file| {
+ try comp.appendFileSystemInput(fsi, file.mod.root, file.sub_file_path);
+ }
+ }
// Put a work item in for checking if any files used with `@embedFile` changed.
try comp.embed_file_work_queue.ensureUnusedCapacity(zcu.embed_table.count());
for (zcu.embed_table.values()) |embed_file| {
comp.embed_file_work_queue.writeItemAssumeCapacity(embed_file);
}
+ if (comp.file_system_inputs) |fsi| {
+ const ip = &zcu.intern_pool;
+ for (zcu.embed_table.values()) |embed_file| {
+ const sub_file_path = embed_file.sub_file_path.toSlice(ip);
+ try comp.appendFileSystemInput(fsi, embed_file.owner.root, sub_file_path);
+ }
+ }
try comp.work_queue.writeItem(.{ .analyze_mod = std_mod });
if (comp.config.is_test) {
@@ -2179,11 +2210,6 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
try comp.performAllTheWork(main_progress_node);
- switch (comp.cache_use) {
- .whole => if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf),
- .incremental => {},
- }
-
if (comp.module) |zcu| {
const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main };
@@ -2224,6 +2250,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
switch (comp.cache_use) {
.whole => |whole| {
+ if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
+
const digest = man.final();
// Rename the temporary directory into place.
@@ -2311,6 +2339,30 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
}
+fn appendFileSystemInput(
+ comp: *Compilation,
+ file_system_inputs: *std.ArrayListUnmanaged(u8),
+ root: Cache.Path,
+ sub_file_path: []const u8,
+) Allocator.Error!void {
+ const gpa = comp.gpa;
+ const prefixes = comp.cache_parent.prefixes();
+ try file_system_inputs.ensureUnusedCapacity(gpa, root.sub_path.len + sub_file_path.len + 3);
+ if (file_system_inputs.items.len > 0) file_system_inputs.appendAssumeCapacity(0);
+ for (prefixes, 1..) |prefix_directory, i| {
+ if (prefix_directory.eql(root.root_dir)) {
+ file_system_inputs.appendAssumeCapacity(@intCast(i));
+ if (root.sub_path.len > 0) {
+ file_system_inputs.appendSliceAssumeCapacity(root.sub_path);
+ file_system_inputs.appendAssumeCapacity(std.fs.path.sep);
+ }
+ file_system_inputs.appendSliceAssumeCapacity(sub_file_path);
+ return;
+ }
+ }
+ std.debug.panic("missing prefix directory: {}, {s}", .{ root, sub_file_path });
+}
+
fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
if (comp.bin_file) |lf| {
// This is needed before reading the error flags.
@@ -4218,6 +4270,9 @@ fn workerAstGenFile(
.token = item.data.token,
} }) catch continue;
}
+ if (res.is_new) if (comp.file_system_inputs) |fsi| {
+ comp.appendFileSystemInput(fsi, res.file.mod.root, res.file.sub_file_path) catch continue;
+ };
const imported_path_digest = pt.zcu.filePathDigest(res.file_index);
const imported_root_decl = pt.zcu.fileRootDecl(res.file_index);
break :blk .{ res, imported_path_digest, imported_root_decl };
@@ -4588,7 +4643,7 @@ fn reportRetryableEmbedFileError(
const gpa = mod.gpa;
const src_loc = embed_file.src_loc;
const ip = &mod.intern_pool;
- const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{
+ const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}/{s}': {s}", .{
embed_file.owner.root,
embed_file.sub_file_path.toSlice(ip),
@errorName(err),
diff --git a/src/Zcu.zig b/src/Zcu.zig
index a9d80b4fdf..fd1e2f9d61 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -728,7 +728,7 @@ pub const File = struct {
source_loaded: bool,
tree_loaded: bool,
zir_loaded: bool,
- /// Relative to the owning package's root_src_dir.
+ /// Relative to the owning package's root source directory.
/// Memory is stored in gpa, owned by File.
sub_file_path: []const u8,
/// Whether this is populated depends on `source_loaded`.
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 4a1f257ddf..f6a47f626b 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -2666,7 +2666,7 @@ pub fn reportRetryableAstGenError(
},
};
- const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{
+ const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}/{s}': {s}", .{
file.mod.root, file.sub_file_path, @errorName(err),
});
errdefer err_msg.destroy(gpa);