| author | Matthew Lugg <mlugg@mlugg.co.uk> | 2025-02-05 12:17:13 +0000 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2025-02-05 12:17:13 +0000 |
| commit | f01f1e33c96f0b00db8e036a654c1b3bf8531cd8 (patch) | |
| tree | 3daca71f83a02d73d4c93d973d7022776e476274 /src/Compilation.zig | |
| parent | cf059ee08716300e924bced08ebdd5bd8f97d789 (diff) | |
| parent | bebfa036ba52076cd03f9ef943f61da64ba6e97b (diff) | |
| download | zig-f01f1e33c96f0b00db8e036a654c1b3bf8531cd8.tar.gz zig-f01f1e33c96f0b00db8e036a654c1b3bf8531cd8.zip | |
Merge pull request #22754 from mlugg/files-and-stuff
ZON and incremental bits
Diffstat (limited to 'src/Compilation.zig')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/Compilation.zig | 146 |
1 file changed, 92 insertions, 54 deletions
```diff
diff --git a/src/Compilation.zig b/src/Compilation.zig
index dc1d7df320..14c216854e 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -2220,10 +2220,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
         try comp.astgen_work_queue.ensureUnusedCapacity(zcu.import_table.count());
         for (zcu.import_table.values()) |file_index| {
             if (zcu.fileByIndex(file_index).mod.isBuiltin()) continue;
-            const file = zcu.fileByIndex(file_index);
-            if (file.getMode() == .zig) {
-                comp.astgen_work_queue.writeItemAssumeCapacity(file_index);
-            }
+            comp.astgen_work_queue.writeItemAssumeCapacity(file_index);
         }
         if (comp.file_system_inputs) |fsi| {
             for (zcu.import_table.values()) |file_index| {
@@ -2906,10 +2903,12 @@ pub fn makeBinFileWritable(comp: *Compilation) !void {
 const Header = extern struct {
     intern_pool: extern struct {
         thread_count: u32,
-        file_deps_len: u32,
         src_hash_deps_len: u32,
         nav_val_deps_len: u32,
         nav_ty_deps_len: u32,
+        interned_deps_len: u32,
+        zon_file_deps_len: u32,
+        embed_file_deps_len: u32,
         namespace_deps_len: u32,
         namespace_name_deps_len: u32,
         first_dependency_len: u32,
@@ -2950,10 +2949,12 @@ pub fn saveState(comp: *Compilation) !void {
     const header: Header = .{
         .intern_pool = .{
             .thread_count = @intCast(ip.locals.len),
-            .file_deps_len = @intCast(ip.file_deps.count()),
             .src_hash_deps_len = @intCast(ip.src_hash_deps.count()),
             .nav_val_deps_len = @intCast(ip.nav_val_deps.count()),
             .nav_ty_deps_len = @intCast(ip.nav_ty_deps.count()),
+            .interned_deps_len = @intCast(ip.interned_deps.count()),
+            .zon_file_deps_len = @intCast(ip.zon_file_deps.count()),
+            .embed_file_deps_len = @intCast(ip.embed_file_deps.count()),
             .namespace_deps_len = @intCast(ip.namespace_deps.count()),
             .namespace_name_deps_len = @intCast(ip.namespace_name_deps.count()),
             .first_dependency_len = @intCast(ip.first_dependency.count()),
@@ -2978,14 +2979,18 @@ pub fn saveState(comp: *Compilation) !void {
     addBuf(&bufs, mem.asBytes(&header));
     addBuf(&bufs, mem.sliceAsBytes(pt_headers.items));
 
-    addBuf(&bufs, mem.sliceAsBytes(ip.file_deps.keys()));
-    addBuf(&bufs, mem.sliceAsBytes(ip.file_deps.values()));
     addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.keys()));
     addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.values()));
     addBuf(&bufs, mem.sliceAsBytes(ip.nav_val_deps.keys()));
     addBuf(&bufs, mem.sliceAsBytes(ip.nav_val_deps.values()));
     addBuf(&bufs, mem.sliceAsBytes(ip.nav_ty_deps.keys()));
     addBuf(&bufs, mem.sliceAsBytes(ip.nav_ty_deps.values()));
+    addBuf(&bufs, mem.sliceAsBytes(ip.interned_deps.keys()));
+    addBuf(&bufs, mem.sliceAsBytes(ip.interned_deps.values()));
+    addBuf(&bufs, mem.sliceAsBytes(ip.zon_file_deps.keys()));
+    addBuf(&bufs, mem.sliceAsBytes(ip.zon_file_deps.values()));
+    addBuf(&bufs, mem.sliceAsBytes(ip.embed_file_deps.keys()));
+    addBuf(&bufs, mem.sliceAsBytes(ip.embed_file_deps.values()));
     addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.keys()));
     addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.values()));
     addBuf(&bufs, mem.sliceAsBytes(ip.namespace_name_deps.keys()));
@@ -3203,15 +3208,13 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
     }
 
     if (comp.zcu) |zcu| {
-        const ip = &zcu.intern_pool;
-
         for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| {
             if (error_msg) |msg| {
                 try addModuleErrorMsg(zcu, &bundle, msg.*);
             } else {
                 // Must be ZIR or Zoir errors. Note that this may include AST errors.
                 _ = try file.getTree(gpa); // Tree must be loaded.
-                if (file.zir_loaded) {
+                if (file.zir != null) {
                     try addZirErrorMessages(&bundle, file);
                 } else if (file.zoir != null) {
                     try addZoirErrorMessages(&bundle, file);
@@ -3277,20 +3280,6 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
                 if (!refs.contains(anal_unit)) continue;
             }
 
-            report_ok: {
-                const file_index = switch (anal_unit.unwrap()) {
-                    .@"comptime" => |cu| ip.getComptimeUnit(cu).zir_index.resolveFile(ip),
-                    .nav_val, .nav_ty => |nav| ip.getNav(nav).analysis.?.zir_index.resolveFile(ip),
-                    .type => |ty| Type.fromInterned(ty).typeDeclInst(zcu).?.resolveFile(ip),
-                    .func => |ip_index| zcu.funcInfo(ip_index).zir_body_inst.resolveFile(ip),
-                    .memoized_state => break :report_ok, // always report std.builtin errors
-                };
-
-                // Skip errors for AnalUnits within files that had a parse failure.
-                // We'll try again once parsing succeeds.
-                if (!zcu.fileByIndex(file_index).okToReportErrors()) continue;
-            }
-
             std.log.scoped(.zcu).debug("analysis error '{s}' reported from unit '{}'", .{
                 error_msg.msg,
                 zcu.fmtAnalUnit(anal_unit),
@@ -3318,12 +3307,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
                 }
             }
         }
-        for (zcu.failed_codegen.keys(), zcu.failed_codegen.values()) |nav, error_msg| {
-            if (!zcu.navFileScope(nav).okToReportErrors()) continue;
+        for (zcu.failed_codegen.values()) |error_msg| {
             try addModuleErrorMsg(zcu, &bundle, error_msg.*);
         }
-        for (zcu.failed_types.keys(), zcu.failed_types.values()) |ty_index, error_msg| {
-            if (!zcu.typeFileScope(ty_index).okToReportErrors()) continue;
+        for (zcu.failed_types.values()) |error_msg| {
             try addModuleErrorMsg(zcu, &bundle, error_msg.*);
         }
         for (zcu.failed_exports.values()) |value| {
@@ -3623,22 +3610,17 @@ pub fn addModuleErrorMsg(
 }
 
 pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Zcu.File) !void {
-    assert(file.zir_loaded);
-    assert(file.tree_loaded);
-    assert(file.source_loaded);
     const gpa = eb.gpa;
     const src_path = try file.fullPath(gpa);
     defer gpa.free(src_path);
-    return eb.addZirErrorMessages(file.zir, file.tree, file.source, src_path);
+    return eb.addZirErrorMessages(file.zir.?, file.tree.?, file.source.?, src_path);
 }
 
 pub fn addZoirErrorMessages(eb: *ErrorBundle.Wip, file: *Zcu.File) !void {
-    assert(file.source_loaded);
-    assert(file.tree_loaded);
     const gpa = eb.gpa;
     const src_path = try file.fullPath(gpa);
     defer gpa.free(src_path);
-    return eb.addZoirErrorMessages(file.zoir.?, file.tree, file.source, src_path);
+    return eb.addZoirErrorMessages(file.zoir.?, file.tree.?, file.source.?, src_path);
 }
 
 pub fn performAllTheWork(
@@ -3802,7 +3784,7 @@ fn performAllTheWorkInner(
             // will be needed by the worker threads.
             const path_digest = zcu.filePathDigest(file_index);
             const file = zcu.fileByIndex(file_index);
-            comp.thread_pool.spawnWgId(&astgen_wait_group, workerAstGenFile, .{
+            comp.thread_pool.spawnWgId(&astgen_wait_group, workerUpdateFile, .{
                 comp, file, file_index, path_digest, zir_prog_node, &astgen_wait_group, .root,
             });
         }
@@ -3810,7 +3792,7 @@ fn performAllTheWorkInner(
 
         for (0.., zcu.embed_table.values()) |ef_index_usize, ef| {
             const ef_index: Zcu.EmbedFile.Index = @enumFromInt(ef_index_usize);
-            comp.thread_pool.spawnWgId(&astgen_wait_group, workerCheckEmbedFile, .{
+            comp.thread_pool.spawnWgId(&astgen_wait_group, workerUpdateEmbedFile, .{
                 comp, ef_index, ef,
             });
         }
@@ -3832,12 +3814,64 @@ fn performAllTheWorkInner(
     if (comp.zcu) |zcu| {
         const pt: Zcu.PerThread = .activate(zcu, .main);
         defer pt.deactivate();
+
+        // If the cache mode is `whole`, then add every source file to the cache manifest.
+        switch (comp.cache_use) {
+            .whole => |whole| if (whole.cache_manifest) |man| {
+                const gpa = zcu.gpa;
+                for (zcu.import_table.values()) |file_index| {
+                    const file = zcu.fileByIndex(file_index);
+                    const source = file.getSource(gpa) catch |err| {
+                        try pt.reportRetryableFileError(file_index, "unable to load source: {s}", .{@errorName(err)});
+                        continue;
+                    };
+                    const resolved_path = try std.fs.path.resolve(gpa, &.{
+                        file.mod.root.root_dir.path orelse ".",
+                        file.mod.root.sub_path,
+                        file.sub_file_path,
+                    });
+                    errdefer gpa.free(resolved_path);
+                    whole.cache_manifest_mutex.lock();
+                    defer whole.cache_manifest_mutex.unlock();
+                    man.addFilePostContents(resolved_path, source.bytes, source.stat) catch |err| switch (err) {
+                        error.OutOfMemory => |e| return e,
+                        else => {
+                            try pt.reportRetryableFileError(file_index, "unable to update cache: {s}", .{@errorName(err)});
+                            continue;
+                        },
+                    };
+                }
+            },
+            .incremental => {},
+        }
+
+        try reportMultiModuleErrors(pt);
+
+        const any_fatal_files = for (zcu.import_table.values()) |file_index| {
+            const file = zcu.fileByIndex(file_index);
+            switch (file.status) {
+                .never_loaded => unreachable, // everything is loaded by the workers
+                .retryable_failure, .astgen_failure => break true,
+                .success => {},
+            }
+        } else false;
+
+        if (any_fatal_files or comp.alloc_failure_occurred) {
+            // We give up right now! No updating of ZIR refs, no nothing. The idea is that this prevents
+            // us from invalidating lots of incremental dependencies due to files with e.g. parse errors.
+            // However, this means our analysis data is invalid, so we want to omit all analysis errors.
+            // To do that, let's just clear the analysis roots!
+
+            assert(zcu.failed_files.count() > 0); // we will get an error
+            zcu.analysis_roots.clear(); // no analysis happened
+            return;
+        }
+
         if (comp.incremental) {
             const update_zir_refs_node = main_progress_node.start("Update ZIR References", 0);
             defer update_zir_refs_node.end();
             try pt.updateZirRefs();
         }
-        try reportMultiModuleErrors(pt);
         try zcu.flushRetryableFailures();
 
         zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
@@ -4280,7 +4314,7 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye
     };
 }
 
-fn workerAstGenFile(
+fn workerUpdateFile(
     tid: usize,
     comp: *Compilation,
     file: *Zcu.File,
@@ -4290,40 +4324,44 @@ fn workerAstGenFile(
     wg: *WaitGroup,
     src: Zcu.AstGenSrc,
 ) void {
-    assert(file.getMode() == .zig);
     const child_prog_node = prog_node.start(file.sub_file_path, 0);
     defer child_prog_node.end();
 
     const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
     defer pt.deactivate();
-    pt.astGenFile(file, path_digest) catch |err| switch (err) {
+    pt.updateFile(file, path_digest) catch |err| switch (err) {
        error.AnalysisFail => return,
        else => {
-            file.status = .retryable_failure;
            pt.reportRetryableAstGenError(src, file_index, err) catch |oom| switch (oom) {
-                // Swallowing this error is OK because it's implied to be OOM when
-                // there is a missing `failed_files` error message.
-                error.OutOfMemory => {},
+                error.OutOfMemory => {
+                    comp.mutex.lock();
+                    defer comp.mutex.unlock();
+                    comp.setAllocFailure();
+                },
            };
            return;
        },
    };
 
+    switch (file.getMode()) {
+        .zig => {}, // continue to logic below
+        .zon => return, // ZON can't import anything so we're done
+    }
+
     // Pre-emptively look for `@import` paths and queue them up.
     // If we experience an error preemptively fetching the
     // file, just ignore it and let it happen again later during Sema.
-    assert(file.zir_loaded);
-    const imports_index = file.zir.extra[@intFromEnum(Zir.ExtraIndex.imports)];
+    const imports_index = file.zir.?.extra[@intFromEnum(Zir.ExtraIndex.imports)];
     if (imports_index != 0) {
-        const extra = file.zir.extraData(Zir.Inst.Imports, imports_index);
+        const extra = file.zir.?.extraData(Zir.Inst.Imports, imports_index);
         var import_i: u32 = 0;
         var extra_index = extra.end;
         while (import_i < extra.data.imports_len) : (import_i += 1) {
-            const item = file.zir.extraData(Zir.Inst.Imports.Item, extra_index);
+            const item = file.zir.?.extraData(Zir.Inst.Imports.Item, extra_index);
             extra_index = item.end;
-            const import_path = file.zir.nullTerminatedString(item.data.name);
+            const import_path = file.zir.?.nullTerminatedString(item.data.name);
 
             // `@import("builtin")` is handled specially.
             if (mem.eql(u8, import_path, "builtin")) continue;
@@ -4344,7 +4382,7 @@ fn workerAstGenFile(
             const imported_path_digest = pt.zcu.filePathDigest(res.file_index);
             break :blk .{ res, imported_path_digest };
         };
-        if (import_result.is_new and import_result.file.getMode() == .zig) {
+        if (import_result.is_new) {
             log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{
                 file.sub_file_path, import_path, import_result.file.sub_file_path,
             });
@@ -4352,7 +4390,7 @@ fn workerAstGenFile(
                 .importing_file = file_index,
                 .import_tok = item.data.token,
             } };
-            comp.thread_pool.spawnWgId(wg, workerAstGenFile, .{
+            comp.thread_pool.spawnWgId(wg, workerUpdateFile, .{
                 comp, import_result.file, import_result.file_index, imported_path_digest, prog_node, wg, sub_src,
            });
        }
@@ -4375,7 +4413,7 @@ fn workerUpdateBuiltinZigFile(
    };
 }
 
-fn workerCheckEmbedFile(tid: usize, comp: *Compilation, ef_index: Zcu.EmbedFile.Index, ef: *Zcu.EmbedFile) void {
+fn workerUpdateEmbedFile(tid: usize, comp: *Compilation, ef_index: Zcu.EmbedFile.Index, ef: *Zcu.EmbedFile) void {
    comp.detectEmbedFileUpdate(@enumFromInt(tid), ef_index, ef) catch |err| switch (err) {
        error.OutOfMemory => {
            comp.mutex.lock();
```
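The most mechanical change in this diff is retiring the `zir_loaded`/`tree_loaded`/`source_loaded` booleans on `Zcu.File`: `zir`, `tree`, and `source` become optionals, so a null check replaces each flag test and `.?` asserts loadedness at the point of use. The standalone sketch below illustrates the pattern with hypothetical stand-in types (not the real `Zcu.File` fields):

```zig
const std = @import("std");

// Hypothetical stand-ins for Zir/Zoir, just to keep the sketch self-contained.
const Zir = struct { inst_count: u32 };
const Zoir = struct { node_count: u32 };

const File = struct {
    // Each artifact is null until loaded; no separate `*_loaded` flag to keep in sync.
    zir: ?Zir = null,
    zoir: ?Zoir = null,

    fn report(file: *const File) void {
        // Mirrors the diff's `if (file.zir != null)` / `else if (file.zoir != null)` shape.
        if (file.zir != null) {
            std.debug.print("zir loaded: {d} insts\n", .{file.zir.?.inst_count});
        } else if (file.zoir) |zoir| {
            std.debug.print("zoir loaded: {d} nodes\n", .{zoir.node_count});
        } else {
            std.debug.print("nothing loaded\n", .{});
        }
    }
};

pub fn main() void {
    const file: File = .{ .zir = .{ .inst_count = 42 } };
    file.report();
}
```

The payoff over paired flags is that loadedness can never desynchronize from the data, and an incorrect assumption trips a safety-checked `.?` unwrap instead of reading a stale value.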
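The new `any_fatal_files` computation in `performAllTheWorkInner` also leans on Zig's loop-`else`: `break true` exits the loop with a value, and the `else` arm supplies `false` when the loop runs to completion without breaking. A minimal sketch, using a hypothetical `Status` enum shaped like the file statuses checked in the diff:

```zig
const std = @import("std");

// Hypothetical enum standing in for Zcu.File.Status in the diff.
const Status = enum { retryable_failure, astgen_failure, success };

pub fn main() void {
    const statuses = [_]Status{ .success, .astgen_failure, .success };

    // `break true` ends the loop with a value; if no iteration breaks,
    // the `else` branch provides the loop's result (`false`).
    const any_fatal = for (statuses) |status| {
        switch (status) {
            .retryable_failure, .astgen_failure => break true,
            .success => {},
        }
    } else false;

    std.debug.print("any fatal: {}\n", .{any_fatal});
}
```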
