From f0d5e0df4d95c06287e0fe3fe48b69daf5306c8f Mon Sep 17 00:00:00 2001
From: mlugg
Date: Wed, 1 Jan 2025 13:30:27 +0000
Subject: incremental: fix errors not being deleted upon re-analysis

Previously, logic in `Compilation.getAllErrorsAlloc` was corrupting the
`failed_analysis` hashmap. This meant that on updates after the initial
update, attempts to remove entries from this map (because the `AnalUnit`
in question is being re-analyzed) silently failed. This resulted in
compile errors from earlier updates wrongly getting "stuck", i.e. never
being removed.

This commit also adds a few log calls which helped me to find this bug.
---
 src/Compilation.zig | 34 +++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/src/Compilation.zig b/src/Compilation.zig
index cee9513ac5..929ed586f6 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -3102,9 +3102,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
         for (zcu.failed_embed_files.values()) |error_msg| {
             try addModuleErrorMsg(zcu, &bundle, error_msg.*);
         }
-        {
+        var sorted_failed_analysis: std.AutoArrayHashMapUnmanaged(InternPool.AnalUnit, *Zcu.ErrorMsg).DataList.Slice = s: {
             const SortOrder = struct {
                 zcu: *Zcu,
+                errors: []const *Zcu.ErrorMsg,
                 err: *?Error,

                 const Error = @typeInfo(
@@ -3113,12 +3114,11 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {

                 pub fn lessThan(ctx: @This(), lhs_index: usize, rhs_index: usize) bool {
                     if (ctx.err.*) |_| return lhs_index < rhs_index;
-                    const errors = ctx.zcu.failed_analysis.values();
-                    const lhs_src_loc = errors[lhs_index].src_loc.upgradeOrLost(ctx.zcu) orelse {
+                    const lhs_src_loc = ctx.errors[lhs_index].src_loc.upgradeOrLost(ctx.zcu) orelse {
                         // LHS source location lost, so should never be referenced. Just sort it to the end.
                         return false;
                     };
-                    const rhs_src_loc = errors[rhs_index].src_loc.upgradeOrLost(ctx.zcu) orelse {
+                    const rhs_src_loc = ctx.errors[rhs_index].src_loc.upgradeOrLost(ctx.zcu) orelse {
                         // RHS source location lost, so should never be referenced. Just sort it to the end.
                         return true;
                     };
@@ -3135,13 +3135,24 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
                     }).main;
                 }
             };
+
+            // We can't directly sort `zcu.failed_analysis.entries`, because that would leave the map
+            // in an invalid state, and we need it intact for future incremental updates. The amount
+            // of data here is only as large as the number of analysis errors, so just dupe it all.
+            var entries = try zcu.failed_analysis.entries.clone(gpa);
+            errdefer entries.deinit(gpa);
+
             var err: ?SortOrder.Error = null;
-            // This leaves `zcu.failed_analysis` an invalid state, but we do not
-            // need lookups anymore anyway.
-            zcu.failed_analysis.entries.sort(SortOrder{ .zcu = zcu, .err = &err });
+            entries.sort(SortOrder{
+                .zcu = zcu,
+                .errors = entries.items(.value),
+                .err = &err,
+            });
             if (err) |e| return e;
-        }
-        for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| {
+            break :s entries.slice();
+        };
+        defer sorted_failed_analysis.deinit(gpa);
+        for (sorted_failed_analysis.items(.key), sorted_failed_analysis.items(.value)) |anal_unit, error_msg| {
             if (comp.incremental) {
                 const refs = try zcu.resolveReferences();
                 if (!refs.contains(anal_unit)) continue;
@@ -3158,6 +3169,11 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
             // We'll try again once parsing succeeds.
             if (!zcu.fileByIndex(file_index).okToReportErrors()) continue;

+            std.log.scoped(.zcu).debug("analysis error '{s}' reported from unit '{}'", .{
+                error_msg.msg,
+                zcu.fmtAnalUnit(anal_unit),
+            });
+
             try addModuleErrorMsg(zcu, &bundle, error_msg.*);
             if (zcu.cimport_errors.get(anal_unit)) |errors| {
                 for (errors.getMessages()) |err_msg_index| {
--
cgit v1.2.3
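
A minimal, hypothetical sketch of the root cause and of the pattern the fix adopts, assuming a 0.14-era std matching the commit's timeframe: an `std.AutoArrayHashMapUnmanaged` keeps a hash index alongside its `entries` list, so sorting `entries` in place desynchronizes the two and later keyed removals silently fail. Cloning the entries and sorting only the copy keeps the map usable. The map, keys, and values below are illustrative only; the `entries.clone`, `MultiArrayList.sort`, and `items(.key)`/`items(.value)` calls mirror the ones used in the diff.

const std = @import("std");

// Hypothetical standalone demo, not compiler code. Sorting a map's backing
// `entries` in place would desynchronize them from the map's hash index, so
// later keyed removals could silently fail (the bug described in the commit
// message). The safer pattern, mirroring the fix: sort a clone of the entries.
pub fn main() !void {
    const gpa = std.heap.page_allocator;

    var map: std.AutoArrayHashMapUnmanaged(u32, u32) = .empty;
    defer map.deinit(gpa);

    // Enough entries that the map relies on its hash index rather than a
    // linear scan (the exact threshold is an implementation detail).
    var i: u32 = 0;
    while (i < 32) : (i += 1) try map.put(gpa, i, 1000 - i);

    // Dupe the backing MultiArrayList and sort only the duplicate.
    var sorted = try map.entries.clone(gpa);
    defer sorted.deinit(gpa);

    const SortByValue = struct {
        values: []const u32,
        pub fn lessThan(ctx: @This(), lhs_index: usize, rhs_index: usize) bool {
            return ctx.values[lhs_index] < ctx.values[rhs_index];
        }
    };
    sorted.sort(SortByValue{ .values = sorted.items(.value) });

    // Report in sorted order from the copy...
    for (sorted.items(.key), sorted.items(.value)) |k, v| {
        std.debug.print("key {} -> value {}\n", .{ k, v });
    }

    // ...while the original map was never reordered, so keyed removal (the
    // operation that silently failed on the corrupted map) still works.
    std.debug.assert(map.swapRemove(5));
}

As the new comment in the diff notes, the duplicated data is only as large as the number of analysis errors, so the clone is cheap and the map's hash index never has to be rebuilt for later incremental updates.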