From 895267c916f874593b0788b198b7de140b6b335b Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 11 Aug 2024 23:16:06 +0100 Subject: frontend: incremental progress This commit makes more progress towards incremental compilation, fixing some crashes in the frontend. Notably, it fixes the regressions introduced by #20964. It also cleans up the "outdated file root" mechanism, by virtue of deleting it: we now detect outdated file roots just after updating ZIR refs, and re-scan their namespaces. --- src/Zcu/PerThread.zig | 363 ++++++++++++++++++++++++++------------------------ 1 file changed, 191 insertions(+), 172 deletions(-) (limited to 'src/Zcu/PerThread.zig') diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index b353331d95..37a3aced09 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -39,7 +39,6 @@ pub fn astGenFile( pt: Zcu.PerThread, file: *Zcu.File, path_digest: Cache.BinDigest, - old_root_type: InternPool.Index, ) !void { dev.check(.ast_gen); assert(!file.mod.isBuiltin()); @@ -299,25 +298,15 @@ pub fn astGenFile( file.status = .astgen_failure; return error.AnalysisFail; } - - if (old_root_type != .none) { - // The root of this file must be re-analyzed, since the file has changed. - comp.mutex.lock(); - defer comp.mutex.unlock(); - - log.debug("outdated file root type: {}", .{old_root_type}); - try zcu.outdated_file_root.put(gpa, old_root_type, {}); - } } const UpdatedFile = struct { - file_index: Zcu.File.Index, file: *Zcu.File, inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index), }; -fn cleanupUpdatedFiles(gpa: Allocator, updated_files: *std.ArrayListUnmanaged(UpdatedFile)) void { - for (updated_files.items) |*elem| elem.inst_map.deinit(gpa); +fn cleanupUpdatedFiles(gpa: Allocator, updated_files: *std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile)) void { + for (updated_files.values()) |*elem| elem.inst_map.deinit(gpa); updated_files.deinit(gpa); } @@ -328,143 +317,166 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { const gpa = zcu.gpa; // We need to visit every updated File for every TrackedInst in InternPool. - var updated_files: std.ArrayListUnmanaged(UpdatedFile) = .{}; + var updated_files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile) = .{}; defer cleanupUpdatedFiles(gpa, &updated_files); for (zcu.import_table.values()) |file_index| { const file = zcu.fileByIndex(file_index); const old_zir = file.prev_zir orelse continue; const new_zir = file.zir; - try updated_files.append(gpa, .{ - .file_index = file_index, + const gop = try updated_files.getOrPut(gpa, file_index); + assert(!gop.found_existing); + gop.value_ptr.* = .{ .file = file, .inst_map = .{}, - }); - const inst_map = &updated_files.items[updated_files.items.len - 1].inst_map; - try Zcu.mapOldZirToNew(gpa, old_zir.*, new_zir, inst_map); + }; + if (!new_zir.hasCompileErrors()) { + try Zcu.mapOldZirToNew(gpa, old_zir.*, file.zir, &gop.value_ptr.inst_map); + } } - if (updated_files.items.len == 0) + if (updated_files.count() == 0) return; for (ip.locals, 0..) |*local, tid| { const tracked_insts_list = local.getMutableTrackedInsts(gpa); - for (tracked_insts_list.view().items(.@"0"), 0..) 
|*tracked_inst, tracked_inst_unwrapped_index| { - for (updated_files.items) |updated_file| { - const file_index = updated_file.file_index; - if (tracked_inst.file != file_index) continue; - - const file = updated_file.file; - const old_zir = file.prev_zir.?.*; - const new_zir = file.zir; - const old_tag = old_zir.instructions.items(.tag); - const old_data = old_zir.instructions.items(.data); - const inst_map = &updated_file.inst_map; - - const old_inst = tracked_inst.inst; - const tracked_inst_index = (InternPool.TrackedInst.Index.Unwrapped{ - .tid = @enumFromInt(tid), - .index = @intCast(tracked_inst_unwrapped_index), - }).wrap(ip); - tracked_inst.inst = inst_map.get(old_inst) orelse { - // Tracking failed for this instruction. Invalidate associated `src_hash` deps. - log.debug("tracking failed for %{d}", .{old_inst}); - try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); - continue; - }; + for (tracked_insts_list.viewAllowEmpty().items(.@"0"), 0..) |*tracked_inst, tracked_inst_unwrapped_index| { + const file_index = tracked_inst.file; + const updated_file = updated_files.get(file_index) orelse continue; - if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { - if (new_zir.getAssociatedSrcHash(tracked_inst.inst)) |new_hash| { - if (std.zig.srcHashEql(old_hash, new_hash)) { - break :hash_changed; - } - log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ - old_inst, - tracked_inst.inst, - std.fmt.fmtSliceHexLower(&old_hash), - std.fmt.fmtSliceHexLower(&new_hash), - }); + const file = updated_file.file; + + if (file.zir.hasCompileErrors()) { + // If we mark this as outdated now, users of this inst will just get a transitive analysis failure. + // Ultimately, they would end up throwing out potentially useful analysis results. + // So, do nothing. We already have the file failure -- that's sufficient for now! + continue; + } + const old_inst = tracked_inst.inst.unwrap() orelse continue; // we can't continue tracking lost insts + const tracked_inst_index = (InternPool.TrackedInst.Index.Unwrapped{ + .tid = @enumFromInt(tid), + .index = @intCast(tracked_inst_unwrapped_index), + }).wrap(ip); + const new_inst = updated_file.inst_map.get(old_inst) orelse { + // Tracking failed for this instruction. Invalidate associated `src_hash` deps. + log.debug("tracking failed for %{d}", .{old_inst}); + tracked_inst.inst = .lost; + try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + continue; + }; + tracked_inst.inst = InternPool.TrackedInst.MaybeLost.ZirIndex.wrap(new_inst); + + const old_zir = file.prev_zir.?.*; + const new_zir = file.zir; + const old_tag = old_zir.instructions.items(.tag); + const old_data = old_zir.instructions.items(.data); + + if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { + if (new_zir.getAssociatedSrcHash(new_inst)) |new_hash| { + if (std.zig.srcHashEql(old_hash, new_hash)) { + break :hash_changed; } - // The source hash associated with this instruction changed - invalidate relevant dependencies. - try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ + old_inst, + new_inst, + std.fmt.fmtSliceHexLower(&old_hash), + std.fmt.fmtSliceHexLower(&new_hash), + }); } + // The source hash associated with this instruction changed - invalidate relevant dependencies. + try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + } - // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. 
- const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { - .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { - .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, - else => false, - }, + // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. + const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { + .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { + .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, else => false, - }; - if (!has_namespace) continue; - - var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; - defer old_names.deinit(zcu.gpa); - { - var it = old_zir.declIterator(old_inst); - while (it.next()) |decl_inst| { - const decl_name = old_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(old_zir)) continue, - } - const name_zir = decl_name.toString(old_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - pt.tid, - old_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - try old_names.put(zcu.gpa, name_ip, {}); + }, + else => false, + }; + if (!has_namespace) continue; + + var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; + defer old_names.deinit(zcu.gpa); + { + var it = old_zir.declIterator(old_inst); + while (it.next()) |decl_inst| { + const decl_name = old_zir.getDeclaration(decl_inst)[0].name; + switch (decl_name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, + _ => if (decl_name.isNamedTest(old_zir)) continue, } + const name_zir = decl_name.toString(old_zir).?; + const name_ip = try zcu.intern_pool.getOrPutString( + zcu.gpa, + pt.tid, + old_zir.nullTerminatedString(name_zir), + .no_embedded_nulls, + ); + try old_names.put(zcu.gpa, name_ip, {}); } - var any_change = false; - { - var it = new_zir.declIterator(tracked_inst.inst); - while (it.next()) |decl_inst| { - const decl_name = new_zir.getDeclaration(decl_inst)[0].name; - switch (decl_name) { - .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, - _ => if (decl_name.isNamedTest(new_zir)) continue, - } - const name_zir = decl_name.toString(new_zir).?; - const name_ip = try zcu.intern_pool.getOrPutString( - zcu.gpa, - pt.tid, - new_zir.nullTerminatedString(name_zir), - .no_embedded_nulls, - ); - if (!old_names.swapRemove(name_ip)) continue; - // Name added - any_change = true; - try zcu.markDependeeOutdated(.{ .namespace_name = .{ - .namespace = tracked_inst_index, - .name = name_ip, - } }); + } + var any_change = false; + { + var it = new_zir.declIterator(new_inst); + while (it.next()) |decl_inst| { + const decl_name = new_zir.getDeclaration(decl_inst)[0].name; + switch (decl_name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, + _ => if (decl_name.isNamedTest(new_zir)) continue, } - } - // The only elements remaining in `old_names` now are any names which were removed. 
- for (old_names.keys()) |name_ip| { + const name_zir = decl_name.toString(new_zir).?; + const name_ip = try zcu.intern_pool.getOrPutString( + zcu.gpa, + pt.tid, + new_zir.nullTerminatedString(name_zir), + .no_embedded_nulls, + ); + if (!old_names.swapRemove(name_ip)) continue; + // Name added any_change = true; try zcu.markDependeeOutdated(.{ .namespace_name = .{ .namespace = tracked_inst_index, .name = name_ip, } }); } + } + // The only elements remaining in `old_names` now are any names which were removed. + for (old_names.keys()) |name_ip| { + any_change = true; + try zcu.markDependeeOutdated(.{ .namespace_name = .{ + .namespace = tracked_inst_index, + .name = name_ip, + } }); + } - if (any_change) { - try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index }); - } + if (any_change) { + try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index }); } } } - for (updated_files.items) |updated_file| { + try ip.rehashTrackedInsts(gpa, pt.tid); + + for (updated_files.keys(), updated_files.values()) |file_index, updated_file| { const file = updated_file.file; - const prev_zir = file.prev_zir.?; - file.prev_zir = null; - prev_zir.deinit(gpa); - gpa.destroy(prev_zir); + if (file.zir.hasCompileErrors()) { + // Keep `prev_zir` around: it's the last non-error ZIR. + // Don't update the namespace, as we have no new data to update *to*. + } else { + const prev_zir = file.prev_zir.?; + file.prev_zir = null; + prev_zir.deinit(gpa); + gpa.destroy(prev_zir); + + // For every file which has changed, re-scan the namespace of the file's root struct type. + // These types are special-cased because they don't have an enclosing declaration which will + // be re-analyzed (causing the struct's namespace to be re-scanned). It's fine to do this + // now because this work is fast (no actual Sema work is happening, we're just updating the + // namespace contents). We must do this after updating ZIR refs above, since `scanNamespace` + // will track some instructions. + try pt.updateFileNamespace(file_index); + } } } @@ -473,6 +485,8 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { const file_root_type = pt.zcu.fileRootType(file_index); if (file_root_type != .none) { + // The namespace is already up-to-date thanks to the `updateFileNamespace` calls at the + // start of this update. We just have to check whether the type itself is okay! const file_root_type_cau = pt.zcu.intern_pool.loadStructType(file_root_type).cau.unwrap().?; return pt.ensureCauAnalyzed(file_root_type_cau); } else { @@ -493,7 +507,6 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); const cau = ip.getCau(cau_index); - const inst_info = cau.zir_index.resolveFull(ip); log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)}); @@ -516,12 +529,9 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu _ = zcu.outdated_ready.swapRemove(anal_unit); } - // TODO: this only works if namespace lookups in Sema trigger `ensureCauAnalyzed`, because - // `outdated_file_root` information is not "viral", so we need that a namespace lookup first - // handles the case where the file root is not an outdated *type* but does have an outdated - // *namespace*. A more logically simple alternative may be for a file's root struct to register - // a dependency on the file's entire source code (hash). 
Alternatively, we could make sure that - // these are always handled first in an update. Actually, that's probably the best option. + const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail; + + // TODO: document this elsewhere mlugg! // For my own benefit, here's how a namespace update for a normal (non-file-root) type works: // `const S = struct { ... };` // We are adding or removing a declaration within this `struct`. @@ -535,16 +545,12 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu // * we basically do `scanDecls`, updating the namespace as needed // * TODO: optimize this to make sure we only do it once a generation i guess? // * so everyone lived happily ever after - const file_root_outdated = switch (cau.owner.unwrap()) { - .type => |ty| zcu.outdated_file_root.swapRemove(ty), - .nav, .none => false, - }; if (zcu.fileByIndex(inst_info.file).status != .success_zir) { return error.AnalysisFail; } - if (!cau_outdated and !file_root_outdated) { + if (!cau_outdated) { // We can trust the current information about this `Cau`. if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { return error.AnalysisFail; @@ -571,10 +577,13 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu const sema_result: SemaCauResult = res: { if (inst_info.inst == .main_struct_inst) { - const changed = try pt.semaFileUpdate(inst_info.file, cau_outdated); + // Note that this is definitely a *recreation* due to outdated, because + // this instruction indicates that `cau.owner` is a `type`, which only + // reaches here if `cau_outdated`. + try pt.recreateFileRoot(inst_info.file); break :res .{ - .invalidate_decl_val = changed, - .invalidate_decl_ref = changed, + .invalidate_decl_val = true, + .invalidate_decl_ref = true, }; } @@ -690,8 +699,8 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter zcu.potentially_outdated.swapRemove(anal_unit); if (func_outdated) { - dev.check(.incremental); _ = zcu.outdated_ready.swapRemove(anal_unit); + dev.check(.incremental); zcu.deleteUnitExports(anal_unit); zcu.deleteUnitReferences(anal_unit); } @@ -920,12 +929,9 @@ fn createFileRootStruct( return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index); } -/// Re-analyze the root type of a file on an incremental update. -/// If `type_outdated`, the struct type itself is considered outdated and is -/// reconstructed at a new InternPool index. Otherwise, the namespace is just -/// re-analyzed. Returns whether the decl's tyval was invalidated. -/// Returns `error.AnalysisFail` if the file has an error. -fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: bool) Zcu.SemaError!bool { +/// Recreate the root type of a file after it becomes outdated. A new struct type +/// is constructed at a new InternPool index, reusing the namespace for efficiency. 
+fn recreateFileRoot(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { const zcu = pt.zcu; const ip = &zcu.intern_pool; const file = zcu.fileByIndex(file_index); @@ -934,48 +940,58 @@ fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: assert(file_root_type != .none); - log.debug("semaFileUpdate mod={s} sub_file_path={s} type_outdated={}", .{ + log.debug("recreateFileRoot mod={s} sub_file_path={s}", .{ file.mod.fully_qualified_name, file.sub_file_path, - type_outdated, }); if (file.status != .success_zir) { return error.AnalysisFail; } - if (type_outdated) { - // Invalidate the existing type, reusing its namespace. - const file_root_type_cau = ip.loadStructType(file_root_type).cau.unwrap().?; - ip.removeDependenciesForDepender( - zcu.gpa, - InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }), - ); - ip.remove(pt.tid, file_root_type); - _ = try pt.createFileRootStruct(file_index, namespace_index); - return true; - } - - // Only the struct's namespace is outdated. - // Preserve the type - just scan the namespace again. + // Invalidate the existing type, reusing its namespace. + const file_root_type_cau = ip.loadStructType(file_root_type).cau.unwrap().?; + ip.removeDependenciesForDepender( + zcu.gpa, + InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }), + ); + ip.remove(pt.tid, file_root_type); + _ = try pt.createFileRootStruct(file_index, namespace_index); +} - const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; - const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); +/// Re-scan the namespace of a file's root struct type on an incremental update. +/// The file must have successfully populated ZIR. +/// If the file's root struct type is not populated (the file is unreferenced), nothing is done. +/// This is called by `updateZirRefs` for all updated files before the main work loop. +/// This function does not perform any semantic analysis. 
+fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator.Error!void { + const zcu = pt.zcu; - var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; - extra_index += @intFromBool(small.has_fields_len); - const decls_len = if (small.has_decls_len) blk: { - const decls_len = file.zir.extra[extra_index]; - extra_index += 1; - break :blk decls_len; - } else 0; - const decls = file.zir.bodySlice(extra_index, decls_len); + const file = zcu.fileByIndex(file_index); + assert(file.status == .success_zir); + const file_root_type = zcu.fileRootType(file_index); + if (file_root_type == .none) return; - if (!type_outdated) { - try pt.scanNamespace(namespace_index, decls); - } + log.debug("updateFileNamespace mod={s} sub_file_path={s}", .{ + file.mod.fully_qualified_name, + file.sub_file_path, + }); - return false; + const namespace_index = Type.fromInterned(file_root_type).getNamespaceIndex(zcu); + const decls = decls: { + const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended; + const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); + + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) blk: { + const decls_len = file.zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + break :decls file.zir.bodySlice(extra_index, decls_len); + }; + try pt.scanNamespace(namespace_index, decls); } /// Regardless of the file status, will create a `Decl` if none exists so that we can track @@ -1052,7 +1068,7 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult { const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); const cau = ip.getCau(cau_index); - const inst_info = cau.zir_index.resolveFull(ip); + const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(inst_info.file); const zir = file.zir; @@ -1944,6 +1960,9 @@ const ScanDeclIter = struct { const cau, const nav = if (existing_cau) |cau_index| cau_nav: { const nav_index = ip.getCau(cau_index).owner.unwrap().nav; const nav = ip.getNav(nav_index); + if (nav.name != name) { + std.debug.panic("'{}' vs '{}'", .{ nav.name.fmt(ip), name.fmt(ip) }); + } assert(nav.name == name); assert(nav.fqn == fqn); break :cau_nav .{ cau_index, nav_index }; @@ -2011,7 +2030,7 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError! const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); const func = zcu.funcInfo(func_index); - const inst_info = func.zir_body_inst.resolveFull(ip); + const inst_info = func.zir_body_inst.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(inst_info.file); const zir = file.zir; @@ -2097,7 +2116,7 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError! }; defer inner_block.instructions.deinit(gpa); - const fn_info = sema.code.getFnInfo(func.zirBodyInstUnordered(ip).resolve(ip)); + const fn_info = sema.code.getFnInfo(func.zirBodyInstUnordered(ip).resolve(ip) orelse return error.AnalysisFail); // Here we are performing "runtime semantic analysis" for a function body, which means // we must map the parameter ZIR instructions to `arg` AIR instructions. 
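Several call sites in this commit change from an infallible `cau.zir_index.resolveFull(ip)` to `resolveFull(ip) orelse return error.AnalysisFail`, because after an incremental update a tracked instruction may now be `.lost`. The following standalone sketch illustrates that optional-resolution idiom with a plain `?u32` standing in for the compiler's `TrackedInst` machinery; it is an illustration only, not code from this patch.

    const std = @import("std");

    // Stand-in for `TrackedInst.Index.resolveFull`: on an incremental update the
    // instruction may have been lost, in which case resolution yields null.
    fn resolveToy(maybe_lost: ?u32) ?u32 {
        return maybe_lost;
    }

    fn analyzeToy(maybe_lost: ?u32) error{AnalysisFail}!u32 {
        // Callers bail out with `error.AnalysisFail` rather than asserting.
        return resolveToy(maybe_lost) orelse return error.AnalysisFail;
    }

    test "lost instructions fail analysis instead of crashing" {
        try std.testing.expectEqual(@as(u32, 7), try analyzeToy(7));
        try std.testing.expectError(error.AnalysisFail, analyzeToy(null));
    }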
-- cgit v1.2.3 From aa6c1c40ec29d581844ebb5db09a33453c76d4ba Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Aug 2024 03:05:19 +0100 Subject: frontend: yet more incremental work --- src/Compilation.zig | 19 ++-- src/Sema.zig | 50 ++++----- src/Zcu.zig | 241 ++++++++++++++++++++++++++++++++-------- src/Zcu/PerThread.zig | 300 +++++++++++++++++++++++++++++++------------------- 4 files changed, 418 insertions(+), 192 deletions(-) (limited to 'src/Zcu/PerThread.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index f2d61db0fd..61f07d3e3b 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2300,7 +2300,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { zcu.intern_pool.dumpGenericInstances(gpa); } - if (comp.config.is_test and comp.totalErrorCount() == 0) { + if (comp.config.is_test and try comp.totalErrorCount() == 0) { // The `test_functions` decl has been intentionally postponed until now, // at which point we must populate it with the list of test functions that // have been discovered and not filtered out. @@ -2310,7 +2310,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { try pt.processExports(); } - if (comp.totalErrorCount() != 0) { + if (try comp.totalErrorCount() != 0) { // Skip flushing and keep source files loaded for error reporting. comp.link_error_flags = .{}; return; @@ -2394,7 +2394,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } try flush(comp, arena, .main, main_progress_node); - if (comp.totalErrorCount() != 0) return; + if (try comp.totalErrorCount() != 0) return; // Failure here only means an unnecessary cache miss. man.writeManifest() catch |err| { @@ -2411,7 +2411,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { }, .incremental => { try flush(comp, arena, .main, main_progress_node); - if (comp.totalErrorCount() != 0) return; + if (try comp.totalErrorCount() != 0) return; }, } } @@ -3048,7 +3048,7 @@ fn addBuf(list: *std.ArrayList(std.posix.iovec_const), buf: []const u8) void { } /// This function is temporally single-threaded. -pub fn totalErrorCount(comp: *Compilation) u32 { +pub fn totalErrorCount(comp: *Compilation) Allocator.Error!u32 { var total: usize = comp.misc_failures.count() + @intFromBool(comp.alloc_failure_occurred) + @@ -3088,7 +3088,7 @@ pub fn totalErrorCount(comp: *Compilation) u32 { // the previous parse success, including compile errors, but we cannot // emit them until the file succeeds parsing. 
for (zcu.failed_analysis.keys()) |anal_unit| { - if (!all_references.contains(anal_unit)) continue; + if (comp.incremental and !all_references.contains(anal_unit)) continue; const file_index = switch (anal_unit.unwrap()) { .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, .func => |ip_index| (zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip) orelse continue).file, @@ -3225,7 +3225,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { if (err) |e| return e; } for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| { - if (!all_references.contains(anal_unit)) continue; + if (comp.incremental and !all_references.contains(anal_unit)) continue; const file_index = switch (anal_unit.unwrap()) { .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, @@ -3341,10 +3341,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } } - assert(comp.totalErrorCount() == bundle.root_list.items.len); + assert(try comp.totalErrorCount() == bundle.root_list.items.len); if (comp.module) |zcu| { - if (bundle.root_list.items.len == 0) { + if (comp.incremental and bundle.root_list.items.len == 0) { const should_have_error = for (zcu.transitive_failed_analysis.keys()) |failed_unit| { if (all_references.contains(failed_unit)) break true; } else false; @@ -3448,6 +3448,7 @@ pub fn addModuleErrorMsg( const span = try src.span(gpa); const loc = std.zig.findLineColumn(source.bytes, span.main); const rt_file_path = try src.file_scope.fullPath(gpa); + defer gpa.free(rt_file_path); const name = switch (ref.referencer.unwrap()) { .cau => |cau| switch (ip.getCau(cau).owner.unwrap()) { .nav => |nav| ip.getNav(nav).name.toSlice(ip), diff --git a/src/Sema.zig b/src/Sema.zig index 1a74b88e05..2e970c43f4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -112,6 +112,11 @@ exports: std.ArrayListUnmanaged(Zcu.Export) = .{}, references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, type_references: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, +/// All dependencies registered so far by this `Sema`. This is a temporary duplicate +/// of the main dependency data. It exists to avoid adding dependencies to a given +/// `AnalUnit` multiple times. +dependencies: std.AutoArrayHashMapUnmanaged(InternPool.Dependee, void) = .{}, + const MaybeComptimeAlloc = struct { /// The runtime index of the `alloc` instruction. runtime_index: Value.RuntimeIndex, @@ -879,6 +884,7 @@ pub fn deinit(sema: *Sema) void { sema.exports.deinit(gpa); sema.references.deinit(gpa); sema.type_references.deinit(gpa); + sema.dependencies.deinit(gpa); sema.* = undefined; } @@ -2740,7 +2746,7 @@ fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { _ = zcu.outdated_ready.swapRemove(cau_unit); zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, cau_unit); zcu.intern_pool.remove(pt.tid, ty); - try zcu.markDependeeOutdated(.{ .interned = ty }); + try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty }); return true; } @@ -6066,7 +6072,9 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr // That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to // trigger re-analysis later. 
try pt.ensureFileAnalyzed(result.file_index); - return Air.internedToRef(zcu.fileRootType(result.file_index)); + const ty = zcu.fileRootType(result.file_index); + try sema.addTypeReferenceEntry(src, ty); + return Air.internedToRef(ty); } fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -6820,6 +6828,13 @@ fn lookupInNamespace( const src_file = zcu.namespacePtr(block.namespace).file_scope; + if (Type.fromInterned(namespace.owner_type).typeDeclInst(zcu)) |type_decl_inst| { + try sema.declareDependency(.{ .namespace_name = .{ + .namespace = type_decl_inst, + .name = ident_name, + } }); + } + if (observe_usingnamespace and (namespace.pub_usingnamespace.items.len != 0 or namespace.priv_usingnamespace.items.len != 0)) { const gpa = sema.gpa; var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .{}; @@ -13981,12 +13996,6 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }); try sema.checkNamespaceType(block, lhs_src, container_type); - if (container_type.typeDeclInst(mod)) |type_decl_inst| { - try sema.declareDependency(.{ .namespace_name = .{ - .namespace = type_decl_inst, - .name = decl_name, - } }); - } const namespace = container_type.getNamespace(mod).unwrap() orelse return .bool_false; if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |lookup| { @@ -14026,7 +14035,9 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. // That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to // trigger re-analysis later. try pt.ensureFileAnalyzed(result.file_index); - return Air.internedToRef(zcu.fileRootType(result.file_index)); + const ty = zcu.fileRootType(result.file_index); + try sema.addTypeReferenceEntry(operand_src, ty); + return Air.internedToRef(ty); } fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -27696,13 +27707,6 @@ fn fieldVal( const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?; const child_type = val.toType(); - if (child_type.typeDeclInst(mod)) |type_decl_inst| { - try sema.declareDependency(.{ .namespace_name = .{ - .namespace = type_decl_inst, - .name = field_name, - } }); - } - switch (try child_type.zigTypeTagOrPoison(mod)) { .ErrorSet => { switch (ip.indexToKey(child_type.toIntern())) { @@ -27934,13 +27938,6 @@ fn fieldPtr( const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?; const child_type = val.toType(); - if (child_type.typeDeclInst(mod)) |type_decl_inst| { - try sema.declareDependency(.{ .namespace_name = .{ - .namespace = type_decl_inst, - .name = field_name, - } }); - } - switch (child_type.zigTypeTag(mod)) { .ErrorSet => { switch (ip.indexToKey(child_type.toIntern())) { @@ -32260,7 +32257,7 @@ fn addReferenceEntry( referenced_unit: AnalUnit, ) !void { const zcu = sema.pt.zcu; - if (zcu.comp.reference_trace == 0) return; + if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return; const gop = try sema.references.getOrPut(sema.gpa, referenced_unit); if (gop.found_existing) return; // TODO: we need to figure out how to model inline calls here. 
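Both `sema.references` and the new `sema.dependencies` field rely on the same idiom: an unmanaged array hash map with `void` values acting as an insertion-ordered set, where `getOrPut` turns repeat registrations into cheap no-ops. Below is a small self-contained sketch of that idiom, with toy `u64` keys standing in for `AnalUnit`/`InternPool.Dependee`; it is illustrative only and not part of this diff.

    const std = @import("std");

    test "void-valued map as a dedup set" {
        const gpa = std.testing.allocator;
        var seen: std.AutoArrayHashMapUnmanaged(u64, void) = .{};
        defer seen.deinit(gpa);

        for ([_]u64{ 1, 2, 1, 3, 2 }) |dependee| {
            const gop = try seen.getOrPut(gpa, dependee);
            if (gop.found_existing) continue; // already registered; skip the work
            // ... the expensive registration would happen here ...
        }
        // Only the three distinct keys were registered, in first-seen order.
        try std.testing.expectEqual(@as(usize, 3), seen.count());
    }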
@@ -32275,7 +32272,7 @@ fn addTypeReferenceEntry( referenced_type: InternPool.Index, ) !void { const zcu = sema.pt.zcu; - if (zcu.comp.reference_trace == 0) return; + if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return; const gop = try sema.type_references.getOrPut(sema.gpa, referenced_type); if (gop.found_existing) return; try zcu.addTypeReference(sema.owner, referenced_type, src); @@ -38272,6 +38269,9 @@ pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { const zcu = sema.pt.zcu; if (!zcu.comp.incremental) return; + const gop = try sema.dependencies.getOrPut(sema.gpa, dependee); + if (gop.found_existing) return; + // Avoid creating dependencies on ourselves. This situation can arise when we analyze the fields // of a type and they use `@This()`. This dependency would be unnecessary, and in fact would // just result in over-analysis since `Zcu.findOutdatedToAnalyze` would never be able to resolve diff --git a/src/Zcu.zig b/src/Zcu.zig index 2ce001b92a..c78abb69bf 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -10,7 +10,7 @@ const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; -const log = std.log.scoped(.module); +const log = std.log.scoped(.zcu); const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const Target = std.Target; @@ -153,9 +153,11 @@ cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = . /// Maximum amount of distinct error values, set by --error-limit error_limit: ErrorInt, -/// Value is the number of PO or outdated Decls which this AnalUnit depends on. +/// Value is the number of PO dependencies of this AnalUnit. +/// This value will decrease as we perform semantic analysis to learn what is outdated. +/// If any of these PO deps is outdated, this value will be moved to `outdated`. potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, -/// Value is the number of PO or outdated Decls which this AnalUnit depends on. +/// Value is the number of PO dependencies of this AnalUnit. /// Once this value drops to 0, the AnalUnit is a candidate for re-analysis. outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, /// This contains all `AnalUnit`s in `outdated` whose PO dependency count is 0. @@ -2276,55 +2278,90 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.F return zir; } -pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void { - log.debug("outdated dependee: {}", .{dependee}); +pub fn markDependeeOutdated( + zcu: *Zcu, + /// When we are diffing ZIR and marking things as outdated, we won't yet have marked the dependencies as PO. + /// However, when we discover during analysis that something was outdated, the `Dependee` was already + /// marked as PO, so we need to decrement the PO dep count for each depender. + marked_po: enum { not_marked_po, marked_po }, + dependee: InternPool.Dependee, +) !void { + log.debug("outdated dependee: {}", .{fmtDependee(dependee, zcu)}); var it = zcu.intern_pool.dependencyIterator(dependee); while (it.next()) |depender| { - if (zcu.outdated.contains(depender)) { - // We do not need to increment the PO dep count, as if the outdated - // dependee is a Decl, we had already marked this as PO. 
+ if (zcu.outdated.getPtr(depender)) |po_dep_count| { + switch (marked_po) { + .not_marked_po => {}, + .marked_po => { + po_dep_count.* -= 1; + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), po_dep_count.* }); + if (po_dep_count.* == 0) { + log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)}); + try zcu.outdated_ready.put(zcu.gpa, depender, {}); + } + }, + } continue; } const opt_po_entry = zcu.potentially_outdated.fetchSwapRemove(depender); + const new_po_dep_count = switch (marked_po) { + .not_marked_po => if (opt_po_entry) |e| e.value else 0, + .marked_po => if (opt_po_entry) |e| e.value - 1 else { + // This dependency has been registered during in-progress analysis, but the unit is + // not in `potentially_outdated` because analysis is in-progress. Nothing to do. + continue; + }, + }; + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), new_po_dep_count }); try zcu.outdated.putNoClobber( zcu.gpa, depender, - // We do not need to increment this count for the same reason as above. - if (opt_po_entry) |e| e.value else 0, + new_po_dep_count, ); - log.debug("outdated: {}", .{depender}); - if (opt_po_entry == null) { - // This is a new entry with no PO dependencies. + log.debug("outdated: {}", .{fmtAnalUnit(depender, zcu)}); + if (new_po_dep_count == 0) { + log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)}); try zcu.outdated_ready.put(zcu.gpa, depender, {}); } // If this is a Decl and was not previously PO, we must recursively // mark dependencies on its tyval as PO. if (opt_po_entry == null) { + assert(marked_po == .not_marked_po); try zcu.markTransitiveDependersPotentiallyOutdated(depender); } } } pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { + log.debug("up-to-date dependee: {}", .{fmtDependee(dependee, zcu)}); var it = zcu.intern_pool.dependencyIterator(dependee); while (it.next()) |depender| { if (zcu.outdated.getPtr(depender)) |po_dep_count| { // This depender is already outdated, but it now has one // less PO dependency! po_dep_count.* -= 1; + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), po_dep_count.* }); if (po_dep_count.* == 0) { + log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)}); try zcu.outdated_ready.put(zcu.gpa, depender, {}); } continue; } // This depender is definitely at least PO, because this Decl was just analyzed // due to being outdated. - const ptr = zcu.potentially_outdated.getPtr(depender).?; + const ptr = zcu.potentially_outdated.getPtr(depender) orelse { + // This dependency has been registered during in-progress analysis, but the unit is + // not in `potentially_outdated` because analysis is in-progress. Nothing to do. + continue; + }; if (ptr.* > 1) { ptr.* -= 1; + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), ptr.* }); continue; } + log.debug("up-to-date (po deps = 0): {}", .{fmtAnalUnit(depender, zcu)}); + // This dependency is no longer PO, i.e. is known to be up-to-date. assert(zcu.potentially_outdated.swapRemove(depender)); // If this is a Decl, we must recursively mark dependencies on its tyval @@ -2344,14 +2381,16 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { /// in turn be PO, due to a dependency on the original AnalUnit's tyval or IES. 
fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUnit) !void { const ip = &zcu.intern_pool; - var it = ip.dependencyIterator(switch (maybe_outdated.unwrap()) { + const dependee: InternPool.Dependee = switch (maybe_outdated.unwrap()) { .cau => |cau| switch (ip.getCau(cau).owner.unwrap()) { .nav => |nav| .{ .nav_val = nav }, // TODO: also `nav_ref` deps when introduced - .none, .type => return, // analysis of this `Cau` can't outdate any dependencies + .type => |ty| .{ .interned = ty }, + .none => return, // analysis of this `Cau` can't outdate any dependencies }, .func => |func_index| .{ .interned = func_index }, // IES - }); - + }; + log.debug("marking dependee po: {}", .{fmtDependee(dependee, zcu)}); + var it = ip.dependencyIterator(dependee); while (it.next()) |po| { if (zcu.outdated.getPtr(po)) |po_dep_count| { // This dependency is already outdated, but it now has one more PO @@ -2360,14 +2399,17 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni _ = zcu.outdated_ready.swapRemove(po); } po_dep_count.* += 1; + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), po_dep_count.* }); continue; } if (zcu.potentially_outdated.getPtr(po)) |n| { // There is now one more PO dependency. n.* += 1; + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), n.* }); continue; } try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1); + log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), 1 }); // This AnalUnit was not already PO, so we must recursively mark its dependers as also PO. try zcu.markTransitiveDependersPotentiallyOutdated(po); } @@ -2391,13 +2433,9 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { // In this case, we must defer to more complex logic below. if (zcu.outdated_ready.count() > 0) { - log.debug("findOutdatedToAnalyze: trivial '{s} {d}'", .{ - @tagName(zcu.outdated_ready.keys()[0].unwrap()), - switch (zcu.outdated_ready.keys()[0].unwrap()) { - inline else => |x| @intFromEnum(x), - }, - }); - return zcu.outdated_ready.keys()[0]; + const unit = zcu.outdated_ready.keys()[0]; + log.debug("findOutdatedToAnalyze: trivial {}", .{fmtAnalUnit(unit, zcu)}); + return unit; } // There is no single AnalUnit which is ready for re-analysis. Instead, we must assume that some @@ -2445,8 +2483,16 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { } } - log.debug("findOutdatedToAnalyze: heuristic returned Cau {d} ({d} dependers)", .{ - @intFromEnum(chosen_cau.?), + if (chosen_cau == null) { + for (zcu.outdated.keys(), zcu.outdated.values()) |o, opod| { + const func = o.unwrap().func; + const nav = zcu.funcInfo(func).owner_nav; + std.io.getStdErr().writer().print("outdated: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {}; + } + } + + log.debug("findOutdatedToAnalyze: heuristic returned '{}' ({d} dependers)", .{ + fmtAnalUnit(AnalUnit.wrap(.{ .cau = chosen_cau.? 
}), zcu), chosen_cau_dependers, }); @@ -3090,7 +3136,6 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve }); defer gpa.free(resolved_path); const file = zcu.import_table.get(resolved_path).?; - if (zcu.fileByIndex(file).status != .success_zir) continue; const root_ty = zcu.fileRootType(file); if (root_ty == .none) continue; type_queue.putAssumeCapacityNoClobber(root_ty, null); @@ -3102,6 +3147,8 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve const referencer = kv.value; try checked_types.putNoClobber(gpa, ty, {}); + log.debug("handle type '{}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)}); + // If this type has a `Cau` for resolution, it's automatically referenced. const resolution_cau: InternPool.Cau.Index.Optional = switch (ip.indexToKey(ty)) { .struct_type => ip.loadStructType(ty).cau, @@ -3132,13 +3179,14 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve // Queue any decls within this type which would be automatically analyzed. // Keep in sync with analysis queueing logic in `Zcu.PerThread.ScanDeclIter.scanDecl`. - const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap() orelse continue; + const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap().?; for (zcu.namespacePtr(ns).other_decls.items) |cau| { // These are `comptime` and `test` declarations. // `comptime` decls are always analyzed; `test` declarations are analyzed depending on the test filter. const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue; const file = zcu.fileByIndex(inst_info.file); - const zir = file.zir; + // If the file failed AstGen, the TrackedInst refers to the old ZIR. + const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*; const declaration = zir.getDeclaration(inst_info.inst)[0]; const want_analysis = switch (declaration.name) { .@"usingnamespace" => unreachable, @@ -3158,27 +3206,51 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve }; if (want_analysis) { const unit = AnalUnit.wrap(.{ .cau = cau }); - if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer); + if (!result.contains(unit)) { + log.debug("type '{}': ref cau %{}", .{ + Type.fromInterned(ty).containerTypeName(ip).fmt(ip), + @intFromEnum(inst_info.inst), + }); + try unit_queue.put(gpa, unit, referencer); + } } } for (zcu.namespacePtr(ns).pub_decls.keys()) |nav| { // These are named declarations. They are analyzed only if marked `export`. const cau = ip.getNav(nav).analysis_owner.unwrap().?; const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue; - const declaration = zcu.fileByIndex(inst_info.file).zir.getDeclaration(inst_info.inst)[0]; + const file = zcu.fileByIndex(inst_info.file); + // If the file failed AstGen, the TrackedInst refers to the old ZIR. + const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*; + const declaration = zir.getDeclaration(inst_info.inst)[0]; if (declaration.flags.is_export) { const unit = AnalUnit.wrap(.{ .cau = cau }); - if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer); + if (!result.contains(unit)) { + log.debug("type '{}': ref cau %{}", .{ + Type.fromInterned(ty).containerTypeName(ip).fmt(ip), + @intFromEnum(inst_info.inst), + }); + try unit_queue.put(gpa, unit, referencer); + } } } for (zcu.namespacePtr(ns).priv_decls.keys()) |nav| { // These are named declarations. They are analyzed only if marked `export`. 
const cau = ip.getNav(nav).analysis_owner.unwrap().?; const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue; - const declaration = zcu.fileByIndex(inst_info.file).zir.getDeclaration(inst_info.inst)[0]; + const file = zcu.fileByIndex(inst_info.file); + // If the file failed AstGen, the TrackedInst refers to the old ZIR. + const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*; + const declaration = zir.getDeclaration(inst_info.inst)[0]; if (declaration.flags.is_export) { const unit = AnalUnit.wrap(.{ .cau = cau }); - if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer); + if (!result.contains(unit)) { + log.debug("type '{}': ref cau %{}", .{ + Type.fromInterned(ty).containerTypeName(ip).fmt(ip), + @intFromEnum(inst_info.inst), + }); + try unit_queue.put(gpa, unit, referencer); + } } } // Incremental compilation does not support `usingnamespace`. @@ -3199,15 +3271,23 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve const unit = kv.key; try result.putNoClobber(gpa, unit, kv.value); + log.debug("handle unit '{}'", .{fmtAnalUnit(unit, zcu)}); + if (zcu.reference_table.get(unit)) |first_ref_idx| { assert(first_ref_idx != std.math.maxInt(u32)); var ref_idx = first_ref_idx; while (ref_idx != std.math.maxInt(u32)) { const ref = zcu.all_references.items[ref_idx]; - if (!result.contains(ref.referenced)) try unit_queue.put(gpa, ref.referenced, .{ - .referencer = unit, - .src = ref.src, - }); + if (!result.contains(ref.referenced)) { + log.debug("unit '{}': ref unit '{}'", .{ + fmtAnalUnit(unit, zcu), + fmtAnalUnit(ref.referenced, zcu), + }); + try unit_queue.put(gpa, ref.referenced, .{ + .referencer = unit, + .src = ref.src, + }); + } ref_idx = ref.next; } } @@ -3216,10 +3296,16 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve var ref_idx = first_ref_idx; while (ref_idx != std.math.maxInt(u32)) { const ref = zcu.all_type_references.items[ref_idx]; - if (!checked_types.contains(ref.referenced)) try type_queue.put(gpa, ref.referenced, .{ - .referencer = unit, - .src = ref.src, - }); + if (!checked_types.contains(ref.referenced)) { + log.debug("unit '{}': ref type '{}'", .{ + fmtAnalUnit(unit, zcu), + Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip), + }); + try type_queue.put(gpa, ref.referenced, .{ + .referencer = unit, + .src = ref.src, + }); + } ref_idx = ref.next; } } @@ -3293,3 +3379,72 @@ pub fn cauFileScope(zcu: *Zcu, cau: InternPool.Cau.Index) *File { const file_index = ip.getCau(cau).zir_index.resolveFile(ip); return zcu.fileByIndex(file_index); } + +fn fmtAnalUnit(unit: AnalUnit, zcu: *Zcu) std.fmt.Formatter(formatAnalUnit) { + return .{ .data = .{ .unit = unit, .zcu = zcu } }; +} +fn fmtDependee(d: InternPool.Dependee, zcu: *Zcu) std.fmt.Formatter(formatDependee) { + return .{ .data = .{ .dependee = d, .zcu = zcu } }; +} + +fn formatAnalUnit(data: struct { unit: AnalUnit, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { + _ = .{ fmt, options }; + const zcu = data.zcu; + const ip = &zcu.intern_pool; + switch (data.unit.unwrap()) { + .cau => |cau_index| { + const cau = ip.getCau(cau_index); + switch (cau.owner.unwrap()) { + .nav => |nav| return writer.print("cau(decl='{}')", .{ip.getNav(nav).fqn.fmt(ip)}), + .type => |ty| return writer.print("cau(ty='{}')", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)}), + .none => if (cau.zir_index.resolveFull(ip)) |resolved| { + const file_path = 
zcu.fileByIndex(resolved.file).sub_file_path; + return writer.print("cau(inst=('{s}', %{}))", .{ file_path, @intFromEnum(resolved.inst) }); + } else { + return writer.writeAll("cau(inst=)"); + }, + } + }, + .func => |func| { + const nav = zcu.funcInfo(func).owner_nav; + return writer.print("func('{}')", .{ip.getNav(nav).fqn.fmt(ip)}); + }, + } +} +fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { + _ = .{ fmt, options }; + const zcu = data.zcu; + const ip = &zcu.intern_pool; + switch (data.dependee) { + .src_hash => |ti| { + const info = ti.resolveFull(ip) orelse { + return writer.writeAll("inst()"); + }; + const file_path = zcu.fileByIndex(info.file).sub_file_path; + return writer.print("inst('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) }); + }, + .nav_val => |nav| { + const fqn = ip.getNav(nav).fqn; + return writer.print("nav('{}')", .{fqn.fmt(ip)}); + }, + .interned => |ip_index| switch (ip.indexToKey(ip_index)) { + .struct_type, .union_type, .enum_type => return writer.print("type('{}')", .{Type.fromInterned(ip_index).containerTypeName(ip).fmt(ip)}), + .func => |f| return writer.print("ies('{}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}), + else => unreachable, + }, + .namespace => |ti| { + const info = ti.resolveFull(ip) orelse { + return writer.writeAll("namespace()"); + }; + const file_path = zcu.fileByIndex(info.file).sub_file_path; + return writer.print("namespace('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) }); + }, + .namespace_name => |k| { + const info = k.namespace.resolveFull(ip) orelse { + return writer.print("namespace(, '{}')", .{k.name.fmt(ip)}); + }; + const file_path = zcu.fileByIndex(info.file).sub_file_path; + return writer.print("namespace('{s}', %{d}, '{}')", .{ file_path, @intFromEnum(info.inst), k.name.fmt(ip) }); + }, + } +} diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 37a3aced09..3c22abb4b8 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -360,7 +360,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { // Tracking failed for this instruction. Invalidate associated `src_hash` deps. log.debug("tracking failed for %{d}", .{old_inst}); tracked_inst.inst = .lost; - try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index }); continue; }; tracked_inst.inst = InternPool.TrackedInst.MaybeLost.ZirIndex.wrap(new_inst); @@ -383,7 +383,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { }); } // The source hash associated with this instruction changed - invalidate relevant dependencies. - try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index }); + try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index }); } // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. @@ -435,7 +435,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { if (!old_names.swapRemove(name_ip)) continue; // Name added any_change = true; - try zcu.markDependeeOutdated(.{ .namespace_name = .{ + try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace_name = .{ .namespace = tracked_inst_index, .name = name_ip, } }); @@ -444,14 +444,14 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { // The only elements remaining in `old_names` now are any names which were removed. 
for (old_names.keys()) |name_ip| { any_change = true; - try zcu.markDependeeOutdated(.{ .namespace_name = .{ + try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace_name = .{ .namespace = tracked_inst_index, .name = name_ip, } }); } if (any_change) { - try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index }); + try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace = tracked_inst_index }); } } } @@ -508,7 +508,7 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); const cau = ip.getCau(cau_index); - log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)}); + //log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)}); assert(!zcu.analysis_in_progress.contains(anal_unit)); @@ -527,8 +527,91 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu if (cau_outdated) { _ = zcu.outdated_ready.swapRemove(anal_unit); + } else { + // We can trust the current information about this `Cau`. + if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { + return error.AnalysisFail; + } + // If it wasn't failed and wasn't marked outdated, then either... + // * it is a type and is up-to-date, or + // * it is a `comptime` decl and is up-to-date, or + // * it is another decl and is EITHER up-to-date OR never-referenced (so unresolved) + // We just need to check for that last case. + switch (cau.owner.unwrap()) { + .type, .none => return, + .nav => |nav| if (ip.getNav(nav).status == .resolved) return, + } + } + + const sema_result: SemaCauResult, const analysis_fail = if (pt.ensureCauAnalyzedInner(cau_index, cau_outdated)) |result| + .{ result, false } + else |err| switch (err) { + error.AnalysisFail => res: { + if (!zcu.failed_analysis.contains(anal_unit)) { + // If this `Cau` caused the error, it would have an entry in `failed_analysis`. + // Since it does not, this must be a transitive failure. + try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); + } + // We treat errors as up-to-date, since those uses would just trigger a transitive error + break :res .{ .{ + .invalidate_decl_val = false, + .invalidate_decl_ref = false, + }, true }; + }, + error.OutOfMemory => res: { + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + try zcu.retryable_failures.ensureUnusedCapacity(gpa, 1); + const msg = try Zcu.ErrorMsg.create( + gpa, + .{ .base_node_inst = cau.zir_index, .offset = Zcu.LazySrcLoc.Offset.nodeOffset(0) }, + "unable to analyze: OutOfMemory", + .{}, + ); + zcu.retryable_failures.appendAssumeCapacity(anal_unit); + zcu.failed_analysis.putAssumeCapacityNoClobber(anal_unit, msg); + // We treat errors as up-to-date, since those uses would just trigger a transitive error + break :res .{ .{ + .invalidate_decl_val = false, + .invalidate_decl_ref = false, + }, true }; + }, + }; + + if (cau_outdated) { + // TODO: we do not yet have separate dependencies for decl values vs types. + const invalidate = sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref; + const dependee: InternPool.Dependee = switch (cau.owner.unwrap()) { + .none => return, // there are no dependencies on a `comptime` decl! + .nav => |nav_index| .{ .nav_val = nav_index }, + .type => |ty| .{ .interned = ty }, + }; + + if (invalidate) { + // This dependency was marked as PO, meaning dependees were waiting + // on its analysis result, and it has turned out to be outdated. + // Update dependees accordingly. 
+ try zcu.markDependeeOutdated(.marked_po, dependee); + } else { + // This dependency was previously PO, but turned out to be up-to-date. + // We do not need to queue successive analysis. + try zcu.markPoDependeeUpToDate(dependee); + } } + if (analysis_fail) return error.AnalysisFail; +} + +fn ensureCauAnalyzedInner( + pt: Zcu.PerThread, + cau_index: InternPool.Cau.Index, + cau_outdated: bool, +) Zcu.SemaError!SemaCauResult { + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + + const cau = ip.getCau(cau_index); + const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); + const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail; // TODO: document this elsewhere mlugg! @@ -550,22 +633,6 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu return error.AnalysisFail; } - if (!cau_outdated) { - // We can trust the current information about this `Cau`. - if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { - return error.AnalysisFail; - } - // If it wasn't failed and wasn't marked outdated, then either... - // * it is a type and is up-to-date, or - // * it is a `comptime` decl and is up-to-date, or - // * it is another decl and is EITHER up-to-date OR never-referenced (so unresolved) - // We just need to check for that last case. - switch (cau.owner.unwrap()) { - .type, .none => return, - .nav => |nav| if (ip.getNav(nav).status == .resolved) return, - } - } - // `cau_outdated` can be true in the initial update for `comptime` declarations, // so this isn't a `dev.check`. if (cau_outdated and dev.env.supports(.incremental)) { @@ -573,76 +640,34 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu // prior to re-analysis. zcu.deleteUnitExports(anal_unit); zcu.deleteUnitReferences(anal_unit); - } - - const sema_result: SemaCauResult = res: { - if (inst_info.inst == .main_struct_inst) { - // Note that this is definitely a *recreation* due to outdated, because - // this instruction indicates that `cau.owner` is a `type`, which only - // reaches here if `cau_outdated`. - try pt.recreateFileRoot(inst_info.file); - break :res .{ - .invalidate_decl_val = true, - .invalidate_decl_ref = true, - }; + if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| { + kv.value.destroy(zcu.gpa); } + _ = zcu.transitive_failed_analysis.swapRemove(anal_unit); + } - const decl_prog_node = zcu.sema_prog_node.start(switch (cau.owner.unwrap()) { - .nav => |nav| ip.getNav(nav).fqn.toSlice(ip), - .type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), - .none => "comptime", - }, 0); - defer decl_prog_node.end(); - - break :res pt.semaCau(cau_index) catch |err| switch (err) { - error.AnalysisFail => { - if (!zcu.failed_analysis.contains(anal_unit)) { - // If this `Cau` caused the error, it would have an entry in `failed_analysis`. - // Since it does not, this must be a transitive failure. 
- try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); - } - return error.AnalysisFail; - }, - error.GenericPoison => unreachable, - error.ComptimeBreak => unreachable, - error.ComptimeReturn => unreachable, - error.OutOfMemory => { - try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - try zcu.retryable_failures.append(gpa, anal_unit); - zcu.failed_analysis.putAssumeCapacityNoClobber(anal_unit, try Zcu.ErrorMsg.create( - gpa, - .{ .base_node_inst = cau.zir_index, .offset = Zcu.LazySrcLoc.Offset.nodeOffset(0) }, - "unable to analyze: OutOfMemory", - .{}, - )); - return error.AnalysisFail; - }, + if (inst_info.inst == .main_struct_inst) { + // Note that this is definitely a *recreation* due to outdated, because + // this instruction indicates that `cau.owner` is a `type`, which only + // reaches here if `cau_outdated`. + try pt.recreateFileRoot(inst_info.file); + return .{ + .invalidate_decl_val = true, + .invalidate_decl_ref = true, }; - }; - - if (!cau_outdated) { - // We definitely don't need to do any dependency tracking, so our work is done. - return; } - // TODO: we do not yet have separate dependencies for decl values vs types. - const invalidate = sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref; - const dependee: InternPool.Dependee = switch (cau.owner.unwrap()) { - .none => return, // there are no dependencies on a `comptime` decl! - .nav => |nav_index| .{ .nav_val = nav_index }, - .type => |ty| .{ .interned = ty }, - }; + const decl_prog_node = zcu.sema_prog_node.start(switch (cau.owner.unwrap()) { + .nav => |nav| ip.getNav(nav).fqn.toSlice(ip), + .type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), + .none => "comptime", + }, 0); + defer decl_prog_node.end(); - if (invalidate) { - // This dependency was marked as PO, meaning dependees were waiting - // on its analysis result, and it has turned out to be outdated. - // Update dependees accordingly. - try zcu.markDependeeOutdated(dependee); - } else { - // This dependency was previously PO, but turned out to be up-to-date. - // We do not need to queue successive analysis. - try zcu.markPoDependeeUpToDate(dependee); - } + return pt.semaCau(cau_index) catch |err| switch (err) { + error.GenericPoison, error.ComptimeBreak, error.ComptimeReturn => unreachable, + error.AnalysisFail, error.OutOfMemory => |e| return e, + }; } pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: InternPool.Index) Zcu.SemaError!void { @@ -660,7 +685,64 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter const func = zcu.funcInfo(maybe_coerced_func_index); - log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)}); + //log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)}); + + const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const func_outdated = zcu.outdated.swapRemove(anal_unit) or + zcu.potentially_outdated.swapRemove(anal_unit); + + if (func_outdated) { + _ = zcu.outdated_ready.swapRemove(anal_unit); + } else { + // We can trust the current information about this function. 
+ if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { + return error.AnalysisFail; + } + switch (func.analysisUnordered(ip).state) { + .unreferenced => {}, // this is the first reference + .queued => {}, // we're waiting on first-time analysis + .analyzed => return, // up-to-date + } + } + + const ies_outdated, const analysis_fail = if (pt.ensureFuncBodyAnalyzedInner(func_index, func_outdated)) |result| + .{ result.ies_outdated, false } + else |err| switch (err) { + error.AnalysisFail => res: { + if (!zcu.failed_analysis.contains(anal_unit)) { + // If this function caused the error, it would have an entry in `failed_analysis`. + // Since it does not, this must be a transitive failure. + try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); + } + break :res .{ false, true }; // we treat errors as up-to-date IES, since those uses would just trigger a transitive error + }, + error.OutOfMemory => return error.OutOfMemory, // TODO: graceful handling like `ensureCauAnalyzed` + }; + + if (func_outdated) { + if (ies_outdated) { + log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)}); + try zcu.markDependeeOutdated(.marked_po, .{ .interned = func_index }); + } else { + log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)}); + try zcu.markPoDependeeUpToDate(.{ .interned = func_index }); + } + } + + if (analysis_fail) return error.AnalysisFail; +} + +fn ensureFuncBodyAnalyzedInner( + pt: Zcu.PerThread, + func_index: InternPool.Index, + func_outdated: bool, +) Zcu.SemaError!struct { ies_outdated: bool } { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const func = zcu.funcInfo(func_index); + const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); // Here's an interesting question: is this function actually valid? 
// Maybe the signature changed, so we'll end up creating a whole different `func` @@ -681,7 +763,9 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter }); if (ip.isRemoved(func_index) or (func.generic_owner != .none and ip.isRemoved(func.generic_owner))) { - try zcu.markDependeeOutdated(.{ .interned = func_index }); // IES + if (func_outdated) { + try zcu.markDependeeOutdated(.marked_po, .{ .interned = func_index }); // IES + } ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); ip.remove(pt.tid, func_index); @panic("TODO: remove orphaned function from binary"); @@ -694,15 +778,14 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter else .none; - const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); - const func_outdated = zcu.outdated.swapRemove(anal_unit) or - zcu.potentially_outdated.swapRemove(anal_unit); - if (func_outdated) { - _ = zcu.outdated_ready.swapRemove(anal_unit); dev.check(.incremental); zcu.deleteUnitExports(anal_unit); zcu.deleteUnitReferences(anal_unit); + if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| { + kv.value.destroy(gpa); + } + _ = zcu.transitive_failed_analysis.swapRemove(anal_unit); } if (!func_outdated) { @@ -713,7 +796,7 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter switch (func.analysisUnordered(ip).state) { .unreferenced => {}, // this is the first reference .queued => {}, // we're waiting on first-time analysis - .analyzed => return, // up-to-date + .analyzed => return .{ .ies_outdated = false }, // up-to-date } } @@ -722,28 +805,11 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter if (func_outdated) "outdated" else "never analyzed", }); - var air = pt.analyzeFnBody(func_index) catch |err| switch (err) { - error.AnalysisFail => { - if (!zcu.failed_analysis.contains(anal_unit)) { - // If this function caused the error, it would have an entry in `failed_analysis`. - // Since it does not, this must be a transitive failure. - try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); - } - return error.AnalysisFail; - }, - error.OutOfMemory => return error.OutOfMemory, - }; + var air = try pt.analyzeFnBody(func_index); errdefer air.deinit(gpa); - if (func_outdated) { - if (!func.analysisUnordered(ip).inferred_error_set or func.resolvedErrorSetUnordered(ip) != old_resolved_ies) { - log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)}); - try zcu.markDependeeOutdated(.{ .interned = func_index }); - } else { - log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)}); - try zcu.markPoDependeeUpToDate(.{ .interned = func_index }); - } - } + const ies_outdated = func_outdated and + (!func.analysisUnordered(ip).inferred_error_set or func.resolvedErrorSetUnordered(ip) != old_resolved_ies); const comp = zcu.comp; @@ -752,13 +818,15 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) { air.deinit(gpa); - return; + return .{ .ies_outdated = ies_outdated }; } try comp.queueJob(.{ .codegen_func = .{ .func = func_index, .air = air, } }); + + return .{ .ies_outdated = ies_outdated }; } /// Takes ownership of `air`, even on error. 
@@ -1935,6 +2003,8 @@ const ScanDeclIter = struct { .@"comptime" => cau: { const cau = existing_cau orelse try ip.createComptimeCau(gpa, pt.tid, tracked_inst, namespace_index); + try namespace.other_decls.append(gpa, cau); + // For a `comptime` declaration, whether to re-analyze is based solely on whether the // `Cau` is outdated. So, add this one to `outdated` and `outdated_ready` if not already. const unit = InternPool.AnalUnit.wrap(.{ .cau = cau }); -- cgit v1.2.3 From 93f2d9a77f659a344fc0c003ce149885fc7df99a Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Aug 2024 04:02:31 +0100 Subject: Zcu: typo We were accidentally over-reporting most `namespace_name` deps and *not* reporting some actually outdated ones! --- src/Zcu/PerThread.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/Zcu/PerThread.zig') diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 3c22abb4b8..5f1856f6eb 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -432,7 +432,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { new_zir.nullTerminatedString(name_zir), .no_embedded_nulls, ); - if (!old_names.swapRemove(name_ip)) continue; + if (old_names.swapRemove(name_ip)) continue; // Name added any_change = true; try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace_name = .{ -- cgit v1.2.3 From 8f8fe892761c9c5c9f7b89d8c53ac287d02b1474 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Aug 2024 04:06:11 +0100 Subject: Zcu: panic on usingnamespace with -fincremental --- src/Zcu/PerThread.zig | 3 +++ 1 file changed, 3 insertions(+) (limited to 'src/Zcu/PerThread.zig') diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 5f1856f6eb..2720edd2f2 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -2040,6 +2040,9 @@ const ScanDeclIter = struct { const want_analysis = switch (kind) { .@"comptime" => unreachable, .@"usingnamespace" => a: { + if (comp.incremental) { + @panic("'usingnamespace' is not supported by incremental compilation"); + } if (declaration.flags.is_pub) { try namespace.pub_usingnamespace.append(gpa, nav); } else { -- cgit v1.2.3 From 46388d338a93a35d139866411f80115a03b30a6a Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 14 Aug 2024 08:10:49 +0100 Subject: InternPool: don't remove outdated types When a type becomes outdated, there will still be lingering references to the old index -- for instance, any declaration whose value was that type holds a reference to that index. These references may live for an arbitrarily long time in some cases. So, we can't just remove the type from the pool -- the old `Index` must remain valid! Instead, we want to preserve the old `Index`, but avoid it from ever appearing in lookups. (It's okay if analysis of something referencing the old `Index` does weird stuff -- such analysis are guaranteed by the incremental compilation model to always be unreferenced.) So, we use the new `InternPool.putKeyReplace` to replace the shard entry for this index with the newly-created index. 
--- src/InternPool.zig | 71 ++++++++++++++++++++++++++++++++++++++++++++++----- src/Sema.zig | 33 ++++++++++++------------ src/Zcu/PerThread.zig | 15 +++++------ 3 files changed, 87 insertions(+), 32 deletions(-) (limited to 'src/Zcu/PerThread.zig') diff --git a/src/InternPool.zig b/src/InternPool.zig index 91a58e10e7..7c1b37d3d4 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -7077,6 +7077,7 @@ fn getOrPutKeyEnsuringAdditionalCapacity( const index = entry.acquire(); if (index == .none) break; if (entry.hash != hash) continue; + if (ip.isRemoved(index)) continue; if (ip.indexToKey(index).eql(key, ip)) return .{ .existing = index }; } shard.mutate.map.mutex.lock(); @@ -7151,6 +7152,43 @@ fn getOrPutKeyEnsuringAdditionalCapacity( .map_index = map_index, } }; } +/// Like `getOrPutKey`, but asserts that the key already exists, and prepares to replace +/// its shard entry with a new `Index` anyway. After finalizing this, the old index remains +/// valid (in that `indexToKey` and similar queries will behave as before), but it will +/// never be returned from a lookup (`getOrPutKey` etc). +/// This is used by incremental compilation when an existing container type is outdated. In +/// this case, the type must be recreated at a new `InternPool.Index`, but the old index must +/// remain valid since now-unreferenced `AnalUnit`s may retain references to it. The old index +/// will be cleaned up when the `Zcu` undergoes garbage collection. +fn putKeyReplace( + ip: *InternPool, + tid: Zcu.PerThread.Id, + key: Key, +) GetOrPutKey { + const full_hash = key.hash64(ip); + const hash: u32 = @truncate(full_hash >> 32); + const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))]; + shard.mutate.map.mutex.lock(); + errdefer shard.mutate.map.mutex.unlock(); + const map = shard.shared.map; + const map_mask = map.header().mask(); + var map_index = hash; + while (true) : (map_index += 1) { + map_index &= map_mask; + const entry = &map.entries[map_index]; + const index = entry.value; + assert(index != .none); // key not present + if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) { + break; // we found the entry to replace + } + } + return .{ .new = .{ + .ip = ip, + .tid = tid, + .shard = shard, + .map_index = map_index, + } }; +} pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index { var gop = try ip.getOrPutKey(gpa, tid, key); @@ -7990,8 +8028,11 @@ pub fn getUnionType( gpa: Allocator, tid: Zcu.PerThread.Id, ini: UnionTypeInit, + /// If it is known that there is an existing type with this key which is outdated, + /// this is passed as `true`, and the type is replaced with one at a fresh index. 
+ replace_existing: bool, ) Allocator.Error!WipNamespaceType.Result { - var gop = try ip.getOrPutKey(gpa, tid, .{ .union_type = switch (ini.key) { + const key: Key = .{ .union_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -8000,7 +8041,11 @@ pub fn getUnionType( .zir_index = r.zir_index, .type_hash = r.type_hash, } }, - } }); + } }; + var gop = if (replace_existing) + ip.putKeyReplace(tid, key) + else + try ip.getOrPutKey(gpa, tid, key); defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; @@ -8166,8 +8211,11 @@ pub fn getStructType( gpa: Allocator, tid: Zcu.PerThread.Id, ini: StructTypeInit, + /// If it is known that there is an existing type with this key which is outdated, + /// this is passed as `true`, and the type is replaced with one at a fresh index. + replace_existing: bool, ) Allocator.Error!WipNamespaceType.Result { - var gop = try ip.getOrPutKey(gpa, tid, .{ .struct_type = switch (ini.key) { + const key: Key = .{ .struct_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -8176,7 +8224,11 @@ pub fn getStructType( .zir_index = r.zir_index, .type_hash = r.type_hash, } }, - } }); + } }; + var gop = if (replace_existing) + ip.putKeyReplace(tid, key) + else + try ip.getOrPutKey(gpa, tid, key); defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; @@ -9200,8 +9252,11 @@ pub fn getEnumType( gpa: Allocator, tid: Zcu.PerThread.Id, ini: EnumTypeInit, + /// If it is known that there is an existing type with this key which is outdated, + /// this is passed as `true`, and the type is replaced with one at a fresh index. + replace_existing: bool, ) Allocator.Error!WipEnumType.Result { - var gop = try ip.getOrPutKey(gpa, tid, .{ .enum_type = switch (ini.key) { + const key: Key = .{ .enum_type = switch (ini.key) { .declared => |d| .{ .declared = .{ .zir_index = d.zir_index, .captures = .{ .external = d.captures }, @@ -9210,7 +9265,11 @@ pub fn getEnumType( .zir_index = r.zir_index, .type_hash = r.type_hash, } }, - } }); + } }; + var gop = if (replace_existing) + ip.putKeyReplace(tid, key) + else + try ip.getOrPutKey(gpa, tid, key); defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; diff --git a/src/Sema.zig b/src/Sema.zig index 1aedd745ea..d760927c4d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2724,9 +2724,9 @@ fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) { } /// Given a type just looked up in the `InternPool`, check whether it is -/// considered outdated on this update. If so, remove it from the pool -/// and return `true`. -fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { +/// considered outdated on this update. If so, returns `true`, and the +/// caller must replace the outdated type with a fresh one. 
+fn checkOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; @@ -2745,7 +2745,6 @@ fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { if (!was_outdated) return false; _ = zcu.outdated_ready.swapRemove(cau_unit); zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, cau_unit); - zcu.intern_pool.remove(pt.tid, ty); try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty }); return true; } @@ -2815,14 +2814,14 @@ fn zirStructDecl( .captures = captures, } }, }; - const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, pt.tid, struct_init)) { + const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, pt.tid, struct_init, false)) { .existing => |ty| wip: { - if (!try sema.maybeRemoveOutdatedType(ty)) { + if (!try sema.checkOutdatedType(ty)) { try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); } - break :wip (try ip.getStructType(gpa, pt.tid, struct_init)).wip; + break :wip (try ip.getStructType(gpa, pt.tid, struct_init, true)).wip; }, .wip => |wip| wip, }); @@ -3041,14 +3040,14 @@ fn zirEnumDecl( .captures = captures, } }, }; - const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, pt.tid, enum_init)) { + const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, pt.tid, enum_init, false)) { .existing => |ty| wip: { - if (!try sema.maybeRemoveOutdatedType(ty)) { + if (!try sema.checkOutdatedType(ty)) { try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); } - break :wip (try ip.getEnumType(gpa, pt.tid, enum_init)).wip; + break :wip (try ip.getEnumType(gpa, pt.tid, enum_init, true)).wip; }, .wip => |wip| wip, }); @@ -3311,14 +3310,14 @@ fn zirUnionDecl( .captures = captures, } }, }; - const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, pt.tid, union_init)) { + const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, pt.tid, union_init, false)) { .existing => |ty| wip: { - if (!try sema.maybeRemoveOutdatedType(ty)) { + if (!try sema.checkOutdatedType(ty)) { try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); } - break :wip (try ip.getUnionType(gpa, pt.tid, union_init)).wip; + break :wip (try ip.getUnionType(gpa, pt.tid, union_init, true)).wip; }, .wip => |wip| wip, }); @@ -3407,7 +3406,7 @@ fn zirOpaqueDecl( }; // No `wrapWipTy` needed as no std.builtin types are opaque. const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) { - // No `maybeRemoveOutdatedType` as opaque types are never outdated. + // No `checkOutdatedType` as opaque types are never outdated. 
.existing => |ty| { try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); @@ -22054,7 +22053,7 @@ fn reifyEnum( .zir_index = tracked_inst, .type_hash = hasher.final(), } }, - })) { + }, false)) { .wip => |wip| wip, .existing => |ty| { try sema.declareDependency(.{ .interned = ty }); @@ -22224,7 +22223,7 @@ fn reifyUnion( .zir_index = tracked_inst, .type_hash = hasher.final(), } }, - })) { + }, false)) { .wip => |wip| wip, .existing => |ty| { try sema.declareDependency(.{ .interned = ty }); @@ -22494,7 +22493,7 @@ fn reifyStruct( .zir_index = tracked_inst, .type_hash = hasher.final(), } }, - })) { + }, false)) { .wip => |wip| wip, .existing => |ty| { try sema.declareDependency(.{ .interned = ty }); diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 2720edd2f2..83a7dce4fc 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -925,6 +925,7 @@ fn createFileRootStruct( pt: Zcu.PerThread, file_index: Zcu.File.Index, namespace_index: Zcu.Namespace.Index, + replace_existing: bool, ) Allocator.Error!InternPool.Index { const zcu = pt.zcu; const gpa = zcu.gpa; @@ -968,7 +969,7 @@ fn createFileRootStruct( .zir_index = tracked_inst, .captures = &.{}, } }, - })) { + }, replace_existing)) { .existing => unreachable, // we wouldn't be analysing the file root if this type existed .wip => |wip| wip, }; @@ -1023,8 +1024,7 @@ fn recreateFileRoot(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError zcu.gpa, InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }), ); - ip.remove(pt.tid, file_root_type); - _ = try pt.createFileRootStruct(file_index, namespace_index); + _ = try pt.createFileRootStruct(file_index, namespace_index, true); } /// Re-scan the namespace of a file's root struct type on an incremental update. @@ -1062,8 +1062,6 @@ fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator. try pt.scanNamespace(namespace_index, decls); } -/// Regardless of the file status, will create a `Decl` if none exists so that we can track -/// dependencies and re-analyze when the file becomes outdated. fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1083,7 +1081,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { .owner_type = undefined, // set in `createFileRootStruct` .file_scope = file_index, }); - const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index); + const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index, false); errdefer zcu.intern_pool.remove(pt.tid, struct_ty); switch (zcu.comp.cache_use) { @@ -1153,11 +1151,10 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult { // This declaration has no value so is definitely not a std.builtin type. break :ip_index .none; }, - .type => |ty| { + .type => { // This is an incremental update, and this type is being re-analyzed because it is outdated. // The type must be recreated at a new `InternPool.Index`. - // Remove it from the InternPool and mark it outdated so that creation sites are re-analyzed. - ip.remove(pt.tid, ty); + // Mark it outdated so that creation sites are re-analyzed. return .{ .invalidate_decl_val = true, .invalidate_decl_ref = true, -- cgit v1.2.3 From 84c2ebd6c6b16752d8d030d5904d0a525283cbf5 Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 16 Aug 2024 12:46:52 +0100 Subject: frontend: incremental compilation progress Another big commit, sorry! 
This commit makes all fixes necessary for incremental updates of the compiler itself (specifically, adding a breakpoint to `zirCompileLog`) to succeed, at least on the frontend. The biggest change here is a reform to how types are handled. It works like this: * When a type is first created in `zirStructDecl` etc, its namespace is scanned. If the type requires resolution, an `interned` dependency is declared for the containing `AnalUnit`. * `zirThis` also declared an `interned` dependency for its `AnalUnit` on the namespace's owner type. * If the type's namespace changes, the surrounding source declaration changes hash, so `zirStructDecl` etc will be hit again. We check whether the namespace has been scanned this generation, and re-scan it if not. * Namespace lookups also check whether the namespace in question requires a re-scan based on the generation. This is because there's no guarantee that the `zirStructDecl` is re-analyzed before the namespace lookup is re-analyzed. * If a type's structure (essentially its fields) change, then the type's `Cau` is considered outdated. When the type is re-analyzed due to being outdated, or the `zirStructDecl` is re-analyzed by being transitively outdated, or a corresponding `zirThis` is re-analyzed by being transitively outdated, the struct type is recreated at a new `InternPool` index. The namespace's owner is updated (but not re-scanned, since that is handled by the mechanisms above), and the old type, while remaining a valid `Index`, is removed from the map metadata so it will never be found by lookups. `zirStructDecl` and `zirThis` store an `interned` dependency on the *new* type. --- src/Compilation.zig | 2 + src/InternPool.zig | 113 +++++++-- src/Sema.zig | 452 +++++++++++++++++++--------------- src/Zcu.zig | 61 ++--- src/Zcu/PerThread.zig | 654 +++++++++++++++++++++++++++++++++++++++++++------- 5 files changed, 949 insertions(+), 333 deletions(-) (limited to 'src/Zcu/PerThread.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index af98fc6f6e..9c66d17507 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3569,6 +3569,8 @@ pub fn performAllTheWork( mod.sema_prog_node = std.Progress.Node.none; mod.codegen_prog_node.end(); mod.codegen_prog_node = std.Progress.Node.none; + + mod.generation += 1; }; try comp.performAllTheWorkInner(main_progress_node); if (!InternPool.single_threaded) if (comp.codegen_work.job_error) |job_error| return job_error; diff --git a/src/InternPool.zig b/src/InternPool.zig index 7c1b37d3d4..8259f94812 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -684,10 +684,6 @@ pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyI .ip = ip, .next_entry = .none, }; - if (ip.dep_entries.items[@intFromEnum(first_entry)].depender == .none) return .{ - .ip = ip, - .next_entry = .none, - }; return .{ .ip = ip, .next_entry = first_entry.toOptional(), @@ -724,7 +720,6 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend if (gop.found_existing and ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].depender == .none) { // Dummy entry, so we can reuse it rather than allocating a new one! 
- ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].next = .none; break :new_index gop.value_ptr.*; } @@ -732,7 +727,12 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend const new_index: DepEntry.Index, const ptr = if (ip.free_dep_entries.popOrNull()) |new_index| new: { break :new .{ new_index, &ip.dep_entries.items[@intFromEnum(new_index)] }; } else .{ @enumFromInt(ip.dep_entries.items.len), ip.dep_entries.addOneAssumeCapacity() }; - ptr.next = if (gop.found_existing) gop.value_ptr.*.toOptional() else .none; + if (gop.found_existing) { + ptr.next = gop.value_ptr.*.toOptional(); + ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].prev = new_index.toOptional(); + } else { + ptr.next = .none; + } gop.value_ptr.* = new_index; break :new_index new_index; }, @@ -754,10 +754,9 @@ pub const NamespaceNameKey = struct { }; pub const DepEntry = extern struct { - /// If null, this is a dummy entry - all other fields are `undefined`. It is - /// the first and only entry in one of `intern_pool.*_deps`, and does not - /// appear in any list by `first_dependency`, but is not in - /// `free_dep_entries` since `*_deps` stores a reference to it. + /// If null, this is a dummy entry. `next_dependee` is undefined. This is the first + /// entry in one of `*_deps`, and does not appear in any list by `first_dependency`, + /// but is not in `free_dep_entries` since `*_deps` stores a reference to it. depender: AnalUnit.Optional, /// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee. /// Used to iterate all dependers for a given dependee during an update. @@ -2689,7 +2688,12 @@ pub const Key = union(enum) { .variable => |a_info| { const b_info = b.variable; - return a_info.owner_nav == b_info.owner_nav; + return a_info.owner_nav == b_info.owner_nav and + a_info.ty == b_info.ty and + a_info.init == b_info.init and + a_info.lib_name == b_info.lib_name and + a_info.is_threadlocal == b_info.is_threadlocal and + a_info.is_weak_linkage == b_info.is_weak_linkage; }, .@"extern" => |a_info| { const b_info = b.@"extern"; @@ -8016,6 +8020,10 @@ pub const UnionTypeInit = struct { zir_index: TrackedInst.Index, captures: []const CaptureValue, }, + declared_owned_captures: struct { + zir_index: TrackedInst.Index, + captures: CaptureValue.Slice, + }, reified: struct { zir_index: TrackedInst.Index, type_hash: u64, @@ -8037,6 +8045,10 @@ pub fn getUnionType( .zir_index = d.zir_index, .captures = .{ .external = d.captures }, } }, + .declared_owned_captures => |d| .{ .declared = .{ + .zir_index = d.zir_index, + .captures = .{ .owned = d.captures }, + } }, .reified => |r| .{ .reified = .{ .zir_index = r.zir_index, .type_hash = r.type_hash, @@ -8060,7 +8072,7 @@ pub fn getUnionType( // TODO: fmt bug // zig fmt: off switch (ini.key) { - .declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len, + inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len, .reified => 2, // type_hash: PackedU64 } + // zig fmt: on @@ -8069,7 +8081,10 @@ pub fn getUnionType( const extra_index = addExtraAssumeCapacity(extra, Tag.TypeUnion{ .flags = .{ - .any_captures = ini.key == .declared and ini.key.declared.captures.len != 0, + .any_captures = switch (ini.key) { + inline .declared, .declared_owned_captures => |d| d.captures.len != 0, + .reified => false, + }, .runtime_tag = ini.flags.runtime_tag, .any_aligned_fields = ini.flags.any_aligned_fields, .layout = ini.flags.layout, @@ -8078,7 +8093,10 @@ pub fn getUnionType( 
.assumed_runtime_bits = ini.flags.assumed_runtime_bits, .assumed_pointer_aligned = ini.flags.assumed_pointer_aligned, .alignment = ini.flags.alignment, - .is_reified = ini.key == .reified, + .is_reified = switch (ini.key) { + .declared, .declared_owned_captures => false, + .reified => true, + }, }, .fields_len = ini.fields_len, .size = std.math.maxInt(u32), @@ -8102,6 +8120,10 @@ pub fn getUnionType( extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); }, + .declared_owned_captures => |d| if (d.captures.len != 0) { + extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}); + }, .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), } @@ -8199,6 +8221,10 @@ pub const StructTypeInit = struct { zir_index: TrackedInst.Index, captures: []const CaptureValue, }, + declared_owned_captures: struct { + zir_index: TrackedInst.Index, + captures: CaptureValue.Slice, + }, reified: struct { zir_index: TrackedInst.Index, type_hash: u64, @@ -8220,6 +8246,10 @@ pub fn getStructType( .zir_index = d.zir_index, .captures = .{ .external = d.captures }, } }, + .declared_owned_captures => |d| .{ .declared = .{ + .zir_index = d.zir_index, + .captures = .{ .owned = d.captures }, + } }, .reified => |r| .{ .reified = .{ .zir_index = r.zir_index, .type_hash = r.type_hash, @@ -8251,7 +8281,7 @@ pub fn getStructType( // TODO: fmt bug // zig fmt: off switch (ini.key) { - .declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len, + inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len, .reified => 2, // type_hash: PackedU64 } + // zig fmt: on @@ -8267,10 +8297,16 @@ pub fn getStructType( .backing_int_ty = .none, .names_map = names_map, .flags = .{ - .any_captures = ini.key == .declared and ini.key.declared.captures.len != 0, + .any_captures = switch (ini.key) { + inline .declared, .declared_owned_captures => |d| d.captures.len != 0, + .reified => false, + }, .field_inits_wip = false, .inits_resolved = ini.inits_resolved, - .is_reified = ini.key == .reified, + .is_reified = switch (ini.key) { + .declared, .declared_owned_captures => false, + .reified => true, + }, }, }); try items.append(.{ @@ -8282,6 +8318,10 @@ pub fn getStructType( extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); }, + .declared_owned_captures => |d| if (d.captures.len != 0) { + extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}); + }, .reified => |r| { _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)); }, @@ -8309,7 +8349,7 @@ pub fn getStructType( // TODO: fmt bug // zig fmt: off switch (ini.key) { - .declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len, + inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len, .reified => 2, // type_hash: PackedU64 } + // zig fmt: on @@ -8324,7 +8364,10 @@ pub fn getStructType( .fields_len = ini.fields_len, .size = std.math.maxInt(u32), .flags = .{ - .any_captures = ini.key == .declared and ini.key.declared.captures.len != 0, + .any_captures = switch (ini.key) { + inline .declared, .declared_owned_captures => |d| d.captures.len != 0, + .reified => false, + }, .is_extern = is_extern, .known_non_opv = ini.known_non_opv, .requires_comptime = ini.requires_comptime, @@ -8342,7 +8385,10 @@ pub fn 
getStructType( .field_inits_wip = false, .inits_resolved = ini.inits_resolved, .fully_resolved = false, - .is_reified = ini.key == .reified, + .is_reified = switch (ini.key) { + .declared, .declared_owned_captures => false, + .reified => true, + }, }, }); try items.append(.{ @@ -8354,6 +8400,10 @@ pub fn getStructType( extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); }, + .declared_owned_captures => |d| if (d.captures.len != 0) { + extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); + extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}); + }, .reified => |r| { _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)); }, @@ -9157,6 +9207,10 @@ pub const EnumTypeInit = struct { zir_index: TrackedInst.Index, captures: []const CaptureValue, }, + declared_owned_captures: struct { + zir_index: TrackedInst.Index, + captures: CaptureValue.Slice, + }, reified: struct { zir_index: TrackedInst.Index, type_hash: u64, @@ -9261,6 +9315,10 @@ pub fn getEnumType( .zir_index = d.zir_index, .captures = .{ .external = d.captures }, } }, + .declared_owned_captures => |d| .{ .declared = .{ + .zir_index = d.zir_index, + .captures = .{ .owned = d.captures }, + } }, .reified => |r| .{ .reified = .{ .zir_index = r.zir_index, .type_hash = r.type_hash, @@ -9288,7 +9346,7 @@ pub fn getEnumType( // TODO: fmt bug // zig fmt: off switch (ini.key) { - .declared => |d| d.captures.len, + inline .declared, .declared_owned_captures => |d| d.captures.len, .reified => 2, // type_hash: PackedU64 } + // zig fmt: on @@ -9298,7 +9356,7 @@ pub fn getEnumType( const extra_index = addExtraAssumeCapacity(extra, EnumAuto{ .name = undefined, // set by `prepare` .captures_len = switch (ini.key) { - .declared => |d| @intCast(d.captures.len), + inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len), .reified => std.math.maxInt(u32), }, .namespace = undefined, // set by `prepare` @@ -9317,6 +9375,7 @@ pub fn getEnumType( extra.appendAssumeCapacity(undefined); // `cau` will be set by `finish` switch (ini.key) { .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), + .declared_owned_captures => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}), .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), } const names_start = extra.mutate.len; @@ -9347,7 +9406,7 @@ pub fn getEnumType( // TODO: fmt bug // zig fmt: off switch (ini.key) { - .declared => |d| d.captures.len, + inline .declared, .declared_owned_captures => |d| d.captures.len, .reified => 2, // type_hash: PackedU64 } + // zig fmt: on @@ -9358,7 +9417,7 @@ pub fn getEnumType( const extra_index = addExtraAssumeCapacity(extra, EnumExplicit{ .name = undefined, // set by `prepare` .captures_len = switch (ini.key) { - .declared => |d| @intCast(d.captures.len), + inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len), .reified => std.math.maxInt(u32), }, .namespace = undefined, // set by `prepare` @@ -9382,6 +9441,7 @@ pub fn getEnumType( extra.appendAssumeCapacity(undefined); // `cau` will be set by `finish` switch (ini.key) { .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), + .declared_owned_captures => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}), .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), } const names_start = extra.mutate.len; @@ -9445,10 +9505,12 @@ pub fn getGeneratedTagEnumType( .tid = tid, .index = 
items.mutate.len, }, ip); + const parent_namespace = ip.namespacePtr(ini.parent_namespace); const namespace = try ip.createNamespace(gpa, tid, .{ .parent = ini.parent_namespace.toOptional(), .owner_type = enum_index, - .file_scope = ip.namespacePtr(ini.parent_namespace).file_scope, + .file_scope = parent_namespace.file_scope, + .generation = parent_namespace.generation, }); errdefer ip.destroyNamespace(tid, namespace); @@ -11044,6 +11106,7 @@ pub fn destroyNamespace( .parent = undefined, .file_scope = undefined, .owner_type = undefined, + .generation = undefined, }; @field(namespace, Local.namespace_next_free_field) = @enumFromInt(local.mutate.namespaces.free_list); diff --git a/src/Sema.zig b/src/Sema.zig index dab4262bdd..c4345c4464 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2723,32 +2723,6 @@ fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) { return new; } -/// Given a type just looked up in the `InternPool`, check whether it is -/// considered outdated on this update. If so, returns `true`, and the -/// caller must replace the outdated type with a fresh one. -fn checkOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { - const pt = sema.pt; - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - - if (!zcu.comp.incremental) return false; - - const cau_index = switch (ip.indexToKey(ty)) { - .struct_type => ip.loadStructType(ty).cau.unwrap().?, - .union_type => ip.loadUnionType(ty).cau, - .enum_type => ip.loadEnumType(ty).cau.unwrap().?, - else => unreachable, - }; - const cau_unit = AnalUnit.wrap(.{ .cau = cau_index }); - const was_outdated = zcu.outdated.swapRemove(cau_unit) or - zcu.potentially_outdated.swapRemove(cau_unit); - if (!was_outdated) return false; - _ = zcu.outdated_ready.swapRemove(cau_unit); - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, cau_unit); - try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty }); - return true; -} - fn zirStructDecl( sema: *Sema, block: *Block, @@ -2815,13 +2789,16 @@ fn zirStructDecl( } }, }; const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, pt.tid, struct_init, false)) { - .existing => |ty| wip: { - if (!try sema.checkOutdatedType(ty)) { - try sema.declareDependency(.{ .interned = ty }); - try sema.addTypeReferenceEntry(src, ty); - return Air.internedToRef(ty); - } - break :wip (try ip.getStructType(gpa, pt.tid, struct_init, true)).wip; + .existing => |ty| { + const new_ty = try pt.ensureTypeUpToDate(ty, false); + + // Make sure we update the namespace if the declaration is re-analyzed, to pick + // up on e.g. changed comptime decls. 
+ try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod)); + + try sema.declareDependency(.{ .interned = new_ty }); + try sema.addTypeReferenceEntry(src, new_ty); + return Air.internedToRef(new_ty); }, .wip => |wip| wip, }); @@ -2839,6 +2816,7 @@ fn zirStructDecl( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); errdefer pt.destroyNamespace(new_namespace_index); @@ -2977,7 +2955,6 @@ fn zirEnumDecl( const tracked_inst = try block.trackZir(inst); const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0) }; - const tag_ty_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .node_offset_container_tag = 0 } }; const tag_type_ref = if (small.has_tag_type) blk: { const tag_type_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); @@ -3041,13 +3018,16 @@ fn zirEnumDecl( } }, }; const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, pt.tid, enum_init, false)) { - .existing => |ty| wip: { - if (!try sema.checkOutdatedType(ty)) { - try sema.declareDependency(.{ .interned = ty }); - try sema.addTypeReferenceEntry(src, ty); - return Air.internedToRef(ty); - } - break :wip (try ip.getEnumType(gpa, pt.tid, enum_init, true)).wip; + .existing => |ty| { + const new_ty = try pt.ensureTypeUpToDate(ty, false); + + // Make sure we update the namespace if the declaration is re-analyzed, to pick + // up on e.g. changed comptime decls. + try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod)); + + try sema.declareDependency(.{ .interned = new_ty }); + try sema.addTypeReferenceEntry(src, new_ty); + return Air.internedToRef(new_ty); }, .wip => |wip| wip, }); @@ -3071,19 +3051,12 @@ fn zirEnumDecl( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); errdefer if (!done) pt.destroyNamespace(new_namespace_index); const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index); - if (pt.zcu.comp.incremental) { - try mod.intern_pool.addDependency( - gpa, - AnalUnit.wrap(.{ .cau = new_cau_index }), - .{ .src_hash = try block.trackZir(inst) }, - ); - } - try pt.scanNamespace(new_namespace_index, decls); try sema.declareDependency(.{ .interned = wip_ty.index }); @@ -3094,144 +3067,22 @@ fn zirEnumDecl( wip_ty.prepare(ip, new_cau_index, new_namespace_index); done = true; - const int_tag_ty = ty: { - // We create a block for the field type instructions because they - // may need to reference Decls from inside the enum namespace. - // Within the field type, default value, and alignment expressions, the owner should be the enum's `Cau`. 
- - const prev_owner = sema.owner; - sema.owner = AnalUnit.wrap(.{ .cau = new_cau_index }); - defer sema.owner = prev_owner; - - const prev_func_index = sema.func_index; - sema.func_index = .none; - defer sema.func_index = prev_func_index; - - var enum_block: Block = .{ - .parent = null, - .sema = sema, - .namespace = new_namespace_index, - .instructions = .{}, - .inlining = null, - .is_comptime = true, - .src_base_inst = tracked_inst, - .type_name_ctx = type_name, - }; - defer enum_block.instructions.deinit(sema.gpa); - - if (body.len != 0) { - _ = try sema.analyzeInlineBody(&enum_block, body, inst); - } - - if (tag_type_ref != .none) { - const ty = try sema.resolveType(&enum_block, tag_ty_src, tag_type_ref); - if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) { - return sema.fail(&enum_block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(pt)}); - } - break :ty ty; - } else if (fields_len == 0) { - break :ty try pt.intType(.unsigned, 0); - } else { - const bits = std.math.log2_int_ceil(usize, fields_len); - break :ty try pt.intType(.unsigned, bits); - } - }; - - wip_ty.setTagTy(ip, int_tag_ty.toIntern()); - - if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) { - if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(pt)) { - return sema.fail(block, src, "non-exhaustive enum specifies every value", .{}); - } - } - - var bit_bag_index: usize = body_end; - var cur_bit_bag: u32 = undefined; - var field_i: u32 = 0; - var last_tag_val: ?Value = null; - while (field_i < fields_len) : (field_i += 1) { - if (field_i % 32 == 0) { - cur_bit_bag = sema.code.extra[bit_bag_index]; - bit_bag_index += 1; - } - const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0; - cur_bit_bag >>= 1; - - const field_name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]); - const field_name_zir = sema.code.nullTerminatedString(field_name_index); - extra_index += 2; // field name, doc comment - - const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); - - const value_src: LazySrcLoc = .{ - .base_node_inst = tracked_inst, - .offset = .{ .container_field_value = field_i }, - }; - - const tag_overflow = if (has_tag_value) overflow: { - const tag_val_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); - extra_index += 1; - const tag_inst = try sema.resolveInst(tag_val_ref); - last_tag_val = try sema.resolveConstDefinedValue(block, .{ - .base_node_inst = tracked_inst, - .offset = .{ .container_field_name = field_i }, - }, tag_inst, .{ - .needed_comptime_reason = "enum tag value must be comptime-known", - }); - if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true; - last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); - if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| { - assert(conflict.kind == .value); // AstGen validated names are unique - const other_field_src: LazySrcLoc = .{ - .base_node_inst = tracked_inst, - .offset = .{ .container_field_value = conflict.prev_field_idx }, - }; - const msg = msg: { - const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, sema)}); - errdefer msg.destroy(gpa); - try sema.errNote(other_field_src, msg, "other occurrence here", .{}); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(block, msg); - } - break :overflow false; - } else if (any_values) overflow: { - var overflow: 
?usize = null; - last_tag_val = if (last_tag_val) |val| - try sema.intAdd(val, try pt.intValue(int_tag_ty, 1), int_tag_ty, &overflow) - else - try pt.intValue(int_tag_ty, 0); - if (overflow != null) break :overflow true; - if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| { - assert(conflict.kind == .value); // AstGen validated names are unique - const other_field_src: LazySrcLoc = .{ - .base_node_inst = tracked_inst, - .offset = .{ .container_field_value = conflict.prev_field_idx }, - }; - const msg = msg: { - const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, sema)}); - errdefer msg.destroy(gpa); - try sema.errNote(other_field_src, msg, "other occurrence here", .{}); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(block, msg); - } - break :overflow false; - } else overflow: { - assert(wip_ty.nextField(&mod.intern_pool, field_name, .none) == null); - last_tag_val = try pt.intValue(Type.comptime_int, field_i); - if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true; - last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); - break :overflow false; - }; - - if (tag_overflow) { - const msg = try sema.errMsg(value_src, "enumeration value '{}' too large for type '{}'", .{ - last_tag_val.?.fmtValueSema(pt, sema), int_tag_ty.fmt(pt), - }); - return sema.failWithOwnedErrorMsg(block, msg); - } - } + try Sema.resolveDeclaredEnum( + pt, + wip_ty, + inst, + tracked_inst, + new_namespace_index, + type_name, + new_cau_index, + small, + body, + tag_type_ref, + any_values, + fields_len, + sema.code, + body_end, + ); codegen_type: { if (mod.comp.config.use_llvm) break :codegen_type; @@ -3311,13 +3162,16 @@ fn zirUnionDecl( } }, }; const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, pt.tid, union_init, false)) { - .existing => |ty| wip: { - if (!try sema.checkOutdatedType(ty)) { - try sema.declareDependency(.{ .interned = ty }); - try sema.addTypeReferenceEntry(src, ty); - return Air.internedToRef(ty); - } - break :wip (try ip.getUnionType(gpa, pt.tid, union_init, true)).wip; + .existing => |ty| { + const new_ty = try pt.ensureTypeUpToDate(ty, false); + + // Make sure we update the namespace if the declaration is re-analyzed, to pick + // up on e.g. changed comptime decls. + try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod)); + + try sema.declareDependency(.{ .interned = new_ty }); + try sema.addTypeReferenceEntry(src, new_ty); + return Air.internedToRef(new_ty); }, .wip => |wip| wip, }); @@ -3335,6 +3189,7 @@ fn zirUnionDecl( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); errdefer pt.destroyNamespace(new_namespace_index); @@ -3344,7 +3199,7 @@ fn zirUnionDecl( try mod.intern_pool.addDependency( gpa, AnalUnit.wrap(.{ .cau = new_cau_index }), - .{ .src_hash = try block.trackZir(inst) }, + .{ .src_hash = tracked_inst }, ); } @@ -3406,8 +3261,12 @@ fn zirOpaqueDecl( }; // No `wrapWipTy` needed as no std.builtin types are opaque. const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) { - // No `checkOutdatedType` as opaque types are never outdated. .existing => |ty| { + // Make sure we update the namespace if the declaration is re-analyzed, to pick + // up on e.g. changed comptime decls. 
+ try pt.ensureNamespaceUpToDate(Type.fromInterned(ty).getNamespaceIndex(mod)); + + try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); }, @@ -3427,6 +3286,7 @@ fn zirOpaqueDecl( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); errdefer pt.destroyNamespace(new_namespace_index); @@ -6072,6 +5932,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr // trigger re-analysis later. try pt.ensureFileAnalyzed(result.file_index); const ty = zcu.fileRootType(result.file_index); + try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); } @@ -6821,6 +6682,8 @@ fn lookupInNamespace( const zcu = pt.zcu; const ip = &zcu.intern_pool; + try pt.ensureNamespaceUpToDate(namespace_index); + const namespace = zcu.namespacePtr(namespace_index); const adapter: Zcu.Namespace.NameAdapter = .{ .zcu = zcu }; @@ -14038,6 +13901,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. // trigger re-analysis later. try pt.ensureFileAnalyzed(result.file_index); const ty = zcu.fileRootType(result.file_index); + try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(operand_src, ty); return Air.internedToRef(ty); } @@ -17703,7 +17567,13 @@ fn zirThis( _ = extended; const pt = sema.pt; const namespace = pt.zcu.namespacePtr(block.namespace); - return Air.internedToRef(namespace.owner_type); + const new_ty = try pt.ensureTypeUpToDate(namespace.owner_type, false); + switch (pt.zcu.intern_pool.indexToKey(new_ty)) { + .struct_type, .union_type, .enum_type => try sema.declareDependency(.{ .interned = new_ty }), + .opaque_type => {}, + else => unreachable, + } + return Air.internedToRef(new_ty); } fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { @@ -19005,6 +18875,7 @@ fn typeInfoNamespaceDecls( const ip = &zcu.intern_pool; const namespace_index = opt_namespace_index.unwrap() orelse return; + try pt.ensureNamespaceUpToDate(namespace_index); const namespace = zcu.namespacePtr(namespace_index); const gop = try seen_namespaces.getOrPut(namespace); @@ -21871,6 +21742,7 @@ fn zirReify( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); try sema.addTypeReferenceEntry(src, wip_ty.index); @@ -22080,6 +21952,7 @@ fn reifyEnum( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index); @@ -22384,6 +22257,7 @@ fn reifyUnion( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index); @@ -22667,6 +22541,7 @@ fn reifyStruct( .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), + .generation = mod.generation, }); const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index); @@ -35373,7 +35248,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { if (struct_type.haveLayout(ip)) 
return; - try ty.resolveFields(pt); + try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); if (struct_type.layout == .@"packed") { semaBackingIntType(pt, struct_type) catch |err| switch (err) { @@ -38499,6 +38374,187 @@ fn getOwnerFuncDeclInst(sema: *Sema) InternPool.TrackedInst.Index { return ip.getCau(cau).zir_index; } +/// Called as soon as a `declared` enum type is created. +/// Resolves the tag type and field inits. +/// Marks the `src_inst` dependency on the enum's declaration, so call sites need not do this. +pub fn resolveDeclaredEnum( + pt: Zcu.PerThread, + wip_ty: InternPool.WipEnumType, + inst: Zir.Inst.Index, + tracked_inst: InternPool.TrackedInst.Index, + namespace: InternPool.NamespaceIndex, + type_name: InternPool.NullTerminatedString, + enum_cau: InternPool.Cau.Index, + small: Zir.Inst.EnumDecl.Small, + body: []const Zir.Inst.Index, + tag_type_ref: Zir.Inst.Ref, + any_values: bool, + fields_len: u32, + zir: Zir, + body_end: usize, +) Zcu.CompileError!void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable; + + const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0) }; + const tag_ty_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .node_offset_container_tag = 0 } }; + + const anal_unit = AnalUnit.wrap(.{ .cau = enum_cau }); + + var arena = std.heap.ArenaAllocator.init(gpa); + defer arena.deinit(); + + var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa); + defer comptime_err_ret_trace.deinit(); + + var sema: Sema = .{ + .pt = pt, + .gpa = gpa, + .arena = arena.allocator(), + .code = zir, + .owner = anal_unit, + .func_index = .none, + .func_is_naked = false, + .fn_ret_ty = Type.void, + .fn_ret_ty_ies = null, + .comptime_err_ret_trace = &comptime_err_ret_trace, + }; + defer sema.deinit(); + + try sema.declareDependency(.{ .src_hash = tracked_inst }); + + var block: Block = .{ + .parent = null, + .sema = &sema, + .namespace = namespace, + .instructions = .{}, + .inlining = null, + .is_comptime = true, + .src_base_inst = tracked_inst, + .type_name_ctx = type_name, + }; + defer block.instructions.deinit(gpa); + + const int_tag_ty = ty: { + if (body.len != 0) { + _ = try sema.analyzeInlineBody(&block, body, inst); + } + + if (tag_type_ref != .none) { + const ty = try sema.resolveType(&block, tag_ty_src, tag_type_ref); + if (ty.zigTypeTag(zcu) != .Int and ty.zigTypeTag(zcu) != .ComptimeInt) { + return sema.fail(&block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(pt)}); + } + break :ty ty; + } else if (fields_len == 0) { + break :ty try pt.intType(.unsigned, 0); + } else { + const bits = std.math.log2_int_ceil(usize, fields_len); + break :ty try pt.intType(.unsigned, bits); + } + }; + + wip_ty.setTagTy(ip, int_tag_ty.toIntern()); + + if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) { + if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(pt)) { + return sema.fail(&block, src, "non-exhaustive enum specifies every value", .{}); + } + } + + var extra_index = body_end + bit_bags_count; + var bit_bag_index: usize = body_end; + var cur_bit_bag: u32 = undefined; + var last_tag_val: ?Value = null; + for (0..fields_len) |field_i_usize| { + const field_i: u32 = @intCast(field_i_usize); + if (field_i % 32 == 0) { + cur_bit_bag = zir.extra[bit_bag_index]; + bit_bag_index += 1; + } + const has_tag_value = @as(u1, 
@truncate(cur_bit_bag)) != 0; + cur_bit_bag >>= 1; + + const field_name_index: Zir.NullTerminatedString = @enumFromInt(zir.extra[extra_index]); + const field_name_zir = zir.nullTerminatedString(field_name_index); + extra_index += 2; // field name, doc comment + + const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); + + const value_src: LazySrcLoc = .{ + .base_node_inst = tracked_inst, + .offset = .{ .container_field_value = field_i }, + }; + + const tag_overflow = if (has_tag_value) overflow: { + const tag_val_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]); + extra_index += 1; + const tag_inst = try sema.resolveInst(tag_val_ref); + last_tag_val = try sema.resolveConstDefinedValue(&block, .{ + .base_node_inst = tracked_inst, + .offset = .{ .container_field_name = field_i }, + }, tag_inst, .{ + .needed_comptime_reason = "enum tag value must be comptime-known", + }); + if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true; + last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); + if (wip_ty.nextField(ip, field_name, last_tag_val.?.toIntern())) |conflict| { + assert(conflict.kind == .value); // AstGen validated names are unique + const other_field_src: LazySrcLoc = .{ + .base_node_inst = tracked_inst, + .offset = .{ .container_field_value = conflict.prev_field_idx }, + }; + const msg = msg: { + const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, &sema)}); + errdefer msg.destroy(gpa); + try sema.errNote(other_field_src, msg, "other occurrence here", .{}); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(&block, msg); + } + break :overflow false; + } else if (any_values) overflow: { + var overflow: ?usize = null; + last_tag_val = if (last_tag_val) |val| + try sema.intAdd(val, try pt.intValue(int_tag_ty, 1), int_tag_ty, &overflow) + else + try pt.intValue(int_tag_ty, 0); + if (overflow != null) break :overflow true; + if (wip_ty.nextField(ip, field_name, last_tag_val.?.toIntern())) |conflict| { + assert(conflict.kind == .value); // AstGen validated names are unique + const other_field_src: LazySrcLoc = .{ + .base_node_inst = tracked_inst, + .offset = .{ .container_field_value = conflict.prev_field_idx }, + }; + const msg = msg: { + const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, &sema)}); + errdefer msg.destroy(gpa); + try sema.errNote(other_field_src, msg, "other occurrence here", .{}); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(&block, msg); + } + break :overflow false; + } else overflow: { + assert(wip_ty.nextField(ip, field_name, .none) == null); + last_tag_val = try pt.intValue(Type.comptime_int, field_i); + if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true; + last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); + break :overflow false; + }; + + if (tag_overflow) { + const msg = try sema.errMsg(value_src, "enumeration value '{}' too large for type '{}'", .{ + last_tag_val.?.fmtValueSema(pt, &sema), int_tag_ty.fmt(pt), + }); + return sema.failWithOwnedErrorMsg(&block, msg); + } + } +} + pub const bitCastVal = @import("Sema/bitcast.zig").bitCast; pub const bitCastSpliceVal = @import("Sema/bitcast.zig").bitCastSplice; diff --git a/src/Zcu.zig b/src/Zcu.zig index 8626a147b6..63feb2d00c 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -215,6 +215,8 @@ panic_messages: [PanicId.len]InternPool.Nav.Index.Optional = .{.none} ** PanicId 
panic_func_index: InternPool.Index = .none, null_stack_trace: InternPool.Index = .none, +generation: u32 = 0, + pub const PerThread = @import("Zcu/PerThread.zig"); pub const PanicId = enum { @@ -332,6 +334,7 @@ pub const TypeReference = struct { pub const Namespace = struct { parent: OptionalIndex, file_scope: File.Index, + generation: u32, /// Will be a struct, enum, union, or opaque. owner_type: InternPool.Index, /// Members of the namespace which are marked `pub`. @@ -2295,7 +2298,7 @@ pub fn markDependeeOutdated( marked_po: enum { not_marked_po, marked_po }, dependee: InternPool.Dependee, ) !void { - log.debug("outdated dependee: {}", .{fmtDependee(dependee, zcu)}); + log.debug("outdated dependee: {}", .{zcu.fmtDependee(dependee)}); var it = zcu.intern_pool.dependencyIterator(dependee); while (it.next()) |depender| { if (zcu.outdated.getPtr(depender)) |po_dep_count| { @@ -2303,9 +2306,9 @@ pub fn markDependeeOutdated( .not_marked_po => {}, .marked_po => { po_dep_count.* -= 1; - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), po_dep_count.* }); + log.debug("outdated {} => already outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* }); if (po_dep_count.* == 0) { - log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)}); + log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)}); try zcu.outdated_ready.put(zcu.gpa, depender, {}); } }, @@ -2316,20 +2319,19 @@ pub fn markDependeeOutdated( const new_po_dep_count = switch (marked_po) { .not_marked_po => if (opt_po_entry) |e| e.value else 0, .marked_po => if (opt_po_entry) |e| e.value - 1 else { - // This dependency has been registered during in-progress analysis, but the unit is - // not in `potentially_outdated` because analysis is in-progress. Nothing to do. + // This `AnalUnit` has already been re-analyzed this update, and registered a dependency + // on this thing, but already has sufficiently up-to-date information. Nothing to do. continue; }, }; - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), new_po_dep_count }); try zcu.outdated.putNoClobber( zcu.gpa, depender, new_po_dep_count, ); - log.debug("outdated: {}", .{fmtAnalUnit(depender, zcu)}); + log.debug("outdated {} => new outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count }); if (new_po_dep_count == 0) { - log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)}); + log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)}); try zcu.outdated_ready.put(zcu.gpa, depender, {}); } // If this is a Decl and was not previously PO, we must recursively @@ -2342,16 +2344,16 @@ pub fn markDependeeOutdated( } pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { - log.debug("up-to-date dependee: {}", .{fmtDependee(dependee, zcu)}); + log.debug("up-to-date dependee: {}", .{zcu.fmtDependee(dependee)}); var it = zcu.intern_pool.dependencyIterator(dependee); while (it.next()) |depender| { if (zcu.outdated.getPtr(depender)) |po_dep_count| { // This depender is already outdated, but it now has one // less PO dependency! 
po_dep_count.* -= 1; - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), po_dep_count.* }); + log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* }); if (po_dep_count.* == 0) { - log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)}); + log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)}); try zcu.outdated_ready.put(zcu.gpa, depender, {}); } continue; @@ -2365,11 +2367,11 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { }; if (ptr.* > 1) { ptr.* -= 1; - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), ptr.* }); + log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* }); continue; } - log.debug("up-to-date (po deps = 0): {}", .{fmtAnalUnit(depender, zcu)}); + log.debug("up-to-date {} => {} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) }); // This dependency is no longer PO, i.e. is known to be up-to-date. assert(zcu.potentially_outdated.swapRemove(depender)); @@ -2398,7 +2400,7 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni }, .func => |func_index| .{ .interned = func_index }, // IES }; - log.debug("marking dependee po: {}", .{fmtDependee(dependee, zcu)}); + log.debug("potentially outdated dependee: {}", .{zcu.fmtDependee(dependee)}); var it = ip.dependencyIterator(dependee); while (it.next()) |po| { if (zcu.outdated.getPtr(po)) |po_dep_count| { @@ -2408,17 +2410,17 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni _ = zcu.outdated_ready.swapRemove(po); } po_dep_count.* += 1; - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), po_dep_count.* }); + log.debug("po {} => {} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* }); continue; } if (zcu.potentially_outdated.getPtr(po)) |n| { // There is now one more PO dependency. n.* += 1; - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), n.* }); + log.debug("po {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* }); continue; } try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1); - log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), 1 }); + log.debug("po {} => {} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) }); // This AnalUnit was not already PO, so we must recursively mark its dependers as also PO. 
try zcu.markTransitiveDependersPotentiallyOutdated(po); } @@ -2443,7 +2445,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { if (zcu.outdated_ready.count() > 0) { const unit = zcu.outdated_ready.keys()[0]; - log.debug("findOutdatedToAnalyze: trivial {}", .{fmtAnalUnit(unit, zcu)}); + log.debug("findOutdatedToAnalyze: trivial {}", .{zcu.fmtAnalUnit(unit)}); return unit; } @@ -2498,10 +2500,15 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { const nav = zcu.funcInfo(func).owner_nav; std.io.getStdErr().writer().print("outdated: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {}; } + for (zcu.potentially_outdated.keys(), zcu.potentially_outdated.values()) |o, opod| { + const func = o.unwrap().func; + const nav = zcu.funcInfo(func).owner_nav; + std.io.getStdErr().writer().print("po: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {}; + } } log.debug("findOutdatedToAnalyze: heuristic returned '{}' ({d} dependers)", .{ - fmtAnalUnit(AnalUnit.wrap(.{ .cau = chosen_cau.? }), zcu), + zcu.fmtAnalUnit(AnalUnit.wrap(.{ .cau = chosen_cau.? })), chosen_cau_dependers, }); @@ -2744,7 +2751,7 @@ pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void { const gpa = zcu.gpa; unit_refs: { - const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse return; + const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse break :unit_refs; var idx = kv.value; while (idx != std.math.maxInt(u32)) { @@ -2758,7 +2765,7 @@ pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void { } type_refs: { - const kv = zcu.type_reference_table.fetchSwapRemove(anal_unit) orelse return; + const kv = zcu.type_reference_table.fetchSwapRemove(anal_unit) orelse break :type_refs; var idx = kv.value; while (idx != std.math.maxInt(u32)) { @@ -3280,7 +3287,7 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve const unit = kv.key; try result.putNoClobber(gpa, unit, kv.value); - log.debug("handle unit '{}'", .{fmtAnalUnit(unit, zcu)}); + log.debug("handle unit '{}'", .{zcu.fmtAnalUnit(unit)}); if (zcu.reference_table.get(unit)) |first_ref_idx| { assert(first_ref_idx != std.math.maxInt(u32)); @@ -3289,8 +3296,8 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve const ref = zcu.all_references.items[ref_idx]; if (!result.contains(ref.referenced)) { log.debug("unit '{}': ref unit '{}'", .{ - fmtAnalUnit(unit, zcu), - fmtAnalUnit(ref.referenced, zcu), + zcu.fmtAnalUnit(unit), + zcu.fmtAnalUnit(ref.referenced), }); try unit_queue.put(gpa, ref.referenced, .{ .referencer = unit, @@ -3307,7 +3314,7 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve const ref = zcu.all_type_references.items[ref_idx]; if (!checked_types.contains(ref.referenced)) { log.debug("unit '{}': ref type '{}'", .{ - fmtAnalUnit(unit, zcu), + zcu.fmtAnalUnit(unit), Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip), }); try type_queue.put(gpa, ref.referenced, .{ @@ -3389,10 +3396,10 @@ pub fn cauFileScope(zcu: *Zcu, cau: InternPool.Cau.Index) *File { return zcu.fileByIndex(file_index); } -fn fmtAnalUnit(unit: AnalUnit, zcu: *Zcu) std.fmt.Formatter(formatAnalUnit) { +pub fn fmtAnalUnit(zcu: *Zcu, unit: AnalUnit) std.fmt.Formatter(formatAnalUnit) { return .{ .data = .{ .unit = unit, .zcu = zcu } }; } -fn fmtDependee(d: InternPool.Dependee, zcu: *Zcu) std.fmt.Formatter(formatDependee) { +pub fn 
fmtDependee(zcu: *Zcu, d: InternPool.Dependee) std.fmt.Formatter(formatDependee) { return .{ .data = .{ .dependee = d, .zcu = zcu } }; } diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 83a7dce4fc..b2f6d600e6 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -485,10 +485,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { const file_root_type = pt.zcu.fileRootType(file_index); if (file_root_type != .none) { - // The namespace is already up-to-date thanks to the `updateFileNamespace` calls at the - // start of this update. We just have to check whether the type itself is okay! - const file_root_type_cau = pt.zcu.intern_pool.loadStructType(file_root_type).cau.unwrap().?; - return pt.ensureCauAnalyzed(file_root_type_cau); + _ = try pt.ensureTypeUpToDate(file_root_type, false); } else { return pt.semaFile(file_index); } @@ -505,10 +502,10 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); + const anal_unit = AnalUnit.wrap(.{ .cau = cau_index }); const cau = ip.getCau(cau_index); - //log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)}); + log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)}); assert(!zcu.analysis_in_progress.contains(anal_unit)); @@ -552,10 +549,12 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu // Since it does not, this must be a transitive failure. try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); } - // We treat errors as up-to-date, since those uses would just trigger a transitive error + // We treat errors as up-to-date, since those uses would just trigger a transitive error. + // The exception is types, since type declarations may require re-analysis if the type, e.g. its captures, changed. + const outdated = cau.owner.unwrap() == .type; break :res .{ .{ - .invalidate_decl_val = false, - .invalidate_decl_ref = false, + .invalidate_decl_val = outdated, + .invalidate_decl_ref = outdated, }, true }; }, error.OutOfMemory => res: { @@ -610,7 +609,7 @@ fn ensureCauAnalyzedInner( const ip = &zcu.intern_pool; const cau = ip.getCau(cau_index); - const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); + const anal_unit = AnalUnit.wrap(.{ .cau = cau_index }); const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail; @@ -626,7 +625,6 @@ fn ensureCauAnalyzedInner( // * so, it uses the same `struct` // * but this doesn't stop it from updating the namespace! // * we basically do `scanDecls`, updating the namespace as needed - // * TODO: optimize this to make sure we only do it once a generation i guess? // * so everyone lived happily ever after if (zcu.fileByIndex(inst_info.file).status != .success_zir) { @@ -646,17 +644,6 @@ fn ensureCauAnalyzedInner( _ = zcu.transitive_failed_analysis.swapRemove(anal_unit); } - if (inst_info.inst == .main_struct_inst) { - // Note that this is definitely a *recreation* due to outdated, because - // this instruction indicates that `cau.owner` is a `type`, which only - // reaches here if `cau_outdated`. 
- try pt.recreateFileRoot(inst_info.file); - return .{ - .invalidate_decl_val = true, - .invalidate_decl_ref = true, - }; - } - const decl_prog_node = zcu.sema_prog_node.start(switch (cau.owner.unwrap()) { .nav => |nav| ip.getNav(nav).fqn.toSlice(ip), .type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), @@ -685,9 +672,9 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter const func = zcu.funcInfo(maybe_coerced_func_index); - //log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)}); + log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)}); - const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const anal_unit = AnalUnit.wrap(.{ .func = func_index }); const func_outdated = zcu.outdated.swapRemove(anal_unit) or zcu.potentially_outdated.swapRemove(anal_unit); @@ -742,7 +729,7 @@ fn ensureFuncBodyAnalyzedInner( const ip = &zcu.intern_pool; const func = zcu.funcInfo(func_index); - const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const anal_unit = AnalUnit.wrap(.{ .func = func_index }); // Here's an interesting question: is this function actually valid? // Maybe the signature changed, so we'll end up creating a whole different `func` @@ -766,7 +753,7 @@ fn ensureFuncBodyAnalyzedInner( if (func_outdated) { try zcu.markDependeeOutdated(.marked_po, .{ .interned = func_index }); // IES } - ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); + ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .func = func_index })); ip.remove(pt.tid, func_index); @panic("TODO: remove orphaned function from binary"); } @@ -901,7 +888,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai "unable to codegen: {s}", .{@errorName(err)}, )); - try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); + try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index })); }, }; } else if (zcu.llvm_object) |llvm_object| { @@ -982,7 +969,7 @@ fn createFileRootStruct( if (zcu.comp.incremental) { try ip.addDependency( gpa, - InternPool.AnalUnit.wrap(.{ .cau = new_cau_index }), + AnalUnit.wrap(.{ .cau = new_cau_index }), .{ .src_hash = tracked_inst }, ); } @@ -998,35 +985,6 @@ fn createFileRootStruct( return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index); } -/// Recreate the root type of a file after it becomes outdated. A new struct type -/// is constructed at a new InternPool index, reusing the namespace for efficiency. -fn recreateFileRoot(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const file = zcu.fileByIndex(file_index); - const file_root_type = zcu.fileRootType(file_index); - const namespace_index = Type.fromInterned(file_root_type).getNamespaceIndex(zcu); - - assert(file_root_type != .none); - - log.debug("recreateFileRoot mod={s} sub_file_path={s}", .{ - file.mod.fully_qualified_name, - file.sub_file_path, - }); - - if (file.status != .success_zir) { - return error.AnalysisFail; - } - - // Invalidate the existing type, reusing its namespace. - const file_root_type_cau = ip.loadStructType(file_root_type).cau.unwrap().?; - ip.removeDependenciesForDepender( - zcu.gpa, - InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }), - ); - _ = try pt.createFileRootStruct(file_index, namespace_index, true); -} - /// Re-scan the namespace of a file's root struct type on an incremental update. 
/// The file must have successfully populated ZIR. /// If the file's root struct type is not populated (the file is unreferenced), nothing is done. @@ -1060,6 +1018,7 @@ fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator. break :decls file.zir.bodySlice(extra_index, decls_len); }; try pt.scanNamespace(namespace_index, decls); + zcu.namespacePtr(namespace_index).generation = zcu.generation; } fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { @@ -1080,6 +1039,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { .parent = .none, .owner_type = undefined, // set in `createFileRootStruct` .file_scope = file_index, + .generation = zcu.generation, }); const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index, false); errdefer zcu.intern_pool.remove(pt.tid, struct_ty); @@ -1131,7 +1091,7 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult { const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); + const anal_unit = AnalUnit.wrap(.{ .cau = cau_index }); const cau = ip.getCau(cau_index); const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail; @@ -1151,10 +1111,12 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult { // This declaration has no value so is definitely not a std.builtin type. break :ip_index .none; }, - .type => { + .type => |ty| { // This is an incremental update, and this type is being re-analyzed because it is outdated. - // The type must be recreated at a new `InternPool.Index`. - // Mark it outdated so that creation sites are re-analyzed. + // Create a new type in its place, and mark the old one as outdated so that use sites will + // be re-analyzed and discover an up-to-date type. + const new_ty = try pt.ensureTypeUpToDate(ty, true); + assert(new_ty != ty); return .{ .invalidate_decl_val = true, .invalidate_decl_ref = true, @@ -2002,21 +1964,23 @@ const ScanDeclIter = struct { try namespace.other_decls.append(gpa, cau); - // For a `comptime` declaration, whether to re-analyze is based solely on whether the - // `Cau` is outdated. So, add this one to `outdated` and `outdated_ready` if not already. - const unit = InternPool.AnalUnit.wrap(.{ .cau = cau }); - if (zcu.potentially_outdated.fetchSwapRemove(unit)) |kv| { - try zcu.outdated.ensureUnusedCapacity(gpa, 1); - try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); - zcu.outdated.putAssumeCapacityNoClobber(unit, kv.value); - if (kv.value == 0) { // no PO deps + if (existing_cau == null) { + // For a `comptime` declaration, whether to analyze is based solely on whether the + // `Cau` is outdated. So, add this one to `outdated` and `outdated_ready` if not already. 
+ const unit = AnalUnit.wrap(.{ .cau = cau }); + if (zcu.potentially_outdated.fetchSwapRemove(unit)) |kv| { + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + zcu.outdated.putAssumeCapacityNoClobber(unit, kv.value); + if (kv.value == 0) { // no PO deps + zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {}); + } + } else if (!zcu.outdated.contains(unit)) { + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + zcu.outdated.putAssumeCapacityNoClobber(unit, 0); zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {}); } - } else if (!zcu.outdated.contains(unit)) { - try zcu.outdated.ensureUnusedCapacity(gpa, 1); - try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); - zcu.outdated.putAssumeCapacityNoClobber(unit, 0); - zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {}); } break :cau .{ cau, true }; @@ -2027,9 +1991,6 @@ const ScanDeclIter = struct { const cau, const nav = if (existing_cau) |cau_index| cau_nav: { const nav_index = ip.getCau(cau_index).owner.unwrap().nav; const nav = ip.getNav(nav_index); - if (nav.name != name) { - std.debug.panic("'{}' vs '{}'", .{ nav.name.fmt(ip), name.fmt(ip) }); - } assert(nav.name == name); assert(nav.fqn == fqn); break :cau_nav .{ cau_index, nav_index }; @@ -2078,7 +2039,7 @@ const ScanDeclIter = struct { }, }; - if (want_analysis or declaration.flags.is_export) { + if (existing_cau == null and (want_analysis or declaration.flags.is_export)) { log.debug( "scanDecl queue analyze_cau file='{s}' cau_index={d}", .{ namespace.fileScope(zcu).sub_file_path, cau }, @@ -2098,7 +2059,7 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError! const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const anal_unit = AnalUnit.wrap(.{ .func = func_index }); const func = zcu.funcInfo(func_index); const inst_info = func.zir_body_inst.resolveFull(ip) orelse return error.AnalysisFail; const file = zcu.fileByIndex(inst_info.file); @@ -2484,7 +2445,7 @@ fn processExportsInner( const nav = ip.getNav(nav_index); if (zcu.failed_codegen.contains(nav_index)) break :failed true; if (nav.analysis_owner.unwrap()) |cau| { - const cau_unit = InternPool.AnalUnit.wrap(.{ .cau = cau }); + const cau_unit = AnalUnit.wrap(.{ .cau = cau }); if (zcu.failed_analysis.contains(cau_unit)) break :failed true; if (zcu.transitive_failed_analysis.contains(cau_unit)) break :failed true; } @@ -2494,7 +2455,7 @@ fn processExportsInner( }; // If the value is a function, we also need to check if that function succeeded analysis. if (val.typeOf(zcu).zigTypeTag(zcu) == .Fn) { - const func_unit = InternPool.AnalUnit.wrap(.{ .func = val.toIntern() }); + const func_unit = AnalUnit.wrap(.{ .func = val.toIntern() }); if (zcu.failed_analysis.contains(func_unit)) break :failed true; if (zcu.transitive_failed_analysis.contains(func_unit)) break :failed true; } @@ -2669,7 +2630,7 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void .{@errorName(err)}, )); if (nav.analysis_owner.unwrap()) |cau| { - try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .cau = cau })); + try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .cau = cau })); } else { // TODO: we don't have a way to indicate that this failure is retryable! // Since these are really rare, we could as a cop-out retry the whole build next update. 
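The `ScanDeclIter` and `markDependeeOutdated` hunks above all revolve around one piece of bookkeeping: each analysis unit carries a count of potentially-outdated (PO) dependencies, and a unit only lands in `outdated_ready` once it is known outdated and that count reaches zero. The following standalone sketch is not part of the patch; `Unit`, `State`, `markOutdated`, and `onePoDepUpToDate` are placeholder names for illustration, and in the compiler the three maps live on `Zcu` keyed by `InternPool.AnalUnit`. It only models the promotion logic under those assumptions.

const std = @import("std");

/// Stand-in for `InternPool.AnalUnit`; any hashable key works for the sketch.
const Unit = u32;

const State = struct {
    gpa: std.mem.Allocator,
    /// Unit -> number of dependencies that are still only potentially outdated.
    potentially_outdated: std.AutoArrayHashMapUnmanaged(Unit, u32) = .{},
    /// Unit known to need re-analysis -> remaining PO dependency count.
    outdated: std.AutoArrayHashMapUnmanaged(Unit, u32) = .{},
    /// Outdated units whose PO count hit zero, i.e. safe to re-analyze now.
    outdated_ready: std.AutoArrayHashMapUnmanaged(Unit, void) = .{},

    /// Promote a unit to "outdated": keep its PO count if it was already
    /// potentially outdated, otherwise record it with zero PO dependencies.
    fn markOutdated(s: *State, unit: Unit) !void {
        if (s.potentially_outdated.fetchSwapRemove(unit)) |kv| {
            try s.outdated.putNoClobber(s.gpa, unit, kv.value);
            if (kv.value == 0) try s.outdated_ready.put(s.gpa, unit, {});
        } else if (!s.outdated.contains(unit)) {
            try s.outdated.putNoClobber(s.gpa, unit, 0);
            try s.outdated_ready.put(s.gpa, unit, {});
        }
    }

    /// One PO dependency of `unit` turned out to be up-to-date: decrement the
    /// count, and once it reaches zero the unit becomes ready for re-analysis.
    fn onePoDepUpToDate(s: *State, unit: Unit) !void {
        if (s.outdated.getPtr(unit)) |po_dep_count| {
            po_dep_count.* -= 1;
            if (po_dep_count.* == 0) try s.outdated_ready.put(s.gpa, unit, {});
        }
    }
};

test "outdated bookkeeping" {
    var s: State = .{ .gpa = std.testing.allocator };
    defer {
        s.potentially_outdated.deinit(s.gpa);
        s.outdated.deinit(s.gpa);
        s.outdated_ready.deinit(s.gpa);
    }
    try s.potentially_outdated.put(s.gpa, 42, 1);
    try s.markOutdated(42); // still has one PO dependency, so not ready yet
    try std.testing.expect(!s.outdated_ready.contains(42));
    try s.onePoDepUpToDate(42); // last PO dependency resolved as up-to-date
    try std.testing.expect(s.outdated_ready.contains(42));
}

The test doubles as a usage example: a unit that starts out potentially outdated is promoted when a dependee is confirmed outdated, but it is deliberately held out of `outdated_ready` until its last PO dependency is confirmed up-to-date.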
@@ -2782,7 +2743,7 @@ pub fn reportRetryableFileError( gop.value_ptr.* = err_msg; } -/// Shortcut for calling `intern_pool.get`. +///Shortcut for calling `intern_pool.get`. pub fn intern(pt: Zcu.PerThread, key: InternPool.Key) Allocator.Error!InternPool.Index { return pt.zcu.intern_pool.get(pt.zcu.gpa, pt.tid, key); } @@ -3367,6 +3328,532 @@ pub fn navAlignment(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) InternPo return Value.fromInterned(r.val).typeOf(zcu).abiAlignment(pt); } +/// Given a container type requiring resolution, ensures that it is up-to-date. +/// If not, the type is recreated at a new `InternPool.Index`. +/// The new index is returned. This is the same as the old index if the fields were up-to-date. +/// If `already_updating` is set, assumes the type is already outdated and undergoing re-analysis rather than checking `zcu.outdated`. +pub fn ensureTypeUpToDate(pt: Zcu.PerThread, ty: InternPool.Index, already_updating: bool) Zcu.SemaError!InternPool.Index { + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + switch (ip.indexToKey(ty)) { + .struct_type => |key| { + const struct_obj = ip.loadStructType(ty); + const outdated = already_updating or o: { + const anal_unit = AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap().? }); + const o = zcu.outdated.swapRemove(anal_unit) or + zcu.potentially_outdated.swapRemove(anal_unit); + if (o) { + _ = zcu.outdated_ready.swapRemove(anal_unit); + try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty }); + } + break :o o; + }; + if (!outdated) return ty; + return pt.recreateStructType(ty, key, struct_obj); + }, + .union_type => |key| { + const union_obj = ip.loadUnionType(ty); + const outdated = already_updating or o: { + const anal_unit = AnalUnit.wrap(.{ .cau = union_obj.cau }); + const o = zcu.outdated.swapRemove(anal_unit) or + zcu.potentially_outdated.swapRemove(anal_unit); + if (o) { + _ = zcu.outdated_ready.swapRemove(anal_unit); + try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty }); + } + break :o o; + }; + if (!outdated) return ty; + return pt.recreateUnionType(ty, key, union_obj); + }, + .enum_type => |key| { + const enum_obj = ip.loadEnumType(ty); + const outdated = already_updating or o: { + const anal_unit = AnalUnit.wrap(.{ .cau = enum_obj.cau.unwrap().? 
}); + const o = zcu.outdated.swapRemove(anal_unit) or + zcu.potentially_outdated.swapRemove(anal_unit); + if (o) { + _ = zcu.outdated_ready.swapRemove(anal_unit); + try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty }); + } + break :o o; + }; + if (!outdated) return ty; + return pt.recreateEnumType(ty, key, enum_obj); + }, + .opaque_type => { + assert(!already_updating); + return ty; + }, + else => unreachable, + } +} + +fn recreateStructType( + pt: Zcu.PerThread, + ty: InternPool.Index, + full_key: InternPool.Key.NamespaceType, + struct_obj: InternPool.LoadedStructType, +) Zcu.SemaError!InternPool.Index { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const key = switch (full_key) { + .reified => unreachable, // never outdated + .empty_struct => unreachable, // never outdated + .generated_tag => unreachable, // not a struct + .declared => |d| d, + }; + + if (@intFromEnum(ty) <= InternPool.static_len) { + @panic("TODO: recreate resolved builtin type"); + } + + const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail; + const file = zcu.fileByIndex(inst_info.file); + if (file.status != .success_zir) return error.AnalysisFail; + const zir = file.zir; + + assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); + const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended; + assert(extended.opcode == .struct_decl); + const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand); + var extra_index = extra.end; + + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + const fields_len = if (small.has_fields_len) blk: { + const fields_len = zir.extra[extra_index]; + extra_index += 1; + break :blk fields_len; + } else 0; + + if (captures_len != key.captures.owned.len) return error.AnalysisFail; + if (fields_len != struct_obj.field_types.len) return error.AnalysisFail; + + // The old type will be unused, so drop its dependency information. + ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap().? })); + + const namespace_index = struct_obj.namespace.unwrap().?; + + const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{ + .layout = small.layout, + .fields_len = fields_len, + .known_non_opv = small.known_non_opv, + .requires_comptime = if (small.known_comptime_only) .yes else .unknown, + .is_tuple = small.is_tuple, + .any_comptime_fields = small.any_comptime_fields, + .any_default_inits = small.any_default_inits, + .inits_resolved = false, + .any_aligned_fields = small.any_aligned_fields, + .key = .{ .declared_owned_captures = .{ + .zir_index = key.zir_index, + .captures = key.captures.owned, + } }, + }, true)) { + .wip => |wip| wip, + .existing => unreachable, // we passed `replace_existing` + }; + errdefer wip_ty.cancel(ip, pt.tid); + + wip_ty.setName(ip, struct_obj.name); + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index); + try ip.addDependency( + gpa, + AnalUnit.wrap(.{ .cau = new_cau_index }), + .{ .src_hash = key.zir_index }, + ); + zcu.namespacePtr(namespace_index).owner_type = wip_ty.index; + // No need to re-scan the namespace -- `zirStructDecl` will ultimately do that if the type is still alive. 
+ try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); + + const new_ty = wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index); + if (inst_info.inst == .main_struct_inst) { + // This is the root type of a file! Update the reference. + zcu.setFileRootType(inst_info.file, new_ty); + } + return new_ty; +} + +fn recreateUnionType( + pt: Zcu.PerThread, + ty: InternPool.Index, + full_key: InternPool.Key.NamespaceType, + union_obj: InternPool.LoadedUnionType, +) Zcu.SemaError!InternPool.Index { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const key = switch (full_key) { + .reified => unreachable, // never outdated + .empty_struct => unreachable, // never outdated + .generated_tag => unreachable, // not a union + .declared => |d| d, + }; + + if (@intFromEnum(ty) <= InternPool.static_len) { + @panic("TODO: recreate resolved builtin type"); + } + + const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail; + const file = zcu.fileByIndex(inst_info.file); + if (file.status != .success_zir) return error.AnalysisFail; + const zir = file.zir; + + assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); + const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended; + assert(extended.opcode == .union_decl); + const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand); + var extra_index = extra.end; + + extra_index += @intFromBool(small.has_tag_type); + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + extra_index += @intFromBool(small.has_body_len); + const fields_len = if (small.has_fields_len) blk: { + const fields_len = zir.extra[extra_index]; + extra_index += 1; + break :blk fields_len; + } else 0; + + if (captures_len != key.captures.owned.len) return error.AnalysisFail; + if (fields_len != union_obj.field_types.len) return error.AnalysisFail; + + // The old type will be unused, so drop its dependency information. 
+ ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = union_obj.cau })); + + const namespace_index = union_obj.namespace; + + const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, .{ + .flags = .{ + .layout = small.layout, + .status = .none, + .runtime_tag = if (small.has_tag_type or small.auto_enum_tag) + .tagged + else if (small.layout != .auto) + .none + else switch (true) { // TODO + true => .safety, + false => .none, + }, + .any_aligned_fields = small.any_aligned_fields, + .requires_comptime = .unknown, + .assumed_runtime_bits = false, + .assumed_pointer_aligned = false, + .alignment = .none, + }, + .fields_len = fields_len, + .enum_tag_ty = .none, // set later + .field_types = &.{}, // set later + .field_aligns = &.{}, // set later + .key = .{ .declared_owned_captures = .{ + .zir_index = key.zir_index, + .captures = key.captures.owned, + } }, + }, true)) { + .wip => |wip| wip, + .existing => unreachable, // we passed `replace_existing` + }; + errdefer wip_ty.cancel(ip, pt.tid); + + wip_ty.setName(ip, union_obj.name); + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index); + try ip.addDependency( + gpa, + AnalUnit.wrap(.{ .cau = new_cau_index }), + .{ .src_hash = key.zir_index }, + ); + zcu.namespacePtr(namespace_index).owner_type = wip_ty.index; + // No need to re-scan the namespace -- `zirUnionDecl` will ultimately do that if the type is still alive. + try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); + return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index); +} + +fn recreateEnumType( + pt: Zcu.PerThread, + ty: InternPool.Index, + full_key: InternPool.Key.NamespaceType, + enum_obj: InternPool.LoadedEnumType, +) Zcu.SemaError!InternPool.Index { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const key = switch (full_key) { + .reified => unreachable, // never outdated + .empty_struct => unreachable, // never outdated + .generated_tag => unreachable, // never outdated + .declared => |d| d, + }; + + if (@intFromEnum(ty) <= InternPool.static_len) { + @panic("TODO: recreate resolved builtin type"); + } + + const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail; + const file = zcu.fileByIndex(inst_info.file); + if (file.status != .success_zir) return error.AnalysisFail; + const zir = file.zir; + + assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); + const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended; + assert(extended.opcode == .enum_decl); + const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand); + var extra_index = extra.end; + + const tag_type_ref = if (small.has_tag_type) blk: { + const tag_type_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]); + extra_index += 1; + break :blk tag_type_ref; + } else .none; + + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + + const body_len = if (small.has_body_len) blk: { + const body_len = zir.extra[extra_index]; + extra_index += 1; + break :blk body_len; + } else 0; + + const fields_len = if (small.has_fields_len) blk: { + const fields_len = zir.extra[extra_index]; + extra_index += 1; + break :blk fields_len; + } else 0; + + const decls_len = if (small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + 
break :blk decls_len; + } else 0; + + if (captures_len != key.captures.owned.len) return error.AnalysisFail; + if (fields_len != enum_obj.names.len) return error.AnalysisFail; + + extra_index += captures_len; + extra_index += decls_len; + + const body = zir.bodySlice(extra_index, body_len); + extra_index += body.len; + + const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable; + const body_end = extra_index; + extra_index += bit_bags_count; + + const any_values = for (zir.extra[body_end..][0..bit_bags_count]) |bag| { + if (bag != 0) break true; + } else false; + + // The old type will be unused, so drop its dependency information. + ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = enum_obj.cau.unwrap().? })); + + const namespace_index = enum_obj.namespace; + + const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, .{ + .has_values = any_values, + .tag_mode = if (small.nonexhaustive) + .nonexhaustive + else if (tag_type_ref == .none) + .auto + else + .explicit, + .fields_len = fields_len, + .key = .{ .declared_owned_captures = .{ + .zir_index = key.zir_index, + .captures = key.captures.owned, + } }, + }, true)) { + .wip => |wip| wip, + .existing => unreachable, // we passed `replace_existing` + }; + var done = true; + errdefer if (!done) wip_ty.cancel(ip, pt.tid); + + wip_ty.setName(ip, enum_obj.name); + + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index); + + zcu.namespacePtr(namespace_index).owner_type = wip_ty.index; + // No need to re-scan the namespace -- `zirEnumDecl` will ultimately do that if the type is still alive. + + wip_ty.prepare(ip, new_cau_index, namespace_index); + done = true; + + Sema.resolveDeclaredEnum( + pt, + wip_ty, + inst_info.inst, + key.zir_index, + namespace_index, + enum_obj.name, + new_cau_index, + small, + body, + tag_type_ref, + any_values, + fields_len, + zir, + body_end, + ) catch |err| switch (err) { + error.GenericPoison => unreachable, + error.ComptimeBreak => unreachable, + error.ComptimeReturn => unreachable, + error.AnalysisFail, error.OutOfMemory => |e| return e, + }; + + return wip_ty.index; +} + +/// Given a namespace, re-scan its declarations from the type definition if they have not +/// yet been re-scanned on this update. +/// If the type declaration instruction has been lost, returns `error.AnalysisFail`. +/// This will effectively short-circuit the caller, which will be semantic analysis of a +/// guaranteed-unreferenced `AnalUnit`, to trigger a transitive analysis error. +pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace.Index) Zcu.SemaError!void { + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const namespace = zcu.namespacePtr(namespace_index); + + if (namespace.generation == zcu.generation) return; + + const Container = enum { @"struct", @"union", @"enum", @"opaque" }; + const container: Container, const full_key = switch (ip.indexToKey(namespace.owner_type)) { + .struct_type => |k| .{ .@"struct", k }, + .union_type => |k| .{ .@"union", k }, + .enum_type => |k| .{ .@"enum", k }, + .opaque_type => |k| .{ .@"opaque", k }, + else => unreachable, // namespaces are owned by a container type + }; + + const key = switch (full_key) { + .reified, .empty_struct, .generated_tag => { + // Namespace always empty, so up-to-date. + namespace.generation = zcu.generation; + return; + }, + .declared => |d| d, + }; + + // Namespace outdated -- re-scan the type if necessary. 
+ + const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail; + const file = zcu.fileByIndex(inst_info.file); + if (file.status != .success_zir) return error.AnalysisFail; + const zir = file.zir; + + assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended); + const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended; + + const decls = switch (container) { + .@"struct" => decls: { + assert(extended.opcode == .struct_decl); + const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand); + var extra_index = extra.end; + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + extra_index += captures_len; + if (small.has_backing_int) { + const backing_int_body_len = zir.extra[extra_index]; + extra_index += 1; // backing_int_body_len + if (backing_int_body_len == 0) { + extra_index += 1; // backing_int_ref + } else { + extra_index += backing_int_body_len; // backing_int_body_inst + } + } + break :decls zir.bodySlice(extra_index, decls_len); + }, + .@"union" => decls: { + assert(extended.opcode == .union_decl); + const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand); + var extra_index = extra.end; + extra_index += @intFromBool(small.has_tag_type); + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + extra_index += @intFromBool(small.has_body_len); + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + extra_index += captures_len; + break :decls zir.bodySlice(extra_index, decls_len); + }, + .@"enum" => decls: { + assert(extended.opcode == .enum_decl); + const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand); + var extra_index = extra.end; + extra_index += @intFromBool(small.has_tag_type); + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + extra_index += @intFromBool(small.has_body_len); + extra_index += @intFromBool(small.has_fields_len); + const decls_len = if (small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 0; + extra_index += captures_len; + break :decls zir.bodySlice(extra_index, decls_len); + }, + .@"opaque" => decls: { + assert(extended.opcode == .opaque_decl); + const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small); + const extra = zir.extraData(Zir.Inst.OpaqueDecl, extended.operand); + var extra_index = extra.end; + const captures_len = if (small.has_captures_len) blk: { + const captures_len = zir.extra[extra_index]; + extra_index += 1; + break :blk captures_len; + } else 0; + const decls_len = if (small.has_decls_len) blk: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :blk decls_len; + } else 
0; + extra_index += captures_len; + break :decls zir.bodySlice(extra_index, decls_len); + }, + }; + + try pt.scanNamespace(namespace_index, decls); + namespace.generation = zcu.generation; +} + const Air = @import("../Air.zig"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -3379,6 +3866,7 @@ const builtin = @import("builtin"); const Cache = std.Build.Cache; const dev = @import("../dev.zig"); const InternPool = @import("../InternPool.zig"); +const AnalUnit = InternPool.AnalUnit; const isUpDir = @import("../introspect.zig").isUpDir; const Liveness = @import("../Liveness.zig"); const log = std.log.scoped(.zcu); -- cgit v1.2.3 From 9e6318a4ea042e3fab7a1b2347600cde3d804e1a Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 17 Aug 2024 12:05:12 +0100 Subject: compiler: add some doc comments --- src/InternPool.zig | 23 +++++++++++++++++++++-- src/Zcu/PerThread.zig | 3 +++ 2 files changed, 24 insertions(+), 2 deletions(-) (limited to 'src/Zcu/PerThread.zig') diff --git a/src/InternPool.zig b/src/InternPool.zig index 8259f94812..d0dc16c47d 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -62,10 +62,18 @@ const want_multi_threaded = true; /// Whether a single-threaded intern pool impl is in use. pub const single_threaded = builtin.single_threaded or !want_multi_threaded; +/// A `TrackedInst.Index` provides a single, unchanging reference to a ZIR instruction across a whole +/// compilation. From this index, you can acquire a `TrackedInst`, which containss a reference to both +/// the file which the instruction lives in, and the instruction index itself, which is updated on +/// incremental updates by `Zcu.updateZirRefs`. pub const TrackedInst = extern struct { file: FileIndex, inst: Zir.Inst.Index, + /// It is possible on an incremental update that we "lose" a ZIR instruction: some tracked `%x` in + /// the old ZIR failed to map to any `%y` in the new ZIR. For this reason, we actually store values + /// of type `MaybeLost`, which uses `ZirIndex.lost` to represent this case. `Index.resolve` etc + /// return `null` when the `TrackedInst` being resolved has been lost. pub const MaybeLost = extern struct { file: FileIndex, inst: ZirIndex, @@ -244,14 +252,17 @@ pub fn trackZir( return index; } +/// At the start of an incremental update, we update every entry in `tracked_insts` to include +/// the new ZIR index. Once this is done, we must update the hashmap metadata so that lookups +/// return correct entries where they already exist. pub fn rehashTrackedInsts( ip: *InternPool, gpa: Allocator, - /// TODO: maybe don't take this? it doesn't actually matter, only one thread is running at this point tid: Zcu.PerThread.Id, ) Allocator.Error!void { + assert(tid == .main); // we shouldn't have any other threads active right now + // TODO: this function doesn't handle OOM well. What should it do? - // Indeed, what should anyone do when they run out of memory? // We don't lock anything, as this function assumes that no other thread is // accessing `tracked_insts`. This is necessary because we're going to be @@ -795,6 +806,14 @@ const Local = struct { /// This state is fully local to the owning thread and does not require any /// atomic access. mutate: struct { + /// When we need to allocate any long-lived buffer for mutating the `InternPool`, it is + /// allocated into this `arena` (for the `Id` of the thread performing the mutation). An + /// arena is used to avoid contention on the GPA, and to ensure that any code which retains + /// references to old state remains valid. 
For instance, when reallocing hashmap metadata, + /// a racing lookup on another thread may still retain a handle to the old metadata pointer, + /// so it must remain valid. + /// This arena's lifetime is tied to that of `Compilation`, although it can be cleared on + /// garbage collection (currently vaporware). arena: std.heap.ArenaAllocator.State, items: ListMutate, diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index b2f6d600e6..700d8708b1 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -1,3 +1,6 @@ +//! This type provides a wrapper around a `*Zcu` for uses which require a thread `Id`. +//! Any operation which mutates `InternPool` state lives here rather than on `Zcu`. + zcu: *Zcu, /// Dense, per-thread unique index. -- cgit v1.2.3
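To make the new `TrackedInst.MaybeLost` doc comment concrete, here is a standalone sketch, not part of either patch, of the idea it describes: during `Zcu.updateZirRefs`, each tracked instruction either maps to an index in the new ZIR or is recorded as lost, and a lost instruction makes resolution fail so its users are invalidated. `InstIndex`, the tagged union below, and `remap` are illustrative placeholders only; the real `MaybeLost` uses `ZirIndex.lost` to represent the lost case rather than a union, and the old-to-new map here stands in for the per-file instruction map built during the update.

const std = @import("std");

/// Stand-in for `Zir.Inst.Index`.
const InstIndex = u32;

/// Simplified stand-in for `InternPool.TrackedInst.MaybeLost`: after an
/// incremental update a tracked instruction either has a valid index into the
/// new ZIR, or it is lost and resolving it must fail.
const MaybeLost = union(enum) {
    index: InstIndex,
    lost,
};

/// Model of the per-file remap step: look the old index up in the old-to-new
/// instruction map; if it is absent, tracking failed and the instruction is
/// recorded as lost so its users can be invalidated.
fn remap(
    old: InstIndex,
    inst_map: *const std.AutoHashMapUnmanaged(InstIndex, InstIndex),
) MaybeLost {
    if (inst_map.get(old)) |new| return .{ .index = new };
    return .lost;
}

test "remap old instruction indices" {
    const gpa = std.testing.allocator;
    var inst_map: std.AutoHashMapUnmanaged(InstIndex, InstIndex) = .{};
    defer inst_map.deinit(gpa);
    try inst_map.put(gpa, 7, 9); // old %7 moved to %9 in the new ZIR

    switch (remap(7, &inst_map)) {
        .index => |new| try std.testing.expectEqual(@as(InstIndex, 9), new),
        .lost => return error.TestUnexpectedResult,
    }
    // %8 has no counterpart in the new ZIR, so it is lost.
    try std.testing.expect(std.meta.activeTag(remap(8, &inst_map)) == .lost);
}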