From 7f4bd247c7735ccf277c9bba42222e014cacf856 Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 2 Feb 2024 03:19:23 +0000 Subject: compiler: re-introduce dependencies for incremental compilation Sema now tracks dependencies appropriately. Early logic in Zcu for resolving outdated decls/functions is in place. The setup used does not support `usingnamespace`; compilations using this construct are not yet supported by this incremental compilation model. --- src/Module.zig | 177 +++++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 140 insertions(+), 37 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Module.zig b/src/Module.zig index 1b3342f775..d6ee0485ce 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -149,6 +149,10 @@ error_limit: ErrorInt, /// previous analysis. generation: u32 = 0, +/// Value is the number of PO dependencies of this Depender. +potentially_outdated: std.AutoArrayHashMapUnmanaged(InternPool.Depender, u32) = .{}, +outdated: std.AutoArrayHashMapUnmanaged(InternPool.Depender, void) = .{}, + stage1_flags: packed struct { have_winmain: bool = false, have_wwinmain: bool = false, @@ -680,14 +684,6 @@ pub const Decl = struct { return mod.namespacePtr(decl.src_namespace).file_scope; } - pub fn removeDependant(decl: *Decl, other: Decl.Index) void { - assert(decl.dependants.swapRemove(other)); - } - - pub fn removeDependency(decl: *Decl, other: Decl.Index) void { - assert(decl.dependencies.swapRemove(other)); - } - pub fn getExternDecl(decl: Decl, mod: *Module) OptionalIndex { assert(decl.has_tv); return switch (mod.intern_pool.indexToKey(decl.val.toIntern())) { @@ -838,14 +834,6 @@ pub const File = struct { /// undefined until `zir_loaded == true`. path_digest: Cache.BinDigest = undefined, - /// Used by change detection algorithm, after astgen, contains the - /// set of decls that existed in the previous ZIR but not in the new one. - deleted_decls: ArrayListUnmanaged(Decl.Index) = .{}, - /// Used by change detection algorithm, after astgen, contains the - /// set of decls that existed both in the previous ZIR and in the new one, - /// but their source code has been modified. - outdated_decls: ArrayListUnmanaged(Decl.Index) = .{}, - /// The most recent successful ZIR for this file, with no errors. /// This is only populated when a previously successful ZIR /// newly introduces compile errors during an update. When ZIR is @@ -898,8 +886,6 @@ pub const File = struct { gpa.free(file.sub_file_path); file.unload(gpa); } - file.deleted_decls.deinit(gpa); - file.outdated_decls.deinit(gpa); file.references.deinit(gpa); if (file.root_decl.unwrap()) |root_decl| { mod.destroyDecl(root_decl); @@ -2498,6 +2484,8 @@ pub fn deinit(zcu: *Zcu) void { zcu.global_error_set.deinit(gpa); + zcu.potentially_outdated.deinit(gpa); + zcu.test_functions.deinit(gpa); for (zcu.global_assembly.values()) |s| { @@ -2856,27 +2844,18 @@ pub fn astGenFile(mod: *Module, file: *File) !void { } if (file.prev_zir) |prev_zir| { - // Iterate over all Namespace objects contained within this File, looking at the - // previous and new ZIR together and update the references to point - // to the new one. For example, Decl name, Decl zir_decl_index, and Namespace - // decl_table keys need to get updated to point to the new memory, even if the - // underlying source code is unchanged. - // We do not need to hold any locks at this time because all the Decl and Namespace - // objects being touched are specific to this File, and the only other concurrent - // tasks are touching other File objects. 
try updateZirRefs(mod, file, prev_zir.*); - // At this point, `file.outdated_decls` and `file.deleted_decls` are populated, - // and semantic analysis will deal with them properly. // No need to keep previous ZIR. prev_zir.deinit(gpa); gpa.destroy(prev_zir); file.prev_zir = null; - } else if (file.root_decl.unwrap()) |root_decl| { - // This is an update, but it is the first time the File has succeeded - // ZIR. We must mark it outdated since we have already tried to - // semantically analyze it. - try file.outdated_decls.resize(gpa, 1); - file.outdated_decls.items[0] = root_decl; + } + + if (file.root_decl.unwrap()) |root_decl| { + // The root of this file must be re-analyzed, since the file has changed. + comp.mutex.lock(); + defer comp.mutex.unlock(); + try mod.outdated.put(gpa, InternPool.Depender.wrap(.{ .decl = root_decl }), {}); } } @@ -2950,25 +2929,142 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File) return zir; } +/// This is called from the AstGen thread pool, so must acquire +/// the Compilation mutex when acting on shared state. fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir) !void { const gpa = zcu.gpa; + const new_zir = file.zir; var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}; defer inst_map.deinit(gpa); - try mapOldZirToNew(gpa, old_zir, file.zir, &inst_map); + try mapOldZirToNew(gpa, old_zir, new_zir, &inst_map); + + const old_tag = old_zir.instructions.items(.tag); + const old_data = old_zir.instructions.items(.data); // TODO: this should be done after all AstGen workers complete, to avoid // iterating over this full set for every updated file. - for (zcu.intern_pool.tracked_insts.keys()) |*ti| { + for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| { + const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw); if (!std.mem.eql(u8, &ti.path_digest, &file.path_digest)) continue; + const old_inst = ti.inst; ti.inst = inst_map.get(ti.inst) orelse { - // TODO: invalidate this `TrackedInst` via the dependency mechanism + // Tracking failed for this instruction. Invalidate associated `src_hash` deps. + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); continue; }; + + // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. 
+ const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { + .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { + .struct_decl, .union_decl, .opaque_decl, .enum_decl => true, + else => false, + }, + else => false, + }; + if (!has_namespace) continue; + + var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; + defer old_names.deinit(zcu.gpa); + { + var it = old_zir.declIterator(old_inst); + while (it.next()) |decl_inst| { + const decl_name = old_zir.getDeclaration(decl_inst)[0].name; + switch (decl_name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, + _ => if (decl_name.isNamedTest(old_zir)) continue, + } + const name_zir = decl_name.toString(old_zir).?; + const name_ip = try zcu.intern_pool.getOrPutString( + zcu.gpa, + old_zir.nullTerminatedString(name_zir), + ); + try old_names.put(zcu.gpa, name_ip, {}); + } + } + var any_change = false; + { + var it = new_zir.declIterator(ti.inst); + while (it.next()) |decl_inst| { + const decl_name = old_zir.getDeclaration(decl_inst)[0].name; + switch (decl_name) { + .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue, + _ => if (decl_name.isNamedTest(old_zir)) continue, + } + const name_zir = decl_name.toString(old_zir).?; + const name_ip = try zcu.intern_pool.getOrPutString( + zcu.gpa, + old_zir.nullTerminatedString(name_zir), + ); + if (!old_names.swapRemove(name_ip)) continue; + // Name added + any_change = true; + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .namespace_name = .{ + .namespace = ti_idx, + .name = name_ip, + } }); + } + } + // The only elements remaining in `old_names` now are any names which were removed. + for (old_names.keys()) |name_ip| { + any_change = true; + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .namespace_name = .{ + .namespace = ti_idx, + .name = name_ip, + } }); + } + + if (any_change) { + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .namespace = ti_idx }); + } + } +} + +pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void { + var it = zcu.intern_pool.dependencyIterator(dependee); + while (it.next()) |depender| { + if (zcu.outdated.contains(depender)) continue; + const was_po = zcu.potentially_outdated.swapRemove(depender); + try zcu.outdated.putNoClobber(zcu.gpa, depender, {}); + // If this is a Decl and was not previously PO, we must recursively + // mark dependencies on its tyval as PO. + if (was_po) switch (depender.unwrap()) { + .decl => |decl_index| try zcu.markDeclDependenciesPotentiallyOutdated(decl_index), + .func => {}, + }; } } +/// Given a Decl which is newly outdated or PO, mark all dependers which depend +/// on its tyval as PO. +fn markDeclDependenciesPotentiallyOutdated(zcu: *Zcu, decl_index: Decl.Index) !void { + var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index }); + while (it.next()) |po| { + if (zcu.potentially_outdated.getPtr(po)) |n| { + // There is now one more PO dependency. + n.* += 1; + continue; + } + try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1); + // If this ia a Decl, we must recursively mark dependencies + // on its tyval as PO. 
+ switch (po.unwrap()) { + .decl => |po_decl| try zcu.markDeclDependenciesPotentiallyOutdated(po_decl), + .func => {}, + } + } + // TODO: repeat the above for `decl_ty` dependencies when they are introduced +} + pub fn mapOldZirToNew( gpa: Allocator, old_zir: Zir, @@ -3535,6 +3631,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { break :blk .none; }; + mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .decl = decl_index })); + decl.analysis = .in_progress; var analysis_arena = std.heap.ArenaAllocator.init(gpa); @@ -3564,6 +3662,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { }; defer sema.deinit(); + // Every Decl has a dependency on its own source. + try sema.declareDependency(.{ .src_hash = try ip.trackZir(sema.gpa, decl.getFileScope(mod), decl.zir_decl_index.unwrap().?) }); + assert(!mod.declIsRoot(decl_index)); var block_scope: Sema.Block = .{ @@ -4362,6 +4463,8 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); + mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .func = func_index })); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); defer comptime_mutable_decls.deinit(); -- cgit v1.2.3 From 1e91ee1e05f08013e3a4edec5d9f0aef978f3f0b Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 3 Feb 2024 01:22:56 +0000 Subject: Zir: store extra source hashes required for incremental Also add corresponding invaidation logic to Zcu. Therefore, the only invalidation logic which is not yet in place is `decl_val` dependencies. --- lib/std/zig.zig | 5 +- src/AstGen.zig | 109 +++++++++++++++++++++++++++++++++++++------- src/Autodoc.zig | 6 +-- src/Module.zig | 12 +++++ src/Sema.zig | 14 +++--- src/Zir.zig | 134 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- src/print_zir.zig | 37 +++++++++++++-- 7 files changed, 283 insertions(+), 34 deletions(-) (limited to 'src/Module.zig') diff --git a/lib/std/zig.zig b/lib/std/zig.zig index 84feb2cf0a..e0787e7312 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -27,11 +27,12 @@ pub const parseNumberLiteral = number_literal.parseNumberLiteral; pub const c_builtins = @import("zig/c_builtins.zig"); pub const c_translation = @import("zig/c_translation.zig"); +pub const SrcHasher = std.crypto.hash.Blake3; pub const SrcHash = [16]u8; pub fn hashSrc(src: []const u8) SrcHash { var out: SrcHash = undefined; - std.crypto.hash.Blake3.hash(src, &out, .{}); + SrcHasher.hash(src, &out, .{}); return out; } @@ -41,7 +42,7 @@ pub fn srcHashEql(a: SrcHash, b: SrcHash) bool { pub fn hashName(parent_hash: SrcHash, sep: []const u8, name: []const u8) SrcHash { var out: SrcHash = undefined; - var hasher = std.crypto.hash.Blake3.init(.{}); + var hasher = SrcHasher.init(.{}); hasher.update(&parent_hash); hasher.update(sep); hasher.update(name); diff --git a/src/AstGen.zig b/src/AstGen.zig index caa0efa851..e444f836bb 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -4815,6 +4815,7 @@ fn structDeclInner( .any_comptime_fields = false, .any_default_inits = false, .any_aligned_fields = false, + .fields_hash = std.zig.hashSrc(@tagName(layout)), }); return decl_inst.toRef(); } @@ -4936,6 +4937,12 @@ fn structDeclInner( } }; + var fields_hasher = std.zig.SrcHasher.init(.{}); + fields_hasher.update(@tagName(layout)); + if (backing_int_node != 0) { + fields_hasher.update(tree.getNodeSource(backing_int_node)); + } + var sfba = std.heap.stackFallback(256, astgen.arena); const 
sfba_allocator = sfba.get(); @@ -4956,6 +4963,8 @@ fn structDeclInner( .field => |field| field, }; + fields_hasher.update(tree.getNodeSource(member_node)); + if (!is_tuple) { const field_name = try astgen.identAsString(member.ast.main_token); @@ -5083,6 +5092,9 @@ fn structDeclInner( return error.AnalysisFail; } + var fields_hash: std.zig.SrcHash = undefined; + fields_hasher.final(&fields_hash); + try gz.setStruct(decl_inst, .{ .src_node = node, .layout = layout, @@ -5096,6 +5108,7 @@ fn structDeclInner( .any_comptime_fields = any_comptime_fields, .any_default_inits = any_default_inits, .any_aligned_fields = any_aligned_fields, + .fields_hash = fields_hash, }); wip_members.finishBits(bits_per_field); @@ -5174,6 +5187,13 @@ fn unionDeclInner( var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, field_count, bits_per_field, max_field_size); defer wip_members.deinit(); + var fields_hasher = std.zig.SrcHasher.init(.{}); + fields_hasher.update(@tagName(layout)); + fields_hasher.update(&.{@intFromBool(auto_enum_tok != null)}); + if (arg_node != 0) { + fields_hasher.update(astgen.tree.getNodeSource(arg_node)); + } + var sfba = std.heap.stackFallback(256, astgen.arena); const sfba_allocator = sfba.get(); @@ -5188,6 +5208,7 @@ fn unionDeclInner( .decl => continue, .field => |field| field, }; + fields_hasher.update(astgen.tree.getNodeSource(member_node)); member.convertToNonTupleLike(astgen.tree.nodes); if (member.ast.tuple_like) { return astgen.failTok(member.ast.main_token, "union field missing name", .{}); @@ -5289,6 +5310,9 @@ fn unionDeclInner( return error.AnalysisFail; } + var fields_hash: std.zig.SrcHash = undefined; + fields_hasher.final(&fields_hash); + if (!block_scope.isEmpty()) { _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value); } @@ -5305,6 +5329,7 @@ fn unionDeclInner( .decls_len = decl_count, .auto_enum_tag = auto_enum_tok != null, .any_aligned_fields = any_aligned_fields, + .fields_hash = fields_hash, }); wip_members.finishBits(bits_per_field); @@ -5498,6 +5523,12 @@ fn containerDecl( var wip_members = try WipMembers.init(gpa, &astgen.scratch, @intCast(counts.decls), @intCast(counts.total_fields), bits_per_field, max_field_size); defer wip_members.deinit(); + var fields_hasher = std.zig.SrcHasher.init(.{}); + if (container_decl.ast.arg != 0) { + fields_hasher.update(tree.getNodeSource(container_decl.ast.arg)); + } + fields_hasher.update(&.{@intFromBool(nonexhaustive)}); + var sfba = std.heap.stackFallback(256, astgen.arena); const sfba_allocator = sfba.get(); @@ -5510,6 +5541,7 @@ fn containerDecl( for (container_decl.ast.members) |member_node| { if (member_node == counts.nonexhaustive_node) continue; + fields_hasher.update(tree.getNodeSource(member_node)); namespace.base.tag = .namespace; var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) { .decl => continue, @@ -5590,6 +5622,9 @@ fn containerDecl( _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value); } + var fields_hash: std.zig.SrcHash = undefined; + fields_hasher.final(&fields_hash); + const body = block_scope.instructionsSlice(); const body_len = astgen.countBodyLenAfterFixups(body); @@ -5600,6 +5635,7 @@ fn containerDecl( .body_len = body_len, .fields_len = @intCast(counts.total_fields), .decls_len = @intCast(counts.decls), + .fields_hash = fields_hash, }); wip_members.finishBits(bits_per_field); @@ -11900,8 +11936,8 @@ const GenZir = struct { var body: []Zir.Inst.Index = &[0]Zir.Inst.Index{}; var ret_body: []Zir.Inst.Index = 
&[0]Zir.Inst.Index{}; - var src_locs_buffer: [3]u32 = undefined; - var src_locs: []u32 = src_locs_buffer[0..0]; + var src_locs_and_hash_buffer: [7]u32 = undefined; + var src_locs_and_hash: []u32 = src_locs_and_hash_buffer[0..0]; if (args.body_gz) |body_gz| { const tree = astgen.tree; const node_tags = tree.nodes.items(.tag); @@ -11916,10 +11952,27 @@ const GenZir = struct { const rbrace_column: u32 = @intCast(astgen.source_column); const columns = args.lbrace_column | (rbrace_column << 16); - src_locs_buffer[0] = args.lbrace_line; - src_locs_buffer[1] = rbrace_line; - src_locs_buffer[2] = columns; - src_locs = &src_locs_buffer; + + const proto_hash: std.zig.SrcHash = switch (node_tags[fn_decl]) { + .fn_decl => sig_hash: { + const proto_node = node_datas[fn_decl].lhs; + break :sig_hash std.zig.hashSrc(tree.getNodeSource(proto_node)); + }, + .test_decl => std.zig.hashSrc(""), // tests don't have a prototype + else => unreachable, + }; + const proto_hash_arr: [4]u32 = @bitCast(proto_hash); + + src_locs_and_hash_buffer = .{ + args.lbrace_line, + rbrace_line, + columns, + proto_hash_arr[0], + proto_hash_arr[1], + proto_hash_arr[2], + proto_hash_arr[3], + }; + src_locs_and_hash = &src_locs_and_hash_buffer; body = body_gz.instructionsSlice(); if (args.ret_gz) |ret_gz| @@ -11953,7 +12006,7 @@ const GenZir = struct { fancyFnExprExtraLen(astgen, section_body, args.section_ref) + fancyFnExprExtraLen(astgen, cc_body, args.cc_ref) + fancyFnExprExtraLen(astgen, ret_body, ret_ref) + - body_len + src_locs.len + + body_len + src_locs_and_hash.len + @intFromBool(args.lib_name != .empty) + @intFromBool(args.noalias_bits != 0), ); @@ -12040,7 +12093,7 @@ const GenZir = struct { } astgen.appendBodyWithFixups(body); - astgen.extra.appendSliceAssumeCapacity(src_locs); + astgen.extra.appendSliceAssumeCapacity(src_locs_and_hash); // Order is important when unstacking. if (args.body_gz) |body_gz| body_gz.unstack(); @@ -12068,7 +12121,7 @@ const GenZir = struct { gpa, @typeInfo(Zir.Inst.Func).Struct.fields.len + 1 + fancyFnExprExtraLen(astgen, ret_body, ret_ref) + - body_len + src_locs.len, + body_len + src_locs_and_hash.len, ); const ret_body_len = if (ret_body.len != 0) @@ -12092,7 +12145,7 @@ const GenZir = struct { astgen.extra.appendAssumeCapacity(@intFromEnum(ret_ref)); } astgen.appendBodyWithFixups(body); - astgen.extra.appendSliceAssumeCapacity(src_locs); + astgen.extra.appendSliceAssumeCapacity(src_locs_and_hash); // Order is important when unstacking. 
if (args.body_gz) |body_gz| body_gz.unstack(); @@ -12853,12 +12906,20 @@ const GenZir = struct { any_comptime_fields: bool, any_default_inits: bool, any_aligned_fields: bool, + fields_hash: std.zig.SrcHash, }) !void { const astgen = gz.astgen; const gpa = astgen.gpa; - try astgen.extra.ensureUnusedCapacity(gpa, 6); - const payload_index: u32 = @intCast(astgen.extra.items.len); + const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); + + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).Struct.fields.len + 6); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.StructDecl{ + .fields_hash_0 = fields_hash_arr[0], + .fields_hash_1 = fields_hash_arr[1], + .fields_hash_2 = fields_hash_arr[2], + .fields_hash_3 = fields_hash_arr[3], + }); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); @@ -12908,12 +12969,20 @@ const GenZir = struct { layout: std.builtin.Type.ContainerLayout, auto_enum_tag: bool, any_aligned_fields: bool, + fields_hash: std.zig.SrcHash, }) !void { const astgen = gz.astgen; const gpa = astgen.gpa; - try astgen.extra.ensureUnusedCapacity(gpa, 5); - const payload_index: u32 = @intCast(astgen.extra.items.len); + const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); + + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len + 5); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.UnionDecl{ + .fields_hash_0 = fields_hash_arr[0], + .fields_hash_1 = fields_hash_arr[1], + .fields_hash_2 = fields_hash_arr[2], + .fields_hash_3 = fields_hash_arr[3], + }); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); @@ -12958,12 +13027,20 @@ const GenZir = struct { fields_len: u32, decls_len: u32, nonexhaustive: bool, + fields_hash: std.zig.SrcHash, }) !void { const astgen = gz.astgen; const gpa = astgen.gpa; - try astgen.extra.ensureUnusedCapacity(gpa, 5); - const payload_index: u32 = @intCast(astgen.extra.items.len); + const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); + + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len + 5); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.EnumDecl{ + .fields_hash_0 = fields_hash_arr[0], + .fields_hash_1 = fields_hash_arr[1], + .fields_hash_2 = fields_hash_arr[2], + .fields_hash_3 = fields_hash_arr[3], + }); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 619d58c3ce..fb027567eb 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -3497,7 +3497,7 @@ fn walkInstruction( }; const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); - var extra_index: usize = extended.operand; + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len; const src_node: ?i32 = if (small.has_src_node) blk: { const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); @@ -3627,7 +3627,7 @@ fn walkInstruction( }; const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small)); - var extra_index: usize = extended.operand; + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len; const src_node: ?i32 = if (small.has_src_node) blk: { const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); @@ -3778,7 +3778,7 @@ fn walkInstruction( }; const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); - var extra_index: usize = extended.operand; + var extra_index: usize = 
extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; const src_node: ?i32 = if (small.has_src_node) blk: { const src_node = @as(i32, @bitCast(file.zir.extra[extra_index])); diff --git a/src/Module.zig b/src/Module.zig index d6ee0485ce..93b5066192 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -2957,6 +2957,18 @@ fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir) !void { continue; }; + if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: { + if (new_zir.getAssociatedSrcHash(ti.inst)) |new_hash| { + if (std.zig.srcHashEql(old_hash, new_hash)) { + break :hash_changed; + } + } + // The source hash associated with this instruction changed - invalidate relevant dependencies. + zcu.comp.mutex.lock(); + defer zcu.comp.mutex.unlock(); + try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); + } + // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies. const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) { .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) { diff --git a/src/Sema.zig b/src/Sema.zig index abbf7f69ec..0f604d2236 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2718,7 +2718,7 @@ pub fn getStructType( assert(extended.opcode == .struct_decl); const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); - var extra_index: usize = extended.operand; + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; extra_index += @intFromBool(small.has_src_node); const fields_len = if (small.has_fields_len) blk: { const fields_len = sema.code.extra[extra_index]; @@ -2773,7 +2773,7 @@ fn zirStructDecl( const ip = &mod.intern_pool; const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { - const node_offset: i32 = @bitCast(sema.code.extra[extended.operand]); + const node_offset: i32 = @bitCast(sema.code.extra[extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len]); break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; @@ -2933,7 +2933,7 @@ fn zirEnumDecl( const mod = sema.mod; const gpa = sema.gpa; const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small); - var extra_index: usize = extended.operand; + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len; const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset: i32 = @bitCast(sema.code.extra[extra_index]); @@ -3204,7 +3204,7 @@ fn zirUnionDecl( const mod = sema.mod; const gpa = sema.gpa; const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small); - var extra_index: usize = extended.operand; + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len; const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset: i32 = @bitCast(sema.code.extra[extra_index]); @@ -35742,7 +35742,7 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) Comp const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); if (small.has_backing_int) { - var extra_index: usize = extended.operand; + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_fields_len); extra_index += @intFromBool(small.has_decls_len); @@ -36457,7 +36457,7 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct { const extended = 
zir.instructions.items(.data)[@intFromEnum(zir_index)].extended; assert(extended.opcode == .struct_decl); const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); - var extra_index: usize = extended.operand; + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; extra_index += @intFromBool(small.has_src_node); @@ -36925,7 +36925,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended; assert(extended.opcode == .union_decl); const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small); - var extra_index: usize = extended.operand; + var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len; const src = LazySrcLoc.nodeOffset(0); extra_index += @intFromBool(small.has_src_node); diff --git a/src/Zir.zig b/src/Zir.zig index 4462083b1f..c313ab8563 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2497,6 +2497,7 @@ pub const Inst = struct { /// } /// 2. body: Index // for each body_len /// 3. src_locs: SrcLocs // if body_len != 0 + /// 4. proto_hash: std.zig.SrcHash // if body_len != 0; hash of function prototype pub const Func = struct { /// If this is 0 it means a void return type. /// If this is 1 it means return_type is a simple Ref @@ -2558,6 +2559,7 @@ pub const Inst = struct { /// - each bit starting with LSB corresponds to parameter indexes /// 17. body: Index // for each body_len /// 18. src_locs: Func.SrcLocs // if body_len != 0 + /// 19. proto_hash: std.zig.SrcHash // if body_len != 0; hash of function prototype pub const FuncFancy = struct { /// Points to the block that contains the param instructions for this function. /// If this is a `declaration`, it refers to the declaration's value body. @@ -3040,6 +3042,12 @@ pub const Inst = struct { /// init_body_inst: Inst, // for each init_body_len /// } pub const StructDecl = struct { + // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`. + // This hash contains the source of all fields, and any specified attributes (`extern`, backing type, etc). + fields_hash_0: u32, + fields_hash_1: u32, + fields_hash_2: u32, + fields_hash_3: u32, pub const Small = packed struct { has_src_node: bool, has_fields_len: bool, @@ -3102,6 +3110,12 @@ pub const Inst = struct { /// value: Ref, // if corresponding bit is set /// } pub const EnumDecl = struct { + // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`. + // This hash contains the source of all fields, and the backing type if specified. + fields_hash_0: u32, + fields_hash_1: u32, + fields_hash_2: u32, + fields_hash_3: u32, pub const Small = packed struct { has_src_node: bool, has_tag_type: bool, @@ -3137,6 +3151,12 @@ pub const Inst = struct { /// tag_value: Ref, // if corresponding bit is set /// } pub const UnionDecl = struct { + // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`. + // This hash contains the source of all fields, and any specified attributes (`extern` etc). 
+ fields_hash_0: u32, + fields_hash_1: u32, + fields_hash_2: u32, + fields_hash_3: u32, pub const Small = packed struct { has_src_node: bool, has_tag_type: bool, @@ -3455,7 +3475,7 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator { switch (extended.opcode) { .struct_decl => { const small: Inst.StructDecl.Small = @bitCast(extended.small); - var extra_index: u32 = extended.operand; + var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.StructDecl).Struct.fields.len); extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_fields_len); const decls_len = if (small.has_decls_len) decls_len: { @@ -3482,7 +3502,7 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator { }, .enum_decl => { const small: Inst.EnumDecl.Small = @bitCast(extended.small); - var extra_index: u32 = extended.operand; + var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.EnumDecl).Struct.fields.len); extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_tag_type); extra_index += @intFromBool(small.has_body_len); @@ -3501,7 +3521,7 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator { }, .union_decl => { const small: Inst.UnionDecl.Small = @bitCast(extended.small); - var extra_index: u32 = extended.operand; + var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.UnionDecl).Struct.fields.len); extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_tag_type); extra_index += @intFromBool(small.has_body_len); @@ -3938,3 +3958,111 @@ pub fn getDeclaration(zir: Zir, inst: Zir.Inst.Index) struct { Inst.Declaration, @intCast(extra.end), }; } + +pub fn getAssociatedSrcHash(zir: Zir, inst: Zir.Inst.Index) ?std.zig.SrcHash { + const tag = zir.instructions.items(.tag); + const data = zir.instructions.items(.data); + switch (tag[@intFromEnum(inst)]) { + .declaration => { + const pl_node = data[@intFromEnum(inst)].pl_node; + const extra = zir.extraData(Inst.Declaration, pl_node.payload_index); + return @bitCast([4]u32{ + extra.data.src_hash_0, + extra.data.src_hash_1, + extra.data.src_hash_2, + extra.data.src_hash_3, + }); + }, + .func, .func_inferred => { + const pl_node = data[@intFromEnum(inst)].pl_node; + const extra = zir.extraData(Inst.Func, pl_node.payload_index); + if (extra.data.body_len == 0) { + // Function type or extern fn - no associated hash + return null; + } + const extra_index = extra.end + + 1 + + extra.data.body_len + + @typeInfo(Inst.Func.SrcLocs).Struct.fields.len; + return @bitCast([4]u32{ + zir.extra[extra_index + 0], + zir.extra[extra_index + 1], + zir.extra[extra_index + 2], + zir.extra[extra_index + 3], + }); + }, + .func_fancy => { + const pl_node = data[@intFromEnum(inst)].pl_node; + const extra = zir.extraData(Inst.FuncFancy, pl_node.payload_index); + if (extra.data.body_len == 0) { + // Function type or extern fn - no associated hash + return null; + } + const bits = extra.data.bits; + var extra_index = extra.end; + extra_index += @intFromBool(bits.has_lib_name); + if (bits.has_align_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1 + body_len; + } else extra_index += @intFromBool(bits.has_align_ref); + if (bits.has_addrspace_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1 + body_len; + } else extra_index += @intFromBool(bits.has_addrspace_ref); + if (bits.has_section_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1 + body_len; + } else extra_index += 
@intFromBool(bits.has_section_ref); + if (bits.has_cc_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1 + body_len; + } else extra_index += @intFromBool(bits.has_cc_ref); + if (bits.has_ret_ty_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1 + body_len; + } else extra_index += @intFromBool(bits.has_ret_ty_ref); + extra_index += @intFromBool(bits.has_any_noalias); + extra_index += extra.data.body_len; + extra_index += @typeInfo(Zir.Inst.Func.SrcLocs).Struct.fields.len; + return @bitCast([4]u32{ + zir.extra[extra_index + 0], + zir.extra[extra_index + 1], + zir.extra[extra_index + 2], + zir.extra[extra_index + 3], + }); + }, + .extended => {}, + else => return null, + } + const extended = data[@intFromEnum(inst)].extended; + switch (extended.opcode) { + .struct_decl => { + const extra = zir.extraData(Inst.StructDecl, extended.operand).data; + return @bitCast([4]u32{ + extra.fields_hash_0, + extra.fields_hash_1, + extra.fields_hash_2, + extra.fields_hash_3, + }); + }, + .union_decl => { + const extra = zir.extraData(Inst.UnionDecl, extended.operand).data; + return @bitCast([4]u32{ + extra.fields_hash_0, + extra.fields_hash_1, + extra.fields_hash_2, + extra.fields_hash_3, + }); + }, + .enum_decl => { + const extra = zir.extraData(Inst.EnumDecl, extended.operand).data; + return @bitCast([4]u32{ + extra.fields_hash_0, + extra.fields_hash_1, + extra.fields_hash_2, + extra.fields_hash_3, + }); + }, + else => return null, + } +} diff --git a/src/print_zir.zig b/src/print_zir.zig index 32904d3a0a..f33d00c989 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -1401,7 +1401,17 @@ const Writer = struct { fn writeStructDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); - var extra_index: usize = extended.operand; + const extra = self.code.extraData(Zir.Inst.StructDecl, extended.operand); + const fields_hash: std.zig.SrcHash = @bitCast([4]u32{ + extra.data.fields_hash_0, + extra.data.fields_hash_1, + extra.data.fields_hash_2, + extra.data.fields_hash_3, + }); + + try stream.print("hash({}) ", .{std.fmt.fmtSliceHexLower(&fields_hash)}); + + var extra_index: usize = extra.end; const src_node: ?i32 = if (small.has_src_node) blk: { const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); @@ -1591,7 +1601,17 @@ const Writer = struct { fn writeUnionDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); - var extra_index: usize = extended.operand; + const extra = self.code.extraData(Zir.Inst.UnionDecl, extended.operand); + const fields_hash: std.zig.SrcHash = @bitCast([4]u32{ + extra.data.fields_hash_0, + extra.data.fields_hash_1, + extra.data.fields_hash_2, + extra.data.fields_hash_3, + }); + + try stream.print("hash({}) ", .{std.fmt.fmtSliceHexLower(&fields_hash)}); + + var extra_index: usize = extra.end; const src_node: ?i32 = if (small.has_src_node) blk: { const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); @@ -1733,7 +1753,18 @@ const Writer = struct { fn writeEnumDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small)); - var extra_index: usize = extended.operand; + + const extra = self.code.extraData(Zir.Inst.EnumDecl, extended.operand); + const fields_hash: std.zig.SrcHash = @bitCast([4]u32{ + extra.data.fields_hash_0, + 
extra.data.fields_hash_1, + extra.data.fields_hash_2, + extra.data.fields_hash_3, + }); + + try stream.print("hash({}) ", .{std.fmt.fmtSliceHexLower(&fields_hash)}); + + var extra_index: usize = extra.end; const src_node: ?i32 = if (small.has_src_node) blk: { const src_node = @as(i32, @bitCast(self.code.extra[extra_index])); -- cgit v1.2.3 From a0004cebc255405764e889effb25a42fe07d8463 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 3 Feb 2024 03:03:17 +0000 Subject: Zcu: more dependency tracking logic * Invalidate `decl_val` dependencies * Recursively mark and un-mark all dependencies correctly * Queue analysis of outdated dependers in `Compilation.performAllTheWork` Introduces logic to invalidate `decl_val` dependencies after `Zcu.semaDecl` completes. Also, recursively un-mark dependencies as PO where needed. With this, all dependency invalidation logic is in place. The next step is analyzing outdated dependencies and triggering appropriate re-analysis. --- src/Compilation.zig | 11 ++ src/Module.zig | 304 +++++++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 279 insertions(+), 36 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 5ee62692fe..5615808ee2 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3514,6 +3514,17 @@ pub fn performAllTheWork( try processOneJob(comp, work_item, main_progress_node); continue; } + if (comp.module) |zcu| { + // If there's no work queued, check if there's anything outdated + // which we need to work on, and queue it if so. + if (try zcu.findOutdatedToAnalyze()) |outdated| { + switch (outdated.unwrap()) { + .decl => |decl| try comp.work_queue.writeItem(.{ .analyze_decl = decl }), + .func => |func| try comp.work_queue.writeItem(.{ .codegen_func = func }), + } + continue; + } + } break; } diff --git a/src/Module.zig b/src/Module.zig index 93b5066192..d3d7c854d1 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -149,9 +149,14 @@ error_limit: ErrorInt, /// previous analysis. generation: u32 = 0, -/// Value is the number of PO dependencies of this Depender. +/// Value is the number of PO or outdated Decls which this Depender depends on. potentially_outdated: std.AutoArrayHashMapUnmanaged(InternPool.Depender, u32) = .{}, -outdated: std.AutoArrayHashMapUnmanaged(InternPool.Depender, void) = .{}, +/// Value is the number of PO or outdated Decls which this Depender depends on. +/// Once this value drops to 0, the Depender is a candidate for re-analysis. +outdated: std.AutoArrayHashMapUnmanaged(InternPool.Depender, u32) = .{}, +/// This contains all `Depender`s in `outdated` whose PO dependency count is 0. +/// Such `Depender`s are ready for immediate re-analysis. +outdated_ready: std.AutoArrayHashMapUnmanaged(InternPool.Depender, void) = .{}, stage1_flags: packed struct { have_winmain: bool = false, @@ -2485,6 +2490,8 @@ pub fn deinit(zcu: *Zcu) void { zcu.global_error_set.deinit(gpa); zcu.potentially_outdated.deinit(gpa); + zcu.outdated.deinit(gpa); + zcu.outdated_ready.deinit(gpa); zcu.test_functions.deinit(gpa); @@ -2851,11 +2858,27 @@ pub fn astGenFile(mod: *Module, file: *File) !void { file.prev_zir = null; } - if (file.root_decl.unwrap()) |root_decl| { + if (file.root_decl.unwrap()) |root_decl| mark_outdated: { // The root of this file must be re-analyzed, since the file has changed. 
comp.mutex.lock(); defer comp.mutex.unlock(); - try mod.outdated.put(gpa, InternPool.Depender.wrap(.{ .decl = root_decl }), {}); + + const root_decl_depender = InternPool.Depender.wrap(.{ .decl = root_decl }); + + const gop = try mod.outdated.getOrPut(gpa, root_decl_depender); + // If this Decl is already marked as outdated, nothing needs to be done. + if (gop.found_existing) break :mark_outdated; + + log.debug("outdated: {} (root Decl)", .{root_decl}); + + // If it's already PO, forward its existing PO dependency count. + // Otherwise, it has no PO dependencies yet. + if (mod.potentially_outdated.fetchSwapRemove(root_decl_depender)) |kv| { + gop.value_ptr.* = kv.value; + } else { + gop.value_ptr.* = 0; + try mod.outdated_ready.put(mod.gpa, root_decl_depender, {}); + } } } @@ -2953,6 +2976,7 @@ fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir) !void { // Tracking failed for this instruction. Invalidate associated `src_hash` deps. zcu.comp.mutex.lock(); defer zcu.comp.mutex.unlock(); + log.debug("tracking failed for %{d}", .{old_inst}); try zcu.markDependeeOutdated(.{ .src_hash = ti_idx }); continue; }; @@ -2962,6 +2986,12 @@ fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir) !void { if (std.zig.srcHashEql(old_hash, new_hash)) { break :hash_changed; } + log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{ + old_inst, + ti.inst, + std.fmt.fmtSliceHexLower(&old_hash), + std.fmt.fmtSliceHexLower(&new_hash), + }); } // The source hash associated with this instruction changed - invalidate relevant dependencies. zcu.comp.mutex.lock(); @@ -3042,25 +3072,80 @@ fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir) !void { } pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void { + log.debug("outdated dependee: {}", .{dependee}); var it = zcu.intern_pool.dependencyIterator(dependee); while (it.next()) |depender| { - if (zcu.outdated.contains(depender)) continue; - const was_po = zcu.potentially_outdated.swapRemove(depender); - try zcu.outdated.putNoClobber(zcu.gpa, depender, {}); + if (zcu.outdated.contains(depender)) { + // We do not need to increment the PO dep count, as if the outdated + // dependee is a Decl, we had already marked this as PO. + continue; + } + const opt_po_entry = zcu.potentially_outdated.fetchSwapRemove(depender); + try zcu.outdated.putNoClobber( + zcu.gpa, + depender, + // We do not need to increment this count for the same reason as above. + if (opt_po_entry) |e| e.value else 0, + ); + log.debug("outdated: {}", .{depender}); + if (opt_po_entry != null) { + // This is a new entry with no PO dependencies. + try zcu.outdated_ready.put(zcu.gpa, depender, {}); + } // If this is a Decl and was not previously PO, we must recursively // mark dependencies on its tyval as PO. - if (was_po) switch (depender.unwrap()) { + if (opt_po_entry == null) switch (depender.unwrap()) { .decl => |decl_index| try zcu.markDeclDependenciesPotentiallyOutdated(decl_index), .func => {}, }; } } +fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { + var it = zcu.intern_pool.dependencyIterator(dependee); + while (it.next()) |depender| { + if (zcu.outdated.getPtr(depender)) |po_dep_count| { + // This depender is already outdated, but it now has one + // less PO dependency! + po_dep_count.* -= 1; + if (po_dep_count.* == 0) { + try zcu.outdated_ready.put(zcu.gpa, depender, {}); + } + continue; + } + // This depender is definitely at least PO, because this Decl was just analyzed + // due to being outdated. 
+ const ptr = zcu.potentially_outdated.getPtr(depender).?; + if (ptr.* > 1) { + ptr.* -= 1; + continue; + } + + // This dependency is no longer PO, i.e. is known to be up-to-date. + assert(zcu.potentially_outdated.swapRemove(depender)); + // If this is a Decl, we must recursively mark dependencies on its tyval + // as no longer PO. + switch (depender.unwrap()) { + .decl => |decl_index| try zcu.markPoDependeeUpToDate(.{ .decl_val = decl_index }), + .func => {}, + } + } +} + /// Given a Decl which is newly outdated or PO, mark all dependers which depend /// on its tyval as PO. fn markDeclDependenciesPotentiallyOutdated(zcu: *Zcu, decl_index: Decl.Index) !void { var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index }); while (it.next()) |po| { + if (zcu.outdated.getPtr(po)) |po_dep_count| { + // This dependency is already outdated, but it now has one more PO + // dependency. + if (po_dep_count.* == 0) { + _ = zcu.outdated_ready.swapRemove(po); + } + po_dep_count.* += 1; + continue; + } if (zcu.potentially_outdated.getPtr(po)) |n| { // There is now one more PO dependency. n.* += 1; @@ -3077,6 +3162,92 @@ fn markDeclDependenciesPotentiallyOutdated(zcu: *Zcu, decl_index: Decl.Index) !v // TODO: repeat the above for `decl_ty` dependencies when they are introduced } +pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.Depender { + if (zcu.outdated.count() == 0 and zcu.potentially_outdated.count() == 0) { + log.debug("findOutdatedToAnalyze: no outdated depender", .{}); + return null; + } + + // Our goal is to find an outdated Depender which itself has no outdated or + // PO dependencies. Most of the time, such a Depender will exist - we track + // them in the `outdated_ready` set for efficiency. However, this is not + // necessarily the case, since the Decl dependency graph may contain loops + // via mutually recursive definitions: + // pub const A = struct { b: *B }; + // pub const B = struct { b: *A }; + // In this case, we must defer to more complex logic below. + + if (zcu.outdated_ready.count() > 0) { + log.debug("findOutdatedToAnalyze: trivial '{s} {d}'", .{ + @tagName(zcu.outdated_ready.keys()[0].unwrap()), + switch (zcu.outdated_ready.keys()[0].unwrap()) { + inline else => |x| @intFromEnum(x), + }, + }); + return zcu.outdated_ready.keys()[0]; + } + + // There is no single Depender which is ready for re-analysis. Instead, we + // must assume that some Decl with PO dependencies is outdated - e.g. in the + // above example we arbitrarily pick one of A or B. We should select a Decl, + // since a Decl is definitely responsible for the loop in the dependency + // graph (since you can't depend on a runtime function analysis!). + + // The choice of this Decl could have a big impact on how much total + // analysis we perform, since if analysis concludes its tyval is unchanged, + // then other PO Dependers may be resolved as up-to-date. To hopefully avoid + // doing too much work, let's find a Decl which the most things depend on - + // the idea is that this will resolve a lot of loops (but this is only a + // heuristic). 
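    // For illustration: suppose Decl A is outdated, Decls B and C are
    // potentially outdated only because they depend on A's value, and A in
    // turn depends on B's value (a loop, so `outdated_ready` is empty). A has
    // two dependers and B has one, so A is picked and re-analyzed first; if
    // its value turns out to be unchanged, B and C can then be resolved as
    // up-to-date without ever being re-analyzed.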
+ + log.debug("findOutdatedToAnalyze: no trivial ready, using heuristic; {d} outdated, {d} PO", .{ + zcu.outdated.count(), + zcu.potentially_outdated.count(), + }); + + var chosen_decl_idx: ?Decl.Index = null; + var chosen_decl_dependers: u32 = undefined; + + for (zcu.outdated.keys()) |depender| { + const decl_index = switch (depender.unwrap()) { + .decl => |d| d, + .func => continue, + }; + + var n: u32 = 0; + var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index }); + while (it.next()) |_| n += 1; + + if (chosen_decl_idx == null or n > chosen_decl_dependers) { + chosen_decl_idx = decl_index; + chosen_decl_dependers = n; + } + } + + for (zcu.potentially_outdated.keys()) |depender| { + const decl_index = switch (depender.unwrap()) { + .decl => |d| d, + .func => continue, + }; + + var n: u32 = 0; + var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index }); + while (it.next()) |_| n += 1; + + if (chosen_decl_idx == null or n > chosen_decl_dependers) { + chosen_decl_idx = decl_index; + chosen_decl_dependers = n; + } + } + + log.debug("findOutdatedToAnalyze: heuristic returned Decl {d} ({d} dependers)", .{ + chosen_decl_idx.?, + chosen_decl_dependers, + }); + + return InternPool.Depender.wrap(.{ .decl = chosen_decl_idx.? }); +} + pub fn mapOldZirToNew( gpa: Allocator, old_zir: Zir, @@ -3204,7 +3375,7 @@ pub fn mapOldZirToNew( } } -/// This ensures that the Decl will have a Type and Value populated. +/// This ensures that the Decl will have an up-to-date Type and Value populated. /// However the resolution status of the Type may not be fully resolved. /// For example an inferred error set is not resolved until after `analyzeFnBody`. /// is called. @@ -3214,7 +3385,25 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { const decl = mod.declPtr(decl_index); - const subsequent_analysis = switch (decl.analysis) { + // Determine whether or not this Decl is outdated, i.e. requires re-analysis + // even if `complete`. If a Decl is PO, we pessismistically assume that it + // *does* require re-analysis, to ensure that the Decl is definitely + // up-to-date when this function returns. + + // If analysis occurs in a poor order, this could result in over-analysis. + // We do our best to avoid this by the other dependency logic in this file + // which tries to limit re-analysis to Decls whose previously listed + // dependencies are all up-to-date. + + const decl_as_depender = InternPool.Depender.wrap(.{ .decl = decl_index }); + const was_outdated = mod.outdated.swapRemove(decl_as_depender) or + mod.potentially_outdated.swapRemove(decl_as_depender); + + if (was_outdated) { + _ = mod.outdated_ready.swapRemove(decl_as_depender); + } + + switch (decl.analysis) { .in_progress => unreachable, .file_failure, @@ -3226,28 +3415,29 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { .codegen_failure_retryable, => return error.AnalysisFail, - .complete => return, - - .outdated => blk: { + .complete => if (was_outdated) { if (build_options.only_c) unreachable; // The exports this Decl performs will be re-discovered, so we remove them here // prior to re-analysis. 
try mod.deleteDeclExports(decl_index); + } else return, - break :blk true; - }, + .outdated => unreachable, // TODO: remove this field - .unreferenced => false, - }; + .unreferenced => {}, + } var decl_prog_node = mod.sema_prog_node.start("", 0); decl_prog_node.activate(); defer decl_prog_node.end(); - const type_changed = blk: { + const sema_result: SemaDeclResult = blk: { if (decl.zir_decl_index == .none and !mod.declIsRoot(decl_index)) { // Anonymous decl. We don't semantically analyze these. - break :blk false; // tv unchanged + break :blk .{ + .invalidate_decl_val = false, + .invalidate_decl_ref = false, + }; } break :blk mod.semaDecl(decl_index) catch |err| switch (err) { @@ -3276,9 +3466,18 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { }; }; - if (subsequent_analysis) { - _ = type_changed; - @panic("TODO re-implement incremental compilation"); + // TODO: we do not yet have separate dependencies for decl values vs types. + if (was_outdated) { + if (sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref) { + // This dependency was marked as PO, meaning dependees were waiting + // on its analysis result, and it has turned out to be outdated. + // Update dependees accordingly. + try mod.markDependeeOutdated(.{ .decl_val = decl_index }); + } else { + // This dependency was previously PO, but turned out to be up-to-date. + // We do not need to queue successive analysis. + try mod.markPoDependeeUpToDate(.{ .decl_val = decl_index }); + } } } @@ -3591,12 +3790,19 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { }, .incremental => {}, } + + // Since this is our first time analyzing this file, there can be no dependencies on + // its root Decl. Thus, we do not need to invalidate any dependencies. } -/// Returns `true` if the Decl type changed. -/// Returns `true` if this is the first time analyzing the Decl. -/// Returns `false` otherwise. -fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { +const SemaDeclResult = packed struct { + /// Whether the value of a `decl_val` of this Decl changed. + invalidate_decl_val: bool, + /// Whether the type of a `decl_ref` of this Decl changed. + invalidate_decl_ref: bool, +}; + +fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { const tracy = trace(@src()); defer tracy.end(); @@ -3674,8 +3880,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { }; defer sema.deinit(); - // Every Decl has a dependency on its own source. - try sema.declareDependency(.{ .src_hash = try ip.trackZir(sema.gpa, decl.getFileScope(mod), decl.zir_decl_index.unwrap().?) }); + // Every Decl other (than file root Decls, which do not have a ZIR index) has a dependency on its own source. + if (decl.zir_decl_index.unwrap()) |zir_decl_index| { + try sema.declareDependency(.{ .src_hash = try ip.trackZir(sema.gpa, decl.getFileScope(mod), zir_decl_index) }); + } assert(!mod.declIsRoot(decl_index)); @@ -3735,7 +3943,11 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.analysis = .complete; decl.generation = mod.generation; - return true; + // TODO: usingnamespace cannot currently participate in incremental compilation + return .{ + .invalidate_decl_val = true, + .invalidate_decl_ref = true, + }; } switch (ip.indexToKey(decl_tv.val.toIntern())) { @@ -3771,15 +3983,16 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { // The scope needs to have the decl in it. 
try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); } - return type_changed or is_inline != prev_is_inline; + // TODO: align, linksection, addrspace? + const changed = type_changed or is_inline != prev_is_inline; + return .{ + .invalidate_decl_val = changed, + .invalidate_decl_ref = changed, + }; } }, else => {}, } - var type_changed = true; - if (decl.has_tv) { - type_changed = !decl.ty.eql(decl_tv.ty, mod); - } decl.owns_tv = false; var queue_linker_work = false; @@ -3807,6 +4020,14 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { }, } + const old_has_tv = decl.has_tv; + // The following values are ignored if `!old_has_tv` + const old_ty = decl.ty; + const old_val = decl.val; + const old_align = decl.alignment; + const old_linksection = decl.@"linksection"; + const old_addrspace = decl.@"addrspace"; + decl.ty = decl_tv.ty; decl.val = Value.fromInterned((try decl_tv.val.intern(decl_tv.ty, mod))); decl.alignment = blk: { @@ -3850,6 +4071,17 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.analysis = .complete; decl.generation = mod.generation; + const result: SemaDeclResult = if (old_has_tv) .{ + .invalidate_decl_val = !decl.ty.eql(old_ty, mod) or !decl.val.eql(old_val, decl.ty, mod), + .invalidate_decl_ref = !decl.ty.eql(old_ty, mod) or + decl.alignment != old_align or + decl.@"linksection" != old_linksection or + decl.@"addrspace" != old_addrspace, + } else .{ + .invalidate_decl_val = true, + .invalidate_decl_ref = true, + }; + const has_runtime_bits = is_extern or (queue_linker_work and try sema.typeHasRuntimeBits(decl.ty)); @@ -3861,7 +4093,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); - if (type_changed and mod.emit_h != null) { + if (result.invalidate_decl_ref and mod.emit_h != null) { try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); } } @@ -3872,7 +4104,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); } - return type_changed; + return result; } pub const ImportFileResult = struct { -- cgit v1.2.3 From 269c1ae649017836f15313d1d4977402be11eed5 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 4 Feb 2024 03:00:13 +0000 Subject: Zcu: incremental compilation improvements * Mark root Decls for re-analysis separately * Check for re-analysis of root Decls * Remove `outdated` entry when analyzing fn body * Remove legacy `outdated` field from Decl analysis state --- src/Compilation.zig | 1 - src/Module.zig | 295 ++++++++++++++++++++++++++++------------------------ 2 files changed, 160 insertions(+), 136 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 5615808ee2..06c0dc7e7f 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3548,7 +3548,6 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v switch (decl.analysis) { .unreferenced => unreachable, .in_progress => unreachable, - .outdated => unreachable, .file_failure, .sema_failure, diff --git a/src/Module.zig b/src/Module.zig index d3d7c854d1..7b592ec204 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -156,7 +156,14 @@ potentially_outdated: std.AutoArrayHashMapUnmanaged(InternPool.Depender, u32) = outdated: std.AutoArrayHashMapUnmanaged(InternPool.Depender, u32) = .{}, /// This contains all `Depender`s in `outdated` whose PO dependency count is 0. 
 /// Such `Depender`s are ready for immediate re-analysis.
+/// See `findOutdatedToAnalyze` for details.
 outdated_ready: std.AutoArrayHashMapUnmanaged(InternPool.Depender, void) = .{},
+/// This contains a set of Decls which may not be in `outdated`, but are the
+/// root Decls of files which have updated source and thus must be re-analyzed.
+/// If such a Decl is only in this set, the struct type index may be preserved
+/// (only the namespace might change). If such a Decl is also `outdated`, the
+/// struct type index must be recreated.
+outdated_file_root: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
 
 stage1_flags: packed struct {
     have_winmain: bool = false,
@@ -431,13 +438,9 @@ pub const Decl = struct {
         /// This indicates the failure was something like running out of disk space,
         /// and attempting codegen again may succeed.
         codegen_failure_retryable,
-        /// Everything is done. During an update, this Decl may be out of date, depending
-        /// on its dependencies. The `generation` field can be used to determine if this
-        /// completion status occurred before or after a given update.
+        /// Semantic analysis of this Decl has succeeded. However, the Decl may
+        /// be outdated due to an incomplete update!
         complete,
-        /// A Module update is in progress, and this Decl has been flagged as being known
-        /// to require re-analysis.
-        outdated,
     },
     /// Whether `typed_value`, `align`, `linksection` and `addrspace` are populated.
     has_tv: bool,
@@ -735,8 +738,7 @@ pub const Namespace = struct {
     file_scope: *File,
     /// Will be a struct, enum, union, or opaque.
     ty: Type,
-    /// Direct children of the namespace. Used during an update to detect
-    /// which decls have been added/removed from source.
+    /// Direct children of the namespace.
     /// Declaration order is preserved via entry order.
     /// These are only declarations named directly by the AST; anonymous
     /// declarations are not stored here.
@@ -2492,6 +2494,7 @@ pub fn deinit(zcu: *Zcu) void {
     zcu.potentially_outdated.deinit(gpa);
     zcu.outdated.deinit(gpa);
     zcu.outdated_ready.deinit(gpa);
+    zcu.outdated_file_root.deinit(gpa);
 
     zcu.test_functions.deinit(gpa);
 
@@ -2858,27 +2861,13 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
         file.prev_zir = null;
     }
 
-    if (file.root_decl.unwrap()) |root_decl| mark_outdated: {
+    if (file.root_decl.unwrap()) |root_decl| {
         // The root of this file must be re-analyzed, since the file has changed.
         comp.mutex.lock();
         defer comp.mutex.unlock();
-        const root_decl_depender = InternPool.Depender.wrap(.{ .decl = root_decl });
-
-        const gop = try mod.outdated.getOrPut(gpa, root_decl_depender);
-        // If this Decl is already marked as outdated, nothing needs to be done.
-        if (gop.found_existing) break :mark_outdated;
-
-        log.debug("outdated: {} (root Decl)", .{root_decl});
-
-        // If it's already PO, forward its existing PO dependency count.
-        // Otherwise, it has no PO dependencies yet.
-        if (mod.potentially_outdated.fetchSwapRemove(root_decl_depender)) |kv| {
-            gop.value_ptr.* = kv.value;
-        } else {
-            gop.value_ptr.* = 0;
-            try mod.outdated_ready.put(mod.gpa, root_decl_depender, {});
-        }
+        log.debug("outdated root Decl: {}", .{root_decl});
+        try mod.outdated_file_root.put(gpa, root_decl, {});
     }
 }
 
@@ -3187,6 +3176,26 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.Depender {
         return zcu.outdated_ready.keys()[0];
     }
 
+    // Next, we will see if there is any outdated file root which was not in
+    // `outdated`. This set will be small (number of files changed in this
+    // update), so it's alright for us to just iterate here.
+    for (zcu.outdated_file_root.keys()) |file_decl| {
+        const decl_depender = InternPool.Depender.wrap(.{ .decl = file_decl });
+        if (zcu.outdated.contains(decl_depender)) {
+            // Since we didn't hit this in the first loop, this Decl must have
+            // pending dependencies, so is ineligible.
+            continue;
+        }
+        if (zcu.potentially_outdated.contains(decl_depender)) {
+            // This Decl's struct may or may not need to be recreated depending
+            // on whether it is outdated. If we analyzed it now, we would have
+            // to assume it was outdated and recreate it!
+            continue;
+        }
+        log.debug("findOutdatedToAnalyze: outdated file root decl '{d}'", .{file_decl});
+        return decl_depender;
+    }
+
     // There is no single Depender which is ready for re-analysis. Instead, we
     // must assume that some Decl with PO dependencies is outdated - e.g. in the
     // above example we arbitrarily pick one of A or B. We should select a Decl,
@@ -3407,26 +3416,27 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
         .in_progress => unreachable,
 
         .file_failure,
-        .sema_failure,
-        .sema_failure_retryable,
         .liveness_failure,
         .codegen_failure,
-        .dependency_failure,
         .codegen_failure_retryable,
+        .dependency_failure,
         => return error.AnalysisFail,
 
-        .complete => if (was_outdated) {
-            if (build_options.only_c) unreachable;
-            // The exports this Decl performs will be re-discovered, so we remove them here
-            // prior to re-analysis.
-            try mod.deleteDeclExports(decl_index);
-        } else return,
+        .sema_failure,
+        .sema_failure_retryable,
+        => if (!was_outdated) return error.AnalysisFail,
 
-        .outdated => unreachable, // TODO: remove this field
+        .complete => if (!was_outdated) return,
 
         .unreferenced => {},
     }
 
+    if (was_outdated) {
+        // The exports this Decl performs will be re-discovered, so we remove them here
+        // prior to re-analysis.
+ try mod.deleteDeclExports(decl_index); + } + var decl_prog_node = mod.sema_prog_node.start("", 0); decl_prog_node.activate(); defer decl_prog_node.end(); @@ -3493,7 +3503,6 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, func_index: InternPool.Index) SemaError switch (decl.analysis) { .unreferenced => unreachable, .in_progress => unreachable, - .outdated => unreachable, .file_failure, .sema_failure, @@ -3503,109 +3512,117 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, func_index: InternPool.Index) SemaError .sema_failure_retryable, => return error.AnalysisFail, - .complete, .codegen_failure_retryable => { - switch (func.analysis(ip).state) { - .sema_failure, .dependency_failure => return error.AnalysisFail, - .none, .queued => {}, - .in_progress => unreachable, - .inline_only => unreachable, // don't queue work for this - .success => return, - } + .complete, .codegen_failure_retryable => {}, + } - const gpa = zcu.gpa; + const func_as_depender = InternPool.Depender.wrap(.{ .func = func_index }); + const was_outdated = zcu.outdated.swapRemove(func_as_depender) or + zcu.potentially_outdated.swapRemove(func_as_depender); - var tmp_arena = std.heap.ArenaAllocator.init(gpa); - defer tmp_arena.deinit(); - const sema_arena = tmp_arena.allocator(); + if (was_outdated) { + _ = zcu.outdated_ready.swapRemove(func_as_depender); + } - var air = zcu.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) { - error.AnalysisFail => { - if (func.analysis(ip).state == .in_progress) { - // If this decl caused the compile error, the analysis field would - // be changed to indicate it was this Decl's fault. Because this - // did not happen, we infer here that it was a dependency failure. - func.analysis(ip).state = .dependency_failure; - } - return error.AnalysisFail; - }, - error.OutOfMemory => return error.OutOfMemory, - }; - defer air.deinit(gpa); + switch (func.analysis(ip).state) { + .sema_failure, .dependency_failure => if (!was_outdated) return error.AnalysisFail, + .none, .queued => {}, + .in_progress => unreachable, + .inline_only => unreachable, // don't queue work for this + .success => if (!was_outdated) return, + } - const comp = zcu.comp; + const gpa = zcu.gpa; - const dump_air = builtin.mode == .Debug and comp.verbose_air; - const dump_llvm_ir = builtin.mode == .Debug and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null); + var tmp_arena = std.heap.ArenaAllocator.init(gpa); + defer tmp_arena.deinit(); + const sema_arena = tmp_arena.allocator(); - if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) { - return; + var air = zcu.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) { + error.AnalysisFail => { + if (func.analysis(ip).state == .in_progress) { + // If this decl caused the compile error, the analysis field would + // be changed to indicate it was this Decl's fault. Because this + // did not happen, we infer here that it was a dependency failure. 
+ func.analysis(ip).state = .dependency_failure; } + return error.AnalysisFail; + }, + error.OutOfMemory => return error.OutOfMemory, + }; + defer air.deinit(gpa); - var liveness = try Liveness.analyze(gpa, air, ip); - defer liveness.deinit(gpa); + const comp = zcu.comp; - if (dump_air) { - const fqn = try decl.getFullyQualifiedName(zcu); - std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); - @import("print_air.zig").dump(zcu, air, liveness); - std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)}); - } + const dump_air = builtin.mode == .Debug and comp.verbose_air; + const dump_llvm_ir = builtin.mode == .Debug and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null); - if (std.debug.runtime_safety) { - var verify = Liveness.Verify{ - .gpa = gpa, - .air = air, - .liveness = liveness, - .intern_pool = ip, - }; - defer verify.deinit(); - - verify.verify() catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - else => { - try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); - zcu.failed_decls.putAssumeCapacityNoClobber( - decl_index, - try Module.ErrorMsg.create( - gpa, - decl.srcLoc(zcu), - "invalid liveness: {s}", - .{@errorName(err)}, - ), - ); - decl.analysis = .liveness_failure; - return error.AnalysisFail; - }, - }; - } + if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) { + return; + } - if (comp.bin_file) |lf| { - lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - decl.analysis = .codegen_failure; - }, - else => { - try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); - zcu.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( - gpa, - decl.srcLoc(zcu), - "unable to codegen: {s}", - .{@errorName(err)}, - )); - decl.analysis = .codegen_failure_retryable; - }, - }; - } else if (zcu.llvm_object) |llvm_object| { - if (build_options.only_c) unreachable; - llvm_object.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - decl.analysis = .codegen_failure; - }, - }; - } - }, + var liveness = try Liveness.analyze(gpa, air, ip); + defer liveness.deinit(gpa); + + if (dump_air) { + const fqn = try decl.getFullyQualifiedName(zcu); + std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); + @import("print_air.zig").dump(zcu, air, liveness); + std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)}); + } + + if (std.debug.runtime_safety) { + var verify = Liveness.Verify{ + .gpa = gpa, + .air = air, + .liveness = liveness, + .intern_pool = ip, + }; + defer verify.deinit(); + + verify.verify() catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => { + try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); + zcu.failed_decls.putAssumeCapacityNoClobber( + decl_index, + try Module.ErrorMsg.create( + gpa, + decl.srcLoc(zcu), + "invalid liveness: {s}", + .{@errorName(err)}, + ), + ); + decl.analysis = .liveness_failure; + return error.AnalysisFail; + }, + }; + } + + if (comp.bin_file) |lf| { + lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => { + decl.analysis = .codegen_failure; + }, + else => { + try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); + zcu.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(zcu), + "unable to 
codegen: {s}", + .{@errorName(err)}, + )); + decl.analysis = .codegen_failure_retryable; + }, + }; + } else if (zcu.llvm_object) |llvm_object| { + if (build_options.only_c) unreachable; + llvm_object.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => { + decl.analysis = .codegen_failure; + }, + }; } } @@ -3625,7 +3642,6 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) switch (decl.analysis) { .unreferenced => unreachable, .in_progress => unreachable, - .outdated => unreachable, .file_failure, .sema_failure, @@ -3813,6 +3829,15 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { return error.AnalysisFail; } + if (mod.declIsRoot(decl_index)) { + // This comes from an `analyze_decl` job on an incremental update where + // this file changed. + @panic("TODO: update root Decl of modified file"); + } else if (decl.owns_tv) { + // We are re-analyzing an owner Decl (for a function or a namespace type). + @panic("TODO: update owner Decl"); + } + const gpa = mod.gpa; const zir = decl.getFileScope(mod).zir; @@ -3880,12 +3905,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { }; defer sema.deinit(); - // Every Decl other (than file root Decls, which do not have a ZIR index) has a dependency on its own source. - if (decl.zir_decl_index.unwrap()) |zir_decl_index| { - try sema.declareDependency(.{ .src_hash = try ip.trackZir(sema.gpa, decl.getFileScope(mod), zir_decl_index) }); - } - - assert(!mod.declIsRoot(decl_index)); + // Every Decl (other than file root Decls, which do not have a ZIR index) has a dependency on its own source. + try sema.declareDependency(.{ .src_hash = try ip.trackZir( + sema.gpa, + decl.getFileScope(mod), + decl.zir_decl_index.unwrap().?, + ) }); var block_scope: Sema.Block = .{ .parent = null, -- cgit v1.2.3 From 0d8207c29236bf731f7d3bad189beb3a1e1b1d0c Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 4 Feb 2024 17:27:41 +0000 Subject: Zcu: refactor Decl.analysis field * Functions failing codegen now set this failure on the function analysis state. Decl analysis `codegen_failure` is reserved for failures generating constant values. * `liveness_failure` is consolidated into `codegen_failure`, as we do not need to distinguish these, and Liveness.Verify is just a debugging feature anyway. * `sema_failure_retryable` and `codegen_failure_retryable` are removed. Instead, retryable failures are recorded in the new `Zcu.retryable_failures` list. On an incremental update, this list is flushed, and all elements are marked as outdated so that we re-attempt analysis and code generation. Also remove the `generation` fields from `Zcu` and `Decl` as these are not needed by our new strategy for incremental updates. --- src/Compilation.zig | 16 ++---- src/InternPool.zig | 10 ++-- src/Module.zig | 138 ++++++++++++++++++++++++++++------------------------ src/Sema.zig | 7 --- 4 files changed, 84 insertions(+), 87 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 06c0dc7e7f..596b8d5667 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2141,7 +2141,6 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void if (comp.module) |module| { module.compile_log_text.shrinkAndFree(gpa, 0); - module.generation += 1; // Make sure std.zig is inside the import_table. We unconditionally need // it for start.zig. 
@@ -3491,9 +3490,7 @@ pub fn performAllTheWork( if (comp.module) |mod| { try reportMultiModuleErrors(mod); - } - - if (comp.module) |mod| { + try mod.flushRetryableFailures(); mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); mod.sema_prog_node.activate(); } @@ -3551,13 +3548,11 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v .file_failure, .sema_failure, - .liveness_failure, .codegen_failure, .dependency_failure, - .sema_failure_retryable, => return, - .complete, .codegen_failure_retryable => { + .complete => { const named_frame = tracy.namedFrame("codegen_decl"); defer named_frame.end(); @@ -3592,17 +3587,15 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v switch (decl.analysis) { .unreferenced => unreachable, .in_progress => unreachable, - .outdated => unreachable, .file_failure, .sema_failure, .dependency_failure, - .sema_failure_retryable, => return, // emit-h only requires semantic analysis of the Decl to be complete, // it does not depend on machine code generation to succeed. - .liveness_failure, .codegen_failure, .codegen_failure_retryable, .complete => { + .codegen_failure, .complete => { const named_frame = tracy.namedFrame("emit_h_decl"); defer named_frame.end(); @@ -3674,7 +3667,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v "unable to update line number: {s}", .{@errorName(err)}, )); - decl.analysis = .codegen_failure_retryable; + decl.analysis = .codegen_failure; + try module.retryable_failures.append(gpa, InternPool.Depender.wrap(.{ .decl = decl_index })); }; }, .analyze_mod => |pkg| { diff --git a/src/InternPool.zig b/src/InternPool.zig index 79afe294b1..9075284674 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3483,6 +3483,11 @@ pub const FuncAnalysis = packed struct(u32) { /// This function might be OK but it depends on another Decl which did not /// successfully complete semantic analysis. dependency_failure, + /// There will be a corresponding ErrorMsg in Module.failed_decls. + /// Indicates that semantic analysis succeeded, but code generation for + /// this function failed. + codegen_failure, + /// Semantic analysis and code generation of this function succeeded. 
success, }; }; @@ -6182,7 +6187,6 @@ pub const GetFuncInstanceKey = struct { is_noinline: bool, generic_owner: Index, inferred_error_set: bool, - generation: u32, }; pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey) Allocator.Error!Index { @@ -6249,7 +6253,6 @@ pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey) generic_owner, func_index, func_extra_index, - arg.generation, func_ty, arg.section, ); @@ -6381,7 +6384,6 @@ pub fn getFuncInstanceIes( generic_owner, func_index, func_extra_index, - arg.generation, func_ty, arg.section, ); @@ -6393,7 +6395,6 @@ fn finishFuncInstance( generic_owner: Index, func_index: Index, func_extra_index: u32, - generation: u32, func_ty: Index, section: OptionalNullTerminatedString, ) Allocator.Error!Index { @@ -6413,7 +6414,6 @@ fn finishFuncInstance( .analysis = .complete, .zir_decl_index = fn_owner_decl.zir_decl_index, .src_scope = fn_owner_decl.src_scope, - .generation = generation, .is_pub = fn_owner_decl.is_pub, .is_exported = fn_owner_decl.is_exported, .alive = true, diff --git a/src/Module.zig b/src/Module.zig index 7b592ec204..f6f660e70e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -144,11 +144,6 @@ global_error_set: GlobalErrorSet = .{}, /// Maximum amount of distinct error values, set by --error-limit error_limit: ErrorInt, -/// Incrementing integer used to compare against the corresponding Decl -/// field to determine whether a Decl's status applies to an ongoing update, or a -/// previous analysis. -generation: u32 = 0, - /// Value is the number of PO or outdated Decls which this Depender depends on. potentially_outdated: std.AutoArrayHashMapUnmanaged(InternPool.Depender, u32) = .{}, /// Value is the number of PO or outdated Decls which this Depender depends on. @@ -164,6 +159,11 @@ outdated_ready: std.AutoArrayHashMapUnmanaged(InternPool.Depender, void) = .{}, /// (only the namespace might change). If such a Decl is also `outdated`, the /// struct type index must be recreated. outdated_file_root: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, +/// This contains a list of Dependers whose analysis or codegen failed, but the +/// failure was something like running out of disk space, and trying again may +/// succeed. On the next update, we will flush this list, marking all members of +/// it as outdated. +retryable_failures: std.ArrayListUnmanaged(InternPool.Depender) = .{}, stage1_flags: packed struct { have_winmain: bool = false, @@ -380,21 +380,14 @@ pub const Decl = struct { alignment: Alignment, /// Populated when `has_tv`. @"addrspace": std.builtin.AddressSpace, - /// The direct parent namespace of the Decl. - /// Reference to externally owned memory. - /// In the case of the Decl corresponding to a file, this is - /// the namespace of the struct, since there is no parent. + /// The direct parent namespace of the Decl. In the case of the Decl + /// corresponding to a file, this is the namespace of the struct, since + /// there is no parent. src_namespace: Namespace.Index, - /// The scope which lexically contains this decl. A decl must depend - /// on its lexical parent, in order to ensure that this pointer is valid. - /// This scope is allocated out of the arena of the parent decl. + /// The scope which lexically contains this decl. src_scope: CaptureScope.Index, - /// An integer that can be checked against the corresponding incrementing - /// generation field of Module. This is used to determine whether `complete` status - /// represents pre- or post- re-analysis. 
-    generation: u32,
     /// The AST node index of this declaration.
     /// Must be recomputed when the corresponding source file is modified.
     src_node: Ast.Node.Index,
@@ -420,26 +413,19 @@
         /// The file corresponding to this Decl had a parse error or ZIR error.
         /// There will be a corresponding ErrorMsg in Module.failed_files.
         file_failure,
-        /// This Decl might be OK but it depends on another one which did not successfully complete
-        /// semantic analysis.
+        /// This Decl might be OK but it depends on another one which did not
+        /// successfully complete semantic analysis.
         dependency_failure,
        /// Semantic analysis failure.
         /// There will be a corresponding ErrorMsg in Module.failed_decls.
         sema_failure,
         /// There will be a corresponding ErrorMsg in Module.failed_decls.
-        /// This indicates the failure was something like running out of disk space,
-        /// and attempting semantic analysis again may succeed.
-        sema_failure_retryable,
-        /// There will be a corresponding ErrorMsg in Module.failed_decls.
-        liveness_failure,
-        /// There will be a corresponding ErrorMsg in Module.failed_decls.
         codegen_failure,
-        /// There will be a corresponding ErrorMsg in Module.failed_decls.
-        /// This indicates the failure was something like running out of disk space,
-        /// and attempting codegen again may succeed.
-        codegen_failure_retryable,
-        /// Semantic analysis of this Decl has succeeded. However, the Decl may
-        /// be outdated due to an incomplete update!
+        /// Semantic analysis and constant value codegen of this Decl has
+        /// succeeded. However, the Decl may be outdated due to an in-progress
+        /// update. Note that for a function, this does not mean codegen of the
+        /// function body succeeded: that state is indicated by the function's
+        /// `analysis` field.
         complete,
     },
     /// Whether `typed_value`, `align`, `linksection` and `addrspace` are populated.
@@ -2495,6 +2481,7 @@ pub fn deinit(zcu: *Zcu) void {
     zcu.outdated.deinit(gpa);
     zcu.outdated_ready.deinit(gpa);
     zcu.outdated_file_root.deinit(gpa);
+    zcu.retryable_failures.deinit(gpa);
 
     zcu.test_functions.deinit(gpa);
 
@@ -3257,6 +3244,29 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.Depender {
     return InternPool.Depender.wrap(.{ .decl = chosen_decl_idx.? });
 }
 
+/// During an incremental update, before semantic analysis, call this to flush all values from
+/// `retryable_failures` and mark them as outdated so they get re-analyzed.
+pub fn flushRetryableFailures(zcu: *Zcu) !void {
+    const gpa = zcu.gpa;
+    for (zcu.retryable_failures.items) |depender| {
+        if (zcu.outdated.contains(depender)) continue;
+        if (zcu.potentially_outdated.fetchSwapRemove(depender)) |kv| {
+            // This Depender was already PO, but we now consider it outdated.
+            // Any transitive dependencies are already marked PO.
+            try zcu.outdated.put(gpa, depender, kv.value);
+            continue;
+        }
+        // This Depender was not marked PO, but is now outdated. Mark it as
+        // such, then recursively mark transitive dependencies as PO.
+ try zcu.outdated.put(gpa, depender, 0); + switch (depender.unwrap()) { + .decl => |decl| try zcu.markDeclDependenciesPotentiallyOutdated(decl), + .func => {}, + } + } + zcu.retryable_failures.clearRetainingCapacity(); +} + pub fn mapOldZirToNew( gpa: Allocator, old_zir: Zir, @@ -3415,15 +3425,11 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { switch (decl.analysis) { .in_progress => unreachable, - .file_failure, - .liveness_failure, - .codegen_failure, - .codegen_failure_retryable, - .dependency_failure, - => return error.AnalysisFail, + .file_failure => return error.AnalysisFail, .sema_failure, - .sema_failure_retryable, + .dependency_failure, + .codegen_failure, => if (!was_outdated) return error.AnalysisFail, .complete => if (!was_outdated) return, @@ -3434,6 +3440,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { if (was_outdated) { // The exports this Decl performs will be re-discovered, so we remove them here // prior to re-analysis. + if (build_options.only_c) unreachable; try mod.deleteDeclExports(decl_index); } @@ -3463,8 +3470,9 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { error.NeededSourceLocation => unreachable, error.GenericPoison => unreachable, else => |e| { - decl.analysis = .sema_failure_retryable; + decl.analysis = .sema_failure; try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); + try mod.retryable_failures.append(mod.gpa, InternPool.Depender.wrap(.{ .decl = decl_index })); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( mod.gpa, decl.srcLoc(mod), @@ -3504,15 +3512,14 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, func_index: InternPool.Index) SemaError .unreferenced => unreachable, .in_progress => unreachable, + .codegen_failure => unreachable, // functions do not perform constant value generation + .file_failure, .sema_failure, - .liveness_failure, - .codegen_failure, .dependency_failure, - .sema_failure_retryable, => return error.AnalysisFail, - .complete, .codegen_failure_retryable => {}, + .complete => {}, } const func_as_depender = InternPool.Depender.wrap(.{ .func = func_index }); @@ -3524,11 +3531,14 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, func_index: InternPool.Index) SemaError } switch (func.analysis(ip).state) { - .sema_failure, .dependency_failure => if (!was_outdated) return error.AnalysisFail, + .success, + .sema_failure, + .dependency_failure, + .codegen_failure, + => if (!was_outdated) return error.AnalysisFail, .none, .queued => {}, .in_progress => unreachable, .inline_only => unreachable, // don't queue work for this - .success => if (!was_outdated) return, } const gpa = zcu.gpa; @@ -3592,8 +3602,8 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, func_index: InternPool.Index) SemaError .{@errorName(err)}, ), ); - decl.analysis = .liveness_failure; - return error.AnalysisFail; + func.analysis(ip).state = .codegen_failure; + return; }, }; } @@ -3602,7 +3612,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, func_index: InternPool.Index) SemaError lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { - decl.analysis = .codegen_failure; + func.analysis(ip).state = .codegen_failure; }, else => { try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); @@ -3612,7 +3622,8 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, func_index: InternPool.Index) SemaError "unable to codegen: {s}", .{@errorName(err)}, )); - decl.analysis = 
.codegen_failure_retryable; + func.analysis(ip).state = .codegen_failure; + try zcu.retryable_failures.append(zcu.gpa, InternPool.Depender.wrap(.{ .func = func_index })); }, }; } else if (zcu.llvm_object) |llvm_object| { @@ -3620,7 +3631,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, func_index: InternPool.Index) SemaError llvm_object.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { - decl.analysis = .codegen_failure; + func.analysis(ip).state = .codegen_failure; }, }; } @@ -3645,14 +3656,11 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) .file_failure, .sema_failure, - .liveness_failure, .codegen_failure, .dependency_failure, - .sema_failure_retryable, - .codegen_failure_retryable, - // The function analysis failed, but we've already emitted an error for - // that. The callee doesn't need the function to be analyzed right now, - // so its analysis can safely continue. + // Analysis of the function Decl itself failed, but we've already + // emitted an error for that. The callee doesn't need the function to be + // analyzed right now, so its analysis can safely continue. => return, .complete => {}, @@ -3660,14 +3668,21 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) assert(decl.has_tv); + const func_as_depender = InternPool.Depender.wrap(.{ .func = func_index }); + const is_outdated = mod.outdated.contains(func_as_depender) or + mod.potentially_outdated.contains(func_as_depender); + switch (func.analysis(ip).state) { .none => {}, .queued => return, // As above, we don't need to forward errors here. - .sema_failure, .dependency_failure => return, + .sema_failure, + .dependency_failure, + .codegen_failure, + .success, + => if (!is_outdated) return, .in_progress => return, .inline_only => unreachable, // don't queue work for this - .success => return, } // Decl itself is safely analyzed, and body analysis is not yet queued @@ -3727,7 +3742,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { new_decl.@"linksection" = .none; new_decl.alive = true; // This Decl corresponds to a File and is therefore always alive. 
new_decl.analysis = .in_progress; - new_decl.generation = mod.generation; if (file.status != .success_zir) { new_decl.analysis = .file_failure; @@ -3966,7 +3980,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { decl.has_tv = true; decl.owns_tv = false; decl.analysis = .complete; - decl.generation = mod.generation; // TODO: usingnamespace cannot currently participate in incremental compilation return .{ @@ -3997,7 +4010,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { decl.has_tv = true; decl.owns_tv = owns_tv; decl.analysis = .complete; - decl.generation = mod.generation; const is_inline = decl.ty.fnCallingConvention(mod) == .Inline; if (decl.is_exported) { @@ -4094,7 +4106,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult { }; decl.has_tv = true; decl.analysis = .complete; - decl.generation = mod.generation; const result: SemaDeclResult = if (old_has_tv) .{ .invalidate_decl_val = !decl.ty.eql(old_ty, mod) or !decl.val.eql(old_val, decl.ty, mod), @@ -5005,7 +5016,6 @@ pub fn allocateNewDecl( .analysis = .unreferenced, .zir_decl_index = .none, .src_scope = src_scope, - .generation = 0, .is_pub = false, .is_exported = false, .alive = false, @@ -5083,7 +5093,6 @@ pub fn initNewAnonDecl( new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.analysis = .complete; - new_decl.generation = mod.generation; } pub fn errNoteNonLazy( @@ -5745,7 +5754,8 @@ pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { "unable to codegen: {s}", .{@errorName(err)}, )); - decl.analysis = .codegen_failure_retryable; + decl.analysis = .codegen_failure; + try zcu.retryable_failures.append(zcu.gpa, InternPool.Depender.wrap(.{ .decl = decl_index })); }, }; } else if (zcu.llvm_object) |llvm_object| { diff --git a/src/Sema.zig b/src/Sema.zig index 0f604d2236..df77862311 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2583,7 +2583,6 @@ fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.ErrorMsg) ip.funcAnalysis(sema.owner_func_index).state = .sema_failure; } else { sema.owner_decl.analysis = .sema_failure; - sema.owner_decl.generation = mod.generation; } if (sema.func_index != .none) { ip.funcAnalysis(sema.func_index).state = .sema_failure; @@ -9468,7 +9467,6 @@ fn funcCommon( .inferred_error_set = inferred_error_set, .generic_owner = sema.generic_owner, .comptime_args = sema.comptime_args, - .generation = mod.generation, }); return finishFunc( sema, @@ -25957,7 +25955,6 @@ fn zirBuiltinExtern( new_decl.has_tv = true; new_decl.owns_tv = true; new_decl.analysis = .complete; - new_decl.generation = mod.generation; try sema.ensureDeclAnalyzed(new_decl_index); @@ -36215,10 +36212,8 @@ pub fn resolveTypeFieldsStruct( .file_failure, .dependency_failure, .sema_failure, - .sema_failure_retryable, => { sema.owner_decl.analysis = .dependency_failure; - sema.owner_decl.generation = mod.generation; return error.AnalysisFail; }, else => {}, @@ -36274,10 +36269,8 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Key. 
.file_failure, .dependency_failure, .sema_failure, - .sema_failure_retryable, => { sema.owner_decl.analysis = .dependency_failure; - sema.owner_decl.generation = mod.generation; return error.AnalysisFail; }, else => {}, -- cgit v1.2.3 From 0784d389844a127248bb724352ce7101bc49784c Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 4 Feb 2024 17:46:04 +0000 Subject: compiler: lock incremental dependency tracking behind --debug-incremental This logic (currently) has a non-trivial cost (particularly in terms of peak RSS) for tracking dependencies. Until incremental compilation is in use in the wild, it doesn't make sense for users to pay that cost. --- src/Compilation.zig | 3 +++ src/Module.zig | 2 ++ src/Sema.zig | 51 ++++++++++++++++++++++++++++++--------------------- src/main.zig | 1 + 4 files changed, 36 insertions(+), 21 deletions(-) (limited to 'src/Module.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 596b8d5667..1d83a7d3ec 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -156,6 +156,7 @@ time_report: bool, stack_report: bool, debug_compiler_runtime_libs: bool, debug_compile_errors: bool, +debug_incremental: bool, job_queued_compiler_rt_lib: bool = false, job_queued_compiler_rt_obj: bool = false, job_queued_update_builtin_zig: bool, @@ -1079,6 +1080,7 @@ pub const CreateOptions = struct { verbose_llvm_cpu_features: bool = false, debug_compiler_runtime_libs: bool = false, debug_compile_errors: bool = false, + debug_incremental: bool = false, /// Normally when you create a `Compilation`, Zig will automatically build /// and link in required dependencies, such as compiler-rt and libc. When /// building such dependencies themselves, this flag must be set to avoid @@ -1508,6 +1510,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .test_name_prefix = options.test_name_prefix, .debug_compiler_runtime_libs = options.debug_compiler_runtime_libs, .debug_compile_errors = options.debug_compile_errors, + .debug_incremental = options.debug_incremental, .libcxx_abi_version = options.libcxx_abi_version, .root_name = root_name, .sysroot = sysroot, diff --git a/src/Module.zig b/src/Module.zig index f6f660e70e..df3a999fc0 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -3139,6 +3139,8 @@ fn markDeclDependenciesPotentiallyOutdated(zcu: *Zcu, decl_index: Decl.Index) !v } pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.Depender { + if (!zcu.comp.debug_incremental) return null; + if (zcu.outdated.count() == 0 and zcu.potentially_outdated.count() == 0) { log.debug("findOutdatedToAnalyze: no outdated depender", .{}); return null; diff --git a/src/Sema.zig b/src/Sema.zig index df77862311..4b641e141d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2788,11 +2788,13 @@ fn zirStructDecl( new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - try ip.addDependency( - sema.gpa, - InternPool.Depender.wrap(.{ .decl = new_decl_index }), - .{ .src_hash = try ip.trackZir(sema.gpa, block.getFileScope(mod), inst) }, - ); + if (sema.mod.comp.debug_incremental) { + try ip.addDependency( + sema.gpa, + InternPool.Depender.wrap(.{ .decl = new_decl_index }), + .{ .src_hash = try ip.trackZir(sema.gpa, block.getFileScope(mod), inst) }, + ); + } const new_namespace_index = try mod.createNamespace(.{ .parent = block.namespace.toOptional(), @@ -2978,11 +2980,13 @@ fn zirEnumDecl( new_decl.owns_tv = true; errdefer if (!done) mod.abortAnonDecl(new_decl_index); - try mod.intern_pool.addDependency( - sema.gpa, - 
InternPool.Depender.wrap(.{ .decl = new_decl_index }), - .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, - ); + if (sema.mod.comp.debug_incremental) { + try mod.intern_pool.addDependency( + sema.gpa, + InternPool.Depender.wrap(.{ .decl = new_decl_index }), + .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, + ); + } const new_namespace_index = try mod.createNamespace(.{ .parent = block.namespace.toOptional(), @@ -3237,11 +3241,13 @@ fn zirUnionDecl( new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - try mod.intern_pool.addDependency( - sema.gpa, - InternPool.Depender.wrap(.{ .decl = new_decl_index }), - .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, - ); + if (sema.mod.comp.debug_incremental) { + try mod.intern_pool.addDependency( + sema.gpa, + InternPool.Depender.wrap(.{ .decl = new_decl_index }), + .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, + ); + } const new_namespace_index = try mod.createNamespace(.{ .parent = block.namespace.toOptional(), @@ -3336,11 +3342,13 @@ fn zirOpaqueDecl( new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - try mod.intern_pool.addDependency( - sema.gpa, - InternPool.Depender.wrap(.{ .decl = new_decl_index }), - .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, - ); + if (sema.mod.comp.debug_incremental) { + try mod.intern_pool.addDependency( + sema.gpa, + InternPool.Depender.wrap(.{ .decl = new_decl_index }), + .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, + ); + } const new_namespace_index = try mod.createNamespace(.{ .parent = block.namespace.toOptional(), @@ -32435,7 +32443,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); - // TODO: if this is a `decl_ref`, only depend on decl type + // TODO: if this is a `decl_ref` of a non-variable decl, only depend on decl type try sema.declareDependency(.{ .decl_val = decl_index }); const ptr_ty = try sema.ptrType(.{ .child = decl_tv.ty.toIntern(), @@ -38925,6 +38933,7 @@ fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type { } pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { + if (!sema.mod.comp.debug_incremental) return; const depender = InternPool.Depender.wrap( if (sema.owner_func_index != .none) .{ .func = sema.owner_func_index } diff --git a/src/main.zig b/src/main.zig index a1b4a098db..466c6dd697 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3255,6 +3255,7 @@ fn buildOutputType( .cache_mode = cache_mode, .subsystem = subsystem, .debug_compile_errors = debug_compile_errors, + .debug_incremental = debug_incremental, .enable_link_snapshots = enable_link_snapshots, .install_name = install_name, .entitlements = entitlements, -- cgit v1.2.3
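
The counter bookkeeping these patches thread through Zcu (`potentially_outdated` records how many unresolved PO dependencies each `Depender` still has, `outdated` is the confirmed-outdated set, and `outdated_ready` collects outdated `Depender`s whose PO count has reached zero) can be exercised in isolation. The sketch below is an illustrative model only, not code from the patches above: the integer `Depender` alias, the `Toy` container, and its method names (`markOutdated`, `resolveOnePoDep`) are hypothetical stand-ins for the real `InternPool.Depender`/`Zcu` machinery.

const std = @import("std");

/// Illustrative stand-in for `InternPool.Depender`; the real compiler packs
/// a Decl or function index into one value.
const Depender = u32;

/// Hypothetical container modelling the three maps used by these patches.
const Toy = struct {
    gpa: std.mem.Allocator,
    /// Value is the number of PO/outdated dependencies still unresolved.
    potentially_outdated: std.AutoArrayHashMapUnmanaged(Depender, u32) = .{},
    outdated: std.AutoArrayHashMapUnmanaged(Depender, u32) = .{},
    /// Members of `outdated` whose PO dependency count has reached zero.
    outdated_ready: std.AutoArrayHashMapUnmanaged(Depender, void) = .{},

    fn deinit(t: *Toy) void {
        t.potentially_outdated.deinit(t.gpa);
        t.outdated.deinit(t.gpa);
        t.outdated_ready.deinit(t.gpa);
    }

    /// A dependee of `depender` is known to have changed: promote the
    /// depender from PO (or from nothing) to outdated, keeping its PO count.
    fn markOutdated(t: *Toy, depender: Depender) !void {
        if (t.outdated.contains(depender)) return;
        if (t.potentially_outdated.fetchSwapRemove(depender)) |kv| {
            // Forward the existing PO dependency count.
            try t.outdated.put(t.gpa, depender, kv.value);
        } else {
            // No PO dependencies: eligible for re-analysis immediately.
            try t.outdated.put(t.gpa, depender, 0);
            try t.outdated_ready.put(t.gpa, depender, {});
        }
    }

    /// A PO dependency of `depender` was re-analyzed and turned out to be
    /// up-to-date: decrement the counter and mark ready when it hits zero.
    fn resolveOnePoDep(t: *Toy, depender: Depender) !void {
        if (t.outdated.getPtr(depender)) |count| {
            count.* -= 1;
            if (count.* == 0) try t.outdated_ready.put(t.gpa, depender, {});
        } else if (t.potentially_outdated.getPtr(depender)) |count| {
            count.* -= 1;
            if (count.* == 0) _ = t.potentially_outdated.swapRemove(depender);
        }
    }
};

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    var toy: Toy = .{ .gpa = gpa_state.allocator() };
    defer toy.deinit();

    // Depender 1 is outdated but still has one unresolved PO dependency;
    // depender 2 becomes outdated with no PO dependencies and is ready at once.
    try toy.outdated.put(toy.gpa, 1, 1);
    try toy.markOutdated(2);
    try toy.resolveOnePoDep(1); // last PO dependency resolved: 1 is now ready

    std.debug.print("ready for re-analysis: {any}\n", .{toy.outdated_ready.keys()});
}

Because the ready set is maintained eagerly, a `findOutdatedToAnalyze`-style query can take any member of it directly (the patch above simply returns the first key) before falling back to the heuristic choice among PO Decls described in its comments.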