author    Andrew Kelley <andrew@ziglang.org>  2020-05-28 22:45:59 -0400
committer Andrew Kelley <andrew@ziglang.org>  2020-05-28 22:45:59 -0400
commit    dadc4327e109e1cb13ae7e6132db586f5f849396 (patch)
tree      b83c3d6b74fe3f299de9a1869e4b64e614d2fa5e  /src-self-hosted/Module.zig
parent    2ae9e06363b294f60b62e765d31fce4417e14a9d (diff)
parent    5d77fede896ff7446e9946f91c74b7632b63253e (diff)
Merge branch 'stage2-vaddr-alloc'
Diffstat (limited to 'src-self-hosted/Module.zig')
-rw-r--r--  src-self-hosted/Module.zig  480
1 file changed, 367 insertions(+), 113 deletions(-)
diff --git a/src-self-hosted/Module.zig b/src-self-hosted/Module.zig
index da826a6e95..972aa649f3 100644
--- a/src-self-hosted/Module.zig
+++ b/src-self-hosted/Module.zig
@@ -55,9 +55,20 @@ failed_files: std.AutoHashMap(*Scope.ZIRModule, *ErrorMsg),
/// The ErrorMsg memory is owned by the `Export`, using Module's allocator.
failed_exports: std.AutoHashMap(*Export, *ErrorMsg),
+/// Incrementing integer used to compare against the corresponding Decl
+/// field to determine whether a Decl's status applies to an ongoing update, or a
+/// previous analysis.
+generation: u32 = 0,
+
+/// Candidates for deletion. After a semantic analysis update completes, this list
+/// contains Decls that need to be deleted if they end up having no references to them.
+deletion_set: std.ArrayListUnmanaged(*Decl) = std.ArrayListUnmanaged(*Decl){},
+
pub const WorkItem = union(enum) {
/// Write the machine code for a Decl to the output file.
codegen_decl: *Decl,
+ /// Decl has been determined to be outdated; perform semantic analysis again.
+ re_analyze_decl: *Decl,
};
pub const Export = struct {
@@ -68,6 +79,8 @@ pub const Export = struct {
link: link.ElfFile.Export,
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
owner_decl: *Decl,
+ /// The Decl being exported. Note this is *not* the Decl performing the export.
+ exported_decl: *Decl,
status: enum {
in_progress,
failed,
@@ -94,8 +107,7 @@ pub const Decl = struct {
/// This is the base offset that src offsets within this Decl are relative to.
src: usize,
/// The most recent value of the Decl after a successful semantic analysis.
- /// The tag for this union is determined by the tag value of the analysis field.
- typed_value: union {
+ typed_value: union(enum) {
never_succeeded: void,
most_recent: TypedValue.Managed,
},
@@ -104,50 +116,56 @@ pub const Decl = struct {
/// analysis of the function body is performed with this value set to `success`. Functions
/// have their own analysis status field.
analysis: enum {
- initial_in_progress,
+ /// Semantic analysis for this Decl is running right now. This state detects dependency loops.
+ in_progress,
/// This Decl might be OK but it depends on another one which did not successfully complete
- /// semantic analysis. This Decl never had a value computed.
- initial_dependency_failure,
- /// Semantic analysis failure. This Decl never had a value computed.
+ /// semantic analysis.
+ dependency_failure,
+ /// Semantic analysis failure.
/// There will be a corresponding ErrorMsg in Module.failed_decls.
- initial_sema_failure,
- /// In this case the `typed_value.most_recent` can still be accessed.
+ sema_failure,
/// There will be a corresponding ErrorMsg in Module.failed_decls.
codegen_failure,
- /// In this case the `typed_value.most_recent` can still be accessed.
/// There will be a corresponding ErrorMsg in Module.failed_decls.
/// This indicates the failure was something like running out of disk space,
/// and attempting codegen again may succeed.
codegen_failure_retryable,
- /// This Decl might be OK but it depends on another one which did not successfully complete
- /// semantic analysis. There is a most recent value available.
- repeat_dependency_failure,
- /// Semantic anlaysis failure, but the `typed_value.most_recent` can be accessed.
- /// There will be a corresponding ErrorMsg in Module.failed_decls.
- repeat_sema_failure,
- /// Completed successfully before; the `typed_value.most_recent` can be accessed, and
- /// new semantic analysis is in progress.
- repeat_in_progress,
- /// Everything is done and updated.
+ /// Everything is done. During an update, this Decl may be out of date, depending
+ /// on its dependencies. The `generation` field can be used to determine if this
+ /// completion status occurred before or after a given update.
complete,
+ /// A Module update is in progress, and this Decl has been flagged as being known
+ /// to require re-analysis.
+ outdated,
},
+ /// This flag is set when this Decl is added to the deletion set, and cleared
+ /// when it is removed from it.
+ deletion_flag: bool,
+ /// An integer that can be checked against the corresponding incrementing
+ /// generation field of Module. This is used to determine whether `complete` status
+ /// represents pre- or post- re-analysis.
+ generation: u32,
/// Represents the position of the code in the output file.
/// This is populated regardless of semantic analysis and code generation.
- link: link.ElfFile.Decl = link.ElfFile.Decl.empty,
+ link: link.ElfFile.TextBlock = link.ElfFile.TextBlock.empty,
+
+ contents_hash: Hash,
/// The shallow set of other decls whose typed_value could possibly change if this Decl's
/// typed_value is modified.
- /// TODO look into using a lightweight map/set data structure rather than a linear array.
dependants: ArrayListUnmanaged(*Decl) = ArrayListUnmanaged(*Decl){},
-
- contents_hash: Hash,
+ /// The shallow set of other decls whose typed_value changing indicates that this Decl's
+ /// typed_value may need to be regenerated.
+ dependencies: ArrayListUnmanaged(*Decl) = ArrayListUnmanaged(*Decl){},
pub fn destroy(self: *Decl, allocator: *Allocator) void {
allocator.free(mem.spanZ(self.name));
if (self.typedValueManaged()) |tvm| {
tvm.deinit(allocator);
}
+ self.dependants.deinit(allocator);
+ self.dependencies.deinit(allocator);
allocator.destroy(self);
}
@@ -172,7 +190,7 @@ pub const Decl = struct {
pub fn fullyQualifiedNameHash(self: Decl) Hash {
// Right now we only have ZIRModule as the source. So this is simply the
// relative name of the decl.
- return hashSimpleName(mem.spanZ(u8, self.name));
+ return hashSimpleName(mem.spanZ(self.name));
}
pub fn typedValue(self: *Decl) error{AnalysisFail}!TypedValue {
@@ -200,20 +218,31 @@ pub const Decl = struct {
}
fn typedValueManaged(self: *Decl) ?*TypedValue.Managed {
- switch (self.analysis) {
- .initial_in_progress,
- .initial_dependency_failure,
- .initial_sema_failure,
- => return null,
- .codegen_failure,
- .codegen_failure_retryable,
- .repeat_dependency_failure,
- .repeat_sema_failure,
- .repeat_in_progress,
- .complete,
- => return &self.typed_value.most_recent,
+ switch (self.typed_value) {
+ .most_recent => |*x| return x,
+ .never_succeeded => return null,
}
}
+
+ fn removeDependant(self: *Decl, other: *Decl) void {
+ for (self.dependants.items) |item, i| {
+ if (item == other) {
+ _ = self.dependants.swapRemove(i);
+ return;
+ }
+ }
+ unreachable;
+ }
+
+ fn removeDependency(self: *Decl, other: *Decl) void {
+ for (self.dependencies.items) |item, i| {
+ if (item == other) {
+ _ = self.dependencies.swapRemove(i);
+ return;
+ }
+ }
+ unreachable;
+ }
};
/// Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.
@@ -266,12 +295,12 @@ pub const Scope = struct {
/// Asserts the scope has a parent which is a DeclAnalysis and
/// returns the Decl.
- pub fn decl(self: *Scope) *Decl {
- switch (self.tag) {
- .block => return self.cast(Block).?.decl,
- .decl => return self.cast(DeclAnalysis).?.decl,
- .zir_module => unreachable,
- }
+ pub fn decl(self: *Scope) ?*Decl {
+ return switch (self.tag) {
+ .block => self.cast(Block).?.decl,
+ .decl => self.cast(DeclAnalysis).?.decl,
+ .zir_module => null,
+ };
}
/// Asserts the scope has a parent which is a ZIRModule and
@@ -477,6 +506,7 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module {
pub fn deinit(self: *Module) void {
self.bin_file.deinit();
const allocator = self.allocator;
+ self.deletion_set.deinit(allocator);
self.work_queue.deinit();
{
var it = self.decl_table.iterator();
@@ -517,11 +547,7 @@ pub fn deinit(self: *Module) void {
{
var it = self.export_owners.iterator();
while (it.next()) |kv| {
- const export_list = kv.value;
- for (export_list) |exp| {
- allocator.destroy(exp);
- }
- allocator.free(export_list);
+ freeExportList(allocator, kv.value);
}
self.export_owners.deinit();
}
@@ -532,12 +558,21 @@ pub fn deinit(self: *Module) void {
self.* = undefined;
}
+fn freeExportList(allocator: *Allocator, export_list: []*Export) void {
+ for (export_list) |exp| {
+ allocator.destroy(exp);
+ }
+ allocator.free(export_list);
+}
+
pub fn target(self: Module) std.Target {
return self.bin_file.options.target;
}
/// Detect changes to source files, perform semantic analysis, and update the output files.
pub fn update(self: *Module) !void {
+ self.generation += 1;
+
// TODO Use the cache hash file system to detect which source files changed.
// Here we simulate a full cache miss.
// Analyze the root source file now.
@@ -550,6 +585,15 @@ pub fn update(self: *Module) !void {
try self.performAllTheWork();
+ // Process the deletion set.
+ while (self.deletion_set.popOrNull()) |decl| {
+ if (decl.dependants.items.len != 0) {
+ decl.deletion_flag = false;
+ continue;
+ }
+ try self.deleteDecl(decl);
+ }
+
// Unload all the source files from memory.
self.root_scope.unload(self.allocator);
@@ -634,15 +678,12 @@ const InnerError = error{ OutOfMemory, AnalysisFail };
pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
while (self.work_queue.readItem()) |work_item| switch (work_item) {
.codegen_decl => |decl| switch (decl.analysis) {
- .initial_in_progress,
- .repeat_in_progress,
- => unreachable,
+ .in_progress => unreachable,
+ .outdated => unreachable,
- .initial_sema_failure,
- .repeat_sema_failure,
+ .sema_failure,
.codegen_failure,
- .initial_dependency_failure,
- .repeat_dependency_failure,
+ .dependency_failure,
=> continue,
.complete, .codegen_failure_retryable => {
@@ -668,7 +709,7 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
self.bin_file.updateDecl(self, decl) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
- decl.analysis = .repeat_dependency_failure;
+ decl.analysis = .dependency_failure;
},
else => {
try self.failed_decls.ensureCapacity(self.failed_decls.size + 1);
@@ -683,9 +724,60 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
};
},
},
+ .re_analyze_decl => |decl| switch (decl.analysis) {
+ .in_progress => unreachable,
+
+ .sema_failure,
+ .codegen_failure,
+ .dependency_failure,
+ .complete,
+ .codegen_failure_retryable,
+ => continue,
+
+ .outdated => {
+ const zir_module = self.getSrcModule(decl.scope) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => {
+ try self.failed_decls.ensureCapacity(self.failed_decls.size + 1);
+ self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
+ self.allocator,
+ decl.src,
+ "unable to load source file '{}': {}",
+ .{decl.scope.sub_file_path, @errorName(err)},
+ ));
+ decl.analysis = .codegen_failure_retryable;
+ continue;
+ },
+ };
+ const decl_name = mem.spanZ(decl.name);
+ // We already detected deletions, so we know this will be found.
+ const src_decl = zir_module.findDecl(decl_name).?;
+ self.reAnalyzeDecl(decl, src_decl) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.AnalysisFail => continue,
+ };
+ }
+ },
};
}
+fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void {
+ try depender.dependencies.ensureCapacity(self.allocator, depender.dependencies.items.len + 1);
+ try dependee.dependants.ensureCapacity(self.allocator, dependee.dependants.items.len + 1);
+
+ for (depender.dependencies.items) |item| {
+ if (item == dependee) break; // Already in the set.
+ } else {
+ depender.dependencies.appendAssumeCapacity(dependee);
+ }
+
+ for (dependee.dependants.items) |item| {
+ if (item == depender) break; // Already in the set.
+ } else {
+ dependee.dependants.appendAssumeCapacity(depender);
+ }
+}
+
fn getSource(self: *Module, root_scope: *Scope.ZIRModule) ![:0]const u8 {
switch (root_scope.source) {
.unloaded => {
@@ -742,13 +834,6 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
}
fn analyzeRoot(self: *Module, root_scope: *Scope.ZIRModule) !void {
- // TODO use the cache to identify, from the modified source files, the decls which have
- // changed based on the span of memory that represents the decl in the re-parsed source file.
- // Use the cached dependency graph to recursively determine the set of decls which need
- // regeneration.
- // Here we simulate adding a source file which was previously not part of the compilation,
- // which means scanning the decls looking for exports.
- // TODO also identify decls that need to be deleted.
switch (root_scope.status) {
.never_loaded => {
const src_module = try self.getSrcModule(root_scope);
@@ -759,7 +844,7 @@ fn analyzeRoot(self: *Module, root_scope: *Scope.ZIRModule) !void {
for (src_module.decls) |decl| {
if (decl.cast(zir.Inst.Export)) |export_inst| {
- _ = try self.resolveDecl(&root_scope.base, &export_inst.base, link.ElfFile.Decl.empty);
+ _ = try self.resolveDecl(&root_scope.base, &export_inst.base);
}
}
},
@@ -772,41 +857,112 @@ fn analyzeRoot(self: *Module, root_scope: *Scope.ZIRModule) !void {
=> {
const src_module = try self.getSrcModule(root_scope);
- // Look for changed decls.
+ var exports_to_resolve = std.ArrayList(*zir.Inst).init(self.allocator);
+ defer exports_to_resolve.deinit();
+
+ // Keep track of the decls that we expect to see in this file so that
+ // we know which ones have been deleted.
+ var deleted_decls = std.AutoHashMap(*Decl, void).init(self.allocator);
+ defer deleted_decls.deinit();
+ try deleted_decls.ensureCapacity(self.decl_table.size);
+ {
+ var it = self.decl_table.iterator();
+ while (it.next()) |kv| {
+ deleted_decls.putAssumeCapacityNoClobber(kv.value, {});
+ }
+ }
+
for (src_module.decls) |src_decl| {
const name_hash = Decl.hashSimpleName(src_decl.name);
if (self.decl_table.get(name_hash)) |kv| {
const decl = kv.value;
+ deleted_decls.removeAssertDiscard(decl);
const new_contents_hash = Decl.hashSimpleName(src_decl.contents);
if (!mem.eql(u8, &new_contents_hash, &decl.contents_hash)) {
- // TODO recursive dependency management
- //std.debug.warn("noticed that '{}' changed\n", .{src_decl.name});
- self.decl_table.removeAssertDiscard(name_hash);
- const saved_link = decl.link;
- decl.destroy(self.allocator);
- if (self.export_owners.getValue(decl)) |exports| {
- @panic("TODO handle updating a decl that does an export");
- }
- const new_decl = self.resolveDecl(
- &root_scope.base,
- src_decl,
- saved_link,
- ) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.AnalysisFail => continue,
- };
- if (self.decl_exports.remove(decl)) |entry| {
- self.decl_exports.putAssumeCapacityNoClobber(new_decl, entry.value);
- }
+ //std.debug.warn("noticed '{}' source changed\n", .{src_decl.name});
+ decl.analysis = .outdated;
+ decl.contents_hash = new_contents_hash;
+ try self.work_queue.writeItem(.{ .re_analyze_decl = decl });
}
} else if (src_decl.cast(zir.Inst.Export)) |export_inst| {
- _ = try self.resolveDecl(&root_scope.base, &export_inst.base, link.ElfFile.Decl.empty);
+ try exports_to_resolve.append(&export_inst.base);
+ }
+ }
+ {
+ // Handle explicitly deleted decls from the source code. Not to be confused
+ // with when we delete decls because they are no longer referenced.
+ var it = deleted_decls.iterator();
+ while (it.next()) |kv| {
+ //std.debug.warn("noticed '{}' deleted from source\n", .{kv.key.name});
+ try self.deleteDecl(kv.key);
}
}
+ for (exports_to_resolve.items) |export_inst| {
+ _ = try self.resolveDecl(&root_scope.base, export_inst);
+ }
},
}
}
+fn deleteDecl(self: *Module, decl: *Decl) !void {
+ //std.debug.warn("deleting decl '{}'\n", .{decl.name});
+ const name_hash = decl.fullyQualifiedNameHash();
+ self.decl_table.removeAssertDiscard(name_hash);
+ // Remove itself from its dependencies, because we are about to destroy the decl pointer.
+ for (decl.dependencies.items) |dep| {
+ dep.removeDependant(decl);
+ if (dep.dependants.items.len == 0) {
+ // We don't recursively perform a deletion here, because during the update,
+ // another reference to it may turn up.
+ assert(!dep.deletion_flag);
+ dep.deletion_flag = true;
+ try self.deletion_set.append(self.allocator, dep);
+ }
+ }
+ // Anything that depends on this deleted decl certainly needs to be re-analyzed.
+ for (decl.dependants.items) |dep| {
+ dep.removeDependency(decl);
+ if (dep.analysis != .outdated) {
+ dep.analysis = .outdated;
+ try self.work_queue.writeItem(.{ .re_analyze_decl = dep });
+ }
+ }
+ self.deleteDeclExports(decl);
+ self.bin_file.freeDecl(decl);
+ decl.destroy(self.allocator);
+}
+
+/// Delete all the Export objects that are caused by this Decl. Re-analysis of
+/// this Decl will cause them to be re-created (or not).
+fn deleteDeclExports(self: *Module, decl: *Decl) void {
+ const kv = self.export_owners.remove(decl) orelse return;
+
+ for (kv.value) |exp| {
+ if (self.decl_exports.get(exp.exported_decl)) |decl_exports_kv| {
+ // Remove exports with owner_decl matching the regenerating decl.
+ const list = decl_exports_kv.value;
+ var i: usize = 0;
+ var new_len = list.len;
+ while (i < new_len) {
+ if (list[i].owner_decl == decl) {
+ mem.copyBackwards(*Export, list[i..], list[i + 1..new_len]);
+ new_len -= 1;
+ } else {
+ i += 1;
+ }
+ }
+ decl_exports_kv.value = self.allocator.shrink(list, new_len);
+ if (new_len == 0) {
+ self.decl_exports.removeAssertDiscard(exp.exported_decl);
+ }
+ }
+
+ self.bin_file.deleteExport(exp.link);
+ self.allocator.destroy(exp);
+ }
+ self.allocator.free(kv.value);
+}
+
fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
// Use the Decl's arena for function memory.
var arena = decl.typed_value.most_recent.arena.?.promote(self.allocator);
@@ -836,15 +992,111 @@ fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
};
}
-fn resolveDecl(
- self: *Module,
- scope: *Scope,
- old_inst: *zir.Inst,
- bin_file_link: link.ElfFile.Decl,
-) InnerError!*Decl {
+fn reAnalyzeDecl(self: *Module, decl: *Decl, old_inst: *zir.Inst) InnerError!void {
+ switch (decl.analysis) {
+ .in_progress => unreachable,
+ .dependency_failure,
+ .sema_failure,
+ .codegen_failure,
+ .codegen_failure_retryable,
+ .complete,
+ => return,
+
+ .outdated => {}, // Decl re-analysis
+ }
+ //std.debug.warn("re-analyzing {}\n", .{decl.name});
+ decl.src = old_inst.src;
+
+ // The exports this Decl performs will be re-discovered, so we remove them here
+ // prior to re-analysis.
+ self.deleteDeclExports(decl);
+ // Dependencies will be re-discovered, so we remove them here prior to re-analysis.
+ for (decl.dependencies.items) |dep| {
+ dep.removeDependant(decl);
+ if (dep.dependants.items.len == 0) {
+ // We don't perform a deletion here, because this Decl or another one
+ // may end up referencing it before the update is complete.
+ assert(!dep.deletion_flag);
+ dep.deletion_flag = true;
+ try self.deletion_set.append(self.allocator, dep);
+ }
+ }
+ decl.dependencies.shrink(self.allocator, 0);
+ var decl_scope: Scope.DeclAnalysis = .{
+ .decl = decl,
+ .arena = std.heap.ArenaAllocator.init(self.allocator),
+ };
+ errdefer decl_scope.arena.deinit();
+
+ const typed_value = self.analyzeInstConst(&decl_scope.base, old_inst) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.AnalysisFail => {
+ switch (decl.analysis) {
+ .in_progress => decl.analysis = .dependency_failure,
+ else => {},
+ }
+ decl.generation = self.generation;
+ return error.AnalysisFail;
+ },
+ };
+ const arena_state = try decl_scope.arena.allocator.create(std.heap.ArenaAllocator.State);
+ arena_state.* = decl_scope.arena.state;
+
+ var prev_type_has_bits = false;
+ var type_changed = true;
+
+ if (decl.typedValueManaged()) |tvm| {
+ prev_type_has_bits = tvm.typed_value.ty.hasCodeGenBits();
+ type_changed = !tvm.typed_value.ty.eql(typed_value.ty);
+
+ tvm.deinit(self.allocator);
+ }
+ decl.typed_value = .{
+ .most_recent = .{
+ .typed_value = typed_value,
+ .arena = arena_state,
+ },
+ };
+ decl.analysis = .complete;
+ decl.generation = self.generation;
+ if (typed_value.ty.hasCodeGenBits()) {
+ // We don't fully codegen the decl until later, but we do need to reserve a global
+ // offset table index for it. This allows us to codegen decls out of dependency order,
+ // increasing how many computations can be done in parallel.
+ try self.bin_file.allocateDeclIndexes(decl);
+ try self.work_queue.writeItem(.{ .codegen_decl = decl });
+ } else if (prev_type_has_bits) {
+ self.bin_file.freeDecl(decl);
+ }
+
+ // If the decl is a function, and the type is the same, we do not need
+ // to chase the dependants.
+ if (type_changed or typed_value.val.tag() != .function) {
+ for (decl.dependants.items) |dep| {
+ switch (dep.analysis) {
+ .in_progress => unreachable,
+ .outdated => continue, // already queued for update
+
+ .dependency_failure,
+ .sema_failure,
+ .codegen_failure,
+ .codegen_failure_retryable,
+ .complete,
+ => if (dep.generation != self.generation) {
+ dep.analysis = .outdated;
+ try self.work_queue.writeItem(.{ .re_analyze_decl = dep });
+ },
+ }
+ }
+ }
+}
+
+fn resolveDecl(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Decl {
const hash = Decl.hashSimpleName(old_inst.name);
if (self.decl_table.get(hash)) |kv| {
- return kv.value;
+ const decl = kv.value;
+ try self.reAnalyzeDecl(decl, old_inst);
+ return decl;
} else {
const new_decl = blk: {
try self.decl_table.ensureCapacity(self.decl_table.size + 1);
@@ -857,9 +1109,11 @@ fn resolveDecl(
.scope = scope.namespace(),
.src = old_inst.src,
.typed_value = .{ .never_succeeded = {} },
- .analysis = .initial_in_progress,
+ .analysis = .in_progress,
+ .deletion_flag = false,
.contents_hash = Decl.hashSimpleName(old_inst.contents),
- .link = bin_file_link,
+ .link = link.ElfFile.TextBlock.empty,
+ .generation = 0,
};
self.decl_table.putAssumeCapacityNoClobber(hash, new_decl);
break :blk new_decl;
@@ -875,10 +1129,10 @@ fn resolveDecl(
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
switch (new_decl.analysis) {
- .initial_in_progress => new_decl.analysis = .initial_dependency_failure,
- .repeat_in_progress => new_decl.analysis = .repeat_dependency_failure,
+ .in_progress => new_decl.analysis = .dependency_failure,
else => {},
}
+ new_decl.generation = self.generation;
return error.AnalysisFail;
},
};
@@ -893,34 +1147,37 @@ fn resolveDecl(
},
};
new_decl.analysis = .complete;
+ new_decl.generation = self.generation;
if (typed_value.ty.hasCodeGenBits()) {
// We don't fully codegen the decl until later, but we do need to reserve a global
// offset table index for it. This allows us to codegen decls out of dependency order,
// increasing how many computations can be done in parallel.
try self.bin_file.allocateDeclIndexes(new_decl);
-
- // We ensureCapacity when scanning for decls.
- self.work_queue.writeItemAssumeCapacity(.{ .codegen_decl = new_decl });
+ try self.work_queue.writeItem(.{ .codegen_decl = new_decl });
}
return new_decl;
}
}
+/// Declares a dependency on the decl.
fn resolveCompleteDecl(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Decl {
- const decl = try self.resolveDecl(scope, old_inst, link.ElfFile.Decl.empty);
+ const decl = try self.resolveDecl(scope, old_inst);
switch (decl.analysis) {
- .initial_in_progress => unreachable,
- .repeat_in_progress => unreachable,
- .initial_dependency_failure,
- .repeat_dependency_failure,
- .initial_sema_failure,
- .repeat_sema_failure,
+ .in_progress => unreachable,
+ .outdated => unreachable,
+
+ .dependency_failure,
+ .sema_failure,
.codegen_failure,
.codegen_failure_retryable,
=> return error.AnalysisFail,
- .complete => return decl,
+ .complete => {},
}
+ if (scope.decl()) |scope_decl| {
+ try self.declareDeclDependency(scope_decl, decl);
+ }
+ return decl;
}
fn resolveInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Inst {
@@ -998,13 +1255,14 @@ fn analyzeExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) In
const new_export = try self.allocator.create(Export);
errdefer self.allocator.destroy(new_export);
- const owner_decl = scope.decl();
+ const owner_decl = scope.decl().?;
new_export.* = .{
.options = .{ .name = symbol_name },
.src = export_inst.base.src,
.link = .{},
.owner_decl = owner_decl,
+ .exported_decl = exported_decl,
.status = .in_progress,
};
@@ -1327,7 +1585,7 @@ fn analyzeInstFn(self: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError
new_func.* = .{
.fn_type = fn_type,
.analysis = .{ .queued = fn_inst },
- .owner_decl = scope.decl(),
+ .owner_decl = scope.decl().?,
};
const fn_payload = try scope.arena().create(Value.Payload.Function);
fn_payload.* = .{ .func = new_func };
@@ -2024,11 +2282,7 @@ fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Err
switch (scope.tag) {
.decl => {
const decl = scope.cast(Scope.DeclAnalysis).?.decl;
- switch (decl.analysis) {
- .initial_in_progress => decl.analysis = .initial_sema_failure,
- .repeat_in_progress => decl.analysis = .repeat_sema_failure,
- else => unreachable,
- }
+ decl.analysis = .sema_failure;
self.failed_decls.putAssumeCapacityNoClobber(decl, err_msg);
},
.block => {