commit f7596ae9423e9de8276629803147e1a243f2177b
tree b97f2a8e8fdb84a118f587bcbfd76918710587dd
parent 4f527e5d36f66a83ff6a263a03f16e2c4d049f1e
author Andrew Kelley <andrew@ziglang.org> 2022-04-19 21:51:08 -0700
committer Andrew Kelley <andrew@ziglang.org> 2022-04-20 17:37:35 -0700
stage2: use indexes for Decl objects
Rather than allocating Decl objects with an Allocator, we instead allocate
them with a SegmentedList. This provides four advantages:

* Stable memory so that one thread can access a Decl object while another
  thread allocates additional Decl objects from this list.
* It allows us to use u32 indexes to reference Decl objects rather than
  pointers, saving memory in Type, Value, and dependency sets.
* Using integers to reference Decl objects rather than pointers makes
  serialization trivial.
* It provides a unique integer to be used for anonymous symbol names,
  avoiding multi-threaded contention on an atomic counter.
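
As a minimal standalone sketch of that layout (with illustrative `Pool` and
`Thing` names that are not part of this commit), the whole pattern is a
growable pool addressed by a typed u32 index, plus a free list for reuse:

const std = @import("std");

// A growable pool of Things addressed by a typed u32 index. SegmentedList
// never moves existing elements when it grows, so `ptr` stays valid across
// later calls to `create`.
const Thing = struct {
    name: []const u8,
};

const Pool = struct {
    allocated: std.SegmentedList(Thing, 0) = .{},
    free_list: std.ArrayListUnmanaged(Index) = .{},

    const Index = enum(u32) { _ };

    fn create(pool: *Pool, gpa: std.mem.Allocator) !Index {
        // Reuse a previously destroyed slot when possible.
        if (pool.free_list.popOrNull()) |index| return index;
        const index = @intToEnum(Index, @intCast(u32, pool.allocated.len));
        _ = try pool.allocated.addOne(gpa);
        return index;
    }

    fn ptr(pool: *Pool, index: Index) *Thing {
        return pool.allocated.at(@enumToInt(index));
    }

    fn destroy(pool: *Pool, gpa: std.mem.Allocator, index: Index) void {
        pool.ptr(index).* = undefined;
        // Swallow OOM so destroy stays non-fallible; the slot merely leaks.
        pool.free_list.append(gpa, index) catch {};
    }

    fn deinit(pool: *Pool, gpa: std.mem.Allocator) void {
        pool.free_list.deinit(gpa);
        pool.allocated.deinit(gpa);
    }
};

test "indexes survive pool growth" {
    const gpa = std.testing.allocator;
    var pool: Pool = .{};
    defer pool.deinit(gpa);

    const a = try pool.create(gpa);
    pool.ptr(a).* = .{ .name = "a" };
    var i: usize = 0;
    while (i < 1000) : (i += 1) _ = try pool.create(gpa); // force growth
    try std.testing.expectEqualStrings("a", pool.ptr(a).name);
}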
Diffstat (limited to 'src/Module.zig')
 src/Module.zig | 910
 1 file changed, 529 insertions(+), 381 deletions(-)
diff --git a/src/Module.zig b/src/Module.zig
index 95ae55feb8..1119d73ab0 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -49,15 +49,15 @@ global_zir_cache: Compilation.Directory,
/// Used by AstGen worker to load and store ZIR cache.
local_zir_cache: Compilation.Directory,
/// It's rare for a decl to be exported, so we save memory by having a sparse
-/// map of Decl pointers to details about them being exported.
+/// map of Decl indexes to details about them being exported.
/// The Export memory is owned by the `export_owners` table; the slice itself
/// is owned by this table. The slice is guaranteed to not be empty.
-decl_exports: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
+decl_exports: std.AutoArrayHashMapUnmanaged(Decl.Index, []*Export) = .{},
/// This models the Decls that perform exports, so that `decl_exports` can be updated when a Decl
/// is modified. Note that the key of this table is not the Decl being exported, but the Decl that
/// is performing the export of another Decl.
/// This table owns the Export memory.
-export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
+export_owners: std.AutoArrayHashMapUnmanaged(Decl.Index, []*Export) = .{},
/// The set of all the Zig source files in the Module. We keep track of this in order
/// to iterate over it and check which source files have been modified on the file system when
/// an update is requested, as well as to cache `@import` results.
@@ -89,10 +89,10 @@ align_stack_fns: std.AutoHashMapUnmanaged(*const Fn, SetAlignStack) = .{},
/// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator.
/// Note that a Decl can succeed but the Fn it represents can fail. In this case,
/// a Decl can have a failed_decls entry but have analysis status of success.
-failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
+failed_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, *ErrorMsg) = .{},
/// Keep track of one `@compileLog` callsite per owner Decl.
/// The value is the AST node index offset from the Decl.
-compile_log_decls: std.AutoArrayHashMapUnmanaged(*Decl, i32) = .{},
+compile_log_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, i32) = .{},
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `File`, using Module's general purpose allocator.
failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .{},
@@ -102,11 +102,9 @@ failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .{},
/// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator.
failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{},
-next_anon_name_index: usize = 0,
-
/// Candidates for deletion. After a semantic analysis update completes, this list
/// contains Decls that need to be deleted if they end up having no references to them.
-deletion_set: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{},
+deletion_set: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
/// Error tags and their values, tag names are duped with mod.gpa.
/// Corresponds with `error_name_list`.
@@ -137,7 +135,21 @@ compile_log_text: ArrayListUnmanaged(u8) = .{},
emit_h: ?*GlobalEmitH,
-test_functions: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{},
+test_functions: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
+
+/// Rather than allocating Decl objects with an Allocator, we instead allocate
+/// them with this SegmentedList. This provides four advantages:
+/// * Stable memory so that one thread can access a Decl object while another
+/// thread allocates additional Decl objects from this list.
+/// * It allows us to use u32 indexes to reference Decl objects rather than
+/// pointers, saving memory in Type, Value, and dependency sets.
+/// * Using integers to reference Decl objects rather than pointers makes
+/// serialization trivial.
+/// * It provides a unique integer to be used for anonymous symbol names, avoiding
+/// multi-threaded contention on an atomic counter.
+allocated_decls: std.SegmentedList(Decl, 0) = .{},
+/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack.
+decls_free_list: std.ArrayListUnmanaged(Decl.Index) = .{},
const MonomorphedFuncsSet = std.HashMapUnmanaged(
*Fn,
@@ -173,7 +185,7 @@ pub const MemoizedCallSet = std.HashMapUnmanaged(
);
pub const MemoizedCall = struct {
- target: std.Target,
+ module: *Module,
pub const Key = struct {
func: *Fn,
@@ -191,7 +203,7 @@ pub const MemoizedCall = struct {
assert(a.args.len == b.args.len);
for (a.args) |a_arg, arg_i| {
const b_arg = b.args[arg_i];
- if (!a_arg.eql(b_arg, ctx.target)) {
+ if (!a_arg.eql(b_arg, ctx.module)) {
return false;
}
}
@@ -210,7 +222,7 @@ pub const MemoizedCall = struct {
// This logic must be kept in sync with the logic in `analyzeCall` that
// computes the hash.
for (key.args) |arg| {
- arg.hash(&hasher, ctx.target);
+ arg.hash(&hasher, ctx.module);
}
return hasher.final();
@@ -231,9 +243,17 @@ pub const GlobalEmitH = struct {
/// When emit_h is non-null, each Decl gets one more compile error slot for
/// emit-h failing for that Decl. This table is also how we tell if a Decl has
/// failed emit-h or succeeded.
- failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
+ failed_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, *ErrorMsg) = .{},
/// Tracks all decls in order to iterate over them and emit .h code for them.
- decl_table: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{},
+ decl_table: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
+ /// Similar to the allocated_decls field of Module, this is where `EmitH` objects
+ /// are allocated. There will be exactly one EmitH object per Decl object, with
+ /// identical indexes.
+ allocated_emit_h: std.SegmentedList(EmitH, 0) = .{},
+
+ pub fn declPtr(global_emit_h: *GlobalEmitH, decl_index: Decl.Index) *EmitH {
+ return global_emit_h.allocated_emit_h.at(@enumToInt(decl_index));
+ }
};
pub const ErrorInt = u32;
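
The `allocated_emit_h` list above relies on the parallel-list trick: a second
SegmentedList kept exactly as long as the primary one, so the same index
addresses both. A standalone sketch of that idea (hypothetical names, not
from the commit):

const std = @import("std");

const Index = enum(u32) { _ };

const Pools = struct {
    primary: std.SegmentedList(u64, 0) = .{},
    side: std.SegmentedList([]const u8, 0) = .{},

    // Grow both lists in lockstep so indexes stay interchangeable.
    fn addOne(p: *Pools, gpa: std.mem.Allocator) !Index {
        const index = @intToEnum(Index, @intCast(u32, p.primary.len));
        _ = try p.primary.addOne(gpa);
        _ = try p.side.addOne(gpa);
        return index;
    }

    // Mirrors GlobalEmitH.declPtr: no per-object pointer is stored anywhere.
    fn sidePtr(p: *Pools, index: Index) *[]const u8 {
        return p.side.at(@enumToInt(index));
    }
};

test "identical indexes address both lists" {
    const gpa = std.testing.allocator;
    var pools: Pools = .{};
    defer {
        pools.primary.deinit(gpa);
        pools.side.deinit(gpa);
    }
    const i = try pools.addOne(gpa);
    pools.sidePtr(i).* = "fwd decl text";
    try std.testing.expectEqualStrings("fwd decl text", pools.sidePtr(i).*);
}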
@@ -244,12 +264,12 @@ pub const Export = struct {
/// Represents the position of the export, if any, in the output file.
link: link.File.Export,
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
- owner_decl: *Decl,
+ owner_decl: Decl.Index,
/// The Decl containing the export statement. Inline function calls
/// may cause this to be different from the owner_decl.
- src_decl: *Decl,
+ src_decl: Decl.Index,
/// The Decl being exported. Note this is *not* the Decl performing the export.
- exported_decl: *Decl,
+ exported_decl: Decl.Index,
status: enum {
in_progress,
failed,
@@ -259,22 +279,16 @@ pub const Export = struct {
complete,
},
- pub fn getSrcLoc(exp: Export) SrcLoc {
+ pub fn getSrcLoc(exp: Export, mod: *Module) SrcLoc {
+ const src_decl = mod.declPtr(exp.src_decl);
return .{
- .file_scope = exp.src_decl.getFileScope(),
- .parent_decl_node = exp.src_decl.src_node,
+ .file_scope = src_decl.getFileScope(),
+ .parent_decl_node = src_decl.src_node,
.lazy = exp.src,
};
}
};
-/// When Module emit_h field is non-null, each Decl is allocated via this struct, so that
-/// there can be EmitH state attached to each Decl.
-pub const DeclPlusEmitH = struct {
- decl: Decl,
- emit_h: EmitH,
-};
-
pub const CaptureScope = struct {
parent: ?*CaptureScope,
@@ -458,36 +472,33 @@ pub const Decl = struct {
/// typed_value may need to be regenerated.
dependencies: DepsTable = .{},
- pub const DepsTable = std.AutoArrayHashMapUnmanaged(*Decl, void);
-
- pub fn clearName(decl: *Decl, gpa: Allocator) void {
- gpa.free(mem.sliceTo(decl.name, 0));
- decl.name = undefined;
- }
+ pub const Index = enum(u32) {
+ _,
- pub fn destroy(decl: *Decl, module: *Module) void {
- const gpa = module.gpa;
- log.debug("destroy {*} ({s})", .{ decl, decl.name });
- _ = module.test_functions.swapRemove(decl);
- if (decl.deletion_flag) {
- assert(module.deletion_set.swapRemove(decl));
+ pub fn toOptional(i: Index) OptionalIndex {
+ return @intToEnum(OptionalIndex, @enumToInt(i));
}
- if (decl.has_tv) {
- if (decl.getInnerNamespace()) |namespace| {
- namespace.destroyDecls(module);
- }
- decl.clearValues(gpa);
+ };
+
+ pub const OptionalIndex = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub fn init(oi: ?Index) OptionalIndex {
+ return oi orelse .none;
}
- decl.dependants.deinit(gpa);
- decl.dependencies.deinit(gpa);
- decl.clearName(gpa);
- if (module.emit_h != null) {
- const decl_plus_emit_h = @fieldParentPtr(DeclPlusEmitH, "decl", decl);
- decl_plus_emit_h.emit_h.fwd_decl.deinit(gpa);
- gpa.destroy(decl_plus_emit_h);
- } else {
- gpa.destroy(decl);
+
+ pub fn unwrap(oi: OptionalIndex) ?Index {
+ if (oi == .none) return null;
+ return @intToEnum(Index, @enumToInt(oi));
}
+ };
+
+ pub const DepsTable = std.AutoArrayHashMapUnmanaged(Decl.Index, void);
+
+ pub fn clearName(decl: *Decl, gpa: Allocator) void {
+ gpa.free(mem.sliceTo(decl.name, 0));
+ decl.name = undefined;
}
pub fn clearValues(decl: *Decl, gpa: Allocator) void {
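
A standalone illustration of why the sentinel encoding in `OptionalIndex`
above is preferred over a plain optional: `?Index` needs a separate flag word,
while the sentinel keeps the optional at four bytes (illustrative sketch, not
from the commit):

const std = @import("std");

const Index = enum(u32) {
    _,

    fn toOptional(i: Index) OptionalIndex {
        return @intToEnum(OptionalIndex, @enumToInt(i));
    }
};

const OptionalIndex = enum(u32) {
    // maxInt is reserved as the null sentinel, so no extra flag is needed.
    none = std.math.maxInt(u32),
    _,

    fn unwrap(oi: OptionalIndex) ?Index {
        if (oi == .none) return null;
        return @intToEnum(Index, @enumToInt(oi));
    }
};

test "sentinel optional is half the size of ?Index" {
    try std.testing.expectEqual(@as(usize, 4), @sizeOf(OptionalIndex));
    try std.testing.expectEqual(@as(usize, 8), @sizeOf(?Index));

    const i = @intToEnum(Index, 42);
    try std.testing.expectEqual(@as(?Index, i), i.toOptional().unwrap());
    try std.testing.expectEqual(@as(?Index, null), OptionalIndex.none.unwrap());
}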
@@ -573,13 +584,6 @@ pub const Decl = struct {
return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
}
- /// Returns true if and only if the Decl is the top level struct associated with a File.
- pub fn isRoot(decl: *const Decl) bool {
- if (decl.src_namespace.parent != null)
- return false;
- return decl == decl.src_namespace.getDecl();
- }
-
pub fn relativeToLine(decl: Decl, offset: u32) u32 {
return decl.src_line + offset;
}
@@ -622,20 +626,20 @@ pub const Decl = struct {
return tree.tokens.items(.start)[decl.srcToken()];
}
- pub fn renderFullyQualifiedName(decl: Decl, writer: anytype) !void {
+ pub fn renderFullyQualifiedName(decl: Decl, mod: *Module, writer: anytype) !void {
const unqualified_name = mem.sliceTo(decl.name, 0);
- return decl.src_namespace.renderFullyQualifiedName(unqualified_name, writer);
+ return decl.src_namespace.renderFullyQualifiedName(mod, unqualified_name, writer);
}
- pub fn renderFullyQualifiedDebugName(decl: Decl, writer: anytype) !void {
+ pub fn renderFullyQualifiedDebugName(decl: Decl, mod: *Module, writer: anytype) !void {
const unqualified_name = mem.sliceTo(decl.name, 0);
- return decl.src_namespace.renderFullyQualifiedDebugName(unqualified_name, writer);
+ return decl.src_namespace.renderFullyQualifiedDebugName(mod, unqualified_name, writer);
}
- pub fn getFullyQualifiedName(decl: Decl, gpa: Allocator) ![:0]u8 {
- var buffer = std.ArrayList(u8).init(gpa);
+ pub fn getFullyQualifiedName(decl: Decl, mod: *Module) ![:0]u8 {
+ var buffer = std.ArrayList(u8).init(mod.gpa);
defer buffer.deinit();
- try decl.renderFullyQualifiedName(buffer.writer());
+ try decl.renderFullyQualifiedName(mod, buffer.writer());
return buffer.toOwnedSliceSentinel(0);
}
@@ -662,7 +666,6 @@ pub const Decl = struct {
if (!decl.owns_tv) return null;
const ty = (decl.val.castTag(.ty) orelse return null).data;
const struct_obj = (ty.castTag(.@"struct") orelse return null).data;
- assert(struct_obj.owner_decl == decl);
return struct_obj;
}
@@ -672,7 +675,6 @@ pub const Decl = struct {
if (!decl.owns_tv) return null;
const ty = (decl.val.castTag(.ty) orelse return null).data;
const union_obj = (ty.cast(Type.Payload.Union) orelse return null).data;
- assert(union_obj.owner_decl == decl);
return union_obj;
}
@@ -681,7 +683,6 @@ pub const Decl = struct {
pub fn getFunction(decl: *const Decl) ?*Fn {
if (!decl.owns_tv) return null;
const func = (decl.val.castTag(.function) orelse return null).data;
- assert(func.owner_decl == decl);
return func;
}
@@ -690,16 +691,14 @@ pub const Decl = struct {
pub fn getExternFn(decl: *const Decl) ?*ExternFn {
if (!decl.owns_tv) return null;
const extern_fn = (decl.val.castTag(.extern_fn) orelse return null).data;
- assert(extern_fn.owner_decl == decl);
return extern_fn;
}
/// If the Decl has a value and it is a variable, returns it,
/// otherwise null.
- pub fn getVariable(decl: *Decl) ?*Var {
+ pub fn getVariable(decl: *const Decl) ?*Var {
if (!decl.owns_tv) return null;
const variable = (decl.val.castTag(.variable) orelse return null).data;
- assert(variable.owner_decl == decl);
return variable;
}
@@ -712,12 +711,10 @@ pub const Decl = struct {
switch (ty.tag()) {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
- assert(struct_obj.owner_decl == decl);
return &struct_obj.namespace;
},
.enum_full, .enum_nonexhaustive => {
const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
- assert(enum_obj.owner_decl == decl);
return &enum_obj.namespace;
},
.empty_struct => {
@@ -725,12 +722,10 @@ pub const Decl = struct {
},
.@"opaque" => {
const opaque_obj = ty.cast(Type.Payload.Opaque).?.data;
- assert(opaque_obj.owner_decl == decl);
return &opaque_obj.namespace;
},
.@"union", .union_tagged => {
const union_obj = ty.cast(Type.Payload.Union).?.data;
- assert(union_obj.owner_decl == decl);
return &union_obj.namespace;
},
@@ -757,17 +752,11 @@ pub const Decl = struct {
return decl.src_namespace.file_scope;
}
- pub fn getEmitH(decl: *Decl, module: *Module) *EmitH {
- assert(module.emit_h != null);
- const decl_plus_emit_h = @fieldParentPtr(DeclPlusEmitH, "decl", decl);
- return &decl_plus_emit_h.emit_h;
- }
-
- pub fn removeDependant(decl: *Decl, other: *Decl) void {
+ pub fn removeDependant(decl: *Decl, other: Decl.Index) void {
assert(decl.dependants.swapRemove(other));
}
- pub fn removeDependency(decl: *Decl, other: *Decl) void {
+ pub fn removeDependency(decl: *Decl, other: Decl.Index) void {
assert(decl.dependencies.swapRemove(other));
}
@@ -790,16 +779,6 @@ pub const Decl = struct {
return decl.ty.abiAlignment(target);
}
}
-
- pub fn markAlive(decl: *Decl) void {
- if (decl.alive) return;
- decl.alive = true;
-
- // This is the first time we are marking this Decl alive. We must
- // therefore recurse into its value and mark any Decl it references
- // as also alive, so that any Decl referenced does not get garbage collected.
- decl.val.markReferencedDeclsAlive();
- }
};
/// This state is attached to every Decl when Module emit_h is non-null.
@@ -810,7 +789,7 @@ pub const EmitH = struct {
/// Represents the data that an explicit error set syntax provides.
pub const ErrorSet = struct {
/// The Decl that corresponds to the error set itself.
- owner_decl: *Decl,
+ owner_decl: Decl.Index,
/// Offset from Decl node index, points to the error set AST node.
node_offset: i32,
/// The string bytes are stored in the owner Decl arena.
@@ -819,10 +798,11 @@ pub const ErrorSet = struct {
pub const NameMap = std.StringArrayHashMapUnmanaged(void);
- pub fn srcLoc(self: ErrorSet) SrcLoc {
+ pub fn srcLoc(self: ErrorSet, mod: *Module) SrcLoc {
+ const owner_decl = mod.declPtr(self.owner_decl);
return .{
- .file_scope = self.owner_decl.getFileScope(),
- .parent_decl_node = self.owner_decl.src_node,
+ .file_scope = owner_decl.getFileScope(),
+ .parent_decl_node = owner_decl.src_node,
.lazy = .{ .node_offset = self.node_offset },
};
}
@@ -844,12 +824,12 @@ pub const PropertyBoolean = enum { no, yes, unknown, wip };
/// Represents the data that a struct declaration provides.
pub const Struct = struct {
- /// The Decl that corresponds to the struct itself.
- owner_decl: *Decl,
/// Set of field names in declaration order.
fields: Fields,
/// Represents the declarations inside this struct.
namespace: Namespace,
+ /// The Decl that corresponds to the struct itself.
+ owner_decl: Decl.Index,
/// Offset from `owner_decl`, points to the struct AST node.
node_offset: i32,
/// Index of the struct_decl ZIR instruction.
@@ -900,30 +880,32 @@ pub const Struct = struct {
}
};
- pub fn getFullyQualifiedName(s: *Struct, gpa: Allocator) ![:0]u8 {
- return s.owner_decl.getFullyQualifiedName(gpa);
+ pub fn getFullyQualifiedName(s: *Struct, mod: *Module) ![:0]u8 {
+ return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
}
- pub fn srcLoc(s: Struct) SrcLoc {
+ pub fn srcLoc(s: Struct, mod: *Module) SrcLoc {
+ const owner_decl = mod.declPtr(s.owner_decl);
return .{
- .file_scope = s.owner_decl.getFileScope(),
- .parent_decl_node = s.owner_decl.src_node,
+ .file_scope = owner_decl.getFileScope(),
+ .parent_decl_node = owner_decl.src_node,
.lazy = .{ .node_offset = s.node_offset },
};
}
- pub fn fieldSrcLoc(s: Struct, gpa: Allocator, query: FieldSrcQuery) SrcLoc {
+ pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc {
@setCold(true);
- const tree = s.owner_decl.getFileScope().getTree(gpa) catch |err| {
+ const owner_decl = mod.declPtr(s.owner_decl);
+ const file = owner_decl.getFileScope();
+ const tree = file.getTree(mod.gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
- s.owner_decl.getFileScope().sub_file_path, @errorName(err),
+ file.sub_file_path, @errorName(err),
});
- return s.srcLoc();
+ return s.srcLoc(mod);
};
- const node = s.owner_decl.relativeToNodeIndex(s.node_offset);
+ const node = owner_decl.relativeToNodeIndex(s.node_offset);
const node_tags = tree.nodes.items(.tag);
- const file = s.owner_decl.getFileScope();
switch (node_tags[node]) {
.container_decl,
.container_decl_trailing,
@@ -1013,18 +995,19 @@ pub const Struct = struct {
/// the number of fields.
pub const EnumSimple = struct {
/// The Decl that corresponds to the enum itself.
- owner_decl: *Decl,
- /// Set of field names in declaration order.
- fields: NameMap,
+ owner_decl: Decl.Index,
/// Offset from `owner_decl`, points to the enum decl AST node.
node_offset: i32,
+ /// Set of field names in declaration order.
+ fields: NameMap,
pub const NameMap = EnumFull.NameMap;
- pub fn srcLoc(self: EnumSimple) SrcLoc {
+ pub fn srcLoc(self: EnumSimple, mod: *Module) SrcLoc {
+ const owner_decl = mod.declPtr(self.owner_decl);
return .{
- .file_scope = self.owner_decl.getFileScope(),
- .parent_decl_node = self.owner_decl.src_node,
+ .file_scope = owner_decl.getFileScope(),
+ .parent_decl_node = owner_decl.src_node,
.lazy = .{ .node_offset = self.node_offset },
};
}
@@ -1035,7 +1018,9 @@ pub const EnumSimple = struct {
/// are explicitly provided.
pub const EnumNumbered = struct {
/// The Decl that corresponds to the enum itself.
- owner_decl: *Decl,
+ owner_decl: Decl.Index,
+ /// Offset from `owner_decl`, points to the enum decl AST node.
+ node_offset: i32,
/// An integer type which is used for the numerical value of the enum.
/// Whether zig chooses this type or the user specifies it, it is stored here.
tag_ty: Type,
@@ -1045,16 +1030,15 @@ pub const EnumNumbered = struct {
/// Entries are in declaration order, same as `fields`.
/// If this hash map is empty, it means the enum tags are auto-numbered.
values: ValueMap,
- /// Offset from `owner_decl`, points to the enum decl AST node.
- node_offset: i32,
pub const NameMap = EnumFull.NameMap;
pub const ValueMap = EnumFull.ValueMap;
- pub fn srcLoc(self: EnumNumbered) SrcLoc {
+ pub fn srcLoc(self: EnumNumbered, mod: *Module) SrcLoc {
+ const owner_decl = mod.declPtr(self.owner_decl);
return .{
- .file_scope = self.owner_decl.getFileScope(),
- .parent_decl_node = self.owner_decl.src_node,
+ .file_scope = owner_decl.getFileScope(),
+ .parent_decl_node = owner_decl.src_node,
.lazy = .{ .node_offset = self.node_offset },
};
}
@@ -1064,7 +1048,9 @@ pub const EnumNumbered = struct {
/// at least one tag value explicitly specified, or at least one declaration.
pub const EnumFull = struct {
/// The Decl that corresponds to the enum itself.
- owner_decl: *Decl,
+ owner_decl: Decl.Index,
+ /// Offset from `owner_decl`, points to the enum decl AST node.
+ node_offset: i32,
/// An integer type which is used for the numerical value of the enum.
/// Whether zig chooses this type or the user specifies it, it is stored here.
tag_ty: Type,
@@ -1076,26 +1062,23 @@ pub const EnumFull = struct {
values: ValueMap,
/// Represents the declarations inside this enum.
namespace: Namespace,
- /// Offset from `owner_decl`, points to the enum decl AST node.
- node_offset: i32,
/// true if zig inferred this tag type, false if user specified it
tag_ty_inferred: bool,
pub const NameMap = std.StringArrayHashMapUnmanaged(void);
pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false);
- pub fn srcLoc(self: EnumFull) SrcLoc {
+ pub fn srcLoc(self: EnumFull, mod: *Module) SrcLoc {
+ const owner_decl = mod.declPtr(self.owner_decl);
return .{
- .file_scope = self.owner_decl.getFileScope(),
- .parent_decl_node = self.owner_decl.src_node,
+ .file_scope = owner_decl.getFileScope(),
+ .parent_decl_node = owner_decl.src_node,
.lazy = .{ .node_offset = self.node_offset },
};
}
};
pub const Union = struct {
- /// The Decl that corresponds to the union itself.
- owner_decl: *Decl,
/// An enum type which is used for the tag of the union.
/// This type is created even for untagged unions, even when the memory
/// layout does not store the tag.
@@ -1106,6 +1089,8 @@ pub const Union = struct {
fields: Fields,
/// Represents the declarations inside this union.
namespace: Namespace,
+ /// The Decl that corresponds to the union itself.
+ owner_decl: Decl.Index,
/// Offset from `owner_decl`, points to the union decl AST node.
node_offset: i32,
/// Index of the union_decl ZIR instruction.
@@ -1145,30 +1130,32 @@ pub const Union = struct {
pub const Fields = std.StringArrayHashMapUnmanaged(Field);
- pub fn getFullyQualifiedName(s: *Union, gpa: Allocator) ![:0]u8 {
- return s.owner_decl.getFullyQualifiedName(gpa);
+ pub fn getFullyQualifiedName(s: *Union, mod: *Module) ![:0]u8 {
+ return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
}
- pub fn srcLoc(self: Union) SrcLoc {
+ pub fn srcLoc(self: Union, mod: *Module) SrcLoc {
+ const owner_decl = mod.declPtr(self.owner_decl);
return .{
- .file_scope = self.owner_decl.getFileScope(),
- .parent_decl_node = self.owner_decl.src_node,
+ .file_scope = owner_decl.getFileScope(),
+ .parent_decl_node = owner_decl.src_node,
.lazy = .{ .node_offset = self.node_offset },
};
}
- pub fn fieldSrcLoc(u: Union, gpa: Allocator, query: FieldSrcQuery) SrcLoc {
+ pub fn fieldSrcLoc(u: Union, mod: *Module, query: FieldSrcQuery) SrcLoc {
@setCold(true);
- const tree = u.owner_decl.getFileScope().getTree(gpa) catch |err| {
+ const owner_decl = mod.declPtr(u.owner_decl);
+ const file = owner_decl.getFileScope();
+ const tree = file.getTree(mod.gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
- u.owner_decl.getFileScope().sub_file_path, @errorName(err),
+ file.sub_file_path, @errorName(err),
});
- return u.srcLoc();
+ return u.srcLoc(mod);
};
- const node = u.owner_decl.relativeToNodeIndex(u.node_offset);
+ const node = owner_decl.relativeToNodeIndex(u.node_offset);
const node_tags = tree.nodes.items(.tag);
- const file = u.owner_decl.getFileScope();
switch (node_tags[node]) {
.container_decl,
.container_decl_trailing,
@@ -1348,22 +1335,23 @@ pub const Union = struct {
pub const Opaque = struct {
/// The Decl that corresponds to the opaque itself.
- owner_decl: *Decl,
- /// Represents the declarations inside this opaque.
- namespace: Namespace,
+ owner_decl: Decl.Index,
/// Offset from `owner_decl`, points to the opaque decl AST node.
node_offset: i32,
+ /// Represents the declarations inside this opaque.
+ namespace: Namespace,
- pub fn srcLoc(self: Opaque) SrcLoc {
+ pub fn srcLoc(self: Opaque, mod: *Module) SrcLoc {
+ const owner_decl = mod.declPtr(self.owner_decl);
return .{
- .file_scope = self.owner_decl.getFileScope(),
- .parent_decl_node = self.owner_decl.src_node,
+ .file_scope = owner_decl.getFileScope(),
+ .parent_decl_node = owner_decl.src_node,
.lazy = .{ .node_offset = self.node_offset },
};
}
- pub fn getFullyQualifiedName(s: *Opaque, gpa: Allocator) ![:0]u8 {
- return s.owner_decl.getFullyQualifiedName(gpa);
+ pub fn getFullyQualifiedName(s: *Opaque, mod: *Module) ![:0]u8 {
+ return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
}
};
@@ -1371,7 +1359,7 @@ pub const Opaque = struct {
/// arena allocator.
pub const ExternFn = struct {
/// The Decl that corresponds to the function itself.
- owner_decl: *Decl,
+ owner_decl: Decl.Index,
/// Library name if specified.
/// For example `extern "c" fn write(...) usize` would have 'c' as library name.
/// Allocated with Module's allocator; outlives the ZIR code.
@@ -1389,7 +1377,12 @@ pub const ExternFn = struct {
/// instead.
pub const Fn = struct {
/// The Decl that corresponds to the function itself.
- owner_decl: *Decl,
+ owner_decl: Decl.Index,
+ /// The ZIR instruction that is a function instruction. Use this to find
+ /// the body. We store this rather than the body directly so that when ZIR
+ /// is regenerated on update(), we can map this to the new corresponding
+ /// ZIR instruction.
+ zir_body_inst: Zir.Inst.Index,
/// If this is not null, this function is a generic function instantiation, and
/// there is a `TypedValue` here for each parameter of the function.
/// Non-comptime parameters are marked with a `generic_poison` for the value.
@@ -1403,11 +1396,6 @@ pub const Fn = struct {
/// parameter and tells whether it is anytype.
/// TODO apply the same enhancement for param_names below to this field.
anytype_args: [*]bool,
- /// The ZIR instruction that is a function instruction. Use this to find
- /// the body. We store this rather than the body directly so that when ZIR
- /// is regenerated on update(), we can map this to the new corresponding
- /// ZIR instruction.
- zir_body_inst: Zir.Inst.Index,
/// Prefer to use `getParamName` to access this because of the future improvement
/// we want to do mentioned in the TODO below.
@@ -1537,8 +1525,9 @@ pub const Fn = struct {
return func.param_names[index];
}
- pub fn hasInferredErrorSet(func: Fn) bool {
- const zir = func.owner_decl.getFileScope().zir;
+ pub fn hasInferredErrorSet(func: Fn, mod: *Module) bool {
+ const owner_decl = mod.declPtr(func.owner_decl);
+ const zir = owner_decl.getFileScope().zir;
const zir_tags = zir.instructions.items(.tag);
switch (zir_tags[func.zir_body_inst]) {
.func => return false,
@@ -1556,7 +1545,7 @@ pub const Fn = struct {
pub const Var = struct {
/// if is_extern == true this is undefined
init: Value,
- owner_decl: *Decl,
+ owner_decl: Decl.Index,
/// Library name if specified.
/// For example `extern "c" var stderrp = ...` would have 'c' as library name.
@@ -1576,14 +1565,16 @@ pub const Var = struct {
};
pub const DeclAdapter = struct {
+ mod: *Module,
+
pub fn hash(self: @This(), s: []const u8) u32 {
_ = self;
return @truncate(u32, std.hash.Wyhash.hash(0, s));
}
- pub fn eql(self: @This(), a: []const u8, b_decl: *Decl, b_index: usize) bool {
- _ = self;
+ pub fn eql(self: @This(), a: []const u8, b_decl_index: Decl.Index, b_index: usize) bool {
_ = b_index;
+ const b_decl = self.mod.declPtr(b_decl_index);
return mem.eql(u8, a, mem.sliceTo(b_decl.name, 0));
}
};
@@ -1599,25 +1590,30 @@ pub const Namespace = struct {
/// Declaration order is preserved via entry order.
/// Key memory is owned by `decl.name`.
/// Anonymous decls are not stored here; they are kept in `anon_decls` instead.
- decls: std.ArrayHashMapUnmanaged(*Decl, void, DeclContext, true) = .{},
+ decls: std.ArrayHashMapUnmanaged(Decl.Index, void, DeclContext, true) = .{},
- anon_decls: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{},
+ anon_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
/// Key is usingnamespace Decl itself. To find the namespace being included,
/// the Decl Value has to be resolved as a Type which has a Namespace.
/// Value is whether the usingnamespace decl is marked `pub`.
- usingnamespace_set: std.AutoHashMapUnmanaged(*Decl, bool) = .{},
+ usingnamespace_set: std.AutoHashMapUnmanaged(Decl.Index, bool) = .{},
const DeclContext = struct {
- pub fn hash(self: @This(), decl: *Decl) u32 {
- _ = self;
+ module: *Module,
+
+ pub fn hash(ctx: @This(), decl_index: Decl.Index) u32 {
+ const decl = ctx.module.declPtr(decl_index);
return @truncate(u32, std.hash.Wyhash.hash(0, mem.sliceTo(decl.name, 0)));
}
- pub fn eql(self: @This(), a: *Decl, b: *Decl, b_index: usize) bool {
- _ = self;
+ pub fn eql(ctx: @This(), a_decl_index: Decl.Index, b_decl_index: Decl.Index, b_index: usize) bool {
_ = b_index;
- return mem.eql(u8, mem.sliceTo(a.name, 0), mem.sliceTo(b.name, 0));
+ const a_decl = ctx.module.declPtr(a_decl_index);
+ const b_decl = ctx.module.declPtr(b_decl_index);
+ const a_name = mem.sliceTo(a_decl.name, 0);
+ const b_name = mem.sliceTo(b_decl.name, 0);
+ return mem.eql(u8, a_name, b_name);
}
};
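
The context/adapter pair above is what lets `decls` store bare `Decl.Index`
keys yet still be probed with a name string, with hashing routed through
`declPtr`. A standalone sketch of the same adapted-hash-map technique (the
`Names`, `Ctx`, and `Adapter` types are illustrative stand-ins):

const std = @import("std");

const Index = enum(u32) { _ };

// Stand-in for Module: owns the storage that index keys point into.
const Names = struct {
    list: std.SegmentedList([]const u8, 0) = .{},

    fn get(names: *Names, i: Index) []const u8 {
        return names.list.at(@enumToInt(i)).*;
    }
};

// Context: how stored Index keys are hashed and compared (via their names).
const Ctx = struct {
    names: *Names,

    pub fn hash(ctx: Ctx, key: Index) u32 {
        return @truncate(u32, std.hash.Wyhash.hash(0, ctx.names.get(key)));
    }

    pub fn eql(ctx: Ctx, a: Index, b: Index, b_index: usize) bool {
        _ = b_index;
        return std.mem.eql(u8, ctx.names.get(a), ctx.names.get(b));
    }
};

// Adapter: lets a []const u8 probe the map without materializing an Index.
const Adapter = struct {
    names: *Names,

    pub fn hash(self: Adapter, s: []const u8) u32 {
        _ = self;
        return @truncate(u32, std.hash.Wyhash.hash(0, s));
    }

    pub fn eql(self: Adapter, a: []const u8, b: Index, b_index: usize) bool {
        _ = b_index;
        return std.mem.eql(u8, a, self.names.get(b));
    }
};

test "probe an Index-keyed map with a string" {
    const gpa = std.testing.allocator;
    var names: Names = .{};
    defer names.list.deinit(gpa);
    try names.list.append(gpa, "foo");

    var map: std.ArrayHashMapUnmanaged(Index, void, Ctx, true) = .{};
    defer map.deinit(gpa);

    const adapter = Adapter{ .names = &names };
    const ctx = Ctx{ .names = &names };
    const gop1 = try map.getOrPutContextAdapted(gpa, @as([]const u8, "foo"), adapter, ctx);
    try std.testing.expect(!gop1.found_existing);
    gop1.key_ptr.* = @intToEnum(Index, 0); // the slot now owns the Index key

    const gop2 = try map.getOrPutContextAdapted(gpa, @as([]const u8, "foo"), adapter, ctx);
    try std.testing.expect(gop2.found_existing);
}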
@@ -1637,13 +1633,13 @@ pub const Namespace = struct {
var anon_decls = ns.anon_decls;
ns.anon_decls = .{};
- for (decls.keys()) |decl| {
- decl.destroy(mod);
+ for (decls.keys()) |decl_index| {
+ mod.destroyDecl(decl_index);
}
decls.deinit(gpa);
for (anon_decls.keys()) |key| {
- key.destroy(mod);
+ mod.destroyDecl(key);
}
anon_decls.deinit(gpa);
ns.usingnamespace_set.deinit(gpa);
@@ -1652,7 +1648,7 @@ pub const Namespace = struct {
pub fn deleteAllDecls(
ns: *Namespace,
mod: *Module,
- outdated_decls: ?*std.AutoArrayHashMap(*Decl, void),
+ outdated_decls: ?*std.AutoArrayHashMap(Decl.Index, void),
) !void {
const gpa = mod.gpa;
@@ -1669,13 +1665,13 @@ pub const Namespace = struct {
for (decls.keys()) |child_decl| {
mod.clearDecl(child_decl, outdated_decls) catch @panic("out of memory");
- child_decl.destroy(mod);
+ mod.destroyDecl(child_decl);
}
decls.deinit(gpa);
for (anon_decls.keys()) |child_decl| {
mod.clearDecl(child_decl, outdated_decls) catch @panic("out of memory");
- child_decl.destroy(mod);
+ mod.destroyDecl(child_decl);
}
anon_decls.deinit(gpa);
@@ -1685,12 +1681,14 @@ pub const Namespace = struct {
// This renders e.g. "std.fs.Dir.OpenOptions"
pub fn renderFullyQualifiedName(
ns: Namespace,
+ mod: *Module,
name: []const u8,
writer: anytype,
) @TypeOf(writer).Error!void {
if (ns.parent) |parent| {
- const decl = ns.getDecl();
- try parent.renderFullyQualifiedName(mem.sliceTo(decl.name, 0), writer);
+ const decl_index = ns.getDeclIndex();
+ const decl = mod.declPtr(decl_index);
+ try parent.renderFullyQualifiedName(mod, mem.sliceTo(decl.name, 0), writer);
} else {
try ns.file_scope.renderFullyQualifiedName(writer);
}
@@ -1703,13 +1701,15 @@ pub const Namespace = struct {
/// This renders e.g. "std/fs.zig:Dir.OpenOptions"
pub fn renderFullyQualifiedDebugName(
ns: Namespace,
+ mod: *Module,
name: []const u8,
writer: anytype,
) @TypeOf(writer).Error!void {
var separator_char: u8 = '.';
if (ns.parent) |parent| {
- const decl = ns.getDecl();
- try parent.renderFullyQualifiedDebugName(mem.sliceTo(decl.name, 0), writer);
+ const decl_index = ns.getDeclIndex();
+ const decl = mod.declPtr(decl_index);
+ try parent.renderFullyQualifiedDebugName(mod, mem.sliceTo(decl.name, 0), writer);
} else {
try ns.file_scope.renderFullyQualifiedDebugName(writer);
separator_char = ':';
@@ -1720,12 +1720,14 @@ pub const Namespace = struct {
}
}
- pub fn getDecl(ns: Namespace) *Decl {
+ pub fn getDeclIndex(ns: Namespace) Decl.Index {
return ns.ty.getOwnerDecl();
}
};
pub const File = struct {
+ /// The Decl of the struct that represents this File.
+ root_decl: Decl.OptionalIndex,
status: enum {
never_loaded,
retryable_failure,
@@ -1749,16 +1751,14 @@ pub const File = struct {
zir: Zir,
/// Package that this file is a part of, managed externally.
pkg: *Package,
- /// The Decl of the struct that represents this File.
- root_decl: ?*Decl,
/// Used by change detection algorithm, after astgen, contains the
/// set of decls that existed in the previous ZIR but not in the new one.
- deleted_decls: std.ArrayListUnmanaged(*Decl) = .{},
+ deleted_decls: std.ArrayListUnmanaged(Decl.Index) = .{},
/// Used by change detection algorithm, after astgen, contains the
/// set of decls that existed both in the previous ZIR and in the new one,
/// but their source code has been modified.
- outdated_decls: std.ArrayListUnmanaged(*Decl) = .{},
+ outdated_decls: std.ArrayListUnmanaged(Decl.Index) = .{},
/// The most recent successful ZIR for this file, with no errors.
/// This is only populated when a previously successful ZIR
@@ -1798,8 +1798,8 @@ pub const File = struct {
log.debug("deinit File {s}", .{file.sub_file_path});
file.deleted_decls.deinit(gpa);
file.outdated_decls.deinit(gpa);
- if (file.root_decl) |root_decl| {
- root_decl.destroy(mod);
+ if (file.root_decl.unwrap()) |root_decl| {
+ mod.destroyDecl(root_decl);
}
gpa.free(file.sub_file_path);
file.unload(gpa);
@@ -1932,7 +1932,7 @@ pub const EmbedFile = struct {
/// The Decl that was created from the `@embedFile` to own this resource.
/// This is how zig knows what other Decl objects to invalidate if the file
/// changes on disk.
- owner_decl: *Decl,
+ owner_decl: Decl.Index,
fn destroy(embed_file: *EmbedFile, mod: *Module) void {
const gpa = mod.gpa;
@@ -2776,6 +2776,7 @@ pub fn deinit(mod: *Module) void {
}
emit_h.failed_decls.deinit(gpa);
emit_h.decl_table.deinit(gpa);
+ emit_h.allocated_emit_h.deinit(gpa);
gpa.destroy(emit_h);
}
@@ -2827,6 +2828,52 @@ pub fn deinit(mod: *Module) void {
}
mod.memoized_calls.deinit(gpa);
}
+
+ mod.decls_free_list.deinit(gpa);
+ mod.allocated_decls.deinit(gpa);
+}
+
+pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
+ const gpa = mod.gpa;
+ {
+ const decl = mod.declPtr(decl_index);
+ log.debug("destroy {*} ({s})", .{ decl, decl.name });
+ _ = mod.test_functions.swapRemove(decl_index);
+ if (decl.deletion_flag) {
+ assert(mod.deletion_set.swapRemove(decl_index));
+ }
+ if (decl.has_tv) {
+ if (decl.getInnerNamespace()) |namespace| {
+ namespace.destroyDecls(mod);
+ }
+ decl.clearValues(gpa);
+ }
+ decl.dependants.deinit(gpa);
+ decl.dependencies.deinit(gpa);
+ decl.clearName(gpa);
+ decl.* = undefined;
+ }
+ mod.decls_free_list.append(gpa, decl_index) catch {
+ // In order to keep `destroyDecl` a non-fallible function, we ignore memory
+ // allocation failures here, instead leaking the Decl until garbage collection.
+ };
+ if (mod.emit_h) |mod_emit_h| {
+ const decl_emit_h = mod_emit_h.declPtr(decl_index);
+ decl_emit_h.fwd_decl.deinit(gpa);
+ decl_emit_h.* = undefined;
+ }
+}
+
+pub fn declPtr(mod: *Module, decl_index: Decl.Index) *Decl {
+ return mod.allocated_decls.at(@enumToInt(decl_index));
+}
+
+/// Returns true if and only if the Decl is the top level struct associated with a File.
+pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool {
+ const decl = mod.declPtr(decl_index);
+ if (decl.src_namespace.parent != null)
+ return false;
+ return decl_index == decl.src_namespace.getDeclIndex();
}
fn freeExportList(gpa: Allocator, export_list: []*Export) void {
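
Note the `catch` in `destroyDecl` above: recycling an index is only an
optimization, so the function can stay non-fallible by leaking the slot on
OOM. A tiny standalone sketch of that pattern:

const std = @import("std");

// Best-effort recycling: on allocation failure we simply do not remember the
// slot, leaking it until the whole pool is torn down.
fn recycle(free_list: *std.ArrayListUnmanaged(u32), gpa: std.mem.Allocator, index: u32) void {
    free_list.append(gpa, index) catch {};
}

test "recycle cannot fail" {
    const gpa = std.testing.allocator;
    var free_list: std.ArrayListUnmanaged(u32) = .{};
    defer free_list.deinit(gpa);

    recycle(&free_list, gpa, 7);
    try std.testing.expectEqual(@as(u32, 7), free_list.items[0]);
}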
@@ -3230,14 +3277,14 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
// We do not need to hold any locks at this time because all the Decl and Namespace
// objects being touched are specific to this File, and the only other concurrent
// tasks are touching other File objects.
- try updateZirRefs(gpa, file, prev_zir.*);
+ try updateZirRefs(mod, file, prev_zir.*);
// At this point, `file.outdated_decls` and `file.deleted_decls` are populated,
// and semantic analysis will deal with them properly.
// No need to keep previous ZIR.
prev_zir.deinit(gpa);
gpa.destroy(prev_zir);
file.prev_zir = null;
- } else if (file.root_decl) |root_decl| {
+ } else if (file.root_decl.unwrap()) |root_decl| {
// This is an update, but it is the first time the File has succeeded
// ZIR. We must mark it outdated since we have already tried to
// semantically analyze it.
@@ -3251,7 +3298,8 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
/// * Decl.zir_index
/// * Fn.zir_body_inst
/// * Decl.zir_decl_index
-fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void {
+fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
+ const gpa = mod.gpa;
const new_zir = file.zir;
// Maps from old ZIR to new ZIR, struct_decl, enum_decl, etc. Any instruction which
@@ -3268,10 +3316,10 @@ fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void {
// Walk the Decl graph, updating ZIR indexes, strings, and populating
// the deleted and outdated lists.
- var decl_stack: std.ArrayListUnmanaged(*Decl) = .{};
+ var decl_stack: std.ArrayListUnmanaged(Decl.Index) = .{};
defer decl_stack.deinit(gpa);
- const root_decl = file.root_decl.?;
+ const root_decl = file.root_decl.unwrap().?;
try decl_stack.append(gpa, root_decl);
file.deleted_decls.clearRetainingCapacity();
@@ -3281,7 +3329,8 @@ fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void {
// to re-generate ZIR for the File.
try file.outdated_decls.append(gpa, root_decl);
- while (decl_stack.popOrNull()) |decl| {
+ while (decl_stack.popOrNull()) |decl_index| {
+ const decl = mod.declPtr(decl_index);
// Anonymous decls and the root decl have this set to 0. We still need
// to walk them but we do not need to modify this value.
// Anonymous decls should not be marked outdated. They will be re-generated
@@ -3292,7 +3341,7 @@ fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void {
log.debug("updateZirRefs {s}: delete {*} ({s})", .{
file.sub_file_path, decl, decl.name,
});
- try file.deleted_decls.append(gpa, decl);
+ try file.deleted_decls.append(gpa, decl_index);
continue;
};
const old_hash = decl.contentsHashZir(old_zir);
@@ -3302,7 +3351,7 @@ fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void {
log.debug("updateZirRefs {s}: outdated {*} ({s}) {d} => {d}", .{
file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index,
});
- try file.outdated_decls.append(gpa, decl);
+ try file.outdated_decls.append(gpa, decl_index);
} else {
log.debug("updateZirRefs {s}: unchanged {*} ({s}) {d} => {d}", .{
file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index,
@@ -3314,21 +3363,21 @@ fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void {
if (decl.getStruct()) |struct_obj| {
struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse {
- try file.deleted_decls.append(gpa, decl);
+ try file.deleted_decls.append(gpa, decl_index);
continue;
};
}
if (decl.getUnion()) |union_obj| {
union_obj.zir_index = inst_map.get(union_obj.zir_index) orelse {
- try file.deleted_decls.append(gpa, decl);
+ try file.deleted_decls.append(gpa, decl_index);
continue;
};
}
if (decl.getFunction()) |func| {
func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse {
- try file.deleted_decls.append(gpa, decl);
+ try file.deleted_decls.append(gpa, decl_index);
continue;
};
}
@@ -3485,10 +3534,12 @@ pub fn mapOldZirToNew(
/// However the resolution status of the Type may not be fully resolved.
/// For example an inferred error set is not resolved until after `analyzeFnBody`.
/// is called.
-pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void {
+pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
const tracy = trace(@src());
defer tracy.end();
+ const decl = mod.declPtr(decl_index);
+
const subsequent_analysis = switch (decl.analysis) {
.in_progress => unreachable,
@@ -3507,15 +3558,16 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void {
// The exports this Decl performs will be re-discovered, so we remove them here
// prior to re-analysis.
- mod.deleteDeclExports(decl);
+ mod.deleteDeclExports(decl_index);
// Dependencies will be re-discovered, so we remove them here prior to re-analysis.
- for (decl.dependencies.keys()) |dep| {
- dep.removeDependant(decl);
+ for (decl.dependencies.keys()) |dep_index| {
+ const dep = mod.declPtr(dep_index);
+ dep.removeDependant(decl_index);
if (dep.dependants.count() == 0 and !dep.deletion_flag) {
log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{
decl, decl.name, dep, dep.name,
});
- try mod.markDeclForDeletion(dep);
+ try mod.markDeclForDeletion(dep_index);
}
}
decl.dependencies.clearRetainingCapacity();
@@ -3530,7 +3582,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void {
decl_prog_node.activate();
defer decl_prog_node.end();
- const type_changed = mod.semaDecl(decl) catch |err| switch (err) {
+ const type_changed = mod.semaDecl(decl_index) catch |err| switch (err) {
error.AnalysisFail => {
if (decl.analysis == .in_progress) {
// If this decl caused the compile error, the analysis field would
@@ -3545,7 +3597,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void {
else => |e| {
decl.analysis = .sema_failure_retryable;
try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1);
- mod.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
+ mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create(
mod.gpa,
decl.srcLoc(),
"unable to analyze: {s}",
@@ -3559,7 +3611,8 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void {
// We may need to chase the dependants and re-analyze them.
// However, if the decl is a function, and the type is the same, we do not need to.
if (type_changed or decl.ty.zigTypeTag() != .Fn) {
- for (decl.dependants.keys()) |dep| {
+ for (decl.dependants.keys()) |dep_index| {
+ const dep = mod.declPtr(dep_index);
switch (dep.analysis) {
.unreferenced => unreachable,
.in_progress => continue, // already doing analysis, ok
@@ -3573,7 +3626,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void {
.codegen_failure_retryable,
.complete,
=> if (dep.generation != mod.generation) {
- try mod.markOutdatedDecl(dep);
+ try mod.markOutdatedDecl(dep_index);
},
}
}
@@ -3585,7 +3638,10 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
const tracy = trace(@src());
defer tracy.end();
- switch (func.owner_decl.analysis) {
+ const decl_index = func.owner_decl;
+ const decl = mod.declPtr(decl_index);
+
+ switch (decl.analysis) {
.unreferenced => unreachable,
.in_progress => unreachable,
.outdated => unreachable,
@@ -3607,13 +3663,12 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
}
const gpa = mod.gpa;
- const decl = func.owner_decl;
var tmp_arena = std.heap.ArenaAllocator.init(gpa);
defer tmp_arena.deinit();
const sema_arena = tmp_arena.allocator();
- var air = mod.analyzeFnBody(decl, func, sema_arena) catch |err| switch (err) {
+ var air = mod.analyzeFnBody(func, sema_arena) catch |err| switch (err) {
error.AnalysisFail => {
if (func.state == .in_progress) {
// If this decl caused the compile error, the analysis field would
@@ -3635,7 +3690,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
if (builtin.mode == .Debug and mod.comp.verbose_air) {
std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name});
- @import("print_air.zig").dump(gpa, air, liveness);
+ @import("print_air.zig").dump(mod, air, liveness);
std.debug.print("# End Function AIR: {s}\n\n", .{decl.name});
}
@@ -3647,7 +3702,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
},
else => {
try mod.failed_decls.ensureUnusedCapacity(gpa, 1);
- mod.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create(
+ mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
gpa,
decl.srcLoc(),
"unable to codegen: {s}",
@@ -3668,7 +3723,9 @@ pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void {
// TODO we can potentially relax this if we store some more information along
// with decl dependency edges
- for (embed_file.owner_decl.dependants.keys()) |dep| {
+ const owner_decl = mod.declPtr(embed_file.owner_decl);
+ for (owner_decl.dependants.keys()) |dep_index| {
+ const dep = mod.declPtr(dep_index);
switch (dep.analysis) {
.unreferenced => unreachable,
.in_progress => continue, // already doing analysis, ok
@@ -3682,7 +3739,7 @@ pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void {
.codegen_failure_retryable,
.complete,
=> if (dep.generation != mod.generation) {
- try mod.markOutdatedDecl(dep);
+ try mod.markOutdatedDecl(dep_index);
},
}
}
@@ -3699,7 +3756,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
const tracy = trace(@src());
defer tracy.end();
- if (file.root_decl != null) return;
+ if (file.root_decl != .none) return;
const gpa = mod.gpa;
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
@@ -3724,10 +3781,11 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
.file_scope = file,
},
};
- const decl_name = try file.fullyQualifiedNameZ(gpa);
- const new_decl = try mod.allocateNewDecl(decl_name, &struct_obj.namespace, 0, null);
- file.root_decl = new_decl;
- struct_obj.owner_decl = new_decl;
+ const new_decl_index = try mod.allocateNewDecl(&struct_obj.namespace, 0, null);
+ const new_decl = mod.declPtr(new_decl_index);
+ file.root_decl = new_decl_index.toOptional();
+ struct_obj.owner_decl = new_decl_index;
+ new_decl.name = try file.fullyQualifiedNameZ(gpa);
new_decl.src_line = 0;
new_decl.is_pub = true;
new_decl.is_exported = false;
@@ -3757,6 +3815,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
.perm_arena = new_decl_arena_allocator,
.code = file.zir,
.owner_decl = new_decl,
+ .owner_decl_index = new_decl_index,
.func = null,
.fn_ret_ty = Type.void,
.owner_func = null,
@@ -3769,7 +3828,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
var block_scope: Sema.Block = .{
.parent = null,
.sema = &sema,
- .src_decl = new_decl,
+ .src_decl = new_decl_index,
.namespace = &struct_obj.namespace,
.wip_capture_scope = wip_captures.scope,
.instructions = .{},
@@ -3808,10 +3867,12 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
/// Returns `true` if the Decl type changed.
/// Returns `true` if this is the first time analyzing the Decl.
/// Returns `false` otherwise.
-fn semaDecl(mod: *Module, decl: *Decl) !bool {
+fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
const tracy = trace(@src());
defer tracy.end();
+ const decl = mod.declPtr(decl_index);
+
if (decl.getFileScope().status != .success_zir) {
return error.AnalysisFail;
}
@@ -3838,13 +3899,14 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
.perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
+ .owner_decl_index = decl_index,
.func = null,
.fn_ret_ty = Type.void,
.owner_func = null,
};
defer sema.deinit();
- if (decl.isRoot()) {
+ if (mod.declIsRoot(decl_index)) {
log.debug("semaDecl root {*} ({s})", .{ decl, decl.name });
const main_struct_inst = Zir.main_struct_inst;
const struct_obj = decl.getStruct().?;
@@ -3864,7 +3926,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
var block_scope: Sema.Block = .{
.parent = null,
.sema = &sema,
- .src_decl = decl,
+ .src_decl = decl_index,
.namespace = decl.src_namespace,
.wip_capture_scope = wip_captures.scope,
.instructions = .{},
@@ -3922,15 +3984,15 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
const decl_arena_state = try decl_arena_allocator.create(std.heap.ArenaAllocator.State);
if (decl.is_usingnamespace) {
- if (!decl_tv.ty.eql(Type.type, target)) {
+ if (!decl_tv.ty.eql(Type.type, mod)) {
return sema.fail(&block_scope, src, "expected type, found {}", .{
- decl_tv.ty.fmt(target),
+ decl_tv.ty.fmt(mod),
});
}
var buffer: Value.ToTypeBuffer = undefined;
const ty = try decl_tv.val.toType(&buffer).copy(decl_arena_allocator);
if (ty.getNamespace() == null) {
- return sema.fail(&block_scope, src, "type {} has no namespace", .{ty.fmt(target)});
+ return sema.fail(&block_scope, src, "type {} has no namespace", .{ty.fmt(mod)});
}
decl.ty = Type.type;
@@ -3949,7 +4011,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
if (decl_tv.val.castTag(.function)) |fn_payload| {
const func = fn_payload.data;
- const owns_tv = func.owner_decl == decl;
+ const owns_tv = func.owner_decl == decl_index;
if (owns_tv) {
var prev_type_has_bits = false;
var prev_is_inline = false;
@@ -3957,7 +4019,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
if (decl.has_tv) {
prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits();
- type_changed = !decl.ty.eql(decl_tv.ty, target);
+ type_changed = !decl.ty.eql(decl_tv.ty, mod);
if (decl.getFunction()) |prev_func| {
prev_is_inline = prev_func.state == .inline_only;
}
@@ -3982,13 +4044,13 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
// We don't fully codegen the decl until later, but we do need to reserve a global
// offset table index for it. This allows us to codegen decls out of dependency
// order, increasing how many computations can be done in parallel.
- try mod.comp.bin_file.allocateDeclIndexes(decl);
+ try mod.comp.bin_file.allocateDeclIndexes(decl_index);
try mod.comp.work_queue.writeItem(.{ .codegen_func = func });
if (type_changed and mod.emit_h != null) {
- try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl });
+ try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index });
}
} else if (!prev_is_inline and prev_type_has_bits) {
- mod.comp.bin_file.freeDecl(decl);
+ mod.comp.bin_file.freeDecl(decl_index);
}
const is_inline = decl.ty.fnCallingConvention() == .Inline;
@@ -3999,14 +4061,14 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
}
// The scope needs to have the decl in it.
const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) };
- try sema.analyzeExport(&block_scope, export_src, options, decl);
+ try sema.analyzeExport(&block_scope, export_src, options, decl_index);
}
return type_changed or is_inline != prev_is_inline;
}
}
var type_changed = true;
if (decl.has_tv) {
- type_changed = !decl.ty.eql(decl_tv.ty, target);
+ type_changed = !decl.ty.eql(decl_tv.ty, mod);
decl.clearValues(gpa);
}
@@ -4016,7 +4078,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
switch (decl_tv.val.tag()) {
.variable => {
const variable = decl_tv.val.castTag(.variable).?.data;
- if (variable.owner_decl == decl) {
+ if (variable.owner_decl == decl_index) {
decl.owns_tv = true;
queue_linker_work = true;
@@ -4026,7 +4088,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
},
.extern_fn => {
const extern_fn = decl_tv.val.castTag(.extern_fn).?.data;
- if (extern_fn.owner_decl == decl) {
+ if (extern_fn.owner_decl == decl_index) {
decl.owns_tv = true;
queue_linker_work = true;
is_extern = true;
@@ -4065,11 +4127,11 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
// codegen backend wants full access to the Decl Type.
try sema.resolveTypeFully(&block_scope, src, decl.ty);
- try mod.comp.bin_file.allocateDeclIndexes(decl);
- try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl });
+ try mod.comp.bin_file.allocateDeclIndexes(decl_index);
+ try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
if (type_changed and mod.emit_h != null) {
- try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl });
+ try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index });
}
}
@@ -4077,15 +4139,18 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
const export_src = src; // TODO point to the export token
// The scope needs to have the decl in it.
const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) };
- try sema.analyzeExport(&block_scope, export_src, options, decl);
+ try sema.analyzeExport(&block_scope, export_src, options, decl_index);
}
return type_changed;
}
/// Returns the depender's index of the dependee.
-pub fn declareDeclDependency(mod: *Module, depender: *Decl, dependee: *Decl) !void {
- if (depender == dependee) return;
+pub fn declareDeclDependency(mod: *Module, depender_index: Decl.Index, dependee_index: Decl.Index) !void {
+ if (depender_index == dependee_index) return;
+
+ const depender = mod.declPtr(depender_index);
+ const dependee = mod.declPtr(dependee_index);
log.debug("{*} ({s}) depends on {*} ({s})", .{
depender, depender.name, dependee, dependee.name,
@@ -4096,11 +4161,11 @@ pub fn declareDeclDependency(mod: *Module, depender: *Decl, dependee: *Decl) !vo
if (dependee.deletion_flag) {
dependee.deletion_flag = false;
- assert(mod.deletion_set.swapRemove(dependee));
+ assert(mod.deletion_set.swapRemove(dependee_index));
}
- dependee.dependants.putAssumeCapacity(depender, {});
- depender.dependencies.putAssumeCapacity(dependee, {});
+ dependee.dependants.putAssumeCapacity(depender_index, {});
+ depender.dependencies.putAssumeCapacity(dependee_index, {});
}
pub const ImportFileResult = struct {
@@ -4146,7 +4211,7 @@ pub fn importPkg(mod: *Module, pkg: *Package) !ImportFileResult {
.zir = undefined,
.status = .never_loaded,
.pkg = pkg,
- .root_decl = null,
+ .root_decl = .none,
};
return ImportFileResult{
.file = new_file,
@@ -4214,7 +4279,7 @@ pub fn importFile(
.zir = undefined,
.status = .never_loaded,
.pkg = cur_file.pkg,
- .root_decl = null,
+ .root_decl = .none,
};
return ImportFileResult{
.file = new_file,
@@ -4388,8 +4453,8 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
const line = iter.parent_decl.relativeToLine(line_off);
const decl_name_index = zir.extra[decl_sub_index + 5];
const decl_doccomment_index = zir.extra[decl_sub_index + 7];
- const decl_index = zir.extra[decl_sub_index + 6];
- const decl_block_inst_data = zir.instructions.items(.data)[decl_index].pl_node;
+ const decl_zir_index = zir.extra[decl_sub_index + 6];
+ const decl_block_inst_data = zir.instructions.items(.data)[decl_zir_index].pl_node;
const decl_node = iter.parent_decl.relativeToNodeIndex(decl_block_inst_data.src_node);
// Every Decl needs a name.
@@ -4432,15 +4497,22 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
if (is_usingnamespace) try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1);
// We create a Decl for it regardless of analysis status.
- const gop = try namespace.decls.getOrPutAdapted(gpa, @as([]const u8, mem.sliceTo(decl_name, 0)), DeclAdapter{});
+ const gop = try namespace.decls.getOrPutContextAdapted(
+ gpa,
+ @as([]const u8, mem.sliceTo(decl_name, 0)),
+ DeclAdapter{ .mod = mod },
+ Namespace.DeclContext{ .module = mod },
+ );
if (!gop.found_existing) {
- const new_decl = try mod.allocateNewDecl(decl_name, namespace, decl_node, iter.parent_decl.src_scope);
+ const new_decl_index = try mod.allocateNewDecl(namespace, decl_node, iter.parent_decl.src_scope);
+ const new_decl = mod.declPtr(new_decl_index);
+ new_decl.name = decl_name;
if (is_usingnamespace) {
- namespace.usingnamespace_set.putAssumeCapacity(new_decl, is_pub);
+ namespace.usingnamespace_set.putAssumeCapacity(new_decl_index, is_pub);
}
log.debug("scan new {*} ({s}) into {*}", .{ new_decl, decl_name, namespace });
new_decl.src_line = line;
- gop.key_ptr.* = new_decl;
+ gop.key_ptr.* = new_decl_index;
// Exported decls, comptime decls, usingnamespace decls, and
// test decls if in test mode, get analyzed.
const decl_pkg = namespace.file_scope.pkg;
@@ -4451,7 +4523,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
// the test name filter.
if (!mod.comp.bin_file.options.is_test) break :blk false;
if (decl_pkg != mod.main_pkg) break :blk false;
- try mod.test_functions.put(gpa, new_decl, {});
+ try mod.test_functions.put(gpa, new_decl_index, {});
break :blk true;
},
else => blk: {
@@ -4459,12 +4531,12 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
if (!mod.comp.bin_file.options.is_test) break :blk false;
if (decl_pkg != mod.main_pkg) break :blk false;
// TODO check the name against --test-filter
- try mod.test_functions.put(gpa, new_decl, {});
+ try mod.test_functions.put(gpa, new_decl_index, {});
break :blk true;
},
};
if (want_analysis) {
- mod.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
+ mod.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl_index });
}
new_decl.is_pub = is_pub;
new_decl.is_exported = is_exported;
@@ -4476,7 +4548,8 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
return;
}
gpa.free(decl_name);
- const decl = gop.key_ptr.*;
+ const decl_index = gop.key_ptr.*;
+ const decl = mod.declPtr(decl_index);
log.debug("scan existing {*} ({s}) of {*}", .{ decl, decl.name, namespace });
// Update the AST node of the decl; even if its contents are unchanged, it may
// have been re-ordered.
@@ -4497,17 +4570,17 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
.elf => if (decl.fn_link.elf.len != 0) {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
- mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
+ mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
},
.macho => if (decl.fn_link.macho.len != 0) {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
- mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
+ mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
},
.plan9 => {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
- mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
+ mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
},
.c, .wasm, .spirv, .nvptx => {},
}
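The `getOrPutContextAdapted` call above shows the main cost of keying namespaces by `Decl.Index`: hashing and equality can no longer look at the key itself, so both the adapter and the map context must carry a `*Module` to resolve an index back to a `*Decl`. A sketch of the adapter shape this implies for a string-keyed lookup into the array hash map (the Wyhash choice is illustrative; only the `.mod` field is confirmed by the diff):

pub const DeclAdapter = struct {
    mod: *Module,

    pub fn hash(self: @This(), s: []const u8) u32 {
        _ = self;
        return @truncate(u32, std.hash.Wyhash.hash(0, s));
    }

    pub fn eql(self: @This(), a: []const u8, b_decl_index: Decl.Index, b_index: usize) bool {
        _ = b_index;
        const b_decl = self.mod.declPtr(b_decl_index);
        return mem.eql(u8, a, mem.sliceTo(b_decl.name, 0));
    }
};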
@@ -4517,25 +4590,27 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi
/// Make it as if the semantic analysis for this Decl never happened.
pub fn clearDecl(
mod: *Module,
- decl: *Decl,
- outdated_decls: ?*std.AutoArrayHashMap(*Decl, void),
+ decl_index: Decl.Index,
+ outdated_decls: ?*std.AutoArrayHashMap(Decl.Index, void),
) Allocator.Error!void {
const tracy = trace(@src());
defer tracy.end();
+ const decl = mod.declPtr(decl_index);
log.debug("clearing {*} ({s})", .{ decl, decl.name });
const gpa = mod.gpa;
try mod.deletion_set.ensureUnusedCapacity(gpa, decl.dependencies.count());
if (outdated_decls) |map| {
- _ = map.swapRemove(decl);
+ _ = map.swapRemove(decl_index);
try map.ensureUnusedCapacity(decl.dependants.count());
}
// Remove itself from its dependencies.
- for (decl.dependencies.keys()) |dep| {
- dep.removeDependant(decl);
+ for (decl.dependencies.keys()) |dep_index| {
+ const dep = mod.declPtr(dep_index);
+ dep.removeDependant(decl_index);
if (dep.dependants.count() == 0 and !dep.deletion_flag) {
log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{
decl, decl.name, dep, dep.name,
@@ -4543,35 +4618,36 @@ pub fn clearDecl(
// We don't recursively perform a deletion here, because during the update,
// another reference to it may turn up.
dep.deletion_flag = true;
- mod.deletion_set.putAssumeCapacity(dep, {});
+ mod.deletion_set.putAssumeCapacity(dep_index, {});
}
}
decl.dependencies.clearRetainingCapacity();
// Anything that depends on this deleted decl needs to be re-analyzed.
- for (decl.dependants.keys()) |dep| {
- dep.removeDependency(decl);
+ for (decl.dependants.keys()) |dep_index| {
+ const dep = mod.declPtr(dep_index);
+ dep.removeDependency(decl_index);
if (outdated_decls) |map| {
- map.putAssumeCapacity(dep, {});
+ map.putAssumeCapacity(dep_index, {});
}
}
decl.dependants.clearRetainingCapacity();
- if (mod.failed_decls.fetchSwapRemove(decl)) |kv| {
+ if (mod.failed_decls.fetchSwapRemove(decl_index)) |kv| {
kv.value.destroy(gpa);
}
if (mod.emit_h) |emit_h| {
- if (emit_h.failed_decls.fetchSwapRemove(decl)) |kv| {
+ if (emit_h.failed_decls.fetchSwapRemove(decl_index)) |kv| {
kv.value.destroy(gpa);
}
- assert(emit_h.decl_table.swapRemove(decl));
+ assert(emit_h.decl_table.swapRemove(decl_index));
}
- _ = mod.compile_log_decls.swapRemove(decl);
- mod.deleteDeclExports(decl);
+ _ = mod.compile_log_decls.swapRemove(decl_index);
+ mod.deleteDeclExports(decl_index);
if (decl.has_tv) {
if (decl.ty.isFnOrHasRuntimeBits()) {
- mod.comp.bin_file.freeDecl(decl);
+ mod.comp.bin_file.freeDecl(decl_index);
// TODO instead of a union, put this memory trailing Decl objects,
// and allow it to be variably sized.
@@ -4604,15 +4680,16 @@ pub fn clearDecl(
if (decl.deletion_flag) {
decl.deletion_flag = false;
- assert(mod.deletion_set.swapRemove(decl));
+ assert(mod.deletion_set.swapRemove(decl_index));
}
decl.analysis = .unreferenced;
}
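`clearDecl` now walks both edge directions as indexes and only dereferences through `mod.declPtr` when it needs the other Decl's fields. A sketch of the per-Decl edge sets and the two helpers invoked above, assuming they keep their previous shape with the key type swapped from `*Decl` to `Decl.Index`:

/// Both directions of the dependency graph, stored on each Decl and
/// keyed by index: a u32 key is half the size of a pointer on 64-bit
/// hosts and needs no translation when serialized.
dependencies: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
dependants: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},

pub fn removeDependant(decl: *Decl, other: Decl.Index) void {
    assert(decl.dependants.swapRemove(other));
}

pub fn removeDependency(decl: *Decl, other: Decl.Index) void {
    assert(decl.dependencies.swapRemove(other));
}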
/// This function is exclusively called for anonymous decls.
-pub fn deleteUnusedDecl(mod: *Module, decl: *Decl) void {
- log.debug("deleteUnusedDecl {*} ({s})", .{ decl, decl.name });
+pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void {
+ const decl = mod.declPtr(decl_index);
+ log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name });
// TODO: remove `allocateDeclIndexes` and change the API so that the linker backends
// are required to notice the first time `updateDecl` happens and keep track
@@ -4626,55 +4703,58 @@ pub fn deleteUnusedDecl(mod: *Module, decl: *Decl) void {
.c => {}, // this linker backend has already migrated to the new API
else => if (decl.has_tv) {
if (decl.ty.isFnOrHasRuntimeBits()) {
- mod.comp.bin_file.freeDecl(decl);
+ mod.comp.bin_file.freeDecl(decl_index);
}
},
}
- assert(!decl.isRoot());
- assert(decl.src_namespace.anon_decls.swapRemove(decl));
+ assert(!mod.declIsRoot(decl_index));
+ assert(decl.src_namespace.anon_decls.swapRemove(decl_index));
const dependants = decl.dependants.keys();
for (dependants) |dep| {
- dep.removeDependency(decl);
+ mod.declPtr(dep).removeDependency(decl_index);
}
for (decl.dependencies.keys()) |dep| {
- dep.removeDependant(decl);
+ mod.declPtr(dep).removeDependant(decl_index);
}
- decl.destroy(mod);
+ mod.destroyDecl(decl_index);
}
/// We don't perform a deletion here, because this Decl or another one
/// may end up referencing it before the update is complete.
-fn markDeclForDeletion(mod: *Module, decl: *Decl) !void {
+fn markDeclForDeletion(mod: *Module, decl_index: Decl.Index) !void {
+ const decl = mod.declPtr(decl_index);
decl.deletion_flag = true;
- try mod.deletion_set.put(mod.gpa, decl, {});
+ try mod.deletion_set.put(mod.gpa, decl_index, {});
}
/// Cancel the creation of an anon decl and delete any references to it.
/// If other decls depend on this decl, they must be aborted first.
-pub fn abortAnonDecl(mod: *Module, decl: *Decl) void {
+pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
+ const decl = mod.declPtr(decl_index);
log.debug("abortAnonDecl {*} ({s})", .{ decl, decl.name });
- assert(!decl.isRoot());
- assert(decl.src_namespace.anon_decls.swapRemove(decl));
+ assert(!mod.declIsRoot(decl_index));
+ assert(decl.src_namespace.anon_decls.swapRemove(decl_index));
// An aborted decl must not have dependants -- they must have
// been aborted first and removed from this list.
assert(decl.dependants.count() == 0);
- for (decl.dependencies.keys()) |dep| {
- dep.removeDependant(decl);
+ for (decl.dependencies.keys()) |dep_index| {
+ const dep = mod.declPtr(dep_index);
+ dep.removeDependant(decl_index);
}
- decl.destroy(mod);
+ mod.destroyDecl(decl_index);
}
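Both functions above now end in `mod.destroyDecl(decl_index)` where they used to call `decl.destroy(mod)`: the Decl's backing memory belongs to a module-owned list, so destruction reduces to freeing what the Decl owns and recycling its index. A sketch under that assumption (the real cleanup touches more fields, and the treatment of a failed free-list append is a guess):

pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
    const gpa = mod.gpa;
    const decl = mod.declPtr(decl_index);

    gpa.free(mem.sliceTo(decl.name, 0));
    if (decl.value_arena) |value_arena| {
        var arena = value_arena.promote(gpa);
        arena.deinit();
    }
    decl.dependencies.deinit(gpa);
    decl.dependants.deinit(gpa);
    decl.* = undefined;

    // Recycle the slot for a future allocateNewDecl. If this append
    // fails, the index is merely leaked; the Decl memory itself stays
    // owned by the SegmentedList either way.
    mod.decls_free_list.append(gpa, decl_index) catch {};
}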
/// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not).
-fn deleteDeclExports(mod: *Module, decl: *Decl) void {
- const kv = mod.export_owners.fetchSwapRemove(decl) orelse return;
+fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
+ const kv = mod.export_owners.fetchSwapRemove(decl_index) orelse return;
for (kv.value) |exp| {
if (mod.decl_exports.getPtr(exp.exported_decl)) |value_ptr| {
@@ -4683,7 +4763,7 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void {
var i: usize = 0;
var new_len = list.len;
while (i < new_len) {
- if (list[i].owner_decl == decl) {
+ if (list[i].owner_decl == decl_index) {
mem.copyBackwards(*Export, list[i..], list[i + 1 .. new_len]);
new_len -= 1;
} else {
@@ -4713,11 +4793,13 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void {
mod.gpa.free(kv.value);
}
-pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) SemaError!Air {
+pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
const tracy = trace(@src());
defer tracy.end();
const gpa = mod.gpa;
+ const decl_index = func.owner_decl;
+ const decl = mod.declPtr(decl_index);
// Use the Decl's arena for captured values.
var decl_arena = decl.value_arena.?.promote(gpa);
@@ -4731,8 +4813,9 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem
.perm_arena = decl_arena_allocator,
.code = decl.getFileScope().zir,
.owner_decl = decl,
+ .owner_decl_index = decl_index,
.func = func,
- .fn_ret_ty = func.owner_decl.ty.fnReturnType(),
+ .fn_ret_ty = decl.ty.fnReturnType(),
.owner_func = func,
};
defer sema.deinit();
@@ -4748,7 +4831,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem
var inner_block: Sema.Block = .{
.parent = null,
.sema = &sema,
- .src_decl = decl,
+ .src_decl = decl_index,
.namespace = decl.src_namespace,
.wip_capture_scope = wip_captures.scope,
.instructions = .{},
@@ -4903,10 +4986,11 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem
};
}
-fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
+fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void {
+ const decl = mod.declPtr(decl_index);
log.debug("mark outdated {*} ({s})", .{ decl, decl.name });
- try mod.comp.work_queue.writeItem(.{ .analyze_decl = decl });
- if (mod.failed_decls.fetchSwapRemove(decl)) |kv| {
+ try mod.comp.work_queue.writeItem(.{ .analyze_decl = decl_index });
+ if (mod.failed_decls.fetchSwapRemove(decl_index)) |kv| {
kv.value.destroy(mod.gpa);
}
if (decl.has_tv and decl.owns_tv) {
@@ -4916,33 +5000,43 @@ fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
}
}
if (mod.emit_h) |emit_h| {
- if (emit_h.failed_decls.fetchSwapRemove(decl)) |kv| {
+ if (emit_h.failed_decls.fetchSwapRemove(decl_index)) |kv| {
kv.value.destroy(mod.gpa);
}
}
- _ = mod.compile_log_decls.swapRemove(decl);
+ _ = mod.compile_log_decls.swapRemove(decl_index);
decl.analysis = .outdated;
}
pub fn allocateNewDecl(
mod: *Module,
- name: [:0]const u8,
namespace: *Namespace,
src_node: Ast.Node.Index,
src_scope: ?*CaptureScope,
-) !*Decl {
- // If we have emit-h then we must allocate a bigger structure to store the emit-h state.
- const new_decl: *Decl = if (mod.emit_h != null) blk: {
- const parent_struct = try mod.gpa.create(DeclPlusEmitH);
- parent_struct.* = .{
- .emit_h = .{},
- .decl = undefined,
+) !Decl.Index {
+ const decl_and_index: struct {
+ new_decl: *Decl,
+ decl_index: Decl.Index,
+ } = if (mod.decls_free_list.popOrNull()) |decl_index| d: {
+ break :d .{
+ .new_decl = mod.declPtr(decl_index),
+ .decl_index = decl_index,
+ };
+ } else d: {
+ const decl = try mod.allocated_decls.addOne(mod.gpa);
+ errdefer mod.allocated_decls.shrinkRetainingCapacity(mod.allocated_decls.len - 1);
+ if (mod.emit_h) |mod_emit_h| {
+ const decl_emit_h = try mod_emit_h.allocated_emit_h.addOne(mod.gpa);
+ decl_emit_h.* = .{};
+ }
+ break :d .{
+ .new_decl = decl,
+ .decl_index = @intToEnum(Decl.Index, mod.allocated_decls.len - 1),
};
- break :blk &parent_struct.decl;
- } else try mod.gpa.create(Decl);
+ };
- new_decl.* = .{
- .name = name,
+ decl_and_index.new_decl.* = .{
+ .name = undefined,
.src_namespace = namespace,
.src_node = src_node,
.src_line = undefined,
@@ -4986,7 +5080,7 @@ pub fn allocateNewDecl(
.is_usingnamespace = false,
};
- return new_decl;
+ return decl_and_index.decl_index;
}
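For context, the storage this function draws from, introduced by this commit: a `std.SegmentedList` allocates new buckets as it grows rather than reallocating, so a `*Decl` held by one thread stays valid while another thread appends, and a free list of recycled indexes feeds the fast path at the top of the function. A sketch of the Module fields and the accessor used throughout this diff (the defaults shown here are assumptions):

/// Stable memory across growth; this is what lets declPtr results be
/// held while other threads call allocateNewDecl.
allocated_decls: std.SegmentedList(Decl, 0) = .{},
/// Indexes of destroyed Decls, reused before growing the list.
decls_free_list: std.ArrayListUnmanaged(Decl.Index) = .{},

pub fn declPtr(mod: *Module, index: Decl.Index) *Decl {
    return mod.allocated_decls.at(@enumToInt(index));
}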
/// Get error value for error tag `name`.
@@ -5010,18 +5104,9 @@ pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged
};
}
-/// Takes ownership of `name` even if it returns an error.
-pub fn createAnonymousDeclNamed(
- mod: *Module,
- block: *Sema.Block,
- typed_value: TypedValue,
- name: [:0]u8,
-) !*Decl {
- return mod.createAnonymousDeclFromDeclNamed(block.src_decl, block.namespace, block.wip_capture_scope, typed_value, name);
-}
-
-pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedValue) !*Decl {
- return mod.createAnonymousDeclFromDecl(block.src_decl, block.namespace, block.wip_capture_scope, typed_value);
+pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedValue) !Decl.Index {
+ const src_decl = mod.declPtr(block.src_decl);
+ return mod.createAnonymousDeclFromDecl(src_decl, block.namespace, block.wip_capture_scope, typed_value);
}
pub fn createAnonymousDeclFromDecl(
@@ -5030,30 +5115,31 @@ pub fn createAnonymousDeclFromDecl(
namespace: *Namespace,
src_scope: ?*CaptureScope,
tv: TypedValue,
-) !*Decl {
- const name_index = mod.getNextAnonNameIndex();
+) !Decl.Index {
+ const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope);
+ errdefer mod.destroyDecl(new_decl_index);
const name = try std.fmt.allocPrintZ(mod.gpa, "{s}__anon_{d}", .{
- src_decl.name, name_index,
+ src_decl.name, @enumToInt(new_decl_index),
});
- return mod.createAnonymousDeclFromDeclNamed(src_decl, namespace, src_scope, tv, name);
+ try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, tv, name);
+ return new_decl_index;
}
/// Takes ownership of `name` even if it returns an error.
-pub fn createAnonymousDeclFromDeclNamed(
+pub fn initNewAnonDecl(
mod: *Module,
- src_decl: *Decl,
+ new_decl_index: Decl.Index,
+ src_line: u32,
namespace: *Namespace,
- src_scope: ?*CaptureScope,
typed_value: TypedValue,
name: [:0]u8,
-) !*Decl {
+) !void {
errdefer mod.gpa.free(name);
- try namespace.anon_decls.ensureUnusedCapacity(mod.gpa, 1);
+ const new_decl = mod.declPtr(new_decl_index);
- const new_decl = try mod.allocateNewDecl(name, namespace, src_decl.src_node, src_scope);
-
- new_decl.src_line = src_decl.src_line;
+ new_decl.name = name;
+ new_decl.src_line = src_line;
new_decl.ty = typed_value.ty;
new_decl.val = typed_value.val;
new_decl.@"align" = 0;
@@ -5062,22 +5148,16 @@ pub fn createAnonymousDeclFromDeclNamed(
new_decl.analysis = .complete;
new_decl.generation = mod.generation;
- namespace.anon_decls.putAssumeCapacityNoClobber(new_decl, {});
+ try namespace.anon_decls.putNoClobber(mod.gpa, new_decl_index, {});
// The Decl starts off with alive=false and the codegen backend will set alive=true
// if the Decl is referenced by an instruction or another constant. Otherwise,
// the Decl will be garbage collected by the `codegen_decl` task instead of sent
// to the linker.
if (typed_value.ty.isFnOrHasRuntimeBits()) {
- try mod.comp.bin_file.allocateDeclIndexes(new_decl);
- try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl });
+ try mod.comp.bin_file.allocateDeclIndexes(new_decl_index);
+ try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index });
}
-
- return new_decl;
-}
-
-pub fn getNextAnonNameIndex(mod: *Module) usize {
- return @atomicRmw(usize, &mod.next_anon_name_index, .Add, 1, .Monotonic);
}
pub fn makeIntType(arena: Allocator, signedness: std.builtin.Signedness, bits: u16) !Type {
@@ -5339,12 +5419,12 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void {
// for the outdated decls, but we cannot queue up the tasks until after
// we find out which ones have been deleted, otherwise there would be
// deleted Decl pointers in the work queue.
- var outdated_decls = std.AutoArrayHashMap(*Decl, void).init(mod.gpa);
+ var outdated_decls = std.AutoArrayHashMap(Decl.Index, void).init(mod.gpa);
defer outdated_decls.deinit();
for (mod.import_table.values()) |file| {
try outdated_decls.ensureUnusedCapacity(file.outdated_decls.items.len);
- for (file.outdated_decls.items) |decl| {
- outdated_decls.putAssumeCapacity(decl, {});
+ for (file.outdated_decls.items) |decl_index| {
+ outdated_decls.putAssumeCapacity(decl_index, {});
}
file.outdated_decls.clearRetainingCapacity();
@@ -5356,15 +5436,16 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void {
// it may be both in this `deleted_decls` set, as well as in the
// `Module.deletion_set`. To avoid deleting it twice, we remove it from the
// deletion set at this time.
- for (file.deleted_decls.items) |decl| {
+ for (file.deleted_decls.items) |decl_index| {
+ const decl = mod.declPtr(decl_index);
log.debug("deleted from source: {*} ({s})", .{ decl, decl.name });
// Remove from the namespace it resides in, preserving declaration order.
assert(decl.zir_decl_index != 0);
- _ = decl.src_namespace.decls.orderedRemoveAdapted(@as([]const u8, mem.sliceTo(decl.name, 0)), DeclAdapter{});
+ _ = decl.src_namespace.decls.orderedRemoveAdapted(@as([]const u8, mem.sliceTo(decl.name, 0)), DeclAdapter{ .mod = mod });
- try mod.clearDecl(decl, &outdated_decls);
- decl.destroy(mod);
+ try mod.clearDecl(decl_index, &outdated_decls);
+ mod.destroyDecl(decl_index);
}
file.deleted_decls.clearRetainingCapacity();
}
@@ -5393,13 +5474,13 @@ pub fn processExports(mod: *Module) !void {
if (gop.found_existing) {
new_export.status = .failed_retryable;
try mod.failed_exports.ensureUnusedCapacity(gpa, 1);
- const src_loc = new_export.getSrcLoc();
+ const src_loc = new_export.getSrcLoc(mod);
const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {s}", .{
new_export.options.name,
});
errdefer msg.destroy(gpa);
const other_export = gop.value_ptr.*;
- const other_src_loc = other_export.getSrcLoc();
+ const other_src_loc = other_export.getSrcLoc(mod);
try mod.errNoteNonLazy(other_src_loc, msg, "other symbol here", .{});
mod.failed_exports.putAssumeCapacityNoClobber(new_export, msg);
new_export.status = .failed;
@@ -5413,7 +5494,7 @@ pub fn processExports(mod: *Module) !void {
const new_export = exports[0];
new_export.status = .failed_retryable;
try mod.failed_exports.ensureUnusedCapacity(gpa, 1);
- const src_loc = new_export.getSrcLoc();
+ const src_loc = new_export.getSrcLoc(mod);
const msg = try ErrorMsg.create(gpa, src_loc, "unable to export: {s}", .{
@errorName(err),
});
@@ -5427,12 +5508,14 @@ pub fn populateTestFunctions(mod: *Module) !void {
const gpa = mod.gpa;
const builtin_pkg = mod.main_pkg.table.get("builtin").?;
const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file;
- const builtin_namespace = builtin_file.root_decl.?.src_namespace;
- const decl = builtin_namespace.decls.getKeyAdapted(@as([]const u8, "test_functions"), DeclAdapter{}).?;
+ const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?);
+ const builtin_namespace = root_decl.src_namespace;
+ const decl_index = builtin_namespace.decls.getKeyAdapted(@as([]const u8, "test_functions"), DeclAdapter{ .mod = mod }).?;
+ const decl = mod.declPtr(decl_index);
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType();
- const array_decl = d: {
+ const array_decl_index = d: {
// Add mod.test_functions to an array decl then make the test_functions
// decl reference it as a slice.
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
@@ -5440,50 +5523,52 @@ pub fn populateTestFunctions(mod: *Module) !void {
const arena = new_decl_arena.allocator();
const test_fn_vals = try arena.alloc(Value, mod.test_functions.count());
- const array_decl = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
+ const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
.ty = try Type.Tag.array.create(arena, .{
.len = test_fn_vals.len,
.elem_type = try tmp_test_fn_ty.copy(arena),
}),
.val = try Value.Tag.aggregate.create(arena, test_fn_vals),
});
+ const array_decl = mod.declPtr(array_decl_index);
// Add a dependency on each test name and function pointer.
try array_decl.dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2);
- for (mod.test_functions.keys()) |test_decl, i| {
+ for (mod.test_functions.keys()) |test_decl_index, i| {
+ const test_decl = mod.declPtr(test_decl_index);
const test_name_slice = mem.sliceTo(test_decl.name, 0);
- const test_name_decl = n: {
+ const test_name_decl_index = n: {
var name_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer name_decl_arena.deinit();
const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice);
- const test_name_decl = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{
+ const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{
.ty = try Type.Tag.array_u8.create(name_decl_arena.allocator(), bytes.len),
.val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes),
});
- try test_name_decl.finalizeNewArena(&name_decl_arena);
- break :n test_name_decl;
+ try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena);
+ break :n test_name_decl_index;
};
- array_decl.dependencies.putAssumeCapacityNoClobber(test_decl, {});
- array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl, {});
- try mod.linkerUpdateDecl(test_name_decl);
+ array_decl.dependencies.putAssumeCapacityNoClobber(test_decl_index, {});
+ array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, {});
+ try mod.linkerUpdateDecl(test_name_decl_index);
const field_vals = try arena.create([3]Value);
field_vals.* = .{
try Value.Tag.slice.create(arena, .{
- .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl),
+ .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl_index),
.len = try Value.Tag.int_u64.create(arena, test_name_slice.len),
}), // name
- try Value.Tag.decl_ref.create(arena, test_decl), // func
+ try Value.Tag.decl_ref.create(arena, test_decl_index), // func
Value.initTag(.null_value), // async_frame_size
};
test_fn_vals[i] = try Value.Tag.aggregate.create(arena, field_vals);
}
try array_decl.finalizeNewArena(&new_decl_arena);
- break :d array_decl;
+ break :d array_decl_index;
};
- try mod.linkerUpdateDecl(array_decl);
+ try mod.linkerUpdateDecl(array_decl_index);
{
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
@@ -5493,7 +5578,7 @@ pub fn populateTestFunctions(mod: *Module) !void {
// This copy accesses the old Decl Type/Value so it must be done before `clearValues`.
const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena));
const new_val = try Value.Tag.slice.create(arena, .{
- .ptr = try Value.Tag.decl_ref.create(arena, array_decl),
+ .ptr = try Value.Tag.decl_ref.create(arena, array_decl_index),
.len = try Value.Tag.int_u64.create(arena, mod.test_functions.count()),
});
@@ -5506,15 +5591,17 @@ pub fn populateTestFunctions(mod: *Module) !void {
try decl.finalizeNewArena(&new_decl_arena);
}
- try mod.linkerUpdateDecl(decl);
+ try mod.linkerUpdateDecl(decl_index);
}
-pub fn linkerUpdateDecl(mod: *Module, decl: *Decl) !void {
+pub fn linkerUpdateDecl(mod: *Module, decl_index: Decl.Index) !void {
const comp = mod.comp;
if (comp.bin_file.options.emit == null) return;
- comp.bin_file.updateDecl(mod, decl) catch |err| switch (err) {
+ const decl = mod.declPtr(decl_index);
+
+ comp.bin_file.updateDecl(mod, decl_index) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
decl.analysis = .codegen_failure;
@@ -5523,7 +5610,7 @@ pub fn linkerUpdateDecl(mod: *Module, decl: *Decl) !void {
else => {
const gpa = mod.gpa;
try mod.failed_decls.ensureUnusedCapacity(gpa, 1);
- mod.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
+ mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create(
gpa,
decl.srcLoc(),
"unable to codegen: {s}",
@@ -5566,3 +5653,64 @@ fn reportRetryableFileError(
}
gop.value_ptr.* = err_msg;
}
+
+pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void {
+ switch (val.tag()) {
+ .decl_ref_mut => return mod.markDeclIndexAlive(val.castTag(.decl_ref_mut).?.data.decl_index),
+ .extern_fn => return mod.markDeclIndexAlive(val.castTag(.extern_fn).?.data.owner_decl),
+ .function => return mod.markDeclIndexAlive(val.castTag(.function).?.data.owner_decl),
+ .variable => return mod.markDeclIndexAlive(val.castTag(.variable).?.data.owner_decl),
+ .decl_ref => return mod.markDeclIndexAlive(val.cast(Value.Payload.Decl).?.data),
+
+ .repeated,
+ .eu_payload,
+ .opt_payload,
+ .empty_array_sentinel,
+ => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.SubValue).?.data),
+
+ .eu_payload_ptr,
+ .opt_payload_ptr,
+ => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.PayloadPtr).?.data.container_ptr),
+
+ .slice => {
+ const slice = val.cast(Value.Payload.Slice).?.data;
+ mod.markReferencedDeclsAlive(slice.ptr);
+ mod.markReferencedDeclsAlive(slice.len);
+ },
+
+ .elem_ptr => {
+ const elem_ptr = val.cast(Value.Payload.ElemPtr).?.data;
+ return mod.markReferencedDeclsAlive(elem_ptr.array_ptr);
+ },
+ .field_ptr => {
+ const field_ptr = val.cast(Value.Payload.FieldPtr).?.data;
+ return mod.markReferencedDeclsAlive(field_ptr.container_ptr);
+ },
+ .aggregate => {
+ for (val.castTag(.aggregate).?.data) |field_val| {
+ mod.markReferencedDeclsAlive(field_val);
+ }
+ },
+ .@"union" => {
+ const data = val.cast(Value.Payload.Union).?.data;
+ mod.markReferencedDeclsAlive(data.tag);
+ mod.markReferencedDeclsAlive(data.val);
+ },
+
+ else => {},
+ }
+}
+
+pub fn markDeclAlive(mod: *Module, decl: *Decl) void {
+ if (decl.alive) return;
+ decl.alive = true;
+
+ // This is the first time we are marking this Decl alive. We must
+ // therefore recurse into its value and mark any Decl it references
+ // as also alive, so that any Decl referenced does not get garbage collected.
+ mod.markReferencedDeclsAlive(decl.val);
+}
+
+fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) void {
+ return mod.markDeclAlive(mod.declPtr(decl_index));
+}
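A closing note on why `markReferencedDeclsAlive` can read indexes straight out of value payloads: the same commit shrinks the Decl references stored inside `Value` from pointers to indexes, e.g. the payload behind the `.decl_ref` case above. A sketch of that payload's assumed shape (the name matches the `Value.Payload.Decl` cast in the switch; the surrounding Tag machinery is elided):

pub const Payload = struct {
    tag: Tag,

    pub const Decl = struct {
        base: Payload,
        /// Was `*Module.Decl`; a 32-bit index halves the field on
        /// 64-bit hosts and serializes as a plain integer.
        data: Module.Decl.Index,
    };
};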