author     Andrew Kelley <andrew@ziglang.org>  2023-06-12 22:50:50 -0700
committer  GitHub <noreply@github.com>         2023-06-12 22:50:50 -0700
commit     529ef75101e21bd45402c516138343b4770238eb (patch)
tree       56c925bd7df84e5f223c31a7c8fa90606c8e2dc9 /src/Module.zig
parent     1e7dcaa3ae57294ab5998b44a8c13ccc5019e7ea (diff)
parent     2ad073ec6d4e2be967f18c9907844404a7eed42e (diff)
Merge pull request #15569 from ziglang/intern-pool-3
Use InternPool for all types and constant values
Diffstat (limited to 'src/Module.zig')
-rw-r--r--  src/Module.zig  2518
1 file changed, 1436 insertions(+), 1082 deletions(-)
diff --git a/src/Module.zig b/src/Module.zig
index a8f2281c4f..61f39a327a 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -32,6 +32,19 @@ const build_options = @import("build_options");
const Liveness = @import("Liveness.zig");
const isUpDir = @import("introspect.zig").isUpDir;
const clang = @import("clang.zig");
+const InternPool = @import("InternPool.zig");
+
+comptime {
+ @setEvalBranchQuota(4000);
+ for (
+ @typeInfo(Zir.Inst.Ref).Enum.fields,
+ @typeInfo(Air.Inst.Ref).Enum.fields,
+ @typeInfo(InternPool.Index).Enum.fields,
+ ) |zir_field, air_field, ip_field| {
+ assert(mem.eql(u8, zir_field.name, ip_field.name));
+ assert(mem.eql(u8, air_field.name, ip_field.name));
+ }
+}
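// A minimal, standalone sketch of the lockstep-enum check introduced
// above (RefA/RefB are toy stand-ins, not the real Zir.Inst.Ref,
// Air.Inst.Ref, or InternPool.Index): multi-object `for` walks both
// field lists in step, so any tag-name drift between the enums becomes
// a compile error rather than a runtime bug.
const std = @import("std");

const RefA = enum { none, u8_type, bool_true };
const RefB = enum { none, u8_type, bool_true };

comptime {
    for (
        @typeInfo(RefA).Enum.fields,
        @typeInfo(RefB).Enum.fields,
    ) |a, b| {
        if (!std.mem.eql(u8, a.name, b.name))
            @compileError("enum tag mismatch: " ++ a.name);
    }
}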
/// General-purpose allocator. Used for both temporary and long-term storage.
gpa: Allocator,
@@ -72,28 +85,29 @@ import_table: std.StringArrayHashMapUnmanaged(*File) = .{},
/// Keys are fully resolved file paths. This table owns the keys and values.
embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{},
-/// This is a temporary addition to stage2 in order to match legacy behavior,
-/// however the end-game once the lang spec is settled will be to use a global
-/// InternPool for comptime memoized objects, making this behavior consistent across all types,
-/// not only string literals. Or, we might decide to not guarantee string literals
-/// to have equal comptime pointers, in which case this field can be deleted (perhaps
-/// the commit that introduced it can simply be reverted).
-/// This table uses an optional index so that when a Decl is destroyed, the string literal
-/// is still reclaimable by a future Decl.
-string_literal_table: std.HashMapUnmanaged(StringLiteralContext.Key, Decl.OptionalIndex, StringLiteralContext, std.hash_map.default_max_load_percentage) = .{},
-string_literal_bytes: ArrayListUnmanaged(u8) = .{},
+/// Stores all Type and Value objects; periodically garbage collected.
+intern_pool: InternPool = .{},
+
+/// To be eliminated in a future commit by moving more data into InternPool.
+/// Current uses that must be eliminated:
+/// * Struct comptime_args
+/// * Struct optimized_order
+/// * Union fields
+/// This memory lives until the Module is destroyed.
+tmp_hack_arena: std.heap.ArenaAllocator,
+
+/// This is currently only used for string literals.
+memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{},
+monomorphed_func_keys: std.ArrayListUnmanaged(InternPool.Index) = .{},
/// The set of all the generic function instantiations. This is used so that when a generic
/// function is called twice with the same comptime parameter arguments, both calls dispatch
/// to the same function.
monomorphed_funcs: MonomorphedFuncsSet = .{},
-/// The set of all comptime function calls that have been cached so that future calls
-/// with the same parameters will get the same return value.
-memoized_calls: MemoizedCallSet = .{},
/// Contains the values from `@setAlignStack`. A sparse table is used here
/// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while
/// functions are many.
-align_stack_fns: std.AutoHashMapUnmanaged(*const Fn, SetAlignStack) = .{},
+align_stack_fns: std.AutoHashMapUnmanaged(Fn.Index, SetAlignStack) = .{},
/// We optimize memory usage for a compilation with no compile errors by storing the
/// error messages and mapping outside of `Decl`.
@@ -120,13 +134,8 @@ cimport_errors: std.AutoArrayHashMapUnmanaged(Decl.Index, []CImportError) = .{},
/// contains Decls that need to be deleted if they end up having no references to them.
deletion_set: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
-/// Error tags and their values, tag names are duped with mod.gpa.
-/// Corresponds with `error_name_list`.
-global_error_set: std.StringHashMapUnmanaged(ErrorInt) = .{},
-
-/// ErrorInt -> []const u8 for fast lookups for @intToError at comptime
-/// Corresponds with `global_error_set`.
-error_name_list: ArrayListUnmanaged([]const u8),
+/// Key is the error name, index is the error tag value. Index 0 has a length-0 string.
+global_error_set: GlobalErrorSet = .{},
/// Incrementing integer used to compare against the corresponding Decl
/// field to determine whether a Decl's status applies to an ongoing update, or a
@@ -165,6 +174,11 @@ allocated_decls: std.SegmentedList(Decl, 0) = .{},
/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack.
decls_free_list: ArrayListUnmanaged(Decl.Index) = .{},
+/// Same pattern as with `allocated_decls`.
+allocated_namespaces: std.SegmentedList(Namespace, 0) = .{},
+/// Same pattern as with `decls_free_list`.
+namespaces_free_list: ArrayListUnmanaged(Namespace.Index) = .{},
+
global_assembly: std.AutoHashMapUnmanaged(Decl.Index, []u8) = .{},
reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct {
@@ -172,6 +186,8 @@ reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct {
src: LazySrcLoc,
}) = .{},
+pub const GlobalErrorSet = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);
+
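// Standalone sketch of the new error-set representation: an array hash
// map keyed by the interned error name, where the entry *index* doubles
// as the ErrorInt tag value. Here u32 stands in for
// InternPool.NullTerminatedString, and index 0 is reserved for the
// empty name, as done in Module.init further down.
const std = @import("std");

test "entry index doubles as the error tag value" {
    const gpa = std.testing.allocator;
    var set = std.AutoArrayHashMapUnmanaged(u32, void){};
    defer set.deinit(gpa);

    try set.put(gpa, 0, {}); // reserve index 0 for the empty name
    const gop = try set.getOrPut(gpa, 42); // 42 stands in for an interned name
    const tag: u32 = @intCast(u32, gop.index);
    try std.testing.expectEqual(@as(u32, 1), tag);
    // Re-adding the same name yields the same tag value:
    try std.testing.expectEqual(@as(usize, 1), (try set.getOrPut(gpa, 42)).index);
}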
pub const CImportError = struct {
offset: u32,
line: u32,
@@ -187,108 +203,40 @@ pub const CImportError = struct {
}
};
-pub const StringLiteralContext = struct {
- bytes: *ArrayListUnmanaged(u8),
+pub const MonomorphedFuncKey = struct { func: Fn.Index, args_index: u32, args_len: u32 };
- pub const Key = struct {
- index: u32,
- len: u32,
- };
+pub const MonomorphedFuncAdaptedKey = struct { func: Fn.Index, args: []const InternPool.Index };
- pub fn eql(self: @This(), a: Key, b: Key) bool {
- _ = self;
- return a.index == b.index and a.len == b.len;
- }
-
- pub fn hash(self: @This(), x: Key) u64 {
- const x_slice = self.bytes.items[x.index..][0..x.len];
- return std.hash_map.hashString(x_slice);
- }
-};
-
-pub const StringLiteralAdapter = struct {
- bytes: *ArrayListUnmanaged(u8),
-
- pub fn eql(self: @This(), a_slice: []const u8, b: StringLiteralContext.Key) bool {
- const b_slice = self.bytes.items[b.index..][0..b.len];
- return mem.eql(u8, a_slice, b_slice);
- }
-
- pub fn hash(self: @This(), adapted_key: []const u8) u64 {
- _ = self;
- return std.hash_map.hashString(adapted_key);
- }
-};
-
-const MonomorphedFuncsSet = std.HashMapUnmanaged(
- *Fn,
- void,
+pub const MonomorphedFuncsSet = std.HashMapUnmanaged(
+ MonomorphedFuncKey,
+ InternPool.Index,
MonomorphedFuncsContext,
std.hash_map.default_max_load_percentage,
);
-const MonomorphedFuncsContext = struct {
- pub fn eql(ctx: @This(), a: *Fn, b: *Fn) bool {
- _ = ctx;
- return a == b;
+pub const MonomorphedFuncsContext = struct {
+ mod: *Module,
+
+ pub fn eql(_: @This(), a: MonomorphedFuncKey, b: MonomorphedFuncKey) bool {
+ return std.meta.eql(a, b);
}
- /// Must match `Sema.GenericCallAdapter.hash`.
- pub fn hash(ctx: @This(), key: *Fn) u64 {
- _ = ctx;
- return key.hash;
+ pub fn hash(ctx: @This(), key: MonomorphedFuncKey) u64 {
+ const key_args = ctx.mod.monomorphed_func_keys.items[key.args_index..][0..key.args_len];
+ return std.hash.Wyhash.hash(@enumToInt(key.func), std.mem.sliceAsBytes(key_args));
}
};
-pub const MemoizedCallSet = std.HashMapUnmanaged(
- MemoizedCall.Key,
- MemoizedCall.Result,
- MemoizedCall,
- std.hash_map.default_max_load_percentage,
-);
-
-pub const MemoizedCall = struct {
- module: *Module,
-
- pub const Key = struct {
- func: *Fn,
- args: []TypedValue,
- };
-
- pub const Result = struct {
- val: Value,
- arena: std.heap.ArenaAllocator.State,
- };
-
- pub fn eql(ctx: @This(), a: Key, b: Key) bool {
- if (a.func != b.func) return false;
-
- assert(a.args.len == b.args.len);
- for (a.args, 0..) |a_arg, arg_i| {
- const b_arg = b.args[arg_i];
- if (!a_arg.eql(b_arg, ctx.module)) {
- return false;
- }
- }
+pub const MonomorphedFuncsAdaptedContext = struct {
+ mod: *Module,
- return true;
+ pub fn eql(ctx: @This(), adapted_key: MonomorphedFuncAdaptedKey, other_key: MonomorphedFuncKey) bool {
+ const other_key_args = ctx.mod.monomorphed_func_keys.items[other_key.args_index..][0..other_key.args_len];
+ return adapted_key.func == other_key.func and std.mem.eql(InternPool.Index, adapted_key.args, other_key_args);
}
- /// Must match `Sema.GenericCallAdapter.hash`.
- pub fn hash(ctx: @This(), key: Key) u64 {
- var hasher = std.hash.Wyhash.init(0);
-
- // The generic function Decl is guaranteed to be the first dependency
- // of each of its instantiations.
- std.hash.autoHash(&hasher, key.func);
-
- // This logic must be kept in sync with the logic in `analyzeCall` that
- // computes the hash.
- for (key.args) |arg| {
- arg.hash(&hasher, ctx.module);
- }
-
- return hasher.final();
+ pub fn hash(_: @This(), adapted_key: MonomorphedFuncAdaptedKey) u64 {
+ return std.hash.Wyhash.hash(@enumToInt(adapted_key.func), std.mem.sliceAsBytes(adapted_key.args));
}
};
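// Standalone sketch of the stored-key / adapted-key pattern above
// (StoredKey is a toy, not the real MonomorphedFuncKey): the table
// stores compact (index, len) keys into a shared args buffer, while
// lookups hash a plain slice through the adapted context, so no stored
// key needs to be materialized unless the entry is new. Both contexts
// must hash the same bytes; getOrPutContextAdapted is the std.hash_map
// entry point for non-zero-sized contexts like these.
const std = @import("std");

const StoredKey = struct { args_index: u32, args_len: u32 };

const Ctx = struct {
    args: []const u32,
    pub fn hash(ctx: Ctx, k: StoredKey) u64 {
        const slice = ctx.args[k.args_index..][0..k.args_len];
        return std.hash.Wyhash.hash(0, std.mem.sliceAsBytes(slice));
    }
    pub fn eql(_: Ctx, a: StoredKey, b: StoredKey) bool {
        return std.meta.eql(a, b);
    }
};

const AdaptedCtx = struct {
    args: []const u32,
    pub fn hash(_: AdaptedCtx, k: []const u32) u64 {
        return std.hash.Wyhash.hash(0, std.mem.sliceAsBytes(k));
    }
    pub fn eql(ctx: AdaptedCtx, a: []const u32, b: StoredKey) bool {
        return std.mem.eql(u32, a, ctx.args[b.args_index..][0..b.args_len]);
    }
};

test "lookup by slice without materializing a stored key" {
    const gpa = std.testing.allocator;
    var args: [3]u32 = .{ 1, 2, 3 };

    var map = std.HashMapUnmanaged(StoredKey, void, Ctx, std.hash_map.default_max_load_percentage){};
    defer map.deinit(gpa);

    const gop = try map.getOrPutContextAdapted(
        gpa,
        @as([]const u32, &args),
        AdaptedCtx{ .args = &args },
        Ctx{ .args = &args },
    );
    // On a miss, the caller fills in the compact stored key:
    if (!gop.found_existing) gop.key_ptr.* = .{ .args_index = 0, .args_len = 3 };
    try std.testing.expect(map.count() == 1);
}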
@@ -322,7 +270,7 @@ pub const GlobalEmitH = struct {
pub const ErrorInt = u32;
pub const Export = struct {
- options: std.builtin.ExportOptions,
+ opts: Options,
src: LazySrcLoc,
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
owner_decl: Decl.Index,
@@ -340,10 +288,17 @@ pub const Export = struct {
complete,
},
+ pub const Options = struct {
+ name: InternPool.NullTerminatedString,
+ linkage: std.builtin.GlobalLinkage = .Strong,
+ section: InternPool.OptionalNullTerminatedString = .none,
+ visibility: std.builtin.SymbolVisibility = .default,
+ };
+
pub fn getSrcLoc(exp: Export, mod: *Module) SrcLoc {
const src_decl = mod.declPtr(exp.src_decl);
return .{
- .file_scope = src_decl.getFileScope(),
+ .file_scope = src_decl.getFileScope(mod),
.parent_decl_node = src_decl.src_node,
.lazy = exp.src,
};
@@ -351,61 +306,76 @@ pub const Export = struct {
};
pub const CaptureScope = struct {
+ refs: u32,
parent: ?*CaptureScope,
/// Values from this decl's evaluation that will be closed over in
- /// child decls. Values stored in the value_arena of the linked decl.
- /// During sema, this map is backed by the gpa. Once sema completes,
- /// it is reallocated using the value_arena.
- captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, TypedValue) = .{},
+ /// child decls. This map is backed by the gpa, and deinited when
+ /// the refcount reaches 0.
+ captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Capture) = .{},
+
+ pub const Capture = union(enum) {
+ comptime_val: InternPool.Index, // index of value
+ runtime_val: InternPool.Index, // index of type
+ };
- pub fn failed(noalias self: *const @This()) bool {
+ pub fn failed(noalias self: *const CaptureScope) bool {
return self.captures.available == 0 and self.captures.size == std.math.maxInt(u32);
}
- pub fn fail(noalias self: *@This()) void {
+ pub fn fail(noalias self: *CaptureScope, gpa: Allocator) void {
+ self.captures.deinit(gpa);
self.captures.available = 0;
self.captures.size = std.math.maxInt(u32);
}
+
+ pub fn incRef(self: *CaptureScope) void {
+ self.refs += 1;
+ }
+
+ pub fn decRef(self: *CaptureScope, gpa: Allocator) void {
+ self.refs -= 1;
+ if (self.refs > 0) return;
+ if (self.parent) |p| p.decRef(gpa);
+ if (!self.failed()) {
+ self.captures.deinit(gpa);
+ }
+ gpa.destroy(self);
+ }
};
pub const WipCaptureScope = struct {
scope: *CaptureScope,
finalized: bool,
gpa: Allocator,
- perm_arena: Allocator,
- pub fn init(gpa: Allocator, perm_arena: Allocator, parent: ?*CaptureScope) !@This() {
- const scope = try perm_arena.create(CaptureScope);
- scope.* = .{ .parent = parent };
- return @This(){
+ pub fn init(gpa: Allocator, parent: ?*CaptureScope) !WipCaptureScope {
+ const scope = try gpa.create(CaptureScope);
+ if (parent) |p| p.incRef();
+ scope.* = .{ .refs = 1, .parent = parent };
+ return .{
.scope = scope,
.finalized = false,
.gpa = gpa,
- .perm_arena = perm_arena,
};
}
- pub fn finalize(noalias self: *@This()) !void {
- assert(!self.finalized);
- // use a temp to avoid unintentional aliasing due to RLS
- const tmp = try self.scope.captures.clone(self.perm_arena);
- self.scope.captures.deinit(self.gpa);
- self.scope.captures = tmp;
+ pub fn finalize(noalias self: *WipCaptureScope) !void {
self.finalized = true;
}
- pub fn reset(noalias self: *@This(), parent: ?*CaptureScope) !void {
- if (!self.finalized) try self.finalize();
- self.scope = try self.perm_arena.create(CaptureScope);
- self.scope.* = .{ .parent = parent };
- self.finalized = false;
+ pub fn reset(noalias self: *WipCaptureScope, parent: ?*CaptureScope) !void {
+ self.scope.decRef(self.gpa);
+ self.scope = try self.gpa.create(CaptureScope);
+ if (parent) |p| p.incRef();
+ self.scope.* = .{ .refs = 1, .parent = parent };
}
- pub fn deinit(noalias self: *@This()) void {
- if (!self.finalized) {
- self.scope.captures.deinit(self.gpa);
- self.scope.fail();
+ pub fn deinit(noalias self: *WipCaptureScope) void {
+ if (self.finalized) {
+ self.scope.decRef(self.gpa);
+ } else {
+ self.scope.fail(self.gpa);
}
self.* = undefined;
}
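// Standalone sketch of the refcount discipline that replaces the old
// perm_arena approach (Node is a toy, not the real CaptureScope): each
// child holds a reference on its parent, and dropping the last
// reference frees the chain up to the first still-referenced ancestor,
// mirroring incRef/decRef above.
const std = @import("std");
const Allocator = std.mem.Allocator;

const Node = struct {
    refs: u32,
    parent: ?*Node,

    fn create(gpa: Allocator, parent: ?*Node) !*Node {
        const node = try gpa.create(Node);
        if (parent) |p| p.refs += 1; // incRef, as in WipCaptureScope.init
        node.* = .{ .refs = 1, .parent = parent };
        return node;
    }

    fn decRef(node: *Node, gpa: Allocator) void {
        node.refs -= 1;
        if (node.refs > 0) return;
        if (node.parent) |p| p.decRef(gpa);
        gpa.destroy(node);
    }
};

test "chain is freed when the last reference drops" {
    const gpa = std.testing.allocator;
    const root = try Node.create(gpa, null);
    const child = try Node.create(gpa, root);
    root.decRef(gpa); // root stays alive; child still references it
    child.decRef(gpa); // frees child, then root
}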
@@ -452,8 +422,7 @@ const ValueArena = struct {
};
pub const Decl = struct {
- /// Allocated with Module's allocator; outlives the ZIR code.
- name: [*:0]const u8,
+ name: InternPool.NullTerminatedString,
/// The most recent Type of the Decl after a successful semantic analysis.
/// Populated when `has_tv`.
ty: Type,
@@ -461,20 +430,16 @@ pub const Decl = struct {
/// Populated when `has_tv`.
val: Value,
/// Populated when `has_tv`.
- /// Points to memory inside value_arena.
- @"linksection": ?[*:0]const u8,
+ @"linksection": InternPool.OptionalNullTerminatedString,
/// Populated when `has_tv`.
@"align": u32,
/// Populated when `has_tv`.
@"addrspace": std.builtin.AddressSpace,
- /// The memory for ty, val, align, linksection, and captures.
- /// If this is `null` then there is no memory management needed.
- value_arena: ?*ValueArena = null,
/// The direct parent namespace of the Decl.
/// Reference to externally owned memory.
/// In the case of the Decl corresponding to a file, this is
/// the namespace of the struct, since there is no parent.
- src_namespace: *Namespace,
+ src_namespace: Namespace.Index,
/// The scope which lexically contains this decl. A decl must depend
/// on its lexical parent, in order to ensure that this pointer is valid.
@@ -624,55 +589,17 @@ pub const Decl = struct {
function_body,
};
- pub fn clearName(decl: *Decl, gpa: Allocator) void {
- gpa.free(mem.sliceTo(decl.name, 0));
- decl.name = undefined;
- }
-
pub fn clearValues(decl: *Decl, mod: *Module) void {
- const gpa = mod.gpa;
- if (decl.getExternFn()) |extern_fn| {
- extern_fn.deinit(gpa);
- gpa.destroy(extern_fn);
- }
- if (decl.getFunction()) |func| {
+ if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| {
_ = mod.align_stack_fns.remove(func);
- if (func.comptime_args != null) {
- _ = mod.monomorphed_funcs.remove(func);
- }
- func.deinit(gpa);
- gpa.destroy(func);
- }
- if (decl.getVariable()) |variable| {
- variable.deinit(gpa);
- gpa.destroy(variable);
- }
- if (decl.value_arena) |value_arena| {
- if (decl.owns_tv) {
- if (decl.val.castTag(.str_lit)) |str_lit| {
- mod.string_literal_table.getPtrContext(str_lit.data, .{
- .bytes = &mod.string_literal_bytes,
- }).?.* = .none;
- }
- }
- value_arena.deinit(gpa);
- decl.value_arena = null;
- decl.has_tv = false;
- decl.owns_tv = false;
+ mod.destroyFunc(func);
}
}
- pub fn finalizeNewArena(decl: *Decl, arena: *std.heap.ArenaAllocator) !void {
- assert(decl.value_arena == null);
- const value_arena = try arena.allocator().create(ValueArena);
- value_arena.* = .{ .state = arena.state };
- decl.value_arena = value_arena;
- }
-
/// This name is relative to the containing namespace of the decl.
/// The memory is owned by the containing File ZIR.
- pub fn getName(decl: Decl) ?[:0]const u8 {
- const zir = decl.getFileScope().zir;
+ pub fn getName(decl: Decl, mod: *Module) ?[:0]const u8 {
+ const zir = decl.getFileScope(mod).zir;
return decl.getNameZir(zir);
}
@@ -683,8 +610,8 @@ pub const Decl = struct {
return zir.nullTerminatedString(name_index);
}
- pub fn contentsHash(decl: Decl) std.zig.SrcHash {
- const zir = decl.getFileScope().zir;
+ pub fn contentsHash(decl: Decl, mod: *Module) std.zig.SrcHash {
+ const zir = decl.getFileScope(mod).zir;
return decl.contentsHashZir(zir);
}
@@ -695,31 +622,31 @@ pub const Decl = struct {
return contents_hash;
}
- pub fn zirBlockIndex(decl: *const Decl) Zir.Inst.Index {
+ pub fn zirBlockIndex(decl: *const Decl, mod: *Module) Zir.Inst.Index {
assert(decl.zir_decl_index != 0);
- const zir = decl.getFileScope().zir;
+ const zir = decl.getFileScope(mod).zir;
return zir.extra[decl.zir_decl_index + 6];
}
- pub fn zirAlignRef(decl: Decl) Zir.Inst.Ref {
+ pub fn zirAlignRef(decl: Decl, mod: *Module) Zir.Inst.Ref {
if (!decl.has_align) return .none;
assert(decl.zir_decl_index != 0);
- const zir = decl.getFileScope().zir;
+ const zir = decl.getFileScope(mod).zir;
return @intToEnum(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 8]);
}
- pub fn zirLinksectionRef(decl: Decl) Zir.Inst.Ref {
+ pub fn zirLinksectionRef(decl: Decl, mod: *Module) Zir.Inst.Ref {
if (!decl.has_linksection_or_addrspace) return .none;
assert(decl.zir_decl_index != 0);
- const zir = decl.getFileScope().zir;
+ const zir = decl.getFileScope(mod).zir;
const extra_index = decl.zir_decl_index + 8 + @boolToInt(decl.has_align);
return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
}
- pub fn zirAddrspaceRef(decl: Decl) Zir.Inst.Ref {
+ pub fn zirAddrspaceRef(decl: Decl, mod: *Module) Zir.Inst.Ref {
if (!decl.has_linksection_or_addrspace) return .none;
assert(decl.zir_decl_index != 0);
- const zir = decl.getFileScope().zir;
+ const zir = decl.getFileScope(mod).zir;
const extra_index = decl.zir_decl_index + 8 + @boolToInt(decl.has_align) + 1;
return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
}
@@ -744,154 +671,167 @@ pub const Decl = struct {
return LazySrcLoc.nodeOffset(decl.nodeIndexToRelative(node_index));
}
- pub fn srcLoc(decl: Decl) SrcLoc {
- return decl.nodeOffsetSrcLoc(0);
+ pub fn srcLoc(decl: Decl, mod: *Module) SrcLoc {
+ return decl.nodeOffsetSrcLoc(0, mod);
}
- pub fn nodeOffsetSrcLoc(decl: Decl, node_offset: i32) SrcLoc {
+ pub fn nodeOffsetSrcLoc(decl: Decl, node_offset: i32, mod: *Module) SrcLoc {
return .{
- .file_scope = decl.getFileScope(),
+ .file_scope = decl.getFileScope(mod),
.parent_decl_node = decl.src_node,
.lazy = LazySrcLoc.nodeOffset(node_offset),
};
}
- pub fn srcToken(decl: Decl) Ast.TokenIndex {
- const tree = &decl.getFileScope().tree;
+ pub fn srcToken(decl: Decl, mod: *Module) Ast.TokenIndex {
+ const tree = &decl.getFileScope(mod).tree;
return tree.firstToken(decl.src_node);
}
- pub fn srcByteOffset(decl: Decl) u32 {
- const tree = &decl.getFileScope().tree;
+ pub fn srcByteOffset(decl: Decl, mod: *Module) u32 {
+ const tree = &decl.getFileScope(mod).tree;
return tree.tokens.items(.start)[decl.srcToken()];
}
pub fn renderFullyQualifiedName(decl: Decl, mod: *Module, writer: anytype) !void {
- const unqualified_name = mem.sliceTo(decl.name, 0);
if (decl.name_fully_qualified) {
- return writer.writeAll(unqualified_name);
+ try writer.print("{}", .{decl.name.fmt(&mod.intern_pool)});
+ } else {
+ try mod.namespacePtr(decl.src_namespace).renderFullyQualifiedName(mod, decl.name, writer);
}
- return decl.src_namespace.renderFullyQualifiedName(mod, unqualified_name, writer);
}
pub fn renderFullyQualifiedDebugName(decl: Decl, mod: *Module, writer: anytype) !void {
- const unqualified_name = mem.sliceTo(decl.name, 0);
- return decl.src_namespace.renderFullyQualifiedDebugName(mod, unqualified_name, writer);
+ return mod.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(mod, decl.name, writer);
}
- pub fn getFullyQualifiedName(decl: Decl, mod: *Module) ![:0]u8 {
- var buffer = std.ArrayList(u8).init(mod.gpa);
- defer buffer.deinit();
- try decl.renderFullyQualifiedName(mod, buffer.writer());
+ pub fn getFullyQualifiedName(decl: Decl, mod: *Module) !InternPool.NullTerminatedString {
+ if (decl.name_fully_qualified) return decl.name;
+
+ const ip = &mod.intern_pool;
+ const count = count: {
+ var count: usize = ip.stringToSlice(decl.name).len + 1;
+ var ns: Namespace.Index = decl.src_namespace;
+ while (true) {
+ const namespace = mod.namespacePtr(ns);
+ const ns_decl = mod.declPtr(namespace.getDeclIndex(mod));
+ count += ip.stringToSlice(ns_decl.name).len + 1;
+ ns = namespace.parent.unwrap() orelse {
+ count += namespace.file_scope.sub_file_path.len;
+ break :count count;
+ };
+ }
+ };
+
+ const gpa = mod.gpa;
+ const start = ip.string_bytes.items.len;
+ // Protects reads of interned strings from being reallocated during the call to
+ // renderFullyQualifiedName.
+ try ip.string_bytes.ensureUnusedCapacity(gpa, count);
+ decl.renderFullyQualifiedName(mod, ip.string_bytes.writer(gpa)) catch unreachable;
// Sanitize the name for nvptx which is more restrictive.
+ // TODO This should be handled by the backend, not the frontend. Have a
+ // look at how the C backend does it for inspiration.
if (mod.comp.bin_file.options.target.cpu.arch.isNvptx()) {
- for (buffer.items) |*byte| switch (byte.*) {
+ for (ip.string_bytes.items[start..]) |*byte| switch (byte.*) {
'{', '}', '*', '[', ']', '(', ')', ',', ' ', '\'' => byte.* = '_',
else => {},
};
}
- return buffer.toOwnedSliceSentinel(0);
+ return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start);
}
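// Standalone sketch of the reserve-then-write trick used above (a plain
// ArrayListUnmanaged stands in for InternPool.string_bytes): the byte
// count is computed up front, ensureUnusedCapacity reserves it, and the
// following writes can neither fail nor reallocate, so slices into the
// buffer remain valid while the name is rendered.
const std = @import("std");

test "reserved writes do not move the buffer" {
    const gpa = std.testing.allocator;
    var bytes = std.ArrayListUnmanaged(u8){};
    defer bytes.deinit(gpa);
    try bytes.appendSlice(gpa, "std.fs");

    const suffix = ".Dir";
    try bytes.ensureUnusedCapacity(gpa, suffix.len);
    const before = bytes.items.ptr; // safe to hold across the writes below
    bytes.appendSliceAssumeCapacity(suffix); // cannot fail, cannot reallocate
    try std.testing.expect(before == bytes.items.ptr);
    try std.testing.expectEqualStrings("std.fs.Dir", bytes.items);
}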
pub fn typedValue(decl: Decl) error{AnalysisFail}!TypedValue {
if (!decl.has_tv) return error.AnalysisFail;
- return TypedValue{
- .ty = decl.ty,
- .val = decl.val,
- };
+ return TypedValue{ .ty = decl.ty, .val = decl.val };
}
- pub fn value(decl: *Decl) error{AnalysisFail}!Value {
- return (try decl.typedValue()).val;
+ pub fn internValue(decl: *Decl, mod: *Module) Allocator.Error!InternPool.Index {
+ assert(decl.has_tv);
+ const ip_index = try decl.val.intern(decl.ty, mod);
+ decl.val = ip_index.toValue();
+ return ip_index;
}
- pub fn isFunction(decl: Decl) !bool {
+ pub fn isFunction(decl: Decl, mod: *const Module) !bool {
const tv = try decl.typedValue();
- return tv.ty.zigTypeTag() == .Fn;
+ return tv.ty.zigTypeTag(mod) == .Fn;
}
- /// If the Decl has a value and it is a struct, return it,
+ /// If the Decl owns its value and it is a struct, return it,
/// otherwise null.
- pub fn getStruct(decl: *Decl) ?*Struct {
- if (!decl.owns_tv) return null;
- const ty = (decl.val.castTag(.ty) orelse return null).data;
- const struct_obj = (ty.castTag(.@"struct") orelse return null).data;
- return struct_obj;
+ pub fn getOwnedStruct(decl: Decl, mod: *Module) ?*Struct {
+ return mod.structPtrUnwrap(decl.getOwnedStructIndex(mod));
}
- /// If the Decl has a value and it is a union, return it,
+ pub fn getOwnedStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex {
+ if (!decl.owns_tv) return .none;
+ if (decl.val.ip_index == .none) return .none;
+ return mod.intern_pool.indexToStructType(decl.val.toIntern());
+ }
+
+ /// If the Decl owns its value and it is a union, return it,
/// otherwise null.
- pub fn getUnion(decl: *Decl) ?*Union {
+ pub fn getOwnedUnion(decl: Decl, mod: *Module) ?*Union {
if (!decl.owns_tv) return null;
- const ty = (decl.val.castTag(.ty) orelse return null).data;
- const union_obj = (ty.cast(Type.Payload.Union) orelse return null).data;
- return union_obj;
+ if (decl.val.ip_index == .none) return null;
+ return mod.typeToUnion(decl.val.toType());
}
- /// If the Decl has a value and it is a function, return it,
+ /// If the Decl owns its value and it is a function, return it,
/// otherwise null.
- pub fn getFunction(decl: *const Decl) ?*Fn {
- if (!decl.owns_tv) return null;
- const func = (decl.val.castTag(.function) orelse return null).data;
- return func;
+ pub fn getOwnedFunction(decl: Decl, mod: *Module) ?*Fn {
+ return mod.funcPtrUnwrap(decl.getOwnedFunctionIndex(mod));
+ }
+
+ pub fn getOwnedFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex {
+ return if (decl.owns_tv) decl.val.getFunctionIndex(mod) else .none;
}
- /// If the Decl has a value and it is an extern function, returns it,
+ /// If the Decl owns its value and it is an extern function, returns it,
/// otherwise null.
- pub fn getExternFn(decl: *const Decl) ?*ExternFn {
- if (!decl.owns_tv) return null;
- const extern_fn = (decl.val.castTag(.extern_fn) orelse return null).data;
- return extern_fn;
+ pub fn getOwnedExternFunc(decl: Decl, mod: *Module) ?InternPool.Key.ExternFunc {
+ return if (decl.owns_tv) decl.val.getExternFunc(mod) else null;
}
- /// If the Decl has a value and it is a variable, returns it,
+ /// If the Decl owns its value and it is a variable, returns it,
/// otherwise null.
- pub fn getVariable(decl: *const Decl) ?*Var {
- if (!decl.owns_tv) return null;
- const variable = (decl.val.castTag(.variable) orelse return null).data;
- return variable;
+ pub fn getOwnedVariable(decl: Decl, mod: *Module) ?InternPool.Key.Variable {
+ return if (decl.owns_tv) decl.val.getVariable(mod) else null;
}
/// Gets the namespace that this Decl creates by being a struct, union,
/// enum, or opaque.
/// Only returns it if the Decl is the owner.
- pub fn getInnerNamespace(decl: *Decl) ?*Namespace {
- if (!decl.owns_tv) return null;
- const ty = (decl.val.castTag(.ty) orelse return null).data;
- switch (ty.tag()) {
- .@"struct" => {
- const struct_obj = ty.castTag(.@"struct").?.data;
- return &struct_obj.namespace;
- },
- .enum_full, .enum_nonexhaustive => {
- const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
- return &enum_obj.namespace;
- },
- .empty_struct => {
- return ty.castTag(.empty_struct).?.data;
- },
- .@"opaque" => {
- const opaque_obj = ty.cast(Type.Payload.Opaque).?.data;
- return &opaque_obj.namespace;
- },
- .@"union", .union_safety_tagged, .union_tagged => {
- const union_obj = ty.cast(Type.Payload.Union).?.data;
- return &union_obj.namespace;
+ pub fn getOwnedInnerNamespaceIndex(decl: Decl, mod: *Module) Namespace.OptionalIndex {
+ if (!decl.owns_tv) return .none;
+ return switch (decl.val.ip_index) {
+ .empty_struct_type => .none,
+ .none => .none,
+ else => switch (mod.intern_pool.indexToKey(decl.val.toIntern())) {
+ .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
+ .struct_type => |struct_type| struct_type.namespace,
+ .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(),
+ .enum_type => |enum_type| enum_type.namespace,
+ else => .none,
},
+ };
+ }
- else => return null,
- }
+ /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer.
+ pub fn getOwnedInnerNamespace(decl: Decl, mod: *Module) ?*Namespace {
+ return mod.namespacePtrUnwrap(decl.getOwnedInnerNamespaceIndex(mod));
}
pub fn dump(decl: *Decl) void {
const loc = std.zig.findLineColumn(decl.scope.source.bytes, decl.src);
- std.debug.print("{s}:{d}:{d} name={s} status={s}", .{
+ std.debug.print("{s}:{d}:{d} name={d} status={s}", .{
decl.scope.sub_file_path,
loc.line + 1,
loc.column + 1,
- mem.sliceTo(decl.name, 0),
+ @enumToInt(decl.name),
@tagName(decl.analysis),
});
if (decl.has_tv) {
@@ -900,8 +840,8 @@ pub const Decl = struct {
std.debug.print("\n", .{});
}
- pub fn getFileScope(decl: Decl) *File {
- return decl.src_namespace.file_scope;
+ pub fn getFileScope(decl: Decl, mod: *Module) *File {
+ return mod.namespacePtr(decl.src_namespace).file_scope;
}
pub fn removeDependant(decl: *Decl, other: Decl.Index) void {
@@ -912,25 +852,29 @@ pub const Decl = struct {
assert(decl.dependencies.swapRemove(other));
}
- pub fn isExtern(decl: Decl) bool {
+ pub fn isExtern(decl: Decl, mod: *Module) bool {
assert(decl.has_tv);
- return switch (decl.val.tag()) {
- .extern_fn => true,
- .variable => decl.val.castTag(.variable).?.data.init.tag() == .unreachable_value,
+ return switch (mod.intern_pool.indexToKey(decl.val.toIntern())) {
+ .variable => |variable| variable.is_extern,
+ .extern_func => true,
else => false,
};
}
- pub fn getAlignment(decl: Decl, target: Target) u32 {
+ pub fn getAlignment(decl: Decl, mod: *Module) u32 {
assert(decl.has_tv);
if (decl.@"align" != 0) {
// Explicit alignment.
return decl.@"align";
} else {
// Natural alignment.
- return decl.ty.abiAlignment(target);
+ return decl.ty.abiAlignment(mod);
}
}
+
+ pub fn intern(decl: *Decl, mod: *Module) Allocator.Error!void {
+ decl.val = (try decl.val.intern(decl.ty, mod)).toValue();
+ }
};
/// This state is attached to every Decl when Module emit_h is non-null.
@@ -938,38 +882,6 @@ pub const EmitH = struct {
fwd_decl: ArrayListUnmanaged(u8) = .{},
};
-/// Represents the data that an explicit error set syntax provides.
-pub const ErrorSet = struct {
- /// The Decl that corresponds to the error set itself.
- owner_decl: Decl.Index,
- /// The string bytes are stored in the owner Decl arena.
- /// These must be in sorted order. See sortNames.
- names: NameMap,
-
- pub const NameMap = std.StringArrayHashMapUnmanaged(void);
-
- pub fn srcLoc(self: ErrorSet, mod: *Module) SrcLoc {
- const owner_decl = mod.declPtr(self.owner_decl);
- return .{
- .file_scope = owner_decl.getFileScope(),
- .parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(0),
- };
- }
-
- /// sort the NameMap. This should be called whenever the map is modified.
- /// alloc should be the allocator used for the NameMap data.
- pub fn sortNames(names: *NameMap) void {
- const Context = struct {
- keys: [][]const u8,
- pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
- return std.mem.lessThan(u8, ctx.keys[a_index], ctx.keys[b_index]);
- }
- };
- names.sort(Context{ .keys = names.keys() });
- }
-};
-
pub const PropertyBoolean = enum { no, yes, unknown, wip };
/// Represents the data that a struct declaration provides.
@@ -977,7 +889,7 @@ pub const Struct = struct {
/// Set of field names in declaration order.
fields: Fields,
/// Represents the declarations inside this struct.
- namespace: Namespace,
+ namespace: Namespace.Index,
/// The Decl that corresponds to the struct itself.
owner_decl: Decl.Index,
/// Index of the struct_decl ZIR instruction.
@@ -989,7 +901,7 @@ pub const Struct = struct {
/// If the layout is packed, this is the backing integer type of the packed struct.
/// Whether zig chooses this type or the user specifies it, it is stored here.
/// This will be set to the noreturn type until status is `have_layout`.
- backing_int_ty: Type = Type.initTag(.noreturn),
+ backing_int_ty: Type = Type.noreturn,
status: enum {
none,
field_types_wip,
@@ -1011,15 +923,37 @@ pub const Struct = struct {
is_tuple: bool,
assumed_runtime_bits: bool = false,
- pub const Fields = std.StringArrayHashMapUnmanaged(Field);
+ pub const Index = enum(u32) {
+ _,
+
+ pub fn toOptional(i: Index) OptionalIndex {
+ return @intToEnum(OptionalIndex, @enumToInt(i));
+ }
+ };
+
+ pub const OptionalIndex = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub fn init(oi: ?Index) OptionalIndex {
+ return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+ }
+
+ pub fn unwrap(oi: OptionalIndex) ?Index {
+ if (oi == .none) return null;
+ return @intToEnum(Index, @enumToInt(oi));
+ }
+ };
+
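// Standalone sketch of the Index/OptionalIndex pattern this commit
// introduces for Struct, Union, Fn, InferredErrorSet, and Namespace
// handles: maxInt(u32) encodes "none", so the optional costs no bits
// beyond the 32-bit handle itself, unlike ?u32 which widens to 8 bytes.
const std = @import("std");

const Index = enum(u32) {
    _,
    fn toOptional(i: Index) OptionalIndex {
        return @intToEnum(OptionalIndex, @enumToInt(i));
    }
};

const OptionalIndex = enum(u32) {
    none = std.math.maxInt(u32),
    _,
    fn unwrap(oi: OptionalIndex) ?Index {
        if (oi == .none) return null;
        return @intToEnum(Index, @enumToInt(oi));
    }
};

test "none round-trips and stays 4 bytes" {
    const i = @intToEnum(Index, 7);
    try std.testing.expect(i.toOptional().unwrap().? == i);
    try std.testing.expect(OptionalIndex.none.unwrap() == null);
    try std.testing.expect(@sizeOf(OptionalIndex) == 4);
    try std.testing.expect(@sizeOf(?u32) == 8);
}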
+ pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field);
/// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl.
pub const Field = struct {
/// Uses `noreturn` to indicate `anytype`.
/// undefined until `status` is >= `have_field_types`.
ty: Type,
- /// Uses `unreachable_value` to indicate no default.
- default_val: Value,
+ /// Uses `none` to indicate no default.
+ default_val: InternPool.Index,
/// Zero means to use the ABI alignment of the type.
abi_align: u32,
/// undefined until `status` is `have_layout`.
@@ -1030,7 +964,7 @@ pub const Struct = struct {
/// Returns the field alignment. If the struct is packed, returns 0.
pub fn alignment(
field: Field,
- target: Target,
+ mod: *Module,
layout: std.builtin.Type.ContainerLayout,
) u32 {
if (field.abi_align != 0) {
@@ -1038,24 +972,26 @@ pub const Struct = struct {
return field.abi_align;
}
+ const target = mod.getTarget();
+
switch (layout) {
.Packed => return 0,
.Auto => {
if (target.ofmt == .c) {
- return alignmentExtern(field, target);
+ return alignmentExtern(field, mod);
} else {
- return field.ty.abiAlignment(target);
+ return field.ty.abiAlignment(mod);
}
},
- .Extern => return alignmentExtern(field, target),
+ .Extern => return alignmentExtern(field, mod),
}
}
- pub fn alignmentExtern(field: Field, target: Target) u32 {
+ pub fn alignmentExtern(field: Field, mod: *Module) u32 {
// This logic is duplicated in Type.abiAlignmentAdvanced.
- const ty_abi_align = field.ty.abiAlignment(target);
+ const ty_abi_align = field.ty.abiAlignment(mod);
- if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) {
+ if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
// The C ABI requires 128 bit integer fields of structs
// to be 16-bytes aligned.
return @max(ty_abi_align, 16);
@@ -1069,39 +1005,12 @@ pub const Struct = struct {
/// runtime version of the struct.
pub const omitted_field = std.math.maxInt(u32);
- pub fn getFullyQualifiedName(s: *Struct, mod: *Module) ![:0]u8 {
+ pub fn getFullyQualifiedName(s: *Struct, mod: *Module) !InternPool.NullTerminatedString {
return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
}
pub fn srcLoc(s: Struct, mod: *Module) SrcLoc {
- const owner_decl = mod.declPtr(s.owner_decl);
- return .{
- .file_scope = owner_decl.getFileScope(),
- .parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(0),
- };
- }
-
- pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc {
- @setCold(true);
- const owner_decl = mod.declPtr(s.owner_decl);
- const file = owner_decl.getFileScope();
- const tree = file.getTree(mod.gpa) catch |err| {
- // In this case we emit a warning + a less precise source location.
- log.warn("unable to load {s}: {s}", .{
- file.sub_file_path, @errorName(err),
- });
- return s.srcLoc(mod);
- };
- const node = owner_decl.relativeToNodeIndex(0);
-
- var buf: [2]Ast.Node.Index = undefined;
- if (tree.fullContainerDecl(&buf, node)) |container_decl| {
- return queryFieldSrc(tree.*, query, file, container_decl);
- } else {
- // This struct was generated using @Type
- return s.srcLoc(mod);
- }
+ return mod.declPtr(s.owner_decl).srcLoc(mod);
}
pub fn haveFieldTypes(s: Struct) bool {
@@ -1132,7 +1041,7 @@ pub const Struct = struct {
};
}
- pub fn packedFieldBitOffset(s: Struct, target: Target, index: usize) u16 {
+ pub fn packedFieldBitOffset(s: Struct, mod: *Module, index: usize) u16 {
assert(s.layout == .Packed);
assert(s.haveLayout());
var bit_sum: u64 = 0;
@@ -1140,12 +1049,13 @@ pub const Struct = struct {
if (i == index) {
return @intCast(u16, bit_sum);
}
- bit_sum += field.ty.bitSize(target);
+ bit_sum += field.ty.bitSize(mod);
}
unreachable; // index out of bounds
}
pub const RuntimeFieldIterator = struct {
+ module: *Module,
struct_obj: *const Struct,
index: u32 = 0,
@@ -1155,6 +1065,7 @@ pub const Struct = struct {
};
pub fn next(it: *RuntimeFieldIterator) ?FieldAndIndex {
+ const mod = it.module;
while (true) {
var i = it.index;
it.index += 1;
@@ -1167,122 +1078,21 @@ pub const Struct = struct {
}
const field = it.struct_obj.fields.values()[i];
- if (!field.is_comptime and field.ty.hasRuntimeBits()) {
+ if (!field.is_comptime and field.ty.hasRuntimeBits(mod)) {
return FieldAndIndex{ .index = i, .field = field };
}
}
}
};
- pub fn runtimeFieldIterator(s: *const Struct) RuntimeFieldIterator {
- return .{ .struct_obj = s };
- }
-};
-
-/// Represents the data that an enum declaration provides, when the fields
-/// are auto-numbered, and there are no declarations. The integer tag type
-/// is inferred to be the smallest power of two unsigned int that fits
-/// the number of fields.
-pub const EnumSimple = struct {
- /// The Decl that corresponds to the enum itself.
- owner_decl: Decl.Index,
- /// Set of field names in declaration order.
- fields: NameMap,
-
- pub const NameMap = EnumFull.NameMap;
-
- pub fn srcLoc(self: EnumSimple, mod: *Module) SrcLoc {
- const owner_decl = mod.declPtr(self.owner_decl);
+ pub fn runtimeFieldIterator(s: *const Struct, module: *Module) RuntimeFieldIterator {
return .{
- .file_scope = owner_decl.getFileScope(),
- .parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(0),
+ .struct_obj = s,
+ .module = module,
};
}
};
-/// Represents the data that an enum declaration provides, when there are no
-/// declarations. However an integer tag type is provided, and the enum tag values
-/// are explicitly provided.
-pub const EnumNumbered = struct {
- /// The Decl that corresponds to the enum itself.
- owner_decl: Decl.Index,
- /// An integer type which is used for the numerical value of the enum.
- /// Whether zig chooses this type or the user specifies it, it is stored here.
- tag_ty: Type,
- /// Set of field names in declaration order.
- fields: NameMap,
- /// Maps integer tag value to field index.
- /// Entries are in declaration order, same as `fields`.
- /// If this hash map is empty, it means the enum tags are auto-numbered.
- values: ValueMap,
-
- pub const NameMap = EnumFull.NameMap;
- pub const ValueMap = EnumFull.ValueMap;
-
- pub fn srcLoc(self: EnumNumbered, mod: *Module) SrcLoc {
- const owner_decl = mod.declPtr(self.owner_decl);
- return .{
- .file_scope = owner_decl.getFileScope(),
- .parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(0),
- };
- }
-};
-
-/// Represents the data that an enum declaration provides, when there is
-/// at least one tag value explicitly specified, or at least one declaration.
-pub const EnumFull = struct {
- /// The Decl that corresponds to the enum itself.
- owner_decl: Decl.Index,
- /// An integer type which is used for the numerical value of the enum.
- /// Whether zig chooses this type or the user specifies it, it is stored here.
- tag_ty: Type,
- /// Set of field names in declaration order.
- fields: NameMap,
- /// Maps integer tag value to field index.
- /// Entries are in declaration order, same as `fields`.
- /// If this hash map is empty, it means the enum tags are auto-numbered.
- values: ValueMap,
- /// Represents the declarations inside this enum.
- namespace: Namespace,
- /// true if zig inferred this tag type, false if user specified it
- tag_ty_inferred: bool,
-
- pub const NameMap = std.StringArrayHashMapUnmanaged(void);
- pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false);
-
- pub fn srcLoc(self: EnumFull, mod: *Module) SrcLoc {
- const owner_decl = mod.declPtr(self.owner_decl);
- return .{
- .file_scope = owner_decl.getFileScope(),
- .parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(0),
- };
- }
-
- pub fn fieldSrcLoc(e: EnumFull, mod: *Module, query: FieldSrcQuery) SrcLoc {
- @setCold(true);
- const owner_decl = mod.declPtr(e.owner_decl);
- const file = owner_decl.getFileScope();
- const tree = file.getTree(mod.gpa) catch |err| {
- // In this case we emit a warning + a less precise source location.
- log.warn("unable to load {s}: {s}", .{
- file.sub_file_path, @errorName(err),
- });
- return e.srcLoc(mod);
- };
- const node = owner_decl.relativeToNodeIndex(0);
- var buf: [2]Ast.Node.Index = undefined;
- if (tree.fullContainerDecl(&buf, node)) |container_decl| {
- return queryFieldSrc(tree.*, query, file, container_decl);
- } else {
- // This enum was generated using @Type
- return e.srcLoc(mod);
- }
- }
-};
-
pub const Union = struct {
/// An enum type which is used for the tag of the union.
/// This type is created even for untagged unions, even when the memory
@@ -1293,7 +1103,7 @@ pub const Union = struct {
/// Set of field names in declaration order.
fields: Fields,
/// Represents the declarations inside this union.
- namespace: Namespace,
+ namespace: Namespace.Index,
/// The Decl that corresponds to the union itself.
owner_decl: Decl.Index,
/// Index of the union_decl ZIR instruction.
@@ -1314,6 +1124,28 @@ pub const Union = struct {
requires_comptime: PropertyBoolean = .unknown,
assumed_runtime_bits: bool = false,
+ pub const Index = enum(u32) {
+ _,
+
+ pub fn toOptional(i: Index) OptionalIndex {
+ return @intToEnum(OptionalIndex, @enumToInt(i));
+ }
+ };
+
+ pub const OptionalIndex = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub fn init(oi: ?Index) OptionalIndex {
+ return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+ }
+
+ pub fn unwrap(oi: OptionalIndex) ?Index {
+ if (oi == .none) return null;
+ return @intToEnum(Index, @enumToInt(oi));
+ }
+ };
+
pub const Field = struct {
/// undefined until `status` is `have_field_types` or `have_layout`.
ty: Type,
@@ -1323,52 +1155,30 @@ pub const Union = struct {
/// Returns the field alignment, assuming the union is not packed.
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
/// Prefer to call that function instead of this one during Sema.
- pub fn normalAlignment(field: Field, target: Target) u32 {
+ pub fn normalAlignment(field: Field, mod: *Module) u32 {
if (field.abi_align == 0) {
- return field.ty.abiAlignment(target);
+ return field.ty.abiAlignment(mod);
} else {
return field.abi_align;
}
}
};
- pub const Fields = std.StringArrayHashMapUnmanaged(Field);
+ pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field);
- pub fn getFullyQualifiedName(s: *Union, mod: *Module) ![:0]u8 {
+ pub fn getFullyQualifiedName(s: *Union, mod: *Module) !InternPool.NullTerminatedString {
return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
}
pub fn srcLoc(self: Union, mod: *Module) SrcLoc {
const owner_decl = mod.declPtr(self.owner_decl);
return .{
- .file_scope = owner_decl.getFileScope(),
+ .file_scope = owner_decl.getFileScope(mod),
.parent_decl_node = owner_decl.src_node,
.lazy = LazySrcLoc.nodeOffset(0),
};
}
- pub fn fieldSrcLoc(u: Union, mod: *Module, query: FieldSrcQuery) SrcLoc {
- @setCold(true);
- const owner_decl = mod.declPtr(u.owner_decl);
- const file = owner_decl.getFileScope();
- const tree = file.getTree(mod.gpa) catch |err| {
- // In this case we emit a warning + a less precise source location.
- log.warn("unable to load {s}: {s}", .{
- file.sub_file_path, @errorName(err),
- });
- return u.srcLoc(mod);
- };
- const node = owner_decl.relativeToNodeIndex(0);
-
- var buf: [2]Ast.Node.Index = undefined;
- if (tree.fullContainerDecl(&buf, node)) |container_decl| {
- return queryFieldSrc(tree.*, query, file, container_decl);
- } else {
- // This union was generated using @Type
- return u.srcLoc(mod);
- }
- }
-
pub fn haveFieldTypes(u: Union) bool {
return switch (u.status) {
.none,
@@ -1383,22 +1193,22 @@ pub const Union = struct {
};
}
- pub fn hasAllZeroBitFieldTypes(u: Union) bool {
+ pub fn hasAllZeroBitFieldTypes(u: Union, mod: *Module) bool {
assert(u.haveFieldTypes());
for (u.fields.values()) |field| {
- if (field.ty.hasRuntimeBits()) return false;
+ if (field.ty.hasRuntimeBits(mod)) return false;
}
return true;
}
- pub fn mostAlignedField(u: Union, target: Target) u32 {
+ pub fn mostAlignedField(u: Union, mod: *Module) u32 {
assert(u.haveFieldTypes());
var most_alignment: u32 = 0;
var most_index: usize = undefined;
for (u.fields.values(), 0..) |field, i| {
- if (!field.ty.hasRuntimeBits()) continue;
+ if (!field.ty.hasRuntimeBits(mod)) continue;
- const field_align = field.normalAlignment(target);
+ const field_align = field.normalAlignment(mod);
if (field_align > most_alignment) {
most_alignment = field_align;
most_index = i;
@@ -1408,20 +1218,20 @@ pub const Union = struct {
}
/// Returns 0 if the union is represented with 0 bits at runtime.
- pub fn abiAlignment(u: Union, target: Target, have_tag: bool) u32 {
+ pub fn abiAlignment(u: Union, mod: *Module, have_tag: bool) u32 {
var max_align: u32 = 0;
- if (have_tag) max_align = u.tag_ty.abiAlignment(target);
+ if (have_tag) max_align = u.tag_ty.abiAlignment(mod);
for (u.fields.values()) |field| {
- if (!field.ty.hasRuntimeBits()) continue;
+ if (!field.ty.hasRuntimeBits(mod)) continue;
- const field_align = field.normalAlignment(target);
+ const field_align = field.normalAlignment(mod);
max_align = @max(max_align, field_align);
}
return max_align;
}
- pub fn abiSize(u: Union, target: Target, have_tag: bool) u64 {
- return u.getLayout(target, have_tag).abi_size;
+ pub fn abiSize(u: Union, mod: *Module, have_tag: bool) u64 {
+ return u.getLayout(mod, have_tag).abi_size;
}
pub const Layout = struct {
@@ -1451,7 +1261,7 @@ pub const Union = struct {
};
}
- pub fn getLayout(u: Union, target: Target, have_tag: bool) Layout {
+ pub fn getLayout(u: Union, mod: *Module, have_tag: bool) Layout {
assert(u.haveLayout());
var most_aligned_field: u32 = undefined;
var most_aligned_field_size: u64 = undefined;
@@ -1460,16 +1270,16 @@ pub const Union = struct {
var payload_align: u32 = 0;
const fields = u.fields.values();
for (fields, 0..) |field, i| {
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = a: {
if (field.abi_align == 0) {
- break :a field.ty.abiAlignment(target);
+ break :a field.ty.abiAlignment(mod);
} else {
break :a field.abi_align;
}
};
- const field_size = field.ty.abiSize(target);
+ const field_size = field.ty.abiSize(mod);
if (field_size > payload_size) {
payload_size = field_size;
biggest_field = @intCast(u32, i);
@@ -1481,7 +1291,7 @@ pub const Union = struct {
}
}
payload_align = @max(payload_align, 1);
- if (!have_tag or !u.tag_ty.hasRuntimeBits()) {
+ if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) {
return .{
.abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align),
.abi_align = payload_align,
@@ -1497,8 +1307,8 @@ pub const Union = struct {
}
// Put the tag before or after the payload depending on which one's
// alignment is greater.
- const tag_size = u.tag_ty.abiSize(target);
- const tag_align = @max(1, u.tag_ty.abiAlignment(target));
+ const tag_size = u.tag_ty.abiSize(mod);
+ const tag_align = @max(1, u.tag_ty.abiAlignment(mod));
var size: u64 = 0;
var padding: u32 = undefined;
if (tag_align >= payload_align) {
@@ -1533,26 +1343,6 @@ pub const Union = struct {
}
};
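// Standalone sketch (plain integers, no Module/Type machinery) of the
// layout rule getLayout implements above: the payload is sized by the
// biggest field, and the tag goes before or after the payload depending
// on whose alignment is greater, with padding so both stay aligned.
const std = @import("std");

fn taggedUnionAbiSize(payload_size: u64, payload_align: u64, tag_size: u64, tag_align: u64) u64 {
    var size: u64 = 0;
    if (tag_align >= payload_align) {
        // Tag first: pad it up to the payload's alignment.
        size += tag_size;
        size = std.mem.alignForwardGeneric(u64, size, payload_align);
        size += payload_size;
        size = std.mem.alignForwardGeneric(u64, size, @max(tag_align, payload_align));
    } else {
        // Payload first: pad it up to the tag's alignment.
        size += payload_size;
        size = std.mem.alignForwardGeneric(u64, size, tag_align);
        size += tag_size;
        size = std.mem.alignForwardGeneric(u64, size, payload_align);
    }
    return size;
}

test "u64 payload with a 1-byte tag rounds up to 16" {
    try std.testing.expectEqual(@as(u64, 16), taggedUnionAbiSize(8, 8, 1, 1));
}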
-pub const Opaque = struct {
- /// The Decl that corresponds to the opaque itself.
- owner_decl: Decl.Index,
- /// Represents the declarations inside this opaque.
- namespace: Namespace,
-
- pub fn srcLoc(self: Opaque, mod: *Module) SrcLoc {
- const owner_decl = mod.declPtr(self.owner_decl);
- return .{
- .file_scope = owner_decl.getFileScope(),
- .parent_decl_node = owner_decl.src_node,
- .lazy = LazySrcLoc.nodeOffset(0),
- };
- }
-
- pub fn getFullyQualifiedName(s: *Opaque, mod: *Module) ![:0]u8 {
- return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
- }
-};
-
/// Some extern function struct memory is owned by the Decl's TypedValue.Managed
/// arena allocator.
pub const ExternFn = struct {
@@ -1630,12 +1420,27 @@ pub const Fn = struct {
is_noinline: bool,
calls_or_awaits_errorable_fn: bool = false,
- /// Any inferred error sets that this function owns, both its own inferred error set and
- /// inferred error sets of any inline/comptime functions called. Not to be confused
- /// with inferred error sets of generic instantiations of this function, which are
- /// *not* tracked here - they are tracked in the new `Fn` object created for the
- /// instantiations.
- inferred_error_sets: InferredErrorSetList = .{},
+ pub const Index = enum(u32) {
+ _,
+
+ pub fn toOptional(i: Index) OptionalIndex {
+ return @intToEnum(OptionalIndex, @enumToInt(i));
+ }
+ };
+
+ pub const OptionalIndex = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub fn init(oi: ?Index) OptionalIndex {
+ return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+ }
+
+ pub fn unwrap(oi: OptionalIndex) ?Index {
+ if (oi == .none) return null;
+ return @intToEnum(Index, @enumToInt(oi));
+ }
+ };
pub const Analysis = enum {
/// This function has not yet undergone analysis, because we have not
@@ -1662,16 +1467,16 @@ pub const Fn = struct {
/// or comptime functions.
pub const InferredErrorSet = struct {
/// The function from which this error set originates.
- func: *Fn,
+ func: Fn.Index,
/// All currently known errors that this error set contains. This includes
/// direct additions via `return error.Foo;`, and possibly also errors that
/// are returned from any dependent functions. When the inferred error set is
/// fully resolved, this map contains all the errors that the function might return.
- errors: ErrorSet.NameMap = .{},
+ errors: NameMap = .{},
/// Other inferred error sets which this inferred error set should include.
- inferred_error_sets: std.AutoArrayHashMapUnmanaged(*InferredErrorSet, void) = .{},
+ inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{},
/// Whether the function returned anyerror. This is true if either of
/// the dependent functions returns anyerror.
@@ -1681,52 +1486,57 @@ pub const Fn = struct {
/// can skip resolving any dependents of this inferred error set.
is_resolved: bool = false,
- pub fn addErrorSet(self: *InferredErrorSet, gpa: Allocator, err_set_ty: Type) !void {
- switch (err_set_ty.tag()) {
- .error_set => {
- const names = err_set_ty.castTag(.error_set).?.data.names.keys();
- for (names) |name| {
- try self.errors.put(gpa, name, {});
- }
- },
- .error_set_single => {
- const name = err_set_ty.castTag(.error_set_single).?.data;
- try self.errors.put(gpa, name, {});
- },
- .error_set_inferred => {
- const ies = err_set_ty.castTag(.error_set_inferred).?.data;
- try self.inferred_error_sets.put(gpa, ies, {});
- },
- .error_set_merged => {
- const names = err_set_ty.castTag(.error_set_merged).?.data.keys();
- for (names) |name| {
- try self.errors.put(gpa, name, {});
- }
- },
- .anyerror => {
+ pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);
+
+ pub const Index = enum(u32) {
+ _,
+
+ pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
+ return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(i));
+ }
+ };
+
+ pub const OptionalIndex = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
+ return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(oi orelse return .none));
+ }
+
+ pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index {
+ if (oi == .none) return null;
+ return @intToEnum(InferredErrorSet.Index, @enumToInt(oi));
+ }
+ };
+
+ pub fn addErrorSet(
+ self: *InferredErrorSet,
+ err_set_ty: Type,
+ ip: *InternPool,
+ gpa: Allocator,
+ ) !void {
+ switch (err_set_ty.toIntern()) {
+ .anyerror_type => {
self.is_anyerror = true;
},
- else => unreachable,
+ else => switch (ip.indexToKey(err_set_ty.toIntern())) {
+ .error_set_type => |error_set_type| {
+ for (error_set_type.names) |name| {
+ try self.errors.put(gpa, name, {});
+ }
+ },
+ .inferred_error_set_type => |ies_index| {
+ try self.inferred_error_sets.put(gpa, ies_index, {});
+ },
+ else => unreachable,
+ },
}
}
};
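// Standalone sketch of what addErrorSet accumulates (u32 stands in for
// an interned error name): concrete names are merged into `errors`
// immediately and deduplicated by the map, other inferred sets would
// only be queued for later resolution, and anyerror short-circuits by
// setting a flag, as in the switch above.
const std = @import("std");

const ToyInferredErrorSet = struct {
    errors: std.AutoArrayHashMapUnmanaged(u32, void) = .{},
    is_anyerror: bool = false,

    fn addNames(self: *ToyInferredErrorSet, gpa: std.mem.Allocator, names: []const u32) !void {
        for (names) |name| try self.errors.put(gpa, name, {});
    }
};

test "merged names are deduplicated" {
    const gpa = std.testing.allocator;
    var ies = ToyInferredErrorSet{};
    defer ies.errors.deinit(gpa);
    try ies.addNames(gpa, &.{ 1, 2, 2, 3 });
    try ies.addNames(gpa, &.{ 3, 4 });
    try std.testing.expectEqual(@as(usize, 4), ies.errors.count());
}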
- pub const InferredErrorSetList = std.SinglyLinkedList(InferredErrorSet);
- pub const InferredErrorSetListNode = InferredErrorSetList.Node;
-
- pub fn deinit(func: *Fn, gpa: Allocator) void {
- var it = func.inferred_error_sets.first;
- while (it) |node| {
- const next = node.next;
- node.data.errors.deinit(gpa);
- node.data.inferred_error_sets.deinit(gpa);
- gpa.destroy(node);
- it = next;
- }
- }
-
pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool {
- const file = mod.declPtr(func.owner_decl).getFileScope();
+ const file = mod.declPtr(func.owner_decl).getFileScope(mod);
const tags = file.zir.instructions.items(.tag);
@@ -1741,7 +1551,7 @@ pub const Fn = struct {
}
pub fn getParamName(func: Fn, mod: *Module, index: u32) [:0]const u8 {
- const file = mod.declPtr(func.owner_decl).getFileScope();
+ const file = mod.declPtr(func.owner_decl).getFileScope(mod);
const tags = file.zir.instructions.items(.tag);
const data = file.zir.instructions.items(.data);
@@ -1764,7 +1574,7 @@ pub const Fn = struct {
pub fn hasInferredErrorSet(func: Fn, mod: *Module) bool {
const owner_decl = mod.declPtr(func.owner_decl);
- const zir = owner_decl.getFileScope().zir;
+ const zir = owner_decl.getFileScope(mod).zir;
const zir_tags = zir.instructions.items(.tag);
switch (zir_tags[func.zir_body_inst]) {
.func => return false,
@@ -1779,46 +1589,24 @@ pub const Fn = struct {
}
};
-pub const Var = struct {
- /// if is_extern == true this is undefined
- init: Value,
- owner_decl: Decl.Index,
-
- /// Library name if specified.
- /// For example `extern "c" var stderrp = ...` would have 'c' as library name.
- /// Allocated with Module's allocator; outlives the ZIR code.
- lib_name: ?[*:0]const u8,
-
- is_extern: bool,
- is_mutable: bool,
- is_threadlocal: bool,
- is_weak_linkage: bool,
-
- pub fn deinit(variable: *Var, gpa: Allocator) void {
- if (variable.lib_name) |lib_name| {
- gpa.free(mem.sliceTo(lib_name, 0));
- }
- }
-};
-
pub const DeclAdapter = struct {
mod: *Module,
- pub fn hash(self: @This(), s: []const u8) u32 {
+ pub fn hash(self: @This(), s: InternPool.NullTerminatedString) u32 {
_ = self;
- return @truncate(u32, std.hash.Wyhash.hash(0, s));
+ return std.hash.uint32(@enumToInt(s));
}
- pub fn eql(self: @This(), a: []const u8, b_decl_index: Decl.Index, b_index: usize) bool {
+ pub fn eql(self: @This(), a: InternPool.NullTerminatedString, b_decl_index: Decl.Index, b_index: usize) bool {
_ = b_index;
const b_decl = self.mod.declPtr(b_decl_index);
- return mem.eql(u8, a, mem.sliceTo(b_decl.name, 0));
+ return a == b_decl.name;
}
};
/// The container that structs, enums, unions, and opaques have.
pub const Namespace = struct {
- parent: ?*Namespace,
+ parent: OptionalIndex,
file_scope: *File,
/// Will be a struct, enum, union, or opaque.
ty: Type,
@@ -1836,21 +1624,41 @@ pub const Namespace = struct {
/// Value is whether the usingnamespace decl is marked `pub`.
usingnamespace_set: std.AutoHashMapUnmanaged(Decl.Index, bool) = .{},
+ pub const Index = enum(u32) {
+ _,
+
+ pub fn toOptional(i: Index) OptionalIndex {
+ return @intToEnum(OptionalIndex, @enumToInt(i));
+ }
+ };
+
+ pub const OptionalIndex = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub fn init(oi: ?Index) OptionalIndex {
+ return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+ }
+
+ pub fn unwrap(oi: OptionalIndex) ?Index {
+ if (oi == .none) return null;
+ return @intToEnum(Index, @enumToInt(oi));
+ }
+ };
+
const DeclContext = struct {
module: *Module,
pub fn hash(ctx: @This(), decl_index: Decl.Index) u32 {
const decl = ctx.module.declPtr(decl_index);
- return @truncate(u32, std.hash.Wyhash.hash(0, mem.sliceTo(decl.name, 0)));
+ return std.hash.uint32(@enumToInt(decl.name));
}
pub fn eql(ctx: @This(), a_decl_index: Decl.Index, b_decl_index: Decl.Index, b_index: usize) bool {
_ = b_index;
const a_decl = ctx.module.declPtr(a_decl_index);
const b_decl = ctx.module.declPtr(b_decl_index);
- const a_name = mem.sliceTo(a_decl.name, 0);
- const b_name = mem.sliceTo(b_decl.name, 0);
- return mem.eql(u8, a_name, b_name);
+ return a_decl.name == b_decl.name;
}
};
@@ -1862,8 +1670,6 @@ pub const Namespace = struct {
pub fn destroyDecls(ns: *Namespace, mod: *Module) void {
const gpa = mod.gpa;
- log.debug("destroyDecls {*}", .{ns});
-
var decls = ns.decls;
ns.decls = .{};
@@ -1889,8 +1695,6 @@ pub const Namespace = struct {
) !void {
const gpa = mod.gpa;
- log.debug("deleteAllDecls {*}", .{ns});
-
var decls = ns.decls;
ns.decls = .{};
@@ -1919,46 +1723,38 @@ pub const Namespace = struct {
pub fn renderFullyQualifiedName(
ns: Namespace,
mod: *Module,
- name: []const u8,
+ name: InternPool.NullTerminatedString,
writer: anytype,
) @TypeOf(writer).Error!void {
- if (ns.parent) |parent| {
- const decl_index = ns.getDeclIndex();
- const decl = mod.declPtr(decl_index);
- try parent.renderFullyQualifiedName(mod, mem.sliceTo(decl.name, 0), writer);
+ if (ns.parent.unwrap()) |parent| {
+ const decl = mod.declPtr(ns.getDeclIndex(mod));
+ try mod.namespacePtr(parent).renderFullyQualifiedName(mod, decl.name, writer);
} else {
try ns.file_scope.renderFullyQualifiedName(writer);
}
- if (name.len != 0) {
- try writer.writeAll(".");
- try writer.writeAll(name);
- }
+ if (name != .empty) try writer.print(".{}", .{name.fmt(&mod.intern_pool)});
}
/// This renders e.g. "std/fs.zig:Dir.OpenOptions"
pub fn renderFullyQualifiedDebugName(
ns: Namespace,
mod: *Module,
- name: []const u8,
+ name: InternPool.NullTerminatedString,
writer: anytype,
) @TypeOf(writer).Error!void {
- var separator_char: u8 = '.';
- if (ns.parent) |parent| {
- const decl_index = ns.getDeclIndex();
- const decl = mod.declPtr(decl_index);
- try parent.renderFullyQualifiedDebugName(mod, mem.sliceTo(decl.name, 0), writer);
- } else {
+ const separator_char: u8 = if (ns.parent.unwrap()) |parent| sep: {
+ const decl = mod.declPtr(ns.getDeclIndex(mod));
+ try mod.namespacePtr(parent).renderFullyQualifiedDebugName(mod, decl.name, writer);
+ break :sep '.';
+ } else sep: {
try ns.file_scope.renderFullyQualifiedDebugName(writer);
- separator_char = ':';
- }
- if (name.len != 0) {
- try writer.writeByte(separator_char);
- try writer.writeAll(name);
- }
+ break :sep ':';
+ };
+ if (name != .empty) try writer.print("{c}{}", .{ separator_char, name.fmt(&mod.intern_pool) });
}
- pub fn getDeclIndex(ns: Namespace) Decl.Index {
- return ns.ty.getOwnerDecl();
+ pub fn getDeclIndex(ns: Namespace, mod: *Module) Decl.Index {
+ return ns.ty.getOwnerDecl(mod);
}
};
@@ -2140,11 +1936,11 @@ pub const File = struct {
};
}
- pub fn fullyQualifiedNameZ(file: File, gpa: Allocator) ![:0]u8 {
- var buf = std.ArrayList(u8).init(gpa);
- defer buf.deinit();
- try file.renderFullyQualifiedName(buf.writer());
- return buf.toOwnedSliceSentinel(0);
+ pub fn fullyQualifiedName(file: File, mod: *Module) !InternPool.NullTerminatedString {
+ const ip = &mod.intern_pool;
+ const start = ip.string_bytes.items.len;
+ try file.renderFullyQualifiedName(ip.string_bytes.writer(mod.gpa));
+ return ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start);
}
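
`fullyQualifiedName` now streams the rendered name directly into the pool's `string_bytes` and interns the trailing span, skipping the temporary buffer the old `fullyQualifiedNameZ` allocated. The same trick works for any formatted name (a sketch; `base` and `i` are hypothetical):

    // Sketch: intern a formatted string with no intermediate allocation.
    const ip = &mod.intern_pool;
    const start = ip.string_bytes.items.len;
    try ip.string_bytes.writer(mod.gpa).print("{s}_{d}", .{ base, i });
    const name = try ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start);
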
/// Returns the full path to this file relative to its package.
@@ -2268,7 +2064,7 @@ pub const ErrorMsg = struct {
reference_trace: []Trace = &.{},
pub const Trace = struct {
- decl: ?[*:0]const u8,
+ decl: InternPool.OptionalNullTerminatedString,
src_loc: SrcLoc,
hidden: u32 = 0,
};
@@ -2281,7 +2077,7 @@ pub const ErrorMsg = struct {
) !*ErrorMsg {
const err_msg = try gpa.create(ErrorMsg);
errdefer gpa.destroy(err_msg);
- err_msg.* = try init(gpa, src_loc, format, args);
+ err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args);
return err_msg;
}
@@ -3287,7 +3083,7 @@ pub const LazySrcLoc = union(enum) {
}
/// Upgrade to a `SrcLoc` based on the `Decl` provided.
- pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl) SrcLoc {
+ pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl, mod: *Module) SrcLoc {
return switch (lazy) {
.unneeded,
.entire_file,
@@ -3295,7 +3091,7 @@ pub const LazySrcLoc = union(enum) {
.token_abs,
.node_abs,
=> .{
- .file_scope = decl.getFileScope(),
+ .file_scope = decl.getFileScope(mod),
.parent_decl_node = 0,
.lazy = lazy,
},
@@ -3361,7 +3157,7 @@ pub const LazySrcLoc = union(enum) {
.for_input,
.for_capture_from_input,
=> .{
- .file_scope = decl.getFileScope(),
+ .file_scope = decl.getFileScope(mod),
.parent_decl_node = decl.src_node,
.lazy = lazy,
},
@@ -3391,6 +3187,12 @@ pub const CompileError = error{
ComptimeBreak,
};
+pub fn init(mod: *Module) !void {
+ const gpa = mod.gpa;
+ try mod.intern_pool.init(gpa);
+ try mod.global_error_set.put(gpa, .empty, {});
+}
+
pub fn deinit(mod: *Module) void {
const gpa = mod.gpa;
@@ -3489,42 +3291,29 @@ pub fn deinit(mod: *Module) void {
}
mod.export_owners.deinit(gpa);
- {
- var it = mod.global_error_set.keyIterator();
- while (it.next()) |key| {
- gpa.free(key.*);
- }
- mod.global_error_set.deinit(gpa);
- }
+ mod.global_error_set.deinit(gpa);
- mod.error_name_list.deinit(gpa);
mod.test_functions.deinit(gpa);
mod.align_stack_fns.deinit(gpa);
mod.monomorphed_funcs.deinit(gpa);
- {
- var it = mod.memoized_calls.iterator();
- while (it.next()) |entry| {
- gpa.free(entry.key_ptr.args);
- entry.value_ptr.arena.promote(gpa).deinit();
- }
- mod.memoized_calls.deinit(gpa);
- }
-
mod.decls_free_list.deinit(gpa);
mod.allocated_decls.deinit(gpa);
mod.global_assembly.deinit(gpa);
mod.reference_table.deinit(gpa);
- mod.string_literal_table.deinit(gpa);
- mod.string_literal_bytes.deinit(gpa);
+ mod.namespaces_free_list.deinit(gpa);
+ mod.allocated_namespaces.deinit(gpa);
+
+ mod.memoized_decls.deinit(gpa);
+ mod.intern_pool.deinit(gpa);
+ mod.tmp_hack_arena.deinit();
}
pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
const gpa = mod.gpa;
{
const decl = mod.declPtr(decl_index);
- log.debug("destroy {*} ({s})", .{ decl, decl.name });
_ = mod.test_functions.swapRemove(decl_index);
if (decl.deletion_flag) {
assert(mod.deletion_set.swapRemove(decl_index));
@@ -3533,14 +3322,15 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
gpa.free(kv.value);
}
if (decl.has_tv) {
- if (decl.getInnerNamespace()) |namespace| {
- namespace.destroyDecls(mod);
+ if (decl.getOwnedInnerNamespaceIndex(mod).unwrap()) |i| {
+ mod.namespacePtr(i).destroyDecls(mod);
+ mod.destroyNamespace(i);
}
}
+ if (decl.src_scope) |scope| scope.decRef(gpa);
decl.clearValues(mod);
decl.dependants.deinit(gpa);
decl.dependencies.deinit(gpa);
- decl.clearName(gpa);
decl.* = undefined;
}
mod.decls_free_list.append(gpa, decl_index) catch {
@@ -3554,24 +3344,55 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
}
}
-pub fn declPtr(mod: *Module, decl_index: Decl.Index) *Decl {
- return mod.allocated_decls.at(@enumToInt(decl_index));
+pub fn declPtr(mod: *Module, index: Decl.Index) *Decl {
+ return mod.allocated_decls.at(@enumToInt(index));
+}
+
+pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace {
+ return mod.allocated_namespaces.at(@enumToInt(index));
+}
+
+pub fn unionPtr(mod: *Module, index: Union.Index) *Union {
+ return mod.intern_pool.unionPtr(index);
+}
+
+pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
+ return mod.intern_pool.structPtr(index);
+}
+
+pub fn funcPtr(mod: *Module, index: Fn.Index) *Fn {
+ return mod.intern_pool.funcPtr(index);
+}
+
+pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.InferredErrorSet {
+ return mod.intern_pool.inferredErrorSetPtr(index);
+}
+
+pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace {
+ return mod.namespacePtr(index.unwrap() orelse return null);
+}
+
+/// This one accepts an index from the InternPool and asserts that it is not
+/// the anonymous empty struct type.
+pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct {
+ return mod.structPtr(index.unwrap() orelse return null);
+}
+
+pub fn funcPtrUnwrap(mod: *Module, index: Fn.OptionalIndex) ?*Fn {
+ return mod.funcPtr(index.unwrap() orelse return null);
}
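
These thin wrappers let callers hold plain 4-byte indices and only materialize pointers at the use site; the `*Unwrap` variants compose with the optional indices. A sketch (`decl` and `decl_index` hypothetical):

    // Sketch: optional indices unwrap straight into pointer accessors.
    if (mod.funcPtrUnwrap(decl.getOwnedFunctionIndex(mod))) |func| {
        assert(func.owner_decl == decl_index);
    }
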
/// Returns true if and only if the Decl is the top level struct associated with a File.
pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool {
const decl = mod.declPtr(decl_index);
- if (decl.src_namespace.parent != null)
+ const namespace = mod.namespacePtr(decl.src_namespace);
+ if (namespace.parent != .none)
return false;
- return decl_index == decl.src_namespace.getDeclIndex();
+ return decl_index == namespace.getDeclIndex(mod);
}
fn freeExportList(gpa: Allocator, export_list: *ArrayListUnmanaged(*Export)) void {
- for (export_list.items) |exp| {
- gpa.free(exp.options.name);
- if (exp.options.section) |s| gpa.free(s);
- gpa.destroy(exp);
- }
+ for (export_list.items) |exp| gpa.destroy(exp);
export_list.deinit(gpa);
}
@@ -3990,9 +3811,6 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
if (decl.zir_decl_index != 0) {
const old_zir_decl_index = decl.zir_decl_index;
const new_zir_decl_index = extra_map.get(old_zir_decl_index) orelse {
- log.debug("updateZirRefs {s}: delete {*} ({s})", .{
- file.sub_file_path, decl, decl.name,
- });
try file.deleted_decls.append(gpa, decl_index);
continue;
};
@@ -4000,41 +3818,34 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
decl.zir_decl_index = new_zir_decl_index;
const new_hash = decl.contentsHashZir(new_zir);
if (!std.zig.srcHashEql(old_hash, new_hash)) {
- log.debug("updateZirRefs {s}: outdated {*} ({s}) {d} => {d}", .{
- file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index,
- });
try file.outdated_decls.append(gpa, decl_index);
- } else {
- log.debug("updateZirRefs {s}: unchanged {*} ({s}) {d} => {d}", .{
- file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index,
- });
}
}
if (!decl.owns_tv) continue;
- if (decl.getStruct()) |struct_obj| {
+ if (decl.getOwnedStruct(mod)) |struct_obj| {
struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse {
try file.deleted_decls.append(gpa, decl_index);
continue;
};
}
- if (decl.getUnion()) |union_obj| {
+ if (decl.getOwnedUnion(mod)) |union_obj| {
union_obj.zir_index = inst_map.get(union_obj.zir_index) orelse {
try file.deleted_decls.append(gpa, decl_index);
continue;
};
}
- if (decl.getFunction()) |func| {
+ if (decl.getOwnedFunction(mod)) |func| {
func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse {
try file.deleted_decls.append(gpa, decl_index);
continue;
};
}
- if (decl.getInnerNamespace()) |namespace| {
+ if (decl.getOwnedInnerNamespace(mod)) |namespace| {
for (namespace.decls.keys()) |sub_decl| {
try decl_stack.append(gpa, sub_decl);
}
@@ -4207,14 +4018,12 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
.complete => return,
.outdated => blk: {
- log.debug("re-analyzing {*} ({s})", .{ decl, decl.name });
-
// The exports this Decl performs will be re-discovered, so we remove them here
// prior to re-analysis.
try mod.deleteDeclExports(decl_index);
// Similarly, `@setAlignStack` invocations will be re-discovered.
- if (decl.getFunction()) |func| {
+ if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| {
_ = mod.align_stack_fns.remove(func);
}
@@ -4223,9 +4032,6 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
const dep = mod.declPtr(dep_index);
dep.removeDependant(decl_index);
if (dep.dependants.count() == 0 and !dep.deletion_flag) {
- log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{
- decl, decl.name, dep, dep.name,
- });
try mod.markDeclForDeletion(dep_index);
}
}
@@ -4237,7 +4043,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
.unreferenced => false,
};
- var decl_prog_node = mod.sema_prog_node.start(mem.sliceTo(decl.name, 0), 0);
+ var decl_prog_node = mod.sema_prog_node.start("", 0);
decl_prog_node.activate();
defer decl_prog_node.end();
@@ -4264,7 +4070,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1);
mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create(
mod.gpa,
- decl.srcLoc(),
+ decl.srcLoc(mod),
"unable to analyze: {s}",
.{@errorName(e)},
));
@@ -4277,7 +4083,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
// Update all dependents which have at least this level of dependency.
// If our type remained the same and we're a function, only update
// decls which depend on our body; otherwise, update all dependents.
- const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag() == .Fn) .function_body else .normal;
+ const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag(mod) == .Fn) .function_body else .normal;
for (decl.dependants.keys(), decl.dependants.values()) |dep_index, dep_type| {
if (@enumToInt(dep_type) < @enumToInt(update_level)) continue;
@@ -4304,10 +4110,11 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
}
}
-pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
+pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void {
const tracy = trace(@src());
defer tracy.end();
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -4339,7 +4146,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
defer tmp_arena.deinit();
const sema_arena = tmp_arena.allocator();
- var air = mod.analyzeFnBody(func, sema_arena) catch |err| switch (err) {
+ var air = mod.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) {
error.AnalysisFail => {
if (func.state == .in_progress) {
// If this decl caused the compile error, the analysis field would
@@ -4365,17 +4172,14 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
if (no_bin_file and !dump_air and !dump_llvm_ir) return;
- log.debug("analyze liveness of {s}", .{decl.name});
- var liveness = try Liveness.analyze(gpa, air);
+ var liveness = try Liveness.analyze(gpa, air, &mod.intern_pool);
defer liveness.deinit(gpa);
if (dump_air) {
const fqn = try decl.getFullyQualifiedName(mod);
- defer mod.gpa.free(fqn);
-
- std.debug.print("# Begin Function AIR: {s}:\n", .{fqn});
+ std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(&mod.intern_pool)});
@import("print_air.zig").dump(mod, air, liveness);
- std.debug.print("# End Function AIR: {s}\n\n", .{fqn});
+ std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(&mod.intern_pool)});
}
if (std.debug.runtime_safety) {
@@ -4383,6 +4187,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
.gpa = gpa,
.air = air,
.liveness = liveness,
+ .intern_pool = &mod.intern_pool,
};
defer verify.deinit();
@@ -4394,7 +4199,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
decl_index,
try Module.ErrorMsg.create(
gpa,
- decl.srcLoc(),
+ decl.srcLoc(mod),
"invalid liveness: {s}",
.{@errorName(err)},
),
@@ -4407,7 +4212,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
if (no_bin_file and !dump_llvm_ir) return;
- comp.bin_file.updateFunc(mod, func, air, liveness) catch |err| switch (err) {
+ comp.bin_file.updateFunc(mod, func_index, air, liveness) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
decl.analysis = .codegen_failure;
@@ -4417,7 +4222,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
try mod.failed_decls.ensureUnusedCapacity(gpa, 1);
mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
gpa,
- decl.srcLoc(),
+ decl.srcLoc(mod),
"unable to codegen: {s}",
.{@errorName(err)},
));
@@ -4437,7 +4242,8 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
/// analyzed, and for ensuring it can exist at runtime (see
/// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body
/// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`.
-pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void {
+pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void {
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -4475,7 +4281,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void {
// Decl itself is safely analyzed, and body analysis is not yet queued
- try mod.comp.work_queue.writeItem(.{ .codegen_func = func });
+ try mod.comp.work_queue.writeItem(.{ .codegen_func = func_index });
if (mod.emit_h != null) {
// TODO: we ideally only want to do this if the function's type changed
// since the last update
@@ -4527,42 +4333,54 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
if (file.root_decl != .none) return;
const gpa = mod.gpa;
- var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
- errdefer new_decl_arena.deinit();
- const new_decl_arena_allocator = new_decl_arena.allocator();
-
- const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
- const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
- const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
- const ty_ty = comptime Type.initTag(.type);
- struct_obj.* = .{
- .owner_decl = undefined, // set below
+
+ // Because these three objects reference each other, `undefined`
+ // placeholders are used first, then patched once the struct type
+ // gains an InternPool index.
+ const new_namespace_index = try mod.createNamespace(.{
+ .parent = .none,
+ .ty = undefined,
+ .file_scope = file,
+ });
+ const new_namespace = mod.namespacePtr(new_namespace_index);
+ errdefer mod.destroyNamespace(new_namespace_index);
+
+ const new_decl_index = try mod.allocateNewDecl(new_namespace_index, 0, null);
+ const new_decl = mod.declPtr(new_decl_index);
+ errdefer @panic("TODO error handling");
+
+ const struct_index = try mod.createStruct(.{
+ .owner_decl = new_decl_index,
.fields = .{},
.zir_index = undefined, // set below
.layout = .Auto,
.status = .none,
.known_non_opv = undefined,
.is_tuple = undefined, // set below
- .namespace = .{
- .parent = null,
- .ty = struct_ty,
- .file_scope = file,
- },
- };
- const new_decl_index = try mod.allocateNewDecl(&struct_obj.namespace, 0, null);
- const new_decl = mod.declPtr(new_decl_index);
+ .namespace = new_namespace_index,
+ });
+ errdefer mod.destroyStruct(struct_index);
+
+ const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
+ .index = struct_index.toOptional(),
+ .namespace = new_namespace_index.toOptional(),
+ } });
+ // TODO: figure out InternPool removals for incremental compilation
+ //errdefer mod.intern_pool.remove(struct_ty);
+
+ new_namespace.ty = struct_ty.toType();
file.root_decl = new_decl_index.toOptional();
- struct_obj.owner_decl = new_decl_index;
- new_decl.name = try file.fullyQualifiedNameZ(gpa);
+
+ new_decl.name = try file.fullyQualifiedName(mod);
new_decl.src_line = 0;
new_decl.is_pub = true;
new_decl.is_exported = false;
new_decl.has_align = false;
new_decl.has_linksection_or_addrspace = false;
- new_decl.ty = ty_ty;
- new_decl.val = struct_val;
+ new_decl.ty = Type.type;
+ new_decl.val = struct_ty.toValue();
new_decl.@"align" = 0;
- new_decl.@"linksection" = null;
+ new_decl.@"linksection" = .none;
new_decl.has_tv = true;
new_decl.owns_tv = true;
new_decl.alive = true; // This Decl corresponds to a File and is therefore always alive.
@@ -4573,6 +4391,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
if (file.status == .success_zir) {
assert(file.zir_loaded);
const main_struct_inst = Zir.main_struct_inst;
+ const struct_obj = mod.structPtr(struct_index);
struct_obj.zir_index = main_struct_inst;
const extended = file.zir.instructions.items(.data)[main_struct_inst].extended;
const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
@@ -4582,25 +4401,34 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
defer sema_arena.deinit();
const sema_arena_allocator = sema_arena.allocator();
+ var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
+ defer comptime_mutable_decls.deinit();
+
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = sema_arena_allocator,
- .perm_arena = new_decl_arena_allocator,
.code = file.zir,
.owner_decl = new_decl,
.owner_decl_index = new_decl_index,
.func = null,
+ .func_index = .none,
.fn_ret_ty = Type.void,
.owner_func = null,
+ .owner_func_index = .none,
+ .comptime_mutable_decls = &comptime_mutable_decls,
};
defer sema.deinit();
- var wip_captures = try WipCaptureScope.init(gpa, new_decl_arena_allocator, null);
+ var wip_captures = try WipCaptureScope.init(gpa, null);
defer wip_captures.deinit();
- if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_obj)) |_| {
+ if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| {
try wip_captures.finalize();
+ for (comptime_mutable_decls.items) |decl_index| {
+ const decl = mod.declPtr(decl_index);
+ try decl.intern(mod);
+ }
new_decl.analysis = .complete;
} else |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
@@ -4632,8 +4460,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
} else {
new_decl.analysis = .file_failure;
}
-
- try new_decl.finalizeNewArena(&new_decl_arena);
}
/// Returns `true` if the Decl type changed.
@@ -4645,68 +4471,52 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
const decl = mod.declPtr(decl_index);
- if (decl.getFileScope().status != .success_zir) {
+ if (decl.getFileScope(mod).status != .success_zir) {
return error.AnalysisFail;
}
const gpa = mod.gpa;
- const zir = decl.getFileScope().zir;
+ const zir = decl.getFileScope(mod).zir;
const zir_datas = zir.instructions.items(.data);
decl.analysis = .in_progress;
- // We need the memory for the Type to go into the arena for the Decl
- var decl_arena = std.heap.ArenaAllocator.init(gpa);
- const decl_arena_allocator = decl_arena.allocator();
- const decl_value_arena = blk: {
- errdefer decl_arena.deinit();
- const s = try decl_arena_allocator.create(ValueArena);
- s.* = .{ .state = undefined };
- break :blk s;
- };
- defer {
- if (decl.value_arena) |value_arena| {
- assert(value_arena.state_acquired == null);
- decl_value_arena.prev = value_arena;
- }
-
- decl_value_arena.state = decl_arena.state;
- decl.value_arena = decl_value_arena;
- }
-
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
- const analysis_arena_allocator = analysis_arena.allocator();
+
+ var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
+ defer comptime_mutable_decls.deinit();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
- .arena = analysis_arena_allocator,
- .perm_arena = decl_arena_allocator,
+ .arena = analysis_arena.allocator(),
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = null,
+ .func_index = .none,
.fn_ret_ty = Type.void,
.owner_func = null,
+ .owner_func_index = .none,
+ .comptime_mutable_decls = &comptime_mutable_decls,
};
defer sema.deinit();
if (mod.declIsRoot(decl_index)) {
- log.debug("semaDecl root {*} ({s})", .{ decl, decl.name });
const main_struct_inst = Zir.main_struct_inst;
- const struct_obj = decl.getStruct().?;
+ const struct_index = decl.getOwnedStructIndex(mod).unwrap().?;
+ const struct_obj = mod.structPtr(struct_index);
// This might not have gotten set in `semaFile` if the first time had
// a ZIR failure, so we set it here in case.
struct_obj.zir_index = main_struct_inst;
- try sema.analyzeStructDecl(decl, main_struct_inst, struct_obj);
+ try sema.analyzeStructDecl(decl, main_struct_inst, struct_index);
decl.analysis = .complete;
decl.generation = mod.generation;
return false;
}
- log.debug("semaDecl {*} ({s})", .{ decl, decl.name });
- var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
+ var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope);
defer wip_captures.deinit();
var block_scope: Sema.Block = .{
@@ -4724,12 +4534,16 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
block_scope.params.deinit(gpa);
}
- const zir_block_index = decl.zirBlockIndex();
+ const zir_block_index = decl.zirBlockIndex(mod);
const inst_data = zir_datas[zir_block_index].pl_node;
const extra = zir.extraData(Zir.Inst.Block, inst_data.payload_index);
const body = zir.extra[extra.end..][0..extra.data.body_len];
const result_ref = (try sema.analyzeBodyBreak(&block_scope, body)).?.operand;
try wip_captures.finalize();
+ for (comptime_mutable_decls.items) |ct_decl_index| {
+ const ct_decl = mod.declPtr(ct_decl_index);
+ try ct_decl.intern(mod);
+ }
const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = 0 };
const section_src: LazySrcLoc = .{ .node_offset_var_decl_section = 0 };
const address_space_src: LazySrcLoc = .{ .node_offset_var_decl_addrspace = 0 };
@@ -4748,16 +4562,15 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
decl_tv.ty.fmt(mod),
});
}
- var buffer: Value.ToTypeBuffer = undefined;
- const ty = try decl_tv.val.toType(&buffer).copy(decl_arena_allocator);
- if (ty.getNamespace() == null) {
+ const ty = decl_tv.val.toType();
+ if (ty.getNamespace(mod) == null) {
return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)});
}
- decl.ty = Type.type;
- decl.val = try Value.Tag.ty.create(decl_arena_allocator, ty);
+ decl.ty = InternPool.Index.type_type.toType();
+ decl.val = ty.toValue();
decl.@"align" = 0;
- decl.@"linksection" = null;
+ decl.@"linksection" = .none;
decl.has_tv = true;
decl.owns_tv = false;
decl.analysis = .complete;
@@ -4766,8 +4579,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
return true;
}
- if (decl_tv.val.castTag(.function)) |fn_payload| {
- const func = fn_payload.data;
+ if (mod.intern_pool.indexToFunc(decl_tv.val.toIntern()).unwrap()) |func_index| {
+ const func = mod.funcPtr(func_index);
const owns_tv = func.owner_decl == decl_index;
if (owns_tv) {
var prev_type_has_bits = false;
@@ -4775,31 +4588,30 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
var type_changed = true;
if (decl.has_tv) {
- prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits();
+ prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod);
type_changed = !decl.ty.eql(decl_tv.ty, mod);
- if (decl.getFunction()) |prev_func| {
+ if (decl.getOwnedFunction(mod)) |prev_func| {
prev_is_inline = prev_func.state == .inline_only;
}
}
decl.clearValues(mod);
- decl.ty = try decl_tv.ty.copy(decl_arena_allocator);
- decl.val = try decl_tv.val.copy(decl_arena_allocator);
+ decl.ty = decl_tv.ty;
+ decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue();
// linksection, align, and addrspace were already set by Sema
decl.has_tv = true;
decl.owns_tv = owns_tv;
decl.analysis = .complete;
decl.generation = mod.generation;
- const is_inline = decl.ty.fnCallingConvention() == .Inline;
+ const is_inline = decl.ty.fnCallingConvention(mod) == .Inline;
if (decl.is_exported) {
const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) };
if (is_inline) {
return sema.fail(&block_scope, export_src, "export of inline function", .{});
}
// The scope needs to have the decl in it.
- const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) };
- try sema.analyzeExport(&block_scope, export_src, options, decl_index);
+ try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index);
}
return type_changed or is_inline != prev_is_inline;
}
@@ -4813,64 +4625,57 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
decl.owns_tv = false;
var queue_linker_work = false;
var is_extern = false;
- switch (decl_tv.val.tag()) {
- .variable => {
- const variable = decl_tv.val.castTag(.variable).?.data;
- if (variable.owner_decl == decl_index) {
+ switch (decl_tv.val.toIntern()) {
+ .generic_poison => unreachable,
+ .unreachable_value => unreachable,
+ else => switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) {
+ .variable => |variable| if (variable.decl == decl_index) {
decl.owns_tv = true;
queue_linker_work = true;
+ },
- const copied_init = try variable.init.copy(decl_arena_allocator);
- variable.init = copied_init;
- }
- },
- .extern_fn => {
- const extern_fn = decl_tv.val.castTag(.extern_fn).?.data;
- if (extern_fn.owner_decl == decl_index) {
+ .extern_func => |extern_fn| if (extern_fn.decl == decl_index) {
decl.owns_tv = true;
queue_linker_work = true;
is_extern = true;
- }
- },
-
- .generic_poison => unreachable,
- .unreachable_value => unreachable,
+ },
- .function => {},
+ .func => {},
- else => {
- log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name });
- queue_linker_work = true;
+ else => {
+ queue_linker_work = true;
+ },
},
}
- decl.ty = try decl_tv.ty.copy(decl_arena_allocator);
- decl.val = try decl_tv.val.copy(decl_arena_allocator);
+ decl.ty = decl_tv.ty;
+ decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue();
decl.@"align" = blk: {
- const align_ref = decl.zirAlignRef();
+ const align_ref = decl.zirAlignRef(mod);
if (align_ref == .none) break :blk 0;
break :blk try sema.resolveAlign(&block_scope, align_src, align_ref);
};
decl.@"linksection" = blk: {
- const linksection_ref = decl.zirLinksectionRef();
- if (linksection_ref == .none) break :blk null;
+ const linksection_ref = decl.zirLinksectionRef(mod);
+ if (linksection_ref == .none) break :blk .none;
const bytes = try sema.resolveConstString(&block_scope, section_src, linksection_ref, "linksection must be comptime-known");
if (mem.indexOfScalar(u8, bytes, 0) != null) {
return sema.fail(&block_scope, section_src, "linksection cannot contain null bytes", .{});
} else if (bytes.len == 0) {
return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{});
}
- break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr;
+ const section = try mod.intern_pool.getOrPutString(gpa, bytes);
+ break :blk section.toOptional();
};
decl.@"addrspace" = blk: {
- const addrspace_ctx: Sema.AddressSpaceContext = switch (decl_tv.val.tag()) {
- .function, .extern_fn => .function,
+ const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) {
.variable => .variable,
+ .extern_func, .func => .function,
else => .constant,
};
const target = sema.mod.getTarget();
- break :blk switch (decl.zirAddrspaceRef()) {
+ break :blk switch (decl.zirAddrspaceRef(mod)) {
.none => switch (addrspace_ctx) {
.function => target_util.defaultAddressSpace(target, .function),
.variable => target_util.defaultAddressSpace(target, .global_mutable),
@@ -4888,7 +4693,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
(queue_linker_work and try sema.typeHasRuntimeBits(decl.ty));
if (has_runtime_bits) {
- log.debug("queue linker work for {*} ({s})", .{ decl, decl.name });
// Needed for codegen_decl which will call updateDecl and then the
// codegen backend wants full access to the Decl Type.
@@ -4904,8 +4708,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
if (decl.is_exported) {
const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) };
// The scope needs to have the decl in it.
- const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) };
- try sema.analyzeExport(&block_scope, export_src, options, decl_index);
+ try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index);
}
return type_changed;
@@ -4930,10 +4733,6 @@ pub fn declareDeclDependencyType(mod: *Module, depender_index: Decl.Index, depen
}
}
- log.debug("{*} ({s}) depends on {*} ({s})", .{
- depender, depender.name, dependee, dependee.name,
- });
-
if (dependee.deletion_flag) {
dependee.deletion_flag = false;
assert(mod.deletion_set.swapRemove(dependee_index));
@@ -5222,7 +5021,7 @@ pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void {
pub fn scanNamespace(
mod: *Module,
- namespace: *Namespace,
+ namespace_index: Namespace.Index,
extra_start: usize,
decls_len: u32,
parent_decl: *Decl,
@@ -5231,6 +5030,7 @@ pub fn scanNamespace(
defer tracy.end();
const gpa = mod.gpa;
+ const namespace = mod.namespacePtr(namespace_index);
const zir = namespace.file_scope.zir;
try mod.comp.work_queue.ensureUnusedCapacity(decls_len);
@@ -5243,7 +5043,7 @@ pub fn scanNamespace(
var decl_i: u32 = 0;
var scan_decl_iter: ScanDeclIter = .{
.module = mod,
- .namespace = namespace,
+ .namespace_index = namespace_index,
.parent_decl = parent_decl,
};
while (decl_i < decls_len) : (decl_i += 1) {
@@ -5266,7 +5066,7 @@ pub fn scanNamespace(
const ScanDeclIter = struct {
module: *Module,
- namespace: *Namespace,
+ namespace_index: Namespace.Index,
parent_decl: *Decl,
usingnamespace_index: usize = 0,
comptime_index: usize = 0,
@@ -5278,9 +5078,11 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
defer tracy.end();
const mod = iter.module;
- const namespace = iter.namespace;
+ const namespace_index = iter.namespace_index;
+ const namespace = mod.namespacePtr(namespace_index);
const gpa = mod.gpa;
const zir = namespace.file_scope.zir;
+ const ip = &mod.intern_pool;
// zig fmt: off
const is_pub = (flags & 0b0001) != 0;
@@ -5300,31 +5102,31 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
// Every Decl needs a name.
var is_named_test = false;
var kind: Decl.Kind = .named;
- const decl_name: [:0]const u8 = switch (decl_name_index) {
+ const decl_name: InternPool.NullTerminatedString = switch (decl_name_index) {
0 => name: {
if (export_bit) {
const i = iter.usingnamespace_index;
iter.usingnamespace_index += 1;
kind = .@"usingnamespace";
- break :name try std.fmt.allocPrintZ(gpa, "usingnamespace_{d}", .{i});
+ break :name try ip.getOrPutStringFmt(gpa, "usingnamespace_{d}", .{i});
} else {
const i = iter.comptime_index;
iter.comptime_index += 1;
kind = .@"comptime";
- break :name try std.fmt.allocPrintZ(gpa, "comptime_{d}", .{i});
+ break :name try ip.getOrPutStringFmt(gpa, "comptime_{d}", .{i});
}
},
1 => name: {
const i = iter.unnamed_test_index;
iter.unnamed_test_index += 1;
kind = .@"test";
- break :name try std.fmt.allocPrintZ(gpa, "test_{d}", .{i});
+ break :name try ip.getOrPutStringFmt(gpa, "test_{d}", .{i});
},
2 => name: {
is_named_test = true;
const test_name = zir.nullTerminatedString(decl_doccomment_index);
kind = .@"test";
- break :name try std.fmt.allocPrintZ(gpa, "decltest.{s}", .{test_name});
+ break :name try ip.getOrPutStringFmt(gpa, "decltest.{s}", .{test_name});
},
else => name: {
const raw_name = zir.nullTerminatedString(decl_name_index);
@@ -5332,14 +5134,12 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
is_named_test = true;
const test_name = zir.nullTerminatedString(decl_name_index + 1);
kind = .@"test";
- break :name try std.fmt.allocPrintZ(gpa, "test.{s}", .{test_name});
+ break :name try ip.getOrPutStringFmt(gpa, "test.{s}", .{test_name});
} else {
- break :name try gpa.dupeZ(u8, raw_name);
+ break :name try ip.getOrPutString(gpa, raw_name);
}
},
};
- var must_free_decl_name = true;
- defer if (must_free_decl_name) gpa.free(decl_name);
const is_exported = export_bit and decl_name_index != 0;
if (kind == .@"usingnamespace") try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1);
@@ -5347,21 +5147,19 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
// We create a Decl for it regardless of analysis status.
const gop = try namespace.decls.getOrPutContextAdapted(
gpa,
- @as([]const u8, mem.sliceTo(decl_name, 0)),
+ decl_name,
DeclAdapter{ .mod = mod },
Namespace.DeclContext{ .module = mod },
);
const comp = mod.comp;
if (!gop.found_existing) {
- const new_decl_index = try mod.allocateNewDecl(namespace, decl_node, iter.parent_decl.src_scope);
+ const new_decl_index = try mod.allocateNewDecl(namespace_index, decl_node, iter.parent_decl.src_scope);
const new_decl = mod.declPtr(new_decl_index);
new_decl.kind = kind;
new_decl.name = decl_name;
- must_free_decl_name = false;
if (kind == .@"usingnamespace") {
namespace.usingnamespace_set.putAssumeCapacity(new_decl_index, is_pub);
}
- log.debug("scan new {*} ({s}) into {*}", .{ new_decl, decl_name, namespace });
new_decl.src_line = line;
gop.key_ptr.* = new_decl_index;
// Exported decls, comptime decls, usingnamespace decls, and
@@ -5382,7 +5180,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
if (!comp.bin_file.options.is_test) break :blk false;
if (decl_pkg != mod.main_pkg) break :blk false;
if (comp.test_filter) |test_filter| {
- if (mem.indexOf(u8, decl_name, test_filter) == null) {
+ if (mem.indexOf(u8, ip.stringToSlice(decl_name), test_filter) == null) {
break :blk false;
}
}
@@ -5405,16 +5203,13 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
const decl = mod.declPtr(decl_index);
if (kind == .@"test") {
const src_loc = SrcLoc{
- .file_scope = decl.getFileScope(),
+ .file_scope = decl.getFileScope(mod),
.parent_decl_node = decl.src_node,
.lazy = .{ .token_offset = 1 },
};
- const msg = try ErrorMsg.create(
- gpa,
- src_loc,
- "duplicate test name: {s}",
- .{decl_name},
- );
+ const msg = try ErrorMsg.create(gpa, src_loc, "duplicate test name: {}", .{
+ decl_name.fmt(&mod.intern_pool),
+ });
errdefer msg.destroy(gpa);
try mod.failed_decls.putNoClobber(gpa, decl_index, msg);
const other_src_loc = SrcLoc{
@@ -5424,7 +5219,6 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
};
try mod.errNoteNonLazy(other_src_loc, msg, "other test here", .{});
}
- log.debug("scan existing {*} ({s}) of {*}", .{ decl, decl.name, namespace });
// Update the AST node of the decl; even if its contents are unchanged, it may
// have been re-ordered.
decl.src_node = decl_node;
@@ -5436,7 +5230,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
decl.has_align = has_align;
decl.has_linksection_or_addrspace = has_linksection_or_addrspace;
decl.zir_decl_index = @intCast(u32, decl_sub_index);
- if (decl.getFunction()) |_| {
+ if (decl.getOwnedFunctionIndex(mod) != .none) {
switch (comp.bin_file.tag) {
.coff, .elf, .macho, .plan9 => {
// TODO Look into detecting when this would be unnecessary by storing enough state
@@ -5458,7 +5252,6 @@ pub fn clearDecl(
defer tracy.end();
const decl = mod.declPtr(decl_index);
- log.debug("clearing {*} ({s})", .{ decl, decl.name });
const gpa = mod.gpa;
try mod.deletion_set.ensureUnusedCapacity(gpa, decl.dependencies.count());
@@ -5473,9 +5266,6 @@ pub fn clearDecl(
const dep = mod.declPtr(dep_index);
dep.removeDependant(decl_index);
if (dep.dependants.count() == 0 and !dep.deletion_flag) {
- log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{
- decl, decl.name, dep, dep.name,
- });
// We don't recursively perform a deletion here, because during the update,
// another reference to it may turn up.
dep.deletion_flag = true;
@@ -5510,10 +5300,10 @@ pub fn clearDecl(
try mod.deleteDeclExports(decl_index);
if (decl.has_tv) {
- if (decl.ty.isFnOrHasRuntimeBits()) {
+ if (decl.ty.isFnOrHasRuntimeBits(mod)) {
mod.comp.bin_file.freeDecl(decl_index);
}
- if (decl.getInnerNamespace()) |namespace| {
+ if (decl.getOwnedInnerNamespace(mod)) |namespace| {
try namespace.deleteAllDecls(mod, outdated_decls);
}
}
@@ -5530,10 +5320,9 @@ pub fn clearDecl(
/// This function is exclusively called for anonymous decls.
pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void {
const decl = mod.declPtr(decl_index);
- log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name });
assert(!mod.declIsRoot(decl_index));
- assert(decl.src_namespace.anon_decls.swapRemove(decl_index));
+ assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index));
const dependants = decl.dependants.keys();
for (dependants) |dep| {
@@ -5558,10 +5347,9 @@ fn markDeclForDeletion(mod: *Module, decl_index: Decl.Index) !void {
/// If other decls depend on this decl, they must be aborted first.
pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
const decl = mod.declPtr(decl_index);
- log.debug("abortAnonDecl {*} ({s})", .{ decl, decl.name });
assert(!mod.declIsRoot(decl_index));
- assert(decl.src_namespace.anon_decls.swapRemove(decl_index));
+ assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index));
// An aborted decl must not have dependants -- they must have
// been aborted first and removed from this list.
@@ -5575,6 +5363,17 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
mod.destroyDecl(decl_index);
}
+/// Finalize the creation of an anon decl.
+pub fn finalizeAnonDecl(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
+ // The Decl starts off with alive=false and the codegen backend will set alive=true
+ // if the Decl is referenced by an instruction or another constant. Otherwise,
+ // the Decl will be garbage collected by the `codegen_decl` task instead of sent
+ // to the linker.
+ if (mod.declPtr(decl_index).ty.isFnOrHasRuntimeBits(mod)) {
+ try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = decl_index });
+ }
+}
+
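
Queueing codegen for an anonymous decl is now an explicit, final step rather than a side effect of `initNewAnonDecl`, so a caller can still abort cheaply in between. A lifecycle sketch (`block` and `tv` hypothetical):

    // Sketch: create, then either abort on error or finalize for codegen.
    const new_decl_index = try mod.createAnonymousDecl(block, tv);
    errdefer mod.abortAnonDecl(new_decl_index);
    // ... any further setup the decl needs ...
    try mod.finalizeAnonDecl(new_decl_index);
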
/// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not).
fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
@@ -5600,51 +5399,53 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void
}
}
if (mod.comp.bin_file.cast(link.File.Elf)) |elf| {
- elf.deleteDeclExport(decl_index, exp.options.name);
+ elf.deleteDeclExport(decl_index, exp.opts.name);
}
if (mod.comp.bin_file.cast(link.File.MachO)) |macho| {
- try macho.deleteDeclExport(decl_index, exp.options.name);
+ try macho.deleteDeclExport(decl_index, exp.opts.name);
}
if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| {
wasm.deleteDeclExport(decl_index);
}
if (mod.comp.bin_file.cast(link.File.Coff)) |coff| {
- coff.deleteDeclExport(decl_index, exp.options.name);
+ coff.deleteDeclExport(decl_index, exp.opts.name);
}
if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| {
failed_kv.value.destroy(mod.gpa);
}
- mod.gpa.free(exp.options.name);
mod.gpa.destroy(exp);
}
export_owners.deinit(mod.gpa);
}
-pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
+pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaError!Air {
const tracy = trace(@src());
defer tracy.end();
const gpa = mod.gpa;
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
- // Use the Decl's arena for captured values.
- var decl_arena: std.heap.ArenaAllocator = undefined;
- const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena);
- defer decl.value_arena.?.release(&decl_arena);
+ var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
+ defer comptime_mutable_decls.deinit();
+
+ const fn_ty = decl.ty;
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = arena,
- .perm_arena = decl_arena_allocator,
- .code = decl.getFileScope().zir,
+ .code = decl.getFileScope(mod).zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = func,
- .fn_ret_ty = decl.ty.fnReturnType(),
+ .func_index = func_index.toOptional(),
+ .fn_ret_ty = mod.typeToFunc(fn_ty).?.return_type.toType(),
.owner_func = func,
+ .owner_func_index = func_index.toOptional(),
.branch_quota = @max(func.branch_quota, Sema.default_branch_quota),
+ .comptime_mutable_decls = &comptime_mutable_decls,
};
defer sema.deinit();
@@ -5656,7 +5457,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
try sema.air_extra.ensureTotalCapacity(gpa, reserved_count);
sema.air_extra.items.len += reserved_count;
- var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
+ var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope);
defer wip_captures.deinit();
var inner_block: Sema.Block = .{
@@ -5680,9 +5481,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
// This could be a generic function instantiation, however, in which case we need to
// map the comptime parameters to constant values and only emit arg AIR instructions
// for the runtime ones.
- const fn_ty = decl.ty;
- const fn_ty_info = fn_ty.fnInfo();
- const runtime_params_len = @intCast(u32, fn_ty_info.param_types.len);
+ const runtime_params_len = @intCast(u32, mod.typeToFunc(fn_ty).?.param_types.len);
try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len);
try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType`
try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
@@ -5697,9 +5496,9 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
const param_ty = if (func.comptime_args) |comptime_args| t: {
const arg_tv = comptime_args[total_param_index];
- const arg_val = if (arg_tv.val.tag() != .generic_poison)
+ const arg_val = if (!arg_tv.val.isGenericPoison())
arg_tv.val
- else if (arg_tv.ty.onePossibleValue()) |opv|
+ else if (try arg_tv.ty.onePossibleValue(mod)) |opv|
opv
else
break :t arg_tv.ty;
@@ -5708,7 +5507,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
sema.inst_map.putAssumeCapacityNoClobber(inst, arg);
total_param_index += 1;
continue;
- } else fn_ty_info.param_types[runtime_param_index];
+ } else mod.typeToFunc(fn_ty).?.param_types[runtime_param_index].toType();
const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) {
error.NeededSourceLocation => unreachable,
@@ -5740,7 +5539,6 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
}
func.state = .in_progress;
- log.debug("set {s} to in_progress", .{decl.name});
const last_arg_index = inner_block.instructions.items.len;
@@ -5765,7 +5563,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
// is unused so it just has to be a no-op.
sema.air_instructions.set(ptr_inst.*, .{
.tag = .alloc,
- .data = .{ .ty = Type.initTag(.single_const_pointer_to_comptime_int) },
+ .data = .{ .ty = Type.single_const_pointer_to_comptime_int },
});
}
}
@@ -5773,7 +5571,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
// If we don't get an error return trace from a caller, create our own.
if (func.calls_or_awaits_errorable_fn and
mod.comp.bin_file.options.error_return_tracing and
- !sema.fn_ret_ty.isError())
+ !sema.fn_ret_ty.isError(mod))
{
sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) {
// TODO make these unreachable instead of @panic
@@ -5786,6 +5584,10 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
}
try wip_captures.finalize();
+ for (comptime_mutable_decls.items) |ct_decl_index| {
+ const ct_decl = mod.declPtr(ct_decl_index);
+ try ct_decl.intern(mod);
+ }
// Copy the block into place and mark that as the main block.
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
@@ -5797,14 +5599,13 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = main_block_index;
func.state = .success;
- log.debug("set {s} to success", .{decl.name});
// Finally we must resolve the return type and parameter types so that backends
// have full access to type information.
// Crucially, this happens *after* we set the function state to success above,
// so that dependencies on the function body will now be satisfied rather than
// result in circular dependency errors.
- sema.resolveFnTypes(fn_ty_info) catch |err| switch (err) {
+ sema.resolveFnTypes(fn_ty) catch |err| switch (err) {
error.NeededSourceLocation => unreachable,
error.GenericPoison => unreachable,
error.ComptimeReturn => unreachable,
@@ -5820,9 +5621,8 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
// Similarly, resolve any queued up types that were requested to be resolved for
// the backends.
- for (sema.types_to_resolve.items) |inst_ref| {
- const ty = sema.getTmpAir().getRefType(inst_ref);
- sema.resolveTypeFully(ty) catch |err| switch (err) {
+ for (sema.types_to_resolve.keys()) |ty| {
+ sema.resolveTypeFully(ty.toType()) catch |err| switch (err) {
error.NeededSourceLocation => unreachable,
error.GenericPoison => unreachable,
error.ComptimeReturn => unreachable,
@@ -5840,13 +5640,11 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
return Air{
.instructions = sema.air_instructions.toOwnedSlice(),
.extra = try sema.air_extra.toOwnedSlice(gpa),
- .values = try sema.air_values.toOwnedSlice(gpa),
};
}
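
`analyzeFnBody`, `semaDecl`, and `semaFile` all thread the same `comptime_mutable_decls` list through Sema and intern each comptime-mutated decl once analysis finishes; this replaces keeping such values alive in the old per-decl arena. The shared shape, as a sketch:

    // Sketch: the post-analysis interning step common to all three entry points.
    var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
    defer comptime_mutable_decls.deinit();
    // ... run Sema with .comptime_mutable_decls = &comptime_mutable_decls ...
    for (comptime_mutable_decls.items) |ct_decl_index| {
        try mod.declPtr(ct_decl_index).intern(mod);
    }
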
fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void {
const decl = mod.declPtr(decl_index);
- log.debug("mark outdated {*} ({s})", .{ decl, decl.name });
try mod.comp.work_queue.writeItem(.{ .analyze_decl = decl_index });
if (mod.failed_decls.fetchSwapRemove(decl_index)) |kv| {
kv.value.destroy(mod.gpa);
@@ -5854,11 +5652,8 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void {
if (mod.cimport_errors.fetchSwapRemove(decl_index)) |kv| {
for (kv.value) |err| err.deinit(mod.gpa);
}
- if (decl.has_tv and decl.owns_tv) {
- if (decl.val.castTag(.function)) |payload| {
- const func = payload.data;
- _ = mod.align_stack_fns.remove(func);
- }
+ if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| {
+ _ = mod.align_stack_fns.remove(func);
}
if (mod.emit_h) |emit_h| {
if (emit_h.failed_decls.fetchSwapRemove(decl_index)) |kv| {
@@ -5869,9 +5664,51 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void {
decl.analysis = .outdated;
}
+pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index {
+ if (mod.namespaces_free_list.popOrNull()) |index| {
+ mod.allocated_namespaces.at(@enumToInt(index)).* = initialization;
+ return index;
+ }
+ const ptr = try mod.allocated_namespaces.addOne(mod.gpa);
+ ptr.* = initialization;
+ return @intToEnum(Namespace.Index, mod.allocated_namespaces.len - 1);
+}
+
+pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
+ mod.namespacePtr(index).* = undefined;
+ mod.namespaces_free_list.append(mod.gpa, index) catch {
+ // In order to keep `destroyNamespace` a non-fallible function, we ignore memory
+ // allocation failures here, instead leaking the Namespace until garbage collection.
+ };
+}
+
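
Namespace storage follows the same free-list discipline as decls: destroy pushes the index, create pops it first. A sketch (assuming no interleaved creates; `file` hypothetical):

    // Sketch: a destroyed namespace's index is recycled by the next create.
    const a = try mod.createNamespace(.{ .parent = .none, .ty = undefined, .file_scope = file });
    mod.destroyNamespace(a);
    const b = try mod.createNamespace(.{ .parent = .none, .ty = undefined, .file_scope = file });
    assert(a == b);
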
+pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index {
+ return mod.intern_pool.createStruct(mod.gpa, initialization);
+}
+
+pub fn destroyStruct(mod: *Module, index: Struct.Index) void {
+ return mod.intern_pool.destroyStruct(mod.gpa, index);
+}
+
+pub fn createUnion(mod: *Module, initialization: Union) Allocator.Error!Union.Index {
+ return mod.intern_pool.createUnion(mod.gpa, initialization);
+}
+
+pub fn destroyUnion(mod: *Module, index: Union.Index) void {
+ return mod.intern_pool.destroyUnion(mod.gpa, index);
+}
+
+pub fn createFunc(mod: *Module, initialization: Fn) Allocator.Error!Fn.Index {
+ return mod.intern_pool.createFunc(mod.gpa, initialization);
+}
+
+pub fn destroyFunc(mod: *Module, index: Fn.Index) void {
+ return mod.intern_pool.destroyFunc(mod.gpa, index);
+}
+
pub fn allocateNewDecl(
mod: *Module,
- namespace: *Namespace,
+ namespace: Namespace.Index,
src_node: Ast.Node.Index,
src_scope: ?*CaptureScope,
) !Decl.Index {
@@ -5896,6 +5733,7 @@ pub fn allocateNewDecl(
};
};
+ if (src_scope) |scope| scope.incRef();
decl_and_index.new_decl.* = .{
.name = undefined,
.src_namespace = namespace,
@@ -5906,7 +5744,7 @@ pub fn allocateNewDecl(
.ty = undefined,
.val = undefined,
.@"align" = undefined,
- .@"linksection" = undefined,
+ .@"linksection" = .none,
.@"addrspace" = .generic,
.analysis = .unreferenced,
.deletion_flag = false,
@@ -5924,25 +5762,20 @@ pub fn allocateNewDecl(
return decl_and_index.decl_index;
}
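
Capture scopes are now reference counted across a Decl's lifetime: `allocateNewDecl` takes a reference with `incRef`, and `destroyDecl` releases it with `decRef` (see the earlier hunk). In sketch form (`ns_index`, `node`, and `scope` hypothetical):

    const di = try mod.allocateNewDecl(ns_index, node, scope); // scope.incRef()
    // ... the decl lives, possibly across incremental updates ...
    mod.destroyDecl(di); // scope.decRef(gpa) before the Decl is recycled
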
-/// Get error value for error tag `name`.
-pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged(ErrorInt).KV {
+pub fn getErrorValue(
+ mod: *Module,
+ name: InternPool.NullTerminatedString,
+) Allocator.Error!ErrorInt {
const gop = try mod.global_error_set.getOrPut(mod.gpa, name);
- if (gop.found_existing) {
- return std.StringHashMapUnmanaged(ErrorInt).KV{
- .key = gop.key_ptr.*,
- .value = gop.value_ptr.*,
- };
- }
+ return @intCast(ErrorInt, gop.index);
+}
- errdefer assert(mod.global_error_set.remove(name));
- try mod.error_name_list.ensureUnusedCapacity(mod.gpa, 1);
- gop.key_ptr.* = try mod.gpa.dupe(u8, name);
- gop.value_ptr.* = @intCast(ErrorInt, mod.error_name_list.items.len);
- mod.error_name_list.appendAssumeCapacity(gop.key_ptr.*);
- return std.StringHashMapUnmanaged(ErrorInt).KV{
- .key = gop.key_ptr.*,
- .value = gop.value_ptr.*,
- };
+pub fn getErrorValueFromSlice(
+ mod: *Module,
+ name: []const u8,
+) Allocator.Error!ErrorInt {
+ const interned_name = try mod.intern_pool.getOrPutString(mod.gpa, name);
+ return getErrorValue(mod, interned_name);
}
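
An `ErrorInt` is now simply the name's insertion index in `global_error_set`; because `init` seeds the set with `.empty`, index 0 stays reserved and real error tags start at 1. A sketch:

    // Sketch: interning the same error name twice yields the same ErrorInt.
    const a = try mod.getErrorValueFromSlice("FileNotFound");
    const b = try mod.getErrorValueFromSlice("FileNotFound");
    assert(a == b);
    assert(a != 0); // 0 belongs to the `.empty` entry seeded in `init`
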
pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedValue) !Decl.Index {
@@ -5953,29 +5786,28 @@ pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedV
pub fn createAnonymousDeclFromDecl(
mod: *Module,
src_decl: *Decl,
- namespace: *Namespace,
+ namespace: Namespace.Index,
src_scope: ?*CaptureScope,
tv: TypedValue,
) !Decl.Index {
const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope);
errdefer mod.destroyDecl(new_decl_index);
- const name = try std.fmt.allocPrintZ(mod.gpa, "{s}__anon_{d}", .{
- src_decl.name, @enumToInt(new_decl_index),
+ const name = try mod.intern_pool.getOrPutStringFmt(mod.gpa, "{}__anon_{d}", .{
+ src_decl.name.fmt(&mod.intern_pool), @enumToInt(new_decl_index),
});
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, tv, name);
return new_decl_index;
}
-/// Takes ownership of `name` even if it returns an error.
pub fn initNewAnonDecl(
mod: *Module,
new_decl_index: Decl.Index,
src_line: u32,
- namespace: *Namespace,
+ namespace: Namespace.Index,
typed_value: TypedValue,
- name: [:0]u8,
-) !void {
- errdefer mod.gpa.free(name);
+ name: InternPool.NullTerminatedString,
+) Allocator.Error!void {
+ assert(typed_value.ty.toIntern() == mod.intern_pool.typeOf(typed_value.val.toIntern()));
const new_decl = mod.declPtr(new_decl_index);
@@ -5984,34 +5816,12 @@ pub fn initNewAnonDecl(
new_decl.ty = typed_value.ty;
new_decl.val = typed_value.val;
new_decl.@"align" = 0;
- new_decl.@"linksection" = null;
+ new_decl.@"linksection" = .none;
new_decl.has_tv = true;
new_decl.analysis = .complete;
new_decl.generation = mod.generation;
- try namespace.anon_decls.putNoClobber(mod.gpa, new_decl_index, {});
-
- // The Decl starts off with alive=false and the codegen backend will set alive=true
- // if the Decl is referenced by an instruction or another constant. Otherwise,
- // the Decl will be garbage collected by the `codegen_decl` task instead of sent
- // to the linker.
- if (typed_value.ty.isFnOrHasRuntimeBits()) {
- try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index });
- }
-}
-
-pub fn makeIntType(arena: Allocator, signedness: std.builtin.Signedness, bits: u16) !Type {
- const int_payload = try arena.create(Type.Payload.Bits);
- int_payload.* = .{
- .base = .{
- .tag = switch (signedness) {
- .signed => .int_signed,
- .unsigned => .int_unsigned,
- },
- },
- .data = bits,
- };
- return Type.initPayload(&int_payload.base);
+ try mod.namespacePtr(namespace).anon_decls.putNoClobber(mod.gpa, new_decl_index, {});
}
pub fn errNoteNonLazy(
@@ -6073,16 +5883,17 @@ pub const SwitchProngSrc = union(enum) {
/// the LazySrcLoc in order to emit a compile error.
pub fn resolve(
prong_src: SwitchProngSrc,
- gpa: Allocator,
+ mod: *Module,
decl: *Decl,
switch_node_offset: i32,
range_expand: RangeExpand,
) LazySrcLoc {
@setCold(true);
- const tree = decl.getFileScope().getTree(gpa) catch |err| {
+ const gpa = mod.gpa;
+ const tree = decl.getFileScope(mod).getTree(gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
- decl.getFileScope().sub_file_path, @errorName(err),
+ decl.getFileScope(mod).sub_file_path, @errorName(err),
});
return LazySrcLoc.nodeOffset(0);
};
@@ -6166,11 +5977,12 @@ pub const PeerTypeCandidateSrc = union(enum) {
pub fn resolve(
self: PeerTypeCandidateSrc,
- gpa: Allocator,
+ mod: *Module,
decl: *Decl,
candidate_i: usize,
) ?LazySrcLoc {
@setCold(true);
+ const gpa = mod.gpa;
switch (self) {
.none => {
@@ -6192,10 +6004,10 @@ pub const PeerTypeCandidateSrc = union(enum) {
else => {},
}
- const tree = decl.getFileScope().getTree(gpa) catch |err| {
+ const tree = decl.getFileScope(mod).getTree(gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
- decl.getFileScope().sub_file_path, @errorName(err),
+ decl.getFileScope(mod).sub_file_path, @errorName(err),
});
return LazySrcLoc.nodeOffset(0);
};
@@ -6254,15 +6066,16 @@ fn queryFieldSrc(
pub fn paramSrc(
func_node_offset: i32,
- gpa: Allocator,
+ mod: *Module,
decl: *Decl,
param_i: usize,
) LazySrcLoc {
@setCold(true);
- const tree = decl.getFileScope().getTree(gpa) catch |err| {
+ const gpa = mod.gpa;
+ const tree = decl.getFileScope(mod).getTree(gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
- decl.getFileScope().sub_file_path, @errorName(err),
+ decl.getFileScope(mod).sub_file_path, @errorName(err),
});
return LazySrcLoc.nodeOffset(0);
};
@@ -6284,19 +6097,20 @@ pub fn paramSrc(
}
pub fn argSrc(
+ mod: *Module,
call_node_offset: i32,
- gpa: Allocator,
decl: *Decl,
start_arg_i: usize,
bound_arg_src: ?LazySrcLoc,
) LazySrcLoc {
+ @setCold(true);
+ const gpa = mod.gpa;
if (start_arg_i == 0 and bound_arg_src != null) return bound_arg_src.?;
const arg_i = start_arg_i - @boolToInt(bound_arg_src != null);
- @setCold(true);
- const tree = decl.getFileScope().getTree(gpa) catch |err| {
+ const tree = decl.getFileScope(mod).getTree(gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
- decl.getFileScope().sub_file_path, @errorName(err),
+ decl.getFileScope(mod).sub_file_path, @errorName(err),
});
return LazySrcLoc.nodeOffset(0);
};
@@ -6310,7 +6124,7 @@ pub fn argSrc(
const node_datas = tree.nodes.items(.data);
const call_args_node = tree.extra_data[node_datas[node].rhs - 1];
const call_args_offset = decl.nodeIndexToRelative(call_args_node);
- return initSrc(call_args_offset, gpa, decl, arg_i);
+ return mod.initSrc(call_args_offset, decl, arg_i);
},
else => unreachable,
};
@@ -6318,16 +6132,17 @@ pub fn argSrc(
}
pub fn initSrc(
+ mod: *Module,
init_node_offset: i32,
- gpa: Allocator,
decl: *Decl,
init_index: usize,
) LazySrcLoc {
@setCold(true);
- const tree = decl.getFileScope().getTree(gpa) catch |err| {
+ const gpa = mod.gpa;
+ const tree = decl.getFileScope(mod).getTree(gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
- decl.getFileScope().sub_file_path, @errorName(err),
+ decl.getFileScope(mod).sub_file_path, @errorName(err),
});
return LazySrcLoc.nodeOffset(0);
};
@@ -6363,12 +6178,13 @@ pub fn initSrc(
}
}
-pub fn optionsSrc(gpa: Allocator, decl: *Decl, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc {
+pub fn optionsSrc(mod: *Module, decl: *Decl, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc {
@setCold(true);
- const tree = decl.getFileScope().getTree(gpa) catch |err| {
+ const gpa = mod.gpa;
+ const tree = decl.getFileScope(mod).getTree(gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
- decl.getFileScope().sub_file_path, @errorName(err),
+ decl.getFileScope(mod).sub_file_path, @errorName(err),
});
return LazySrcLoc.nodeOffset(0);
};
@@ -6430,11 +6246,13 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void {
// deletion set at this time.
for (file.deleted_decls.items) |decl_index| {
const decl = mod.declPtr(decl_index);
- log.debug("deleted from source: {*} ({s})", .{ decl, decl.name });
// Remove from the namespace it resides in, preserving declaration order.
assert(decl.zir_decl_index != 0);
- _ = decl.src_namespace.decls.orderedRemoveAdapted(@as([]const u8, mem.sliceTo(decl.name, 0)), DeclAdapter{ .mod = mod });
+ _ = mod.namespacePtr(decl.src_namespace).decls.orderedRemoveAdapted(
+ decl.name,
+ DeclAdapter{ .mod = mod },
+ );
try mod.clearDecl(decl_index, &outdated_decls);
mod.destroyDecl(decl_index);
@@ -6454,7 +6272,7 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void {
pub fn processExports(mod: *Module) !void {
const gpa = mod.gpa;
// Map symbol names to `Export` for name collision detection.
- var symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{};
+ var symbol_exports: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, *Export) = .{};
defer symbol_exports.deinit(gpa);
var it = mod.decl_exports.iterator();
@@ -6462,13 +6280,13 @@ pub fn processExports(mod: *Module) !void {
const exported_decl = entry.key_ptr.*;
const exports = entry.value_ptr.items;
for (exports) |new_export| {
- const gop = try symbol_exports.getOrPut(gpa, new_export.options.name);
+ const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name);
if (gop.found_existing) {
new_export.status = .failed_retryable;
try mod.failed_exports.ensureUnusedCapacity(gpa, 1);
const src_loc = new_export.getSrcLoc(mod);
- const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {s}", .{
- new_export.options.name,
+ const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {}", .{
+ new_export.opts.name.fmt(&mod.intern_pool),
});
errdefer msg.destroy(gpa);
const other_export = gop.value_ptr.*;
@@ -6501,11 +6319,16 @@ pub fn populateTestFunctions(
main_progress_node: *std.Progress.Node,
) !void {
const gpa = mod.gpa;
+ const ip = &mod.intern_pool;
const builtin_pkg = mod.main_pkg.table.get("builtin").?;
const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file;
const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?);
- const builtin_namespace = root_decl.src_namespace;
- const decl_index = builtin_namespace.decls.getKeyAdapted(@as([]const u8, "test_functions"), DeclAdapter{ .mod = mod }).?;
+ const builtin_namespace = mod.namespacePtr(root_decl.src_namespace);
+ const test_functions_str = try ip.getOrPutString(gpa, "test_functions");
+ const decl_index = builtin_namespace.decls.getKeyAdapted(
+ test_functions_str,
+ DeclAdapter{ .mod = mod },
+ ).?;
{
// We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions`
// was not referenced by start code.
@@ -6518,90 +6341,117 @@ pub fn populateTestFunctions(
try mod.ensureDeclAnalyzed(decl_index);
}
const decl = mod.declPtr(decl_index);
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType();
+ const test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod);
+ const null_usize = try mod.intern(.{ .opt = .{
+ .ty = try mod.intern(.{ .opt_type = .usize_type }),
+ .val = .none,
+ } });
const array_decl_index = d: {
// Add mod.test_functions to an array decl then make the test_functions
// decl reference it as a slice.
- var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
- errdefer new_decl_arena.deinit();
- const arena = new_decl_arena.allocator();
-
- const test_fn_vals = try arena.alloc(Value, mod.test_functions.count());
- const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
- .ty = try Type.Tag.array.create(arena, .{
- .len = test_fn_vals.len,
- .elem_type = try tmp_test_fn_ty.copy(arena),
- }),
- .val = try Value.Tag.aggregate.create(arena, test_fn_vals),
- });
- const array_decl = mod.declPtr(array_decl_index);
+ const test_fn_vals = try gpa.alloc(InternPool.Index, mod.test_functions.count());
+ defer gpa.free(test_fn_vals);
// Add a dependency on each test name and function pointer.
- try array_decl.dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2);
+ var array_decl_dependencies = std.ArrayListUnmanaged(Decl.Index){};
+ defer array_decl_dependencies.deinit(gpa);
+ try array_decl_dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2);
- for (mod.test_functions.keys(), 0..) |test_decl_index, i| {
+ for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| {
const test_decl = mod.declPtr(test_decl_index);
- const test_name_slice = mem.sliceTo(test_decl.name, 0);
+ // TODO: write something like getCoercedInts to avoid needing to dupe
+ const test_decl_name = try gpa.dupe(u8, ip.stringToSlice(test_decl.name));
+ defer gpa.free(test_decl_name);
const test_name_decl_index = n: {
- var name_decl_arena = std.heap.ArenaAllocator.init(gpa);
- errdefer name_decl_arena.deinit();
- const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice);
- const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{
- .ty = try Type.Tag.array_u8.create(name_decl_arena.allocator(), bytes.len),
- .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes),
+ const test_name_decl_ty = try mod.arrayType(.{
+ .len = test_decl_name.len,
+ .child = .u8_type,
+ });
+ const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
+ .ty = test_name_decl_ty,
+ .val = (try mod.intern(.{ .aggregate = .{
+ .ty = test_name_decl_ty.toIntern(),
+ .storage = .{ .bytes = test_decl_name },
+ } })).toValue(),
});
- try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena);
break :n test_name_decl_index;
};
- array_decl.dependencies.putAssumeCapacityNoClobber(test_decl_index, .normal);
- array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, .normal);
+ array_decl_dependencies.appendAssumeCapacity(test_decl_index);
+ array_decl_dependencies.appendAssumeCapacity(test_name_decl_index);
try mod.linkerUpdateDecl(test_name_decl_index);
- const field_vals = try arena.create([3]Value);
- field_vals.* = .{
- try Value.Tag.slice.create(arena, .{
- .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl_index),
- .len = try Value.Tag.int_u64.create(arena, test_name_slice.len),
- }), // name
- try Value.Tag.decl_ref.create(arena, test_decl_index), // func
- Value.initTag(.null_value), // async_frame_size
+ const test_fn_fields = .{
+ // name
+ try mod.intern(.{ .ptr = .{
+ .ty = .slice_const_u8_type,
+ .addr = .{ .decl = test_name_decl_index },
+ .len = try mod.intern(.{ .int = .{
+ .ty = .usize_type,
+ .storage = .{ .u64 = test_decl_name.len },
+ } }),
+ } }),
+ // func
+ try mod.intern(.{ .ptr = .{
+ .ty = try mod.intern(.{ .ptr_type = .{
+ .child = test_decl.ty.toIntern(),
+ .flags = .{
+ .is_const = true,
+ },
+ } }),
+ .addr = .{ .decl = test_decl_index },
+ } }),
+ // async_frame_size
+ null_usize,
};
- test_fn_vals[i] = try Value.Tag.aggregate.create(arena, field_vals);
+ test_fn_val.* = try mod.intern(.{ .aggregate = .{
+ .ty = test_fn_ty.toIntern(),
+ .storage = .{ .elems = &test_fn_fields },
+ } });
+ }
+
+ const array_decl_ty = try mod.arrayType(.{
+ .len = test_fn_vals.len,
+ .child = test_fn_ty.toIntern(),
+ .sentinel = .none,
+ });
+ const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
+ .ty = array_decl_ty,
+ .val = (try mod.intern(.{ .aggregate = .{
+ .ty = array_decl_ty.toIntern(),
+ .storage = .{ .elems = test_fn_vals },
+ } })).toValue(),
+ });
+ for (array_decl_dependencies.items) |array_decl_dependency| {
+ try mod.declareDeclDependency(array_decl_index, array_decl_dependency);
}
- try array_decl.finalizeNewArena(&new_decl_arena);
break :d array_decl_index;
};
try mod.linkerUpdateDecl(array_decl_index);
{
- var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
- errdefer new_decl_arena.deinit();
- const arena = new_decl_arena.allocator();
-
- {
- // This copy accesses the old Decl Type/Value so it must be done before `clearValues`.
- const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena));
- const new_var = try gpa.create(Var);
- errdefer gpa.destroy(new_var);
- new_var.* = decl.val.castTag(.variable).?.data.*;
- new_var.init = try Value.Tag.slice.create(arena, .{
- .ptr = try Value.Tag.decl_ref.create(arena, array_decl_index),
- .len = try Value.Tag.int_u64.create(arena, mod.test_functions.count()),
- });
- const new_val = try Value.Tag.variable.create(arena, new_var);
-
- // Since we are replacing the Decl's value we must perform cleanup on the
- // previous value.
- decl.clearValues(mod);
- decl.ty = new_ty;
- decl.val = new_val;
- decl.has_tv = true;
- }
-
- try decl.finalizeNewArena(&new_decl_arena);
+ const new_ty = try mod.ptrType(.{
+ .child = test_fn_ty.toIntern(),
+ .flags = .{
+ .is_const = true,
+ .size = .Slice,
+ },
+ });
+ const new_val = decl.val;
+ const new_init = try mod.intern(.{ .ptr = .{
+ .ty = new_ty.toIntern(),
+ .addr = .{ .decl = array_decl_index },
+ .len = (try mod.intValue(Type.usize, mod.test_functions.count())).toIntern(),
+ } });
+ ip.mutateVarInit(decl.val.toIntern(), new_init);
+
+ // Since we are replacing the Decl's value we must perform cleanup on the
+ // previous value.
+ decl.clearValues(mod);
+ decl.ty = new_ty;
+ decl.val = new_val;
+ decl.has_tv = true;
}
try mod.linkerUpdateDecl(decl_index);
}
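
// Illustrative sketch (not part of this change): each element interned above
// has the shape of `std.builtin.TestFn`, roughly:
//
//     .{ .name = "test.foo", .func = &test_foo, .async_frame_size = null }
//
// with `name` pointing at an anonymous `[N]u8` decl and `func` at the test's
// function decl, both kept alive via the declareDeclDependency calls above.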
@@ -6631,7 +6481,7 @@ pub fn linkerUpdateDecl(mod: *Module, decl_index: Decl.Index) !void {
try mod.failed_decls.ensureUnusedCapacity(gpa, 1);
mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create(
gpa,
- decl.srcLoc(),
+ decl.srcLoc(mod),
"unable to codegen: {s}",
.{@errorName(err)},
));
@@ -6673,64 +6523,49 @@ fn reportRetryableFileError(
gop.value_ptr.* = err_msg;
}
-pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void {
- switch (val.tag()) {
- .decl_ref_mut => return mod.markDeclIndexAlive(val.castTag(.decl_ref_mut).?.data.decl_index),
- .extern_fn => return mod.markDeclIndexAlive(val.castTag(.extern_fn).?.data.owner_decl),
- .function => return mod.markDeclIndexAlive(val.castTag(.function).?.data.owner_decl),
- .variable => return mod.markDeclIndexAlive(val.castTag(.variable).?.data.owner_decl),
- .decl_ref => return mod.markDeclIndexAlive(val.cast(Value.Payload.Decl).?.data),
-
- .repeated,
- .eu_payload,
- .opt_payload,
- .empty_array_sentinel,
- => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.SubValue).?.data),
-
- .eu_payload_ptr,
- .opt_payload_ptr,
- => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.PayloadPtr).?.data.container_ptr),
-
- .slice => {
- const slice = val.cast(Value.Payload.Slice).?.data;
- mod.markReferencedDeclsAlive(slice.ptr);
- mod.markReferencedDeclsAlive(slice.len);
- },
-
- .elem_ptr => {
- const elem_ptr = val.cast(Value.Payload.ElemPtr).?.data;
- return mod.markReferencedDeclsAlive(elem_ptr.array_ptr);
- },
- .field_ptr => {
- const field_ptr = val.cast(Value.Payload.FieldPtr).?.data;
- return mod.markReferencedDeclsAlive(field_ptr.container_ptr);
+pub fn markReferencedDeclsAlive(mod: *Module, val: Value) Allocator.Error!void {
+ switch (mod.intern_pool.indexToKey(val.toIntern())) {
+ .variable => |variable| try mod.markDeclIndexAlive(variable.decl),
+ .extern_func => |extern_func| try mod.markDeclIndexAlive(extern_func.decl),
+ .func => |func| try mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl),
+ .error_union => |error_union| switch (error_union.val) {
+ .err_name => {},
+ .payload => |payload| try mod.markReferencedDeclsAlive(payload.toValue()),
},
- .aggregate => {
- for (val.castTag(.aggregate).?.data) |field_val| {
- mod.markReferencedDeclsAlive(field_val);
+ .ptr => |ptr| {
+ switch (ptr.addr) {
+ .decl => |decl| try mod.markDeclIndexAlive(decl),
+ .mut_decl => |mut_decl| try mod.markDeclIndexAlive(mut_decl.decl),
+ .int, .comptime_field => {},
+ .eu_payload, .opt_payload => |parent| try mod.markReferencedDeclsAlive(parent.toValue()),
+ .elem, .field => |base_index| try mod.markReferencedDeclsAlive(base_index.base.toValue()),
}
+ if (ptr.len != .none) try mod.markReferencedDeclsAlive(ptr.len.toValue());
},
- .@"union" => {
- const data = val.cast(Value.Payload.Union).?.data;
- mod.markReferencedDeclsAlive(data.tag);
- mod.markReferencedDeclsAlive(data.val);
+ .opt => |opt| if (opt.val != .none) try mod.markReferencedDeclsAlive(opt.val.toValue()),
+ .aggregate => |aggregate| for (aggregate.storage.values()) |elem|
+ try mod.markReferencedDeclsAlive(elem.toValue()),
+ .un => |un| {
+ try mod.markReferencedDeclsAlive(un.tag.toValue());
+ try mod.markReferencedDeclsAlive(un.val.toValue());
},
-
else => {},
}
}
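
// Worked example (illustrative): marking a decl alive whose value is a slice
// of anonymous string decls first visits `ptr.addr` (marking the pointed-at
// decl), then recurses into `ptr.len`; an int carries no decls, so recursion
// stops there and every referenced anonymous decl survives garbage collection.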
-pub fn markDeclAlive(mod: *Module, decl: *Decl) void {
+pub fn markDeclAlive(mod: *Module, decl: *Decl) Allocator.Error!void {
if (decl.alive) return;
decl.alive = true;
+ try decl.intern(mod);
+
// This is the first time we are marking this Decl alive. We must
// therefore recurse into its value and mark any Decl it references
// as also alive, so that any Decl referenced does not get garbage collected.
- mod.markReferencedDeclsAlive(decl.val);
+ try mod.markReferencedDeclsAlive(decl.val);
}
-fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) void {
+fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
return mod.markDeclAlive(mod.declPtr(decl_index));
}
@@ -6779,3 +6614,522 @@ pub fn backendSupportsFeature(mod: Module, feature: Feature) bool {
.field_reordering => mod.comp.bin_file.options.use_llvm,
};
}
+
+/// Shortcut for calling `intern_pool.get`.
+pub fn intern(mod: *Module, key: InternPool.Key) Allocator.Error!InternPool.Index {
+ return mod.intern_pool.get(mod.gpa, key);
+}
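+
+// Illustrative sketch (not part of this change): `InternPool.get` deduplicates,
+// so interning the same key twice returns the same index, and type/value
+// equality reduces to integer comparison. Assumes a valid `mod`.
+fn internDedupExample(mod: *Module) !void {
+ const a = try mod.intern(.{ .int_type = .{ .signedness = .unsigned, .bits = 7 } });
+ const b = try mod.intern(.{ .int_type = .{ .signedness = .unsigned, .bits = 7 } });
+ assert(a == b);
+}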
+
+/// Shortcut for calling `intern_pool.getCoerced`.
+pub fn getCoerced(mod: *Module, val: Value, new_ty: Type) Allocator.Error!Value {
+ return (try mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), new_ty.toIntern())).toValue();
+}
+
+pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type {
+ return (try intern(mod, .{ .int_type = .{
+ .signedness = signedness,
+ .bits = bits,
+ } })).toType();
+}
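+
+// Usage sketch (illustrative): arbitrary-width integer types such as `u7` are
+// obtained the same way as the standard widths. Assumes a valid `mod`.
+fn intTypeExample(mod: *Module) !Type {
+ return mod.intType(.unsigned, 7); // the type `u7`
+}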
+
+pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type {
+ const i = try intern(mod, .{ .array_type = info });
+ return i.toType();
+}
+
+pub fn vectorType(mod: *Module, info: InternPool.Key.VectorType) Allocator.Error!Type {
+ const i = try intern(mod, .{ .vector_type = info });
+ return i.toType();
+}
+
+pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!Type {
+ const i = try intern(mod, .{ .opt_type = child_type });
+ return i.toType();
+}
+
+pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type {
+ var canon_info = info;
+ const have_elem_layout = info.child.toType().layoutIsResolved(mod);
+
+ if (info.flags.size == .C) canon_info.flags.is_allowzero = true;
+
+ // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee
+ // type, we change it to `.none` here. If this causes an assertion trip because the
+ // pointee type needs further resolution, that resolution must happen before
+ // calling this `ptrType` function.
+ if (info.flags.alignment.toByteUnitsOptional()) |info_align| {
+ if (have_elem_layout and info_align == info.child.toType().abiAlignment(mod)) {
+ canon_info.flags.alignment = .none;
+ }
+ }
+
+ switch (info.flags.vector_index) {
+ // Canonicalize host_size. If it matches the bit size of the pointee type,
+ // we change it to 0 here. If this causes an assertion trip, the pointee type
+ // needs to be resolved before calling this `ptrType` function.
+ .none => if (have_elem_layout and info.packed_offset.host_size != 0) {
+ const elem_bit_size = info.child.toType().bitSize(mod);
+ assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
+ if (info.packed_offset.host_size * 8 == elem_bit_size) {
+ canon_info.packed_offset.host_size = 0;
+ }
+ },
+ .runtime => {},
+ _ => assert(@enumToInt(info.flags.vector_index) < info.packed_offset.host_size),
+ }
+
+ return (try intern(mod, .{ .ptr_type = canon_info })).toType();
+}
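+
+// Illustrative sketch of the canonicalization above (not part of this change):
+// a C pointer gains `is_allowzero`, so spelling that flag out explicitly
+// interns to the same index. Assumes a valid `mod`.
+fn ptrCanonExample(mod: *Module) !void {
+ const a = try mod.ptrType(.{ .child = .u8_type, .flags = .{ .size = .C } });
+ const b = try mod.ptrType(.{ .child = .u8_type, .flags = .{ .size = .C, .is_allowzero = true } });
+ assert(a.toIntern() == b.toIntern());
+}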
+
+pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
+ return ptrType(mod, .{ .child = child_type.toIntern() });
+}
+
+pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
+ return ptrType(mod, .{
+ .child = child_type.toIntern(),
+ .flags = .{
+ .is_const = true,
+ },
+ });
+}
+
+pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
+ return ptrType(mod, .{
+ .child = child_type.toIntern(),
+ .flags = .{
+ .size = .Many,
+ .is_const = true,
+ },
+ });
+}
+
+pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
+ const info = Type.ptrInfoIp(&mod.intern_pool, ptr_ty.toIntern());
+ return mod.ptrType(.{
+ .child = new_child.toIntern(),
+ .sentinel = info.sentinel,
+ .flags = info.flags,
+ .packed_offset = info.packed_offset,
+ });
+}
+
+pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type {
+ return (try intern(mod, .{ .func_type = info })).toType();
+}
+
+/// Use this for `anyframe->T` only.
+/// For `anyframe`, use the `InternPool.Index.anyframe` tag directly.
+pub fn anyframeType(mod: *Module, payload_ty: Type) Allocator.Error!Type {
+ return (try intern(mod, .{ .anyframe_type = payload_ty.toIntern() })).toType();
+}
+
+pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Allocator.Error!Type {
+ return (try intern(mod, .{ .error_union_type = .{
+ .error_set_type = error_set_ty.toIntern(),
+ .payload_type = payload_ty.toIntern(),
+ } })).toType();
+}
+
+pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type {
+ const names: *const [1]InternPool.NullTerminatedString = &name;
+ return (try mod.intern_pool.get(mod.gpa, .{ .error_set_type = .{ .names = names } })).toType();
+}
+
+/// Sorts `names` in place.
+pub fn errorSetFromUnsortedNames(
+ mod: *Module,
+ names: []InternPool.NullTerminatedString,
+) Allocator.Error!Type {
+ std.mem.sort(
+ InternPool.NullTerminatedString,
+ names,
+ {},
+ InternPool.NullTerminatedString.indexLessThan,
+ );
+ const new_ty = try mod.intern(.{ .error_set_type = .{ .names = names } });
+ return new_ty.toType();
+}
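+
+// Usage sketch (illustrative): names are interned first, then handed over in
+// any order; sorting in place makes `error{A,B}` and `error{B,A}` intern to
+// the same type. Assumes a valid `mod`.
+fn errorSetExample(mod: *Module) !Type {
+ const ip = &mod.intern_pool;
+ var names = [_]InternPool.NullTerminatedString{
+ try ip.getOrPutString(mod.gpa, "OutOfMemory"),
+ try ip.getOrPutString(mod.gpa, "AccessDenied"),
+ };
+ return mod.errorSetFromUnsortedNames(&names);
+}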
+
+/// Supports optionals in addition to pointers.
+pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
+ return mod.getCoerced(try mod.intValue_u64(Type.usize, x), ty);
+}
+
+/// Supports only pointers. See `ptrIntValue` for pointer-like optional support.
+pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
+ assert(ty.zigTypeTag(mod) == .Pointer);
+ const i = try intern(mod, .{ .ptr = .{
+ .ty = ty.toIntern(),
+ .addr = .{ .int = try mod.intValue_u64(Type.usize, x) },
+ } });
+ return i.toValue();
+}
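+
+// Illustrative sketch: `ptrIntValue` goes through `getCoerced`, so it also
+// accepts pointer-like optionals, while the `_ptronly` variant asserts a
+// plain pointer type. Assumes `ptr_ty` is a pointer (or optional pointer) type.
+fn ptrFromIntExample(mod: *Module, ptr_ty: Type) !Value {
+ return mod.ptrIntValue(ptr_ty, 0x1000);
+}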
+
+/// Creates an enum tag value based on the integer tag value.
+pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Error!Value {
+ if (std.debug.runtime_safety) {
+ const tag = ty.zigTypeTag(mod);
+ assert(tag == .Enum);
+ }
+ const i = try intern(mod, .{ .enum_tag = .{
+ .ty = ty.toIntern(),
+ .int = tag_int,
+ } });
+ return i.toValue();
+}
+
+/// Creates an enum tag value based on the field index according to source code
+/// declaration order.
+pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value {
+ const ip = &mod.intern_pool;
+ const gpa = mod.gpa;
+ const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
+
+ if (enum_type.values.len == 0) {
+ // Auto-numbered fields.
+ return (try ip.get(gpa, .{ .enum_tag = .{
+ .ty = ty.toIntern(),
+ .int = try ip.get(gpa, .{ .int = .{
+ .ty = enum_type.tag_ty,
+ .storage = .{ .u64 = field_index },
+ } }),
+ } })).toValue();
+ }
+
+ return (try ip.get(gpa, .{ .enum_tag = .{
+ .ty = ty.toIntern(),
+ .int = enum_type.values[field_index],
+ } })).toValue();
+}
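+
+// Worked example (illustrative): for `enum { a, b, c }` there are no explicit
+// `values`, so field index 2 interns a fresh tag integer of the enum's tag
+// type; with explicit values, the stored `values[2]` is reused directly.
+fn enumTagExample(mod: *Module, enum_ty: Type) !Value {
+ return mod.enumValueFieldIndex(enum_ty, 2);
+}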
+
+pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
+ if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted);
+ if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted);
+ var limbs_buffer: [4]usize = undefined;
+ var big_int = BigIntMutable.init(&limbs_buffer, x);
+ return intValue_big(mod, ty, big_int.toConst());
+}
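+
+// Illustrative sketch: the dispatch above picks the cheapest storage that
+// fits -- `u64`, then `i64`, then a stack big int for wider values.
+// Assumes a valid `mod`.
+fn intValueExample(mod: *Module) !void {
+ _ = try mod.intValue(Type.usize, 42); // u64 storage
+ const u128_ty = try mod.intType(.unsigned, 128);
+ _ = try mod.intValue(u128_ty, @as(u128, 1) << 100); // big_int storage
+}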
+
+pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Value {
+ const i = try intern(mod, .{ .int = .{
+ .ty = ty.toIntern(),
+ .storage = .{ .big_int = x },
+ } });
+ return i.toValue();
+}
+
+pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
+ const i = try intern(mod, .{ .int = .{
+ .ty = ty.toIntern(),
+ .storage = .{ .u64 = x },
+ } });
+ return i.toValue();
+}
+
+pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value {
+ const i = try intern(mod, .{ .int = .{
+ .ty = ty.toIntern(),
+ .storage = .{ .i64 = x },
+ } });
+ return i.toValue();
+}
+
+pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value {
+ const i = try intern(mod, .{ .un = .{
+ .ty = union_ty.toIntern(),
+ .tag = tag.toIntern(),
+ .val = val.toIntern(),
+ } });
+ return i.toValue();
+}
+
+/// Casts the float value down to the representation of `ty`, potentially losing
+/// precision if the value cannot be represented exactly at that bit width.
+pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
+ const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(mod.getTarget())) {
+ 16 => .{ .f16 = @floatCast(f16, x) },
+ 32 => .{ .f32 = @floatCast(f32, x) },
+ 64 => .{ .f64 = @floatCast(f64, x) },
+ 80 => .{ .f80 = @floatCast(f80, x) },
+ 128 => .{ .f128 = @floatCast(f128, x) },
+ else => unreachable,
+ };
+ const i = try intern(mod, .{ .float = .{
+ .ty = ty.toIntern(),
+ .storage = storage,
+ } });
+ return i.toValue();
+}
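+
+// Illustrative sketch: the value is narrowed to the storage width of `ty`,
+// so 1.1 stored in a 16-bit float type keeps only f16 precision. Assumes
+// `f16_ty` is a 16-bit float type owned by `mod`.
+fn floatValueExample(mod: *Module, f16_ty: Type) !Value {
+ return mod.floatValue(f16_ty, @as(f64, 1.1));
+}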
+
+pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value {
+ const ip = &mod.intern_pool;
+ assert(ip.isOptionalType(opt_ty.toIntern()));
+ const result = try ip.get(mod.gpa, .{ .opt = .{
+ .ty = opt_ty.toIntern(),
+ .val = .none,
+ } });
+ return result.toValue();
+}
+
+pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type {
+ return intType(mod, .unsigned, Type.smallestUnsignedBits(max));
+}
+
+/// Returns the smallest possible integer type containing both `min` and
+/// `max`. Asserts that neither value is undef.
+/// TODO: if #3806 is implemented, this becomes trivial
+pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type {
+ assert(!min.isUndef(mod));
+ assert(!max.isUndef(mod));
+
+ if (std.debug.runtime_safety) {
+ assert(Value.order(min, max, mod).compare(.lte));
+ }
+
+ const sign = min.orderAgainstZero(mod) == .lt;
+
+ const min_val_bits = intBitsForValue(mod, min, sign);
+ const max_val_bits = intBitsForValue(mod, max, sign);
+
+ return mod.intType(
+ if (sign) .signed else .unsigned,
+ @max(min_val_bits, max_val_bits),
+ );
+}
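+
+// Worked example (illustrative, assuming `Type.comptime_int` is available like
+// `Type.usize` elsewhere in this file): the range [-1, 300] needs a sign bit
+// plus nine value bits for 300, so the smallest fitting type is `i10`.
+fn intFittingRangeExample(mod: *Module) !Type {
+ const min = try mod.intValue(Type.comptime_int, -1);
+ const max = try mod.intValue(Type.comptime_int, 300);
+ return mod.intFittingRange(min, max); // i10
+}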
+
+/// Given a value representing an integer, returns the number of bits necessary to represent it.
+/// If `sign` is true, returns the number of bits necessary in a two's-complement integer;
+/// otherwise in an unsigned integer.
+/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
+pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
+ assert(!val.isUndef(mod));
+
+ const key = mod.intern_pool.indexToKey(val.toIntern());
+ switch (key.int.storage) {
+ .i64 => |x| {
+ if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted) + @boolToInt(sign);
+ assert(sign);
+ // Protect against overflow in the following negation.
+ if (x == std.math.minInt(i64)) return 64;
+ return Type.smallestUnsignedBits(@intCast(u64, -x - 1)) + 1;
+ },
+ .u64 => |x| {
+ return Type.smallestUnsignedBits(x) + @boolToInt(sign);
+ },
+ .big_int => |big| {
+ if (big.positive) return @intCast(u16, big.bitCountAbs() + @boolToInt(sign));
+
+ // Zero is still a possibility, in which case unsigned is fine
+ if (big.eqZero()) return 0;
+
+ return @intCast(u16, big.bitCountTwosComp());
+ },
+ .lazy_align => |lazy_ty| {
+ return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @boolToInt(sign);
+ },
+ .lazy_size => |lazy_ty| {
+ return Type.smallestUnsignedBits(lazy_ty.toType().abiSize(mod)) + @boolToInt(sign);
+ },
+ }
+}
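+
+// Worked example for the i64 branch above (illustrative): for x = -1,
+// -x - 1 == 0 and smallestUnsignedBits(0) == 0, so one (sign) bit suffices;
+// the minInt(i64) early return avoids negating past the i64 range.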
+
+pub const AtomicPtrAlignmentError = error{
+ FloatTooBig,
+ IntTooBig,
+ BadType,
+ OutOfMemory,
+};
+
+pub const AtomicPtrAlignmentDiagnostics = struct {
+ bits: u16 = undefined,
+ max_bits: u16 = undefined,
+};
+
+/// If ABI alignment of `ty` is OK for atomic operations, returns 0.
+/// Otherwise returns the alignment required on a pointer for the target
+/// to perform atomic operations.
+// TODO this function does not take into account CPU features, which can affect
+// this value. Audit this!
+pub fn atomicPtrAlignment(
+ mod: *Module,
+ ty: Type,
+ diags: *AtomicPtrAlignmentDiagnostics,
+) AtomicPtrAlignmentError!u32 {
+ const target = mod.getTarget();
+ const max_atomic_bits: u16 = switch (target.cpu.arch) {
+ .avr,
+ .msp430,
+ .spu_2,
+ => 16,
+
+ .arc,
+ .arm,
+ .armeb,
+ .hexagon,
+ .m68k,
+ .le32,
+ .mips,
+ .mipsel,
+ .nvptx,
+ .powerpc,
+ .powerpcle,
+ .r600,
+ .riscv32,
+ .sparc,
+ .sparcel,
+ .tce,
+ .tcele,
+ .thumb,
+ .thumbeb,
+ .x86,
+ .xcore,
+ .amdil,
+ .hsail,
+ .spir,
+ .kalimba,
+ .lanai,
+ .shave,
+ .wasm32,
+ .renderscript32,
+ .csky,
+ .spirv32,
+ .dxil,
+ .loongarch32,
+ .xtensa,
+ => 32,
+
+ .amdgcn,
+ .bpfel,
+ .bpfeb,
+ .le64,
+ .mips64,
+ .mips64el,
+ .nvptx64,
+ .powerpc64,
+ .powerpc64le,
+ .riscv64,
+ .sparc64,
+ .s390x,
+ .amdil64,
+ .hsail64,
+ .spir64,
+ .wasm64,
+ .renderscript64,
+ .ve,
+ .spirv64,
+ .loongarch64,
+ => 64,
+
+ .aarch64,
+ .aarch64_be,
+ .aarch64_32,
+ => 128,
+
+ .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64,
+ };
+
+ const int_ty = switch (ty.zigTypeTag(mod)) {
+ .Int => ty,
+ .Enum => ty.intTagType(mod),
+ .Float => {
+ const bit_count = ty.floatBits(target);
+ if (bit_count > max_atomic_bits) {
+ diags.* = .{
+ .bits = bit_count,
+ .max_bits = max_atomic_bits,
+ };
+ return error.FloatTooBig;
+ }
+ return 0;
+ },
+ .Bool => return 0,
+ else => {
+ if (ty.isPtrAtRuntime(mod)) return 0;
+ return error.BadType;
+ },
+ };
+
+ const bit_count = int_ty.intInfo(mod).bits;
+ if (bit_count > max_atomic_bits) {
+ diags.* = .{
+ .bits = bit_count,
+ .max_bits = max_atomic_bits,
+ };
+ return error.IntTooBig;
+ }
+
+ return 0;
+}
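+
+// Usage sketch (illustrative): the diagnostics struct is populated only on
+// the TooBig errors, where `bits`/`max_bits` become valid for reporting.
+fn atomicCheckExample(mod: *Module, ty: Type) !u32 {
+ var diags: AtomicPtrAlignmentDiagnostics = .{};
+ return mod.atomicPtrAlignment(ty, &diags) catch |err| switch (err) {
+ error.IntTooBig, error.FloatTooBig => {
+ // diags.bits and diags.max_bits describe the violation here.
+ return err;
+ },
+ else => |e| return e,
+ };
+}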
+
+pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc {
+ return mod.declPtr(opaque_type.decl).srcLoc(mod);
+}
+
+pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) !InternPool.NullTerminatedString {
+ return mod.declPtr(opaque_type.decl).getFullyQualifiedName(mod);
+}
+
+pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File {
+ return mod.declPtr(decl_index).getFileScope(mod);
+}
+
+pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.Index {
+ return mod.namespacePtr(namespace_index).getDeclIndex(mod);
+}
+
+/// Returns null in the following cases:
+/// * `@TypeOf(.{})`
+/// * A struct which has no fields (`struct {}`).
+/// * Not a struct.
+pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct {
+ if (ty.ip_index == .none) return null;
+ const struct_index = mod.intern_pool.indexToStructType(ty.toIntern()).unwrap() orelse return null;
+ return mod.structPtr(struct_index);
+}
+
+pub fn typeToUnion(mod: *Module, ty: Type) ?*Union {
+ if (ty.ip_index == .none) return null;
+ const union_index = mod.intern_pool.indexToUnionType(ty.toIntern()).unwrap() orelse return null;
+ return mod.unionPtr(union_index);
+}
+
+pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
+ if (ty.ip_index == .none) return null;
+ return mod.intern_pool.indexToFuncType(ty.toIntern());
+}
+
+pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet {
+ const index = typeToInferredErrorSetIndex(mod, ty).unwrap() orelse return null;
+ return mod.inferredErrorSetPtr(index);
+}
+
+pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) Fn.InferredErrorSet.OptionalIndex {
+ if (ty.ip_index == .none) return .none;
+ return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern());
+}
+
+pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc {
+ @setCold(true);
+ const owner_decl = mod.declPtr(owner_decl_index);
+ const file = owner_decl.getFileScope(mod);
+ const tree = file.getTree(mod.gpa) catch |err| {
+ // In this case we emit a warning + a less precise source location.
+ log.warn("unable to load {s}: {s}", .{
+ file.sub_file_path, @errorName(err),
+ });
+ return owner_decl.srcLoc(mod);
+ };
+ const node = owner_decl.relativeToNodeIndex(0);
+ var buf: [2]Ast.Node.Index = undefined;
+ if (tree.fullContainerDecl(&buf, node)) |container_decl| {
+ return queryFieldSrc(tree.*, query, file, container_decl);
+ } else {
+ // This type was generated using @Type
+ return owner_decl.srcLoc(mod);
+ }
+}
+
+pub fn toEnum(mod: *Module, comptime E: type, val: Value) E {
+ return mod.intern_pool.toEnum(E, val.toIntern());
+}