Diffstat (limited to 'src/InternPool.zig')
-rw-r--r--  src/InternPool.zig  2904
1 file changed, 1733 insertions(+), 1171 deletions(-)
diff --git a/src/InternPool.zig b/src/InternPool.zig
index beeb0b9520..6a82abedce 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -1,7 +1,6 @@
//! All interned objects have both a value and a type.
//! This data structure is self-contained, with the following exceptions:
//! * Module.Namespace has a pointer to Module.File
-//! * Module.Decl has a pointer to Module.CaptureScope
/// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are
/// constructed lazily.
@@ -345,6 +344,7 @@ const KeyAdapter = struct {
pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool {
_ = b_void;
+ if (ctx.intern_pool.items.items(.tag)[b_map_index] == .removed) return false;
return ctx.intern_pool.indexToKey(@as(Index, @enumFromInt(b_map_index))).eql(a, ctx.intern_pool);
}
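
The guard just added makes a tombstoned item compare unequal to every probe key, so an adapted lookup can never hand back an index whose item was removed. A minimal standalone sketch of that pattern, with hypothetical names rather than the real InternPool adapter:

const std = @import("std");

const Slot = struct { removed: bool, value: u32 };

const TombstoneAdapter = struct {
    slots: []const Slot,
    // Mirrors the KeyAdapter.eql guard: a removed slot never matches any key.
    pub fn eql(ctx: @This(), key: u32, _: void, slot_index: usize) bool {
        const slot = ctx.slots[slot_index];
        if (slot.removed) return false;
        return slot.value == key;
    }
};

test "removed slots never match a probe key" {
    const slots = [_]Slot{
        .{ .removed = false, .value = 1 },
        .{ .removed = true, .value = 2 },
    };
    const adapter: TombstoneAdapter = .{ .slots = &slots };
    try std.testing.expect(adapter.eql(1, {}, 0));
    try std.testing.expect(!adapter.eql(2, {}, 1));
}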
@@ -502,6 +502,51 @@ pub const OptionalNullTerminatedString = enum(u32) {
}
};
+/// A single value captured in the closure of a namespace type. This is not a plain
+/// `Index` because we must differentiate between the following cases:
+/// * runtime-known value (where we store the type)
+/// * comptime-known value (where we store the value)
+/// * decl val (so that we can analyze the value lazily)
+/// * decl ref (so that we can analyze the reference lazily)
+pub const CaptureValue = packed struct(u32) {
+ tag: enum { @"comptime", runtime, decl_val, decl_ref },
+ idx: u30,
+
+ pub fn wrap(val: Unwrapped) CaptureValue {
+ return switch (val) {
+ .@"comptime" => |i| .{ .tag = .@"comptime", .idx = @intCast(@intFromEnum(i)) },
+ .runtime => |i| .{ .tag = .runtime, .idx = @intCast(@intFromEnum(i)) },
+ .decl_val => |i| .{ .tag = .decl_val, .idx = @intCast(@intFromEnum(i)) },
+ .decl_ref => |i| .{ .tag = .decl_ref, .idx = @intCast(@intFromEnum(i)) },
+ };
+ }
+ pub fn unwrap(val: CaptureValue) Unwrapped {
+ return switch (val.tag) {
+ .@"comptime" => .{ .@"comptime" = @enumFromInt(val.idx) },
+ .runtime => .{ .runtime = @enumFromInt(val.idx) },
+ .decl_val => .{ .decl_val = @enumFromInt(val.idx) },
+ .decl_ref => .{ .decl_ref = @enumFromInt(val.idx) },
+ };
+ }
+
+ pub const Unwrapped = union(enum) {
+ /// Index refers to the value.
+ @"comptime": Index,
+ /// Index refers to the type.
+ runtime: Index,
+ decl_val: DeclIndex,
+ decl_ref: DeclIndex,
+ };
+
+ pub const Slice = struct {
+ start: u32,
+ len: u32,
+ pub fn get(slice: Slice, ip: *const InternPool) []CaptureValue {
+ return @ptrCast(ip.extra.items[slice.start..][0..slice.len]);
+ }
+ };
+};
+
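
The packed(u32) layout above leaves 30 bits of index next to a 2-bit tag, so a capture costs exactly one extra-array word. A standalone sketch of that encoding round-tripping through the raw integer, using illustrative names rather than the real CaptureValue:

const std = @import("std");

const PackedCapture = packed struct(u32) {
    tag: enum(u2) { comptime_val, runtime, decl_val, decl_ref },
    idx: u30,
};

test "tag and index round-trip through one u32" {
    const cv: PackedCapture = .{ .tag = .decl_ref, .idx = 1234 };
    const raw: u32 = @bitCast(cv);
    const back: PackedCapture = @bitCast(raw);
    try std.testing.expectEqual(cv.tag, back.tag);
    try std.testing.expectEqual(cv.idx, back.idx);
}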
pub const Key = union(enum) {
int_type: IntType,
ptr_type: PtrType,
@@ -516,14 +561,14 @@ pub const Key = union(enum) {
/// This represents a struct that has been explicitly declared in source code,
/// or was created with `@Type`. It is unique and based on a declaration.
/// It may be a tuple, if declared like this: `struct {A, B, C}`.
- struct_type: StructType,
+ struct_type: NamespaceType,
/// This is an anonymous struct or tuple type which has no corresponding
/// declaration. It is used for types that have no `struct` keyword in the
/// source code, and were not created via `@Type`.
anon_struct_type: AnonStructType,
- union_type: Key.UnionType,
- opaque_type: OpaqueType,
- enum_type: EnumType,
+ union_type: NamespaceType,
+ opaque_type: NamespaceType,
+ enum_type: NamespaceType,
func_type: FuncType,
error_set_type: ErrorSetType,
/// The payload is the function body, either a `func_decl` or `func_instance`.
@@ -645,348 +690,6 @@ pub const Key = union(enum) {
child: Index,
};
- pub const OpaqueType = extern struct {
- /// The Decl that corresponds to the opaque itself.
- decl: DeclIndex,
- /// Represents the declarations inside this opaque.
- namespace: NamespaceIndex,
- zir_index: TrackedInst.Index.Optional,
- };
-
- /// Although packed structs and non-packed structs are encoded differently,
- /// this struct is used for both categories since they share some common
- /// functionality.
- pub const StructType = struct {
- extra_index: u32,
- /// `none` when the struct is `@TypeOf(.{})`.
- decl: OptionalDeclIndex,
- /// `none` when the struct has no declarations.
- namespace: OptionalNamespaceIndex,
- /// Index of the struct_decl ZIR instruction.
- zir_index: TrackedInst.Index.Optional,
- layout: std.builtin.Type.ContainerLayout,
- field_names: NullTerminatedString.Slice,
- field_types: Index.Slice,
- field_inits: Index.Slice,
- field_aligns: Alignment.Slice,
- runtime_order: RuntimeOrder.Slice,
- comptime_bits: ComptimeBits,
- offsets: Offsets,
- names_map: OptionalMapIndex,
-
- pub const ComptimeBits = struct {
- start: u32,
- /// This is the number of u32 elements, not the number of struct fields.
- len: u32,
-
- pub fn get(this: @This(), ip: *const InternPool) []u32 {
- return ip.extra.items[this.start..][0..this.len];
- }
-
- pub fn getBit(this: @This(), ip: *const InternPool, i: usize) bool {
- if (this.len == 0) return false;
- return @as(u1, @truncate(this.get(ip)[i / 32] >> @intCast(i % 32))) != 0;
- }
-
- pub fn setBit(this: @This(), ip: *const InternPool, i: usize) void {
- this.get(ip)[i / 32] |= @as(u32, 1) << @intCast(i % 32);
- }
-
- pub fn clearBit(this: @This(), ip: *const InternPool, i: usize) void {
- this.get(ip)[i / 32] &= ~(@as(u32, 1) << @intCast(i % 32));
- }
- };
-
- pub const Offsets = struct {
- start: u32,
- len: u32,
-
- pub fn get(this: @This(), ip: *const InternPool) []u32 {
- return @ptrCast(ip.extra.items[this.start..][0..this.len]);
- }
- };
-
- pub const RuntimeOrder = enum(u32) {
- /// Placeholder until layout is resolved.
- unresolved = std.math.maxInt(u32) - 0,
- /// Field not present at runtime
- omitted = std.math.maxInt(u32) - 1,
- _,
-
- pub const Slice = struct {
- start: u32,
- len: u32,
-
- pub fn get(slice: RuntimeOrder.Slice, ip: *const InternPool) []RuntimeOrder {
- return @ptrCast(ip.extra.items[slice.start..][0..slice.len]);
- }
- };
-
- pub fn toInt(i: @This()) ?u32 {
- return switch (i) {
- .omitted => null,
- .unresolved => unreachable,
- else => @intFromEnum(i),
- };
- }
- };
-
- /// Look up field index based on field name.
- pub fn nameIndex(self: StructType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
- const names_map = self.names_map.unwrap() orelse {
- const i = name.toUnsigned(ip) orelse return null;
- if (i >= self.field_types.len) return null;
- return i;
- };
- const map = &ip.maps.items[@intFromEnum(names_map)];
- const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) };
- const field_index = map.getIndexAdapted(name, adapter) orelse return null;
- return @intCast(field_index);
- }
-
- /// Returns the already-existing field with the same name, if any.
- pub fn addFieldName(
- self: @This(),
- ip: *InternPool,
- name: NullTerminatedString,
- ) ?u32 {
- return ip.addFieldName(self.names_map.unwrap().?, self.field_names.start, name);
- }
-
- pub fn fieldAlign(s: @This(), ip: *const InternPool, i: usize) Alignment {
- if (s.field_aligns.len == 0) return .none;
- return s.field_aligns.get(ip)[i];
- }
-
- pub fn fieldInit(s: @This(), ip: *const InternPool, i: usize) Index {
- if (s.field_inits.len == 0) return .none;
- assert(s.haveFieldInits(ip));
- return s.field_inits.get(ip)[i];
- }
-
- /// Returns `none` in the case the struct is a tuple.
- pub fn fieldName(s: @This(), ip: *const InternPool, i: usize) OptionalNullTerminatedString {
- if (s.field_names.len == 0) return .none;
- return s.field_names.get(ip)[i].toOptional();
- }
-
- pub fn fieldIsComptime(s: @This(), ip: *const InternPool, i: usize) bool {
- return s.comptime_bits.getBit(ip, i);
- }
-
- pub fn setFieldComptime(s: @This(), ip: *InternPool, i: usize) void {
- s.comptime_bits.setBit(ip, i);
- }
-
- /// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
- /// complicated logic.
- pub fn knownNonOpv(s: @This(), ip: *InternPool) bool {
- return switch (s.layout) {
- .Packed => false,
- .Auto, .Extern => s.flagsPtr(ip).known_non_opv,
- };
- }
-
- /// The returned pointer expires with any addition to the `InternPool`.
- /// Asserts the struct is not packed.
- pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStruct.Flags {
- assert(self.layout != .Packed);
- const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
- return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
- }
-
- /// The returned pointer expires with any addition to the `InternPool`.
- /// Asserts that the struct is packed.
- pub fn packedFlagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStructPacked.Flags {
- assert(self.layout == .Packed);
- const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
- return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
- }
-
- pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return false;
- const flags_ptr = s.flagsPtr(ip);
- if (flags_ptr.field_types_wip) {
- flags_ptr.assumed_runtime_bits = true;
- return true;
- }
- return false;
- }
-
- pub fn setTypesWip(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return false;
- const flags_ptr = s.flagsPtr(ip);
- if (flags_ptr.field_types_wip) return true;
- flags_ptr.field_types_wip = true;
- return false;
- }
-
- pub fn clearTypesWip(s: @This(), ip: *InternPool) void {
- if (s.layout == .Packed) return;
- s.flagsPtr(ip).field_types_wip = false;
- }
-
- pub fn setLayoutWip(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return false;
- const flags_ptr = s.flagsPtr(ip);
- if (flags_ptr.layout_wip) return true;
- flags_ptr.layout_wip = true;
- return false;
- }
-
- pub fn clearLayoutWip(s: @This(), ip: *InternPool) void {
- if (s.layout == .Packed) return;
- s.flagsPtr(ip).layout_wip = false;
- }
-
- pub fn setAlignmentWip(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return false;
- const flags_ptr = s.flagsPtr(ip);
- if (flags_ptr.alignment_wip) return true;
- flags_ptr.alignment_wip = true;
- return false;
- }
-
- pub fn clearAlignmentWip(s: @This(), ip: *InternPool) void {
- if (s.layout == .Packed) return;
- s.flagsPtr(ip).alignment_wip = false;
- }
-
- pub fn setInitsWip(s: @This(), ip: *InternPool) bool {
- switch (s.layout) {
- .Packed => {
- const flag = &s.packedFlagsPtr(ip).field_inits_wip;
- if (flag.*) return true;
- flag.* = true;
- return false;
- },
- .Auto, .Extern => {
- const flag = &s.flagsPtr(ip).field_inits_wip;
- if (flag.*) return true;
- flag.* = true;
- return false;
- },
- }
- }
-
- pub fn clearInitsWip(s: @This(), ip: *InternPool) void {
- switch (s.layout) {
- .Packed => s.packedFlagsPtr(ip).field_inits_wip = false,
- .Auto, .Extern => s.flagsPtr(ip).field_inits_wip = false,
- }
- }
-
- pub fn setFullyResolved(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return true;
- const flags_ptr = s.flagsPtr(ip);
- if (flags_ptr.fully_resolved) return true;
- flags_ptr.fully_resolved = true;
- return false;
- }
-
- pub fn clearFullyResolved(s: @This(), ip: *InternPool) void {
- s.flagsPtr(ip).fully_resolved = false;
- }
-
- /// The returned pointer expires with any addition to the `InternPool`.
- /// Asserts the struct is not packed.
- pub fn size(self: @This(), ip: *InternPool) *u32 {
- assert(self.layout != .Packed);
- const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
- return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]);
- }
-
- /// The backing integer type of the packed struct. Whether zig chooses
- /// this type or the user specifies it, it is stored here. This will be
- /// set to `none` until the layout is resolved.
- /// Asserts the struct is packed.
- pub fn backingIntType(s: @This(), ip: *const InternPool) *Index {
- assert(s.layout == .Packed);
- const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
- return @ptrCast(&ip.extra.items[s.extra_index + field_index]);
- }
-
- /// Asserts the struct is not packed.
- pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
- assert(s.layout != .Packed);
- const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
- ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
- }
-
- pub fn haveFieldTypes(s: @This(), ip: *const InternPool) bool {
- const types = s.field_types.get(ip);
- return types.len == 0 or types[0] != .none;
- }
-
- pub fn haveFieldInits(s: @This(), ip: *const InternPool) bool {
- return switch (s.layout) {
- .Packed => s.packedFlagsPtr(ip).inits_resolved,
- .Auto, .Extern => s.flagsPtr(ip).inits_resolved,
- };
- }
-
- pub fn setHaveFieldInits(s: @This(), ip: *InternPool) void {
- switch (s.layout) {
- .Packed => s.packedFlagsPtr(ip).inits_resolved = true,
- .Auto, .Extern => s.flagsPtr(ip).inits_resolved = true,
- }
- }
-
- pub fn haveLayout(s: @This(), ip: *InternPool) bool {
- return switch (s.layout) {
- .Packed => s.backingIntType(ip).* != .none,
- .Auto, .Extern => s.flagsPtr(ip).layout_resolved,
- };
- }
-
- pub fn isTuple(s: @This(), ip: *InternPool) bool {
- return s.layout != .Packed and s.flagsPtr(ip).is_tuple;
- }
-
- pub fn hasReorderedFields(s: @This()) bool {
- return s.layout == .Auto;
- }
-
- pub const RuntimeOrderIterator = struct {
- ip: *InternPool,
- field_index: u32,
- struct_type: InternPool.Key.StructType,
-
- pub fn next(it: *@This()) ?u32 {
- var i = it.field_index;
-
- if (i >= it.struct_type.field_types.len)
- return null;
-
- if (it.struct_type.hasReorderedFields()) {
- it.field_index += 1;
- return it.struct_type.runtime_order.get(it.ip)[i].toInt();
- }
-
- while (it.struct_type.fieldIsComptime(it.ip, i)) {
- i += 1;
- if (i >= it.struct_type.field_types.len)
- return null;
- }
-
- it.field_index = i + 1;
- return i;
- }
- };
-
- /// Iterates over non-comptime fields in the order they are laid out in memory at runtime.
- /// May or may not include zero-bit fields.
- /// Asserts the struct is not packed.
- pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator {
- assert(s.layout != .Packed);
- return .{
- .ip = ip,
- .field_index = 0,
- .struct_type = s,
- };
- }
- };
-
pub const AnonStructType = struct {
types: Index.Slice,
/// This may be empty, indicating this is a tuple.
@@ -1010,192 +713,41 @@ pub const Key = union(enum) {
}
};
- /// Serves two purposes:
- /// * Being the key in the InternPool hash map, which only requires the `decl` field.
- /// * Provide the other fields that do not require chasing the enum type.
- pub const UnionType = struct {
- /// The Decl that corresponds to the union itself.
- decl: DeclIndex,
- /// The index of the `Tag.TypeUnion` payload. Ignored by `get`,
- /// populated by `indexToKey`.
- extra_index: u32,
- namespace: NamespaceIndex,
- flags: Tag.TypeUnion.Flags,
- /// The enum that provides the list of field names and values.
- enum_tag_ty: Index,
- zir_index: TrackedInst.Index.Optional,
-
- /// The returned pointer expires with any addition to the `InternPool`.
- pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeUnion.Flags {
- const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
- return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
- }
-
- /// The returned pointer expires with any addition to the `InternPool`.
- pub fn size(self: @This(), ip: *InternPool) *u32 {
- const size_field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?;
- return &ip.extra.items[self.extra_index + size_field_index];
- }
-
- /// The returned pointer expires with any addition to the `InternPool`.
- pub fn padding(self: @This(), ip: *InternPool) *u32 {
- const padding_field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?;
- return &ip.extra.items[self.extra_index + padding_field_index];
- }
-
- pub fn haveFieldTypes(self: @This(), ip: *const InternPool) bool {
- return self.flagsPtr(ip).status.haveFieldTypes();
- }
-
- pub fn hasTag(self: @This(), ip: *const InternPool) bool {
- return self.flagsPtr(ip).runtime_tag.hasTag();
- }
-
- pub fn getLayout(self: @This(), ip: *const InternPool) std.builtin.Type.ContainerLayout {
- return self.flagsPtr(ip).layout;
- }
-
- pub fn haveLayout(self: @This(), ip: *const InternPool) bool {
- return self.flagsPtr(ip).status.haveLayout();
- }
-
- /// Pointer to an enum type which is used for the tag of the union.
- /// This type is created even for untagged unions, even when the memory
- /// layout does not store the tag.
- /// Whether zig chooses this type or the user specifies it, it is stored here.
- /// This will be set to the null type until status is `have_field_types`.
- /// This accessor is provided so that the tag type can be mutated, and so that
- /// when it is mutated, the mutations are observed.
- /// The returned pointer is invalidated when something is added to the `InternPool`.
- pub fn tagTypePtr(self: @This(), ip: *const InternPool) *Index {
- const tag_ty_field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?;
- return @ptrCast(&ip.extra.items[self.extra_index + tag_ty_field_index]);
- }
-
- pub fn setFieldTypes(self: @This(), ip: *InternPool, types: []const Index) void {
- @memcpy((Index.Slice{
- .start = @intCast(self.extra_index + @typeInfo(Tag.TypeUnion).Struct.fields.len),
- .len = @intCast(types.len),
- }).get(ip), types);
- }
-
- pub fn setFieldAligns(self: @This(), ip: *InternPool, aligns: []const Alignment) void {
- if (aligns.len == 0) return;
- assert(self.flagsPtr(ip).any_aligned_fields);
- @memcpy((Alignment.Slice{
- .start = @intCast(
- self.extra_index + @typeInfo(Tag.TypeUnion).Struct.fields.len + aligns.len,
- ),
- .len = @intCast(aligns.len),
- }).get(ip), aligns);
- }
- };
-
- pub const EnumType = struct {
- /// The Decl that corresponds to the enum itself.
- decl: DeclIndex,
- /// Represents the declarations inside this enum.
- namespace: OptionalNamespaceIndex,
- /// An integer type which is used for the numerical value of the enum.
- /// This field is present regardless of whether the enum has an
- /// explicitly provided tag type or auto-numbered.
- tag_ty: Index,
- /// Set of field names in declaration order.
- names: NullTerminatedString.Slice,
- /// Maps integer tag value to field index.
- /// Entries are in declaration order, same as `fields`.
- /// If this is empty, it means the enum tags are auto-numbered.
- values: Index.Slice,
- tag_mode: TagMode,
- /// This is ignored by `get` but will always be provided by `indexToKey`.
- names_map: OptionalMapIndex = .none,
- /// This is ignored by `get` but will be provided by `indexToKey` when
- /// a value map exists.
- values_map: OptionalMapIndex = .none,
- zir_index: TrackedInst.Index.Optional,
-
- pub const TagMode = enum {
- /// The integer tag type was auto-numbered by zig.
- auto,
- /// The integer tag type was provided by the enum declaration, and the enum
- /// is exhaustive.
- explicit,
- /// The integer tag type was provided by the enum declaration, and the enum
- /// is non-exhaustive.
- nonexhaustive,
- };
-
- /// Look up field index based on field name.
- pub fn nameIndex(self: EnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
- const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)];
- const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) };
- const field_index = map.getIndexAdapted(name, adapter) orelse return null;
- return @intCast(field_index);
- }
-
- /// Look up field index based on tag value.
- /// Asserts that `values_map` is not `none`.
- /// This function returns `null` when `tag_val` does not have the
- /// integer tag type of the enum.
- pub fn tagValueIndex(self: EnumType, ip: *const InternPool, tag_val: Index) ?u32 {
- assert(tag_val != .none);
- // TODO: we should probably decide a single interface for this function, but currently
- // it's being called with both tag values and underlying ints. Fix this!
- const int_tag_val = switch (ip.indexToKey(tag_val)) {
- .enum_tag => |enum_tag| enum_tag.int,
- .int => tag_val,
- else => unreachable,
- };
- if (self.values_map.unwrap()) |values_map| {
- const map = &ip.maps.items[@intFromEnum(values_map)];
- const adapter: Index.Adapter = .{ .indexes = self.values.get(ip) };
- const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null;
- return @intCast(field_index);
- }
- // Auto-numbered enum. Convert `int_tag_val` to field index.
- const field_index = switch (ip.indexToKey(int_tag_val).int.storage) {
- inline .u64, .i64 => |x| std.math.cast(u32, x) orelse return null,
- .big_int => |x| x.to(u32) catch return null,
- .lazy_align, .lazy_size => unreachable,
- };
- return if (field_index < self.names.len) field_index else null;
- }
- };
-
- pub const IncompleteEnumType = struct {
- /// Same as corresponding `EnumType` field.
- decl: DeclIndex,
- /// Same as corresponding `EnumType` field.
- namespace: OptionalNamespaceIndex,
- /// The field names and field values are not known yet, but
- /// the number of fields must be known ahead of time.
- fields_len: u32,
- /// This information is needed so that the size does not change
- /// later when populating field values.
- has_values: bool,
- /// Same as corresponding `EnumType` field.
- tag_mode: EnumType.TagMode,
- /// This may be updated via `setTagType` later.
- tag_ty: Index = .none,
- zir_index: TrackedInst.Index.Optional,
-
- pub fn toEnumType(self: @This()) EnumType {
- return .{
- .decl = self.decl,
- .namespace = self.namespace,
- .tag_ty = self.tag_ty,
- .tag_mode = self.tag_mode,
- .names = .{ .start = 0, .len = 0 },
- .values = .{ .start = 0, .len = 0 },
- .zir_index = self.zir_index,
- };
- }
-
- /// Only the decl is used for hashing and equality, so we can construct
- /// this minimal key for use with `map`.
- pub fn toKey(self: @This()) Key {
- return .{ .enum_type = self.toEnumType() };
- }
+ /// This is the hashmap key. To fetch other data associated with the type, see:
+ /// * `loadStructType`
+ /// * `loadUnionType`
+ /// * `loadEnumType`
+ /// * `loadOpaqueType`
+ pub const NamespaceType = union(enum) {
+ /// This type corresponds to an actual source declaration, e.g. `struct { ... }`.
+ /// It is hashed based on its ZIR instruction index and set of captures.
+ declared: struct {
+ /// A `struct_decl`, `union_decl`, `enum_decl`, or `opaque_decl` instruction.
+ zir_index: TrackedInst.Index,
+ /// The captured values of this type. These values must be fully resolved per the language spec.
+ captures: union(enum) {
+ owned: CaptureValue.Slice,
+ external: []const CaptureValue,
+ },
+ },
+ /// This type is an automatically-generated enum tag type for a union.
+ /// It is hashed based on the index of the union type it corresponds to.
+ generated_tag: struct {
+ /// The union for which this is a tag type.
+ union_type: Index,
+ },
+ /// This type originates from a reification via `@Type`.
+    /// It is hashed based on its ZIR instruction index and fields, attributes, etc.
+    /// To avoid making this key overly complex, the type-specific data is hashed by Sema.
+ reified: struct {
+ /// A `reify` instruction.
+ zir_index: TrackedInst.Index,
+        /// A hash of this type's attributes, fields, etc., generated by Sema.
+ type_hash: u64,
+ },
+ /// This type is `@TypeOf(.{})`.
+ /// TODO: can we change the language spec to not special-case this type?
+ empty_struct: void,
};
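
Why captures are part of the `declared` key: two uses of the same `struct { ... }` declaration that capture different comptime values must intern to distinct types, even though they share a ZIR instruction. A small user-level illustration of that requirement in ordinary Zig, not compiler internals:

const std = @import("std");

fn List(comptime T: type) type {
    // One source declaration (one ZIR instruction), but `T` is captured,
    // so each distinct capture must yield a distinct namespace type.
    return struct { items: []const T };
}

test "same declaration, different captures, different types" {
    try std.testing.expect(List(u8) != List(u32));
    try std.testing.expect(List(u8) == List(u8));
}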
pub const FuncType = struct {
@@ -1546,12 +1098,37 @@ pub const Key = union(enum) {
.payload => |y| Hash.hash(seed + 1, asBytes(&x.ty) ++ asBytes(&y)),
},
- inline .opaque_type,
+ .variable => |variable| Hash.hash(seed, asBytes(&variable.decl)),
+
+ .opaque_type,
.enum_type,
- .variable,
.union_type,
.struct_type,
- => |x| Hash.hash(seed, asBytes(&x.decl)),
+ => |namespace_type| {
+ var hasher = Hash.init(seed);
+ std.hash.autoHash(&hasher, std.meta.activeTag(namespace_type));
+ switch (namespace_type) {
+ .declared => |declared| {
+ std.hash.autoHash(&hasher, declared.zir_index);
+ const captures = switch (declared.captures) {
+ .owned => |cvs| cvs.get(ip),
+ .external => |cvs| cvs,
+ };
+ for (captures) |cv| {
+ std.hash.autoHash(&hasher, cv);
+ }
+ },
+ .generated_tag => |generated_tag| {
+ std.hash.autoHash(&hasher, generated_tag.union_type);
+ },
+ .reified => |reified| {
+ std.hash.autoHash(&hasher, reified.zir_index);
+ std.hash.autoHash(&hasher, reified.type_hash);
+ },
+ .empty_struct => {},
+ }
+ return hasher.final();
+ },
.int => |int| {
var hasher = Hash.init(seed);
@@ -1956,21 +1533,31 @@ pub const Key = union(enum) {
}
},
- .opaque_type => |a_info| {
- const b_info = b.opaque_type;
- return a_info.decl == b_info.decl;
- },
- .enum_type => |a_info| {
- const b_info = b.enum_type;
- return a_info.decl == b_info.decl;
- },
- .union_type => |a_info| {
- const b_info = b.union_type;
- return a_info.decl == b_info.decl;
- },
- .struct_type => |a_info| {
- const b_info = b.struct_type;
- return a_info.decl == b_info.decl;
+ inline .opaque_type, .enum_type, .union_type, .struct_type => |a_info, a_tag_ct| {
+ const b_info = @field(b, @tagName(a_tag_ct));
+ if (std.meta.activeTag(a_info) != b_info) return false;
+ switch (a_info) {
+ .declared => |a_d| {
+ const b_d = b_info.declared;
+ if (a_d.zir_index != b_d.zir_index) return false;
+ const a_captures = switch (a_d.captures) {
+ .owned => |s| s.get(ip),
+ .external => |cvs| cvs,
+ };
+ const b_captures = switch (b_d.captures) {
+ .owned => |s| s.get(ip),
+ .external => |cvs| cvs,
+ };
+ return std.mem.eql(u32, @ptrCast(a_captures), @ptrCast(b_captures));
+ },
+ .generated_tag => |a_gt| return a_gt.union_type == b_info.generated_tag.union_type,
+ .reified => |a_r| {
+ const b_r = b_info.reified;
+ return a_r.zir_index == b_r.zir_index and
+ a_r.type_hash == b_r.type_hash;
+ },
+ .empty_struct => return true,
+ }
},
.aggregate => |a_info| {
const b_info = b.aggregate;
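
The capture comparison above views both lists as `[]const u32`; because each CaptureValue is a packed u32, word-wise equality is the same as field-wise equality. A standalone check of that equivalence, assuming only the packed layout shown earlier (the type here is a hypothetical stand-in):

const std = @import("std");

const CV = packed struct(u32) {
    tag: enum(u2) { comptime_val, runtime, decl_val, decl_ref },
    idx: u30,
};

test "capture lists are equal exactly when their u32 views are" {
    const a = [_]CV{ .{ .tag = .runtime, .idx = 7 }, .{ .tag = .decl_val, .idx = 3 } };
    var b = a;
    const a_words: []const u32 = @ptrCast(@as([]const CV, &a));
    const b_words: []const u32 = @ptrCast(@as([]const CV, &b));
    try std.testing.expect(std.mem.eql(u32, a_words, b_words));
    b[1].idx = 4;
    try std.testing.expect(!std.mem.eql(u32, a_words, b_words));
}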
@@ -2112,21 +1699,15 @@ pub const RequiresComptime = enum(u2) { no, yes, unknown, wip };
// Unlike `Tag.TypeUnion` which is an encoding, and `Key.UnionType` which is a
// minimal hashmap key, this type is a convenience type that contains info
// needed by semantic analysis.
-pub const UnionType = struct {
+pub const LoadedUnionType = struct {
+ /// The index of the `Tag.TypeUnion` payload.
+ extra_index: u32,
/// The Decl that corresponds to the union itself.
decl: DeclIndex,
/// Represents the declarations inside this union.
- namespace: NamespaceIndex,
+ namespace: OptionalNamespaceIndex,
/// The enum tag type.
enum_tag_ty: Index,
- /// The integer tag type of the enum.
- int_tag_ty: Index,
- /// ABI size of the union, including padding
- size: u64,
- /// Trailing padding bytes
- padding: u32,
- /// List of field names in declaration order.
- field_names: NullTerminatedString.Slice,
/// List of field types in declaration order.
/// These are `none` until `status` is `have_field_types` or `have_layout`.
field_types: Index.Slice,
@@ -2134,12 +1715,9 @@ pub const UnionType = struct {
/// `none` means the ABI alignment of the type.
/// If this slice has length 0 it means all elements are `none`.
field_aligns: Alignment.Slice,
- /// Index of the union_decl ZIR instruction.
- zir_index: TrackedInst.Index.Optional,
- /// Index into extra array of the `flags` field.
- flags_index: u32,
- /// Copied from `enum_tag_ty`.
- names_map: OptionalMapIndex,
+ /// Index of the union_decl or reify ZIR instruction.
+ zir_index: TrackedInst.Index,
+ captures: CaptureValue.Slice,
pub const RuntimeTag = enum(u2) {
none,
@@ -2194,78 +1772,783 @@ pub const UnionType = struct {
}
};
+ pub fn loadTagType(self: LoadedUnionType, ip: *InternPool) LoadedEnumType {
+ return ip.loadEnumType(self.enum_tag_ty);
+ }
+
+ /// Pointer to an enum type which is used for the tag of the union.
+ /// This type is created even for untagged unions, even when the memory
+ /// layout does not store the tag.
+ /// Whether zig chooses this type or the user specifies it, it is stored here.
+ /// This will be set to the null type until status is `have_field_types`.
+ /// This accessor is provided so that the tag type can be mutated, and so that
+ /// when it is mutated, the mutations are observed.
/// The returned pointer expires with any addition to the `InternPool`.
- pub fn flagsPtr(self: UnionType, ip: *const InternPool) *Tag.TypeUnion.Flags {
- return @ptrCast(&ip.extra.items[self.flags_index]);
+ pub fn tagTypePtr(self: LoadedUnionType, ip: *const InternPool) *Index {
+ const field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?;
+ return @ptrCast(&ip.extra.items[self.extra_index + field_index]);
}
- /// Look up field index based on field name.
- pub fn nameIndex(self: UnionType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
- const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)];
- const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) };
- const field_index = map.getIndexAdapted(name, adapter) orelse return null;
- return @intCast(field_index);
+ /// The returned pointer expires with any addition to the `InternPool`.
+ pub fn flagsPtr(self: LoadedUnionType, ip: *const InternPool) *Tag.TypeUnion.Flags {
+ const field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
+ return @ptrCast(&ip.extra.items[self.extra_index + field_index]);
}
- pub fn hasTag(self: UnionType, ip: *const InternPool) bool {
+ /// The returned pointer expires with any addition to the `InternPool`.
+ pub fn size(self: LoadedUnionType, ip: *const InternPool) *u32 {
+ const field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?;
+ return &ip.extra.items[self.extra_index + field_index];
+ }
+
+ /// The returned pointer expires with any addition to the `InternPool`.
+ pub fn padding(self: LoadedUnionType, ip: *const InternPool) *u32 {
+ const field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?;
+ return &ip.extra.items[self.extra_index + field_index];
+ }
+
+ pub fn hasTag(self: LoadedUnionType, ip: *const InternPool) bool {
return self.flagsPtr(ip).runtime_tag.hasTag();
}
- pub fn haveFieldTypes(self: UnionType, ip: *const InternPool) bool {
+ pub fn haveFieldTypes(self: LoadedUnionType, ip: *const InternPool) bool {
return self.flagsPtr(ip).status.haveFieldTypes();
}
- pub fn haveLayout(self: UnionType, ip: *const InternPool) bool {
+ pub fn haveLayout(self: LoadedUnionType, ip: *const InternPool) bool {
return self.flagsPtr(ip).status.haveLayout();
}
- pub fn getLayout(self: UnionType, ip: *const InternPool) std.builtin.Type.ContainerLayout {
+ pub fn getLayout(self: LoadedUnionType, ip: *const InternPool) std.builtin.Type.ContainerLayout {
return self.flagsPtr(ip).layout;
}
- pub fn fieldAlign(self: UnionType, ip: *const InternPool, field_index: u32) Alignment {
+ pub fn fieldAlign(self: LoadedUnionType, ip: *const InternPool, field_index: u32) Alignment {
if (self.field_aligns.len == 0) return .none;
return self.field_aligns.get(ip)[field_index];
}
- /// This does not mutate the field of UnionType.
- pub fn setZirIndex(self: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
+ /// This does not mutate the field of LoadedUnionType.
+ pub fn setZirIndex(self: LoadedUnionType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
const zir_index_field_index = std.meta.fieldIndex(Tag.TypeUnion, "zir_index").?;
const ptr: *TrackedInst.Index.Optional =
@ptrCast(&ip.extra.items[self.flags_index - flags_field_index + zir_index_field_index]);
ptr.* = new_zir_index;
}
+
+ pub fn setFieldTypes(self: LoadedUnionType, ip: *const InternPool, types: []const Index) void {
+ @memcpy(self.field_types.get(ip), types);
+ }
+
+ pub fn setFieldAligns(self: LoadedUnionType, ip: *const InternPool, aligns: []const Alignment) void {
+ if (aligns.len == 0) return;
+ assert(self.flagsPtr(ip).any_aligned_fields);
+ @memcpy(self.field_aligns.get(ip), aligns);
+ }
};
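
The accessors above return pointers into `extra.items` and are documented to expire with any addition to the InternPool: growing the underlying array list may reallocate its buffer. A standalone illustration of that hazard with a plain ArrayList, not InternPool code:

const std = @import("std");

test "pointers into an array list may be invalidated by growth" {
    var list = std.ArrayList(u32).init(std.testing.allocator);
    defer list.deinit();
    try list.append(42);
    const p = &list.items[0];
    // Growing the list may reallocate; `p` must be re-derived, not reused.
    try list.ensureTotalCapacity(list.capacity + 1);
    _ = p; // do not dereference after a potential reallocation
}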
-/// Fetch all the interesting fields of a union type into a convenient data
-/// structure.
-/// This asserts that the union's enum tag type has been resolved.
-pub fn loadUnionType(ip: *InternPool, key: Key.UnionType) UnionType {
- const type_union = ip.extraDataTrail(Tag.TypeUnion, key.extra_index);
- const enum_ty = type_union.data.tag_ty;
- const enum_info = ip.indexToKey(enum_ty).enum_type;
- const fields_len: u32 = @intCast(enum_info.names.len);
+pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType {
+ const data = ip.items.items(.data)[@intFromEnum(index)];
+ const type_union = ip.extraDataTrail(Tag.TypeUnion, data);
+ const fields_len = type_union.data.fields_len;
+
+ var extra_index = type_union.end;
+ const captures_len = if (type_union.data.flags.any_captures) c: {
+ const len = ip.extra.items[extra_index];
+ extra_index += 1;
+ break :c len;
+ } else 0;
+
+ const captures: CaptureValue.Slice = .{
+ .start = extra_index,
+ .len = captures_len,
+ };
+ extra_index += captures_len;
+ if (type_union.data.flags.is_reified) {
+ extra_index += 2; // PackedU64
+ }
+
+ const field_types: Index.Slice = .{
+ .start = extra_index,
+ .len = fields_len,
+ };
+ extra_index += fields_len;
+
+ const field_aligns: Alignment.Slice = if (type_union.data.flags.any_aligned_fields) a: {
+ const a: Alignment.Slice = .{
+ .start = extra_index,
+ .len = fields_len,
+ };
+ extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable;
+ break :a a;
+ } else .{ .start = 0, .len = 0 };
return .{
+ .extra_index = data,
.decl = type_union.data.decl,
.namespace = type_union.data.namespace,
- .enum_tag_ty = enum_ty,
- .int_tag_ty = enum_info.tag_ty,
- .size = type_union.data.size,
- .padding = type_union.data.padding,
- .field_names = enum_info.names,
- .names_map = enum_info.names_map,
- .field_types = .{
- .start = type_union.end,
- .len = fields_len,
+ .enum_tag_ty = type_union.data.tag_ty,
+ .field_types = field_types,
+ .field_aligns = field_aligns,
+ .zir_index = type_union.data.zir_index,
+ .captures = captures,
+ };
+}
+
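
loadUnionType above decodes the trailing data documented on `Tag.TypeUnion`: an optional captures length word, the captures themselves, an optional two-word PackedU64 type hash, then the field types and (when present) the packed field alignments. A standalone sketch of that offset arithmetic, assuming the layout is exactly as that doc comment lists it; the helper and its names are hypothetical:

const std = @import("std");

const UnionTrailing = struct { captures: u32, field_types: u32, field_aligns: u32 };

fn unionTrailingOffsets(
    end: u32, // first extra index after the fixed-size payload
    any_captures: bool,
    captures_len: u32,
    is_reified: bool,
    fields_len: u32,
) UnionTrailing {
    var i = end;
    if (any_captures) i += 1; // the captures_len word itself
    const captures = i;
    i += captures_len;
    if (is_reified) i += 2; // PackedU64 type_hash occupies two u32 words
    const field_types = i;
    i += fields_len;
    return .{ .captures = captures, .field_types = field_types, .field_aligns = i };
}

test "trailing offsets for a small union payload" {
    const o = unionTrailingOffsets(10, true, 2, false, 3);
    try std.testing.expectEqual(@as(u32, 11), o.captures);
    try std.testing.expectEqual(@as(u32, 13), o.field_types);
    try std.testing.expectEqual(@as(u32, 16), o.field_aligns);
}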
+pub const LoadedStructType = struct {
+ /// The index of the `Tag.TypeStruct` or `Tag.TypeStructPacked` payload.
+ extra_index: u32,
+ /// The struct's owner Decl. `none` when the struct is `@TypeOf(.{})`.
+ decl: OptionalDeclIndex,
+ /// `none` when the struct has no declarations.
+ namespace: OptionalNamespaceIndex,
+ /// Index of the `struct_decl` or `reify` ZIR instruction.
+ /// Only `none` when the struct is `@TypeOf(.{})`.
+ zir_index: TrackedInst.Index.Optional,
+ layout: std.builtin.Type.ContainerLayout,
+ field_names: NullTerminatedString.Slice,
+ field_types: Index.Slice,
+ field_inits: Index.Slice,
+ field_aligns: Alignment.Slice,
+ runtime_order: RuntimeOrder.Slice,
+ comptime_bits: ComptimeBits,
+ offsets: Offsets,
+ names_map: OptionalMapIndex,
+ captures: CaptureValue.Slice,
+
+ pub const ComptimeBits = struct {
+ start: u32,
+ /// This is the number of u32 elements, not the number of struct fields.
+ len: u32,
+
+ pub fn get(this: ComptimeBits, ip: *const InternPool) []u32 {
+ return ip.extra.items[this.start..][0..this.len];
+ }
+
+ pub fn getBit(this: ComptimeBits, ip: *const InternPool, i: usize) bool {
+ if (this.len == 0) return false;
+ return @as(u1, @truncate(this.get(ip)[i / 32] >> @intCast(i % 32))) != 0;
+ }
+
+ pub fn setBit(this: ComptimeBits, ip: *const InternPool, i: usize) void {
+ this.get(ip)[i / 32] |= @as(u32, 1) << @intCast(i % 32);
+ }
+
+ pub fn clearBit(this: ComptimeBits, ip: *const InternPool, i: usize) void {
+ this.get(ip)[i / 32] &= ~(@as(u32, 1) << @intCast(i % 32));
+ }
+ };
+
+ pub const Offsets = struct {
+ start: u32,
+ len: u32,
+
+ pub fn get(this: Offsets, ip: *const InternPool) []u32 {
+ return @ptrCast(ip.extra.items[this.start..][0..this.len]);
+ }
+ };
+
+ pub const RuntimeOrder = enum(u32) {
+ /// Placeholder until layout is resolved.
+ unresolved = std.math.maxInt(u32) - 0,
+ /// Field not present at runtime
+ omitted = std.math.maxInt(u32) - 1,
+ _,
+
+ pub const Slice = struct {
+ start: u32,
+ len: u32,
+
+ pub fn get(slice: RuntimeOrder.Slice, ip: *const InternPool) []RuntimeOrder {
+ return @ptrCast(ip.extra.items[slice.start..][0..slice.len]);
+ }
+ };
+
+ pub fn toInt(i: RuntimeOrder) ?u32 {
+ return switch (i) {
+ .omitted => null,
+ .unresolved => unreachable,
+ else => @intFromEnum(i),
+ };
+ }
+ };
+
+ /// Look up field index based on field name.
+ pub fn nameIndex(self: LoadedStructType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
+ const names_map = self.names_map.unwrap() orelse {
+ const i = name.toUnsigned(ip) orelse return null;
+ if (i >= self.field_types.len) return null;
+ return i;
+ };
+ const map = &ip.maps.items[@intFromEnum(names_map)];
+ const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) };
+ const field_index = map.getIndexAdapted(name, adapter) orelse return null;
+ return @intCast(field_index);
+ }
+
+ /// Returns the already-existing field with the same name, if any.
+ pub fn addFieldName(
+ self: @This(),
+ ip: *InternPool,
+ name: NullTerminatedString,
+ ) ?u32 {
+ return ip.addFieldName(self.names_map.unwrap().?, self.field_names.start, name);
+ }
+
+ pub fn fieldAlign(s: @This(), ip: *const InternPool, i: usize) Alignment {
+ if (s.field_aligns.len == 0) return .none;
+ return s.field_aligns.get(ip)[i];
+ }
+
+ pub fn fieldInit(s: @This(), ip: *const InternPool, i: usize) Index {
+ if (s.field_inits.len == 0) return .none;
+ assert(s.haveFieldInits(ip));
+ return s.field_inits.get(ip)[i];
+ }
+
+ /// Returns `none` in the case the struct is a tuple.
+ pub fn fieldName(s: @This(), ip: *const InternPool, i: usize) OptionalNullTerminatedString {
+ if (s.field_names.len == 0) return .none;
+ return s.field_names.get(ip)[i].toOptional();
+ }
+
+ pub fn fieldIsComptime(s: @This(), ip: *const InternPool, i: usize) bool {
+ return s.comptime_bits.getBit(ip, i);
+ }
+
+ pub fn setFieldComptime(s: @This(), ip: *InternPool, i: usize) void {
+ s.comptime_bits.setBit(ip, i);
+ }
+
+ /// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
+ /// complicated logic.
+ pub fn knownNonOpv(s: @This(), ip: *InternPool) bool {
+ return switch (s.layout) {
+ .Packed => false,
+ .Auto, .Extern => s.flagsPtr(ip).known_non_opv,
+ };
+ }
+
+ /// The returned pointer expires with any addition to the `InternPool`.
+ /// Asserts the struct is not packed.
+ pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStruct.Flags {
+ assert(self.layout != .Packed);
+ const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
+ return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
+ }
+
+ /// The returned pointer expires with any addition to the `InternPool`.
+ /// Asserts that the struct is packed.
+ pub fn packedFlagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStructPacked.Flags {
+ assert(self.layout == .Packed);
+ const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
+ return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
+ }
+
+ pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool {
+ if (s.layout == .Packed) return false;
+ const flags_ptr = s.flagsPtr(ip);
+ if (flags_ptr.field_types_wip) {
+ flags_ptr.assumed_runtime_bits = true;
+ return true;
+ }
+ return false;
+ }
+
+ pub fn setTypesWip(s: @This(), ip: *InternPool) bool {
+ if (s.layout == .Packed) return false;
+ const flags_ptr = s.flagsPtr(ip);
+ if (flags_ptr.field_types_wip) return true;
+ flags_ptr.field_types_wip = true;
+ return false;
+ }
+
+ pub fn clearTypesWip(s: @This(), ip: *InternPool) void {
+ if (s.layout == .Packed) return;
+ s.flagsPtr(ip).field_types_wip = false;
+ }
+
+ pub fn setLayoutWip(s: @This(), ip: *InternPool) bool {
+ if (s.layout == .Packed) return false;
+ const flags_ptr = s.flagsPtr(ip);
+ if (flags_ptr.layout_wip) return true;
+ flags_ptr.layout_wip = true;
+ return false;
+ }
+
+ pub fn clearLayoutWip(s: @This(), ip: *InternPool) void {
+ if (s.layout == .Packed) return;
+ s.flagsPtr(ip).layout_wip = false;
+ }
+
+ pub fn setAlignmentWip(s: @This(), ip: *InternPool) bool {
+ if (s.layout == .Packed) return false;
+ const flags_ptr = s.flagsPtr(ip);
+ if (flags_ptr.alignment_wip) return true;
+ flags_ptr.alignment_wip = true;
+ return false;
+ }
+
+ pub fn clearAlignmentWip(s: @This(), ip: *InternPool) void {
+ if (s.layout == .Packed) return;
+ s.flagsPtr(ip).alignment_wip = false;
+ }
+
+ pub fn setInitsWip(s: @This(), ip: *InternPool) bool {
+ switch (s.layout) {
+ .Packed => {
+ const flag = &s.packedFlagsPtr(ip).field_inits_wip;
+ if (flag.*) return true;
+ flag.* = true;
+ return false;
+ },
+ .Auto, .Extern => {
+ const flag = &s.flagsPtr(ip).field_inits_wip;
+ if (flag.*) return true;
+ flag.* = true;
+ return false;
+ },
+ }
+ }
+
+ pub fn clearInitsWip(s: @This(), ip: *InternPool) void {
+ switch (s.layout) {
+ .Packed => s.packedFlagsPtr(ip).field_inits_wip = false,
+ .Auto, .Extern => s.flagsPtr(ip).field_inits_wip = false,
+ }
+ }
+
+ pub fn setFullyResolved(s: @This(), ip: *InternPool) bool {
+ if (s.layout == .Packed) return true;
+ const flags_ptr = s.flagsPtr(ip);
+ if (flags_ptr.fully_resolved) return true;
+ flags_ptr.fully_resolved = true;
+ return false;
+ }
+
+ pub fn clearFullyResolved(s: @This(), ip: *InternPool) void {
+ s.flagsPtr(ip).fully_resolved = false;
+ }
+
+ /// The returned pointer expires with any addition to the `InternPool`.
+ /// Asserts the struct is not packed.
+ pub fn size(self: @This(), ip: *InternPool) *u32 {
+ assert(self.layout != .Packed);
+ const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
+ return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]);
+ }
+
+ /// The backing integer type of the packed struct. Whether zig chooses
+ /// this type or the user specifies it, it is stored here. This will be
+ /// set to `none` until the layout is resolved.
+ /// Asserts the struct is packed.
+ pub fn backingIntType(s: @This(), ip: *const InternPool) *Index {
+ assert(s.layout == .Packed);
+ const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
+ return @ptrCast(&ip.extra.items[s.extra_index + field_index]);
+ }
+
+ /// Asserts the struct is not packed.
+ pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
+ assert(s.layout != .Packed);
+ const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
+ ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
+ }
+
+ pub fn haveFieldTypes(s: @This(), ip: *const InternPool) bool {
+ const types = s.field_types.get(ip);
+ return types.len == 0 or types[0] != .none;
+ }
+
+ pub fn haveFieldInits(s: @This(), ip: *const InternPool) bool {
+ return switch (s.layout) {
+ .Packed => s.packedFlagsPtr(ip).inits_resolved,
+ .Auto, .Extern => s.flagsPtr(ip).inits_resolved,
+ };
+ }
+
+ pub fn setHaveFieldInits(s: @This(), ip: *InternPool) void {
+ switch (s.layout) {
+ .Packed => s.packedFlagsPtr(ip).inits_resolved = true,
+ .Auto, .Extern => s.flagsPtr(ip).inits_resolved = true,
+ }
+ }
+
+ pub fn haveLayout(s: @This(), ip: *InternPool) bool {
+ return switch (s.layout) {
+ .Packed => s.backingIntType(ip).* != .none,
+ .Auto, .Extern => s.flagsPtr(ip).layout_resolved,
+ };
+ }
+
+ pub fn isTuple(s: @This(), ip: *InternPool) bool {
+ return s.layout != .Packed and s.flagsPtr(ip).is_tuple;
+ }
+
+ pub fn hasReorderedFields(s: @This()) bool {
+ return s.layout == .Auto;
+ }
+
+ pub const RuntimeOrderIterator = struct {
+ ip: *InternPool,
+ field_index: u32,
+ struct_type: InternPool.LoadedStructType,
+
+ pub fn next(it: *@This()) ?u32 {
+ var i = it.field_index;
+
+ if (i >= it.struct_type.field_types.len)
+ return null;
+
+ if (it.struct_type.hasReorderedFields()) {
+ it.field_index += 1;
+ return it.struct_type.runtime_order.get(it.ip)[i].toInt();
+ }
+
+ while (it.struct_type.fieldIsComptime(it.ip, i)) {
+ i += 1;
+ if (i >= it.struct_type.field_types.len)
+ return null;
+ }
+
+ it.field_index = i + 1;
+ return i;
+ }
+ };
+
+ /// Iterates over non-comptime fields in the order they are laid out in memory at runtime.
+ /// May or may not include zero-bit fields.
+ /// Asserts the struct is not packed.
+ pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator {
+ assert(s.layout != .Packed);
+ return .{
+ .ip = ip,
+ .field_index = 0,
+ .struct_type = s,
+ };
+ }
+};
+
+pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
+ const item = ip.items.get(@intFromEnum(index));
+ switch (item.tag) {
+ .type_struct => {
+ if (item.data == 0) return .{
+ .extra_index = 0,
+ .decl = .none,
+ .namespace = .none,
+ .zir_index = .none,
+ .layout = .Auto,
+ .field_names = .{ .start = 0, .len = 0 },
+ .field_types = .{ .start = 0, .len = 0 },
+ .field_inits = .{ .start = 0, .len = 0 },
+ .field_aligns = .{ .start = 0, .len = 0 },
+ .runtime_order = .{ .start = 0, .len = 0 },
+ .comptime_bits = .{ .start = 0, .len = 0 },
+ .offsets = .{ .start = 0, .len = 0 },
+ .names_map = .none,
+ .captures = .{ .start = 0, .len = 0 },
+ };
+ const extra = ip.extraDataTrail(Tag.TypeStruct, item.data);
+ const fields_len = extra.data.fields_len;
+ var extra_index = extra.end;
+ const captures_len = if (extra.data.flags.any_captures) c: {
+ const len = ip.extra.items[extra_index];
+ extra_index += 1;
+ break :c len;
+ } else 0;
+ const captures: CaptureValue.Slice = .{
+ .start = extra_index,
+ .len = captures_len,
+ };
+ extra_index += captures_len;
+ if (extra.data.flags.is_reified) {
+ extra_index += 2; // PackedU64
+ }
+ const field_types: Index.Slice = .{
+ .start = extra_index,
+ .len = fields_len,
+ };
+ extra_index += fields_len;
+ const names_map: OptionalMapIndex, const names: NullTerminatedString.Slice = if (!extra.data.flags.is_tuple) n: {
+ const names_map: OptionalMapIndex = @enumFromInt(ip.extra.items[extra_index]);
+ extra_index += 1;
+ const names: NullTerminatedString.Slice = .{ .start = extra_index, .len = fields_len };
+ extra_index += fields_len;
+ break :n .{ names_map, names };
+ } else .{ .none, .{ .start = 0, .len = 0 } };
+ const inits: Index.Slice = if (extra.data.flags.any_default_inits) i: {
+ const inits: Index.Slice = .{ .start = extra_index, .len = fields_len };
+ extra_index += fields_len;
+ break :i inits;
+ } else .{ .start = 0, .len = 0 };
+ const namespace: OptionalNamespaceIndex = if (extra.data.flags.has_namespace) n: {
+ const n: NamespaceIndex = @enumFromInt(ip.extra.items[extra_index]);
+ extra_index += 1;
+ break :n n.toOptional();
+ } else .none;
+ const aligns: Alignment.Slice = if (extra.data.flags.any_aligned_fields) a: {
+ const a: Alignment.Slice = .{ .start = extra_index, .len = fields_len };
+ extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable;
+ break :a a;
+ } else .{ .start = 0, .len = 0 };
+ const comptime_bits: LoadedStructType.ComptimeBits = if (extra.data.flags.any_comptime_fields) c: {
+ const len = std.math.divCeil(u32, fields_len, 32) catch unreachable;
+ const c: LoadedStructType.ComptimeBits = .{ .start = extra_index, .len = len };
+ extra_index += len;
+ break :c c;
+ } else .{ .start = 0, .len = 0 };
+ const runtime_order: LoadedStructType.RuntimeOrder.Slice = if (!extra.data.flags.is_extern) ro: {
+ const ro: LoadedStructType.RuntimeOrder.Slice = .{ .start = extra_index, .len = fields_len };
+ extra_index += fields_len;
+ break :ro ro;
+ } else .{ .start = 0, .len = 0 };
+ const offsets: LoadedStructType.Offsets = o: {
+ const o: LoadedStructType.Offsets = .{ .start = extra_index, .len = fields_len };
+ extra_index += fields_len;
+ break :o o;
+ };
+ return .{
+ .extra_index = item.data,
+ .decl = extra.data.decl.toOptional(),
+ .namespace = namespace,
+ .zir_index = extra.data.zir_index.toOptional(),
+ .layout = if (extra.data.flags.is_extern) .Extern else .Auto,
+ .field_names = names,
+ .field_types = field_types,
+ .field_inits = inits,
+ .field_aligns = aligns,
+ .runtime_order = runtime_order,
+ .comptime_bits = comptime_bits,
+ .offsets = offsets,
+ .names_map = names_map,
+ .captures = captures,
+ };
},
- .field_aligns = .{
- .start = type_union.end + fields_len,
- .len = if (type_union.data.flags.any_aligned_fields) fields_len else 0,
+ .type_struct_packed, .type_struct_packed_inits => {
+ const extra = ip.extraDataTrail(Tag.TypeStructPacked, item.data);
+ const has_inits = item.tag == .type_struct_packed_inits;
+ const fields_len = extra.data.fields_len;
+ var extra_index = extra.end;
+ const captures_len = if (extra.data.flags.any_captures) c: {
+ const len = ip.extra.items[extra_index];
+ extra_index += 1;
+ break :c len;
+ } else 0;
+ const captures: CaptureValue.Slice = .{
+ .start = extra_index,
+ .len = captures_len,
+ };
+ extra_index += captures_len;
+ if (extra.data.flags.is_reified) {
+ extra_index += 2; // PackedU64
+ }
+ const field_types: Index.Slice = .{
+ .start = extra_index,
+ .len = fields_len,
+ };
+ extra_index += fields_len;
+ const field_names: NullTerminatedString.Slice = .{
+ .start = extra_index,
+ .len = fields_len,
+ };
+ extra_index += fields_len;
+ const field_inits: Index.Slice = if (has_inits) inits: {
+ const i: Index.Slice = .{
+ .start = extra_index,
+ .len = fields_len,
+ };
+ extra_index += fields_len;
+ break :inits i;
+ } else .{ .start = 0, .len = 0 };
+ return .{
+ .extra_index = item.data,
+ .decl = extra.data.decl.toOptional(),
+ .namespace = extra.data.namespace,
+ .zir_index = extra.data.zir_index.toOptional(),
+ .layout = .Packed,
+ .field_names = field_names,
+ .field_types = field_types,
+ .field_inits = field_inits,
+ .field_aligns = .{ .start = 0, .len = 0 },
+ .runtime_order = .{ .start = 0, .len = 0 },
+ .comptime_bits = .{ .start = 0, .len = 0 },
+ .offsets = .{ .start = 0, .len = 0 },
+ .names_map = extra.data.names_map.toOptional(),
+ .captures = captures,
+ };
+ },
+ else => unreachable,
+ }
+}
+
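
Both loadUnionType and loadStructType advance the alignment cursor by `std.math.divCeil(u32, fields_len, 4)`, which suggests four byte-sized Alignment values are packed into each extra word (an inference from this diff, not a documented guarantee). The arithmetic itself:

const std = @import("std");

test "alignment words needed per field count" {
    try std.testing.expectEqual(@as(u32, 0), std.math.divCeil(u32, 0, 4) catch unreachable);
    try std.testing.expectEqual(@as(u32, 1), std.math.divCeil(u32, 4, 4) catch unreachable);
    try std.testing.expectEqual(@as(u32, 2), std.math.divCeil(u32, 5, 4) catch unreachable);
}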
+const LoadedEnumType = struct {
+ /// The Decl that corresponds to the enum itself.
+ decl: DeclIndex,
+ /// Represents the declarations inside this enum.
+ namespace: OptionalNamespaceIndex,
+ /// An integer type which is used for the numerical value of the enum.
+ /// This field is present regardless of whether the enum has an
+ /// explicitly provided tag type or auto-numbered.
+ tag_ty: Index,
+ /// Set of field names in declaration order.
+ names: NullTerminatedString.Slice,
+ /// Maps integer tag value to field index.
+ /// Entries are in declaration order, same as `fields`.
+ /// If this is empty, it means the enum tags are auto-numbered.
+ values: Index.Slice,
+ tag_mode: TagMode,
+ names_map: MapIndex,
+ /// This is guaranteed to not be `.none` if explicit values are provided.
+ values_map: OptionalMapIndex,
+ /// This is `none` only if this is a generated tag type.
+ zir_index: TrackedInst.Index.Optional,
+ captures: CaptureValue.Slice,
+
+ pub const TagMode = enum {
+ /// The integer tag type was auto-numbered by zig.
+ auto,
+ /// The integer tag type was provided by the enum declaration, and the enum
+ /// is exhaustive.
+ explicit,
+ /// The integer tag type was provided by the enum declaration, and the enum
+ /// is non-exhaustive.
+ nonexhaustive,
+ };
+
+ /// Look up field index based on field name.
+ pub fn nameIndex(self: LoadedEnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
+ const map = &ip.maps.items[@intFromEnum(self.names_map)];
+ const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) };
+ const field_index = map.getIndexAdapted(name, adapter) orelse return null;
+ return @intCast(field_index);
+ }
+
+ /// Look up field index based on tag value.
+ /// Asserts that `values_map` is not `none`.
+ /// This function returns `null` when `tag_val` does not have the
+ /// integer tag type of the enum.
+ pub fn tagValueIndex(self: LoadedEnumType, ip: *const InternPool, tag_val: Index) ?u32 {
+ assert(tag_val != .none);
+    // TODO: we should probably decide on a single interface for this function, but currently
+ // it's being called with both tag values and underlying ints. Fix this!
+ const int_tag_val = switch (ip.indexToKey(tag_val)) {
+ .enum_tag => |enum_tag| enum_tag.int,
+ .int => tag_val,
+ else => unreachable,
+ };
+ if (self.values_map.unwrap()) |values_map| {
+ const map = &ip.maps.items[@intFromEnum(values_map)];
+ const adapter: Index.Adapter = .{ .indexes = self.values.get(ip) };
+ const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null;
+ return @intCast(field_index);
+ }
+ // Auto-numbered enum. Convert `int_tag_val` to field index.
+ const field_index = switch (ip.indexToKey(int_tag_val).int.storage) {
+ inline .u64, .i64 => |x| std.math.cast(u32, x) orelse return null,
+ .big_int => |x| x.to(u32) catch return null,
+ .lazy_align, .lazy_size => unreachable,
+ };
+ return if (field_index < self.names.len) field_index else null;
+ }
+};
+
+pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType {
+ const item = ip.items.get(@intFromEnum(index));
+ const tag_mode: LoadedEnumType.TagMode = switch (item.tag) {
+ .type_enum_auto => {
+ const extra = ip.extraDataTrail(EnumAuto, item.data);
+ var extra_index: u32 = @intCast(extra.end);
+ if (extra.data.zir_index == .none) {
+ extra_index += 1; // owner_union
+ }
+ const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) c: {
+ extra_index += 2; // type_hash: PackedU64
+ break :c 0;
+ } else extra.data.captures_len;
+ return .{
+ .decl = extra.data.decl,
+ .namespace = extra.data.namespace,
+ .tag_ty = extra.data.int_tag_type,
+ .names = .{
+ .start = extra_index + captures_len,
+ .len = extra.data.fields_len,
+ },
+ .values = .{ .start = 0, .len = 0 },
+ .tag_mode = .auto,
+ .names_map = extra.data.names_map,
+ .values_map = .none,
+ .zir_index = extra.data.zir_index,
+ .captures = .{
+ .start = extra_index,
+ .len = captures_len,
+ },
+ };
+ },
+ .type_enum_explicit => .explicit,
+ .type_enum_nonexhaustive => .nonexhaustive,
+ else => unreachable,
+ };
+ const extra = ip.extraDataTrail(EnumExplicit, item.data);
+ var extra_index: u32 = @intCast(extra.end);
+ if (extra.data.zir_index == .none) {
+ extra_index += 1; // owner_union
+ }
+ const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) c: {
+ extra_index += 2; // type_hash: PackedU64
+ break :c 0;
+ } else extra.data.captures_len;
+ return .{
+ .decl = extra.data.decl,
+ .namespace = extra.data.namespace,
+ .tag_ty = extra.data.int_tag_type,
+ .names = .{
+ .start = extra_index + captures_len,
+ .len = extra.data.fields_len,
+ },
+ .values = .{
+ .start = extra_index + captures_len + extra.data.fields_len,
+ .len = if (extra.data.values_map != .none) extra.data.fields_len else 0,
+ },
+ .tag_mode = tag_mode,
+ .names_map = extra.data.names_map,
+ .values_map = extra.data.values_map,
+ .zir_index = extra.data.zir_index,
+ .captures = .{
+ .start = extra_index,
+ .len = captures_len,
+ },
+ };
+}
+
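
Both enum loader paths above, like loadOpaqueType below, treat `captures_len == std.math.maxInt(u32)` as a sentinel; reading the code, it appears to mark a reified (`@Type`) payload, whose trailing data carries a PackedU64 type hash rather than a capture list. A tiny sketch of that decoding convention with a hypothetical helper name:

const std = @import("std");

fn effectiveCapturesLen(raw: u32) u32 {
    // maxInt(u32) is assumed here to be the "reified" sentinel; such types
    // carry a type hash instead of captures, so the capture count is zero.
    return if (raw == std.math.maxInt(u32)) 0 else raw;
}

test "sentinel captures_len decodes to zero captures" {
    try std.testing.expectEqual(@as(u32, 0), effectiveCapturesLen(std.math.maxInt(u32)));
    try std.testing.expectEqual(@as(u32, 5), effectiveCapturesLen(5));
}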
+/// Note that this type doubles as the payload for `Tag.type_opaque`.
+pub const LoadedOpaqueType = struct {
+ /// The opaque's owner Decl.
+ decl: DeclIndex,
+ /// Contains the declarations inside this opaque.
+ namespace: OptionalNamespaceIndex,
+ /// Index of the `opaque_decl` or `reify` instruction.
+ zir_index: TrackedInst.Index,
+ captures: CaptureValue.Slice,
+};
+
+pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType {
+ assert(ip.items.items(.tag)[@intFromEnum(index)] == .type_opaque);
+ const extra_index = ip.items.items(.data)[@intFromEnum(index)];
+ const extra = ip.extraDataTrail(Tag.TypeOpaque, extra_index);
+ const captures_len = if (extra.data.captures_len == std.math.maxInt(u32))
+ 0
+ else
+ extra.data.captures_len;
+ return .{
+ .decl = extra.data.decl,
+ .namespace = extra.data.namespace,
+ .zir_index = extra.data.zir_index,
+ .captures = .{
+ .start = extra.end,
+ .len = captures_len,
},
- .zir_index = type_union.data.zir_index,
- .flags_index = key.extra_index + std.meta.fieldIndex(Tag.TypeUnion, "flags").?,
};
}
@@ -2457,6 +2740,7 @@ pub const Index = enum(u32) {
},
};
+ removed: void,
type_int_signed: struct { data: u32 },
type_int_unsigned: struct { data: u32 },
type_array_big: struct { data: *Array },
@@ -2484,9 +2768,8 @@ pub const Index = enum(u32) {
type_enum_explicit: DataIsExtraIndexOfEnumExplicit,
type_enum_nonexhaustive: DataIsExtraIndexOfEnumExplicit,
simple_type: struct { data: SimpleType },
- type_opaque: struct { data: *Key.OpaqueType },
+ type_opaque: struct { data: *Tag.TypeOpaque },
type_struct: struct { data: *Tag.TypeStruct },
- type_struct_ns: struct { data: NamespaceIndex },
type_struct_anon: DataIsExtraIndexOfTypeStructAnon,
type_struct_packed: struct { data: *Tag.TypeStructPacked },
type_struct_packed_inits: struct { data: *Tag.TypeStructPacked },
@@ -2865,6 +3148,12 @@ comptime {
}
pub const Tag = enum(u8) {
+ /// This special tag represents a value which was removed from this pool via
+ /// `InternPool.remove`. The item remains allocated to preserve indices, but
+ /// lookups will consider it not equal to any other item, and all other
+ /// queries assert that the item does not have this tag. `data` is unused.
+ removed,
+
/// An integer type.
/// data is number of bits
type_int_signed,
@@ -2920,15 +3209,12 @@ pub const Tag = enum(u8) {
/// data is SimpleType enum value.
simple_type,
/// An opaque type.
- /// data is index of Key.OpaqueType in extra.
+ /// data is index of Tag.TypeOpaque in extra.
type_opaque,
/// A non-packed struct type.
/// data is 0 or extra index of `TypeStruct`.
/// data == 0 represents `@TypeOf(.{})`.
type_struct,
- /// A non-packed struct type that has only a namespace; no fields.
- /// data is NamespaceIndex.
- type_struct_ns,
/// An AnonStructType which stores types, names, and values for fields.
/// data is extra index of `TypeStructAnon`.
type_struct_anon,
@@ -3126,7 +3412,6 @@ pub const Tag = enum(u8) {
memoized_call,
const ErrorUnionType = Key.ErrorUnionType;
- const OpaqueType = Key.OpaqueType;
const TypeValue = Key.TypeValue;
const Error = Key.Error;
const EnumTag = Key.EnumTag;
@@ -3136,6 +3421,7 @@ pub const Tag = enum(u8) {
fn Payload(comptime tag: Tag) type {
return switch (tag) {
+ .removed => unreachable,
.type_int_signed => unreachable,
.type_int_unsigned => unreachable,
.type_array_big => Array,
@@ -3153,9 +3439,8 @@ pub const Tag = enum(u8) {
.type_enum_explicit => EnumExplicit,
.type_enum_nonexhaustive => EnumExplicit,
.simple_type => unreachable,
- .type_opaque => OpaqueType,
+ .type_opaque => TypeOpaque,
.type_struct => TypeStruct,
- .type_struct_ns => unreachable,
.type_struct_anon => TypeStructAnon,
.type_struct_packed, .type_struct_packed_inits => TypeStructPacked,
.type_tuple_anon => TypeStructAnon,
@@ -3311,43 +3596,54 @@ pub const Tag = enum(u8) {
};
};
- /// The number of fields is provided by the `tag_ty` field.
/// Trailing:
- /// 0. field type: Index for each field; declaration order
- /// 1. field align: Alignment for each field; declaration order
+ /// 0. captures_len: u32 // if `any_captures`
+ /// 1. capture: CaptureValue // for each `captures_len`
+ /// 2. type_hash: PackedU64 // if `is_reified`
+ /// 3. field type: Index for each field; declaration order
+ /// 4. field align: Alignment for each field; declaration order
pub const TypeUnion = struct {
flags: Flags,
+ /// This could be provided through the tag type, but it is more convenient
+ /// to store it directly. This is also necessary for `dumpStatsFallible` to
+ /// work on unresolved types.
+ fields_len: u32,
/// Only valid after .have_layout
size: u32,
/// Only valid after .have_layout
padding: u32,
decl: DeclIndex,
- namespace: NamespaceIndex,
+ namespace: OptionalNamespaceIndex,
/// The enum that provides the list of field names and values.
tag_ty: Index,
- zir_index: TrackedInst.Index.Optional,
+ zir_index: TrackedInst.Index,
pub const Flags = packed struct(u32) {
- runtime_tag: UnionType.RuntimeTag,
+ any_captures: bool,
+ runtime_tag: LoadedUnionType.RuntimeTag,
/// If false, the field alignment trailing data is omitted.
any_aligned_fields: bool,
layout: std.builtin.Type.ContainerLayout,
- status: UnionType.Status,
+ status: LoadedUnionType.Status,
requires_comptime: RequiresComptime,
assumed_runtime_bits: bool,
assumed_pointer_aligned: bool,
alignment: Alignment,
- _: u14 = 0,
+ is_reified: bool,
+ _: u12 = 0,
};
};
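// Sketch, not part of this change: the trailing `extra` length implied by the layout
// documented above, mirroring the capacity computation done in `getUnionType` below.
fn typeUnionTrailingLen(flags: Tag.TypeUnion.Flags, fields_len: u32, captures_len: u32) u32 {
    var len: u32 = 0;
    if (flags.any_captures) len += 1 + captures_len; // captures_len word plus the captures
    if (flags.is_reified) len += 2; // type_hash: PackedU64 occupies two u32 elements
    len += fields_len; // field types
    if (flags.any_aligned_fields) len += (fields_len + 3) / 4; // Alignments packed 4 per u32
    return len;
}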
/// Trailing:
- /// 0. type: Index for each fields_len
- /// 1. name: NullTerminatedString for each fields_len
- /// 2. init: Index for each fields_len // if tag is type_struct_packed_inits
+ /// 0. captures_len: u32 // if `any_captures`
+ /// 1. capture: CaptureValue // for each `captures_len`
+ /// 2. type_hash: PackedU64 // if `is_reified`
+ /// 3. type: Index for each fields_len
+ /// 4. name: NullTerminatedString for each fields_len
+ /// 5. init: Index for each fields_len // if tag is type_struct_packed_inits
pub const TypeStructPacked = struct {
decl: DeclIndex,
- zir_index: TrackedInst.Index.Optional,
+ zir_index: TrackedInst.Index,
fields_len: u32,
namespace: OptionalNamespaceIndex,
backing_int_ty: Index,
@@ -3355,10 +3651,12 @@ pub const Tag = enum(u8) {
flags: Flags,
pub const Flags = packed struct(u32) {
+ any_captures: bool,
/// Dependency loop detection when resolving field inits.
field_inits_wip: bool,
inits_resolved: bool,
- _: u30 = 0,
+ is_reified: bool,
+ _: u28 = 0,
};
};
@@ -3377,29 +3675,33 @@ pub const Tag = enum(u8) {
/// than coming up with some other scheme for the data.
///
/// Trailing:
- /// 0. type: Index for each field in declared order
- /// 1. if not is_tuple:
+ /// 0. captures_len: u32 // if `any_captures`
+ /// 1. capture: CaptureValue // for each `captures_len`
+ /// 2. type_hash: PackedU64 // if `is_reified`
+ /// 3. type: Index for each field in declared order
+ /// 4. if not is_tuple:
/// names_map: MapIndex,
/// name: NullTerminatedString // for each field in declared order
- /// 2. if any_default_inits:
+ /// 5. if any_default_inits:
/// init: Index // for each field in declared order
- /// 3. if has_namespace:
+ /// 6. if has_namespace:
/// namespace: NamespaceIndex
- /// 4. if any_aligned_fields:
+ /// 7. if any_aligned_fields:
/// align: Alignment // for each field in declared order
- /// 5. if any_comptime_fields:
+ /// 8. if any_comptime_fields:
/// field_is_comptime_bits: u32 // minimal number of u32s needed, LSB is field 0
- /// 6. if not is_extern:
+ /// 9. if not is_extern:
/// field_index: RuntimeOrder // for each field in runtime order
- /// 7. field_offset: u32 // for each field in declared order, undef until layout_resolved
+ /// 10. field_offset: u32 // for each field in declared order, undef until layout_resolved
pub const TypeStruct = struct {
decl: DeclIndex,
- zir_index: TrackedInst.Index.Optional,
+ zir_index: TrackedInst.Index,
fields_len: u32,
flags: Flags,
size: u32,
pub const Flags = packed struct(u32) {
+ any_captures: bool,
is_extern: bool,
known_non_opv: bool,
requires_comptime: RequiresComptime,
@@ -3428,10 +3730,23 @@ pub const Tag = enum(u8) {
// The type and all its fields have had their layout resolved. Even through pointers,
// which `layout_resolved` does not ensure.
fully_resolved: bool,
-
- _: u8 = 0,
+ is_reified: bool,
+ _: u6 = 0,
};
};
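// Sketch, not part of this change: reading the capture count of a `Tag.TypeStruct`,
// mirroring `indexToKey`. `is_reified` and `any_captures` are mutually exclusive; when
// captures exist, their count is stored as the first trailing `u32`.
fn typeStructCapturesLen(ip: *const InternPool, s: Tag.TypeStruct, extra_end: u32) u32 {
    if (s.flags.is_reified) {
        assert(!s.flags.any_captures);
        return 0;
    }
    return if (s.flags.any_captures) ip.extra.items[extra_end] else 0;
}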
+
+ /// Trailing:
+ /// 0. capture: CaptureValue // for each `captures_len`
+ pub const TypeOpaque = struct {
+ /// The opaque's owner Decl.
+ decl: DeclIndex,
+ /// Contains the declarations inside this opaque.
+ namespace: OptionalNamespaceIndex,
+ /// The index of the `opaque_decl` or `reify` instruction.
+ zir_index: TrackedInst.Index,
+ /// `std.math.maxInt(u32)` indicates this type is reified.
+ captures_len: u32,
+ };
};
/// State that is mutable during semantic analysis. This data is not used for
@@ -3738,11 +4053,16 @@ pub const Array = struct {
};
/// Trailing:
-/// 0. field name: NullTerminatedString for each fields_len; declaration order
-/// 1. tag value: Index for each fields_len; declaration order
+/// 0. owner_union: Index // if `zir_index == .none`
+/// 1. capture: CaptureValue // for each `captures_len`
+/// 2. type_hash: PackedU64 // if reified (`captures_len == std.math.maxInt(u32)`)
+/// 3. field name: NullTerminatedString for each fields_len; declaration order
+/// 4. tag value: Index for each fields_len; declaration order
pub const EnumExplicit = struct {
/// The Decl that corresponds to the enum itself.
decl: DeclIndex,
+ /// `std.math.maxInt(u32)` indicates this type is reified.
+ captures_len: u32,
/// This may be `none` if there are no declarations.
namespace: OptionalNamespaceIndex,
/// An integer type which is used for the numerical value of the enum, which
@@ -3755,14 +4075,21 @@ pub const EnumExplicit = struct {
/// If this is `none`, it means the trailing tag values are absent because
/// they are auto-numbered.
values_map: OptionalMapIndex,
+ /// `none` means this is a generated tag type.
+ /// There will be a trailing union type for which this is a tag.
zir_index: TrackedInst.Index.Optional,
};
/// Trailing:
-/// 0. field name: NullTerminatedString for each fields_len; declaration order
+/// 0. owner_union: Index // if `zir_index == .none`
+/// 1. capture: CaptureValue // for each `captures_len`
+/// 2. type_hash: PackedU64 // if reified (`captures_len == std.math.maxInt(u32)`)
+/// 3. field name: NullTerminatedString for each fields_len; declaration order
pub const EnumAuto = struct {
/// The Decl that corresponds to the enum itself.
decl: DeclIndex,
+ /// `std.math.maxInt(u32)` indicates this type is reified.
+ captures_len: u32,
/// This may be `none` if there are no declarations.
namespace: OptionalNamespaceIndex,
/// An integer type which is used for the numerical value of the enum, which
@@ -3771,6 +4098,8 @@ pub const EnumAuto = struct {
fields_len: u32,
/// Maps field names to declaration index.
names_map: MapIndex,
+ /// `none` means this is a generated tag type.
+ /// There will be a trailing union type for which this is a tag.
zir_index: TrackedInst.Index.Optional,
};
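// Sketch, not part of this change: where the trailing field names of an `EnumAuto`
// begin, following the layout documented above. `extra_end` is assumed to be the index
// of the first trailing element (the `end` returned by `extraDataTrail`).
fn enumAutoNamesStart(ea: EnumAuto, extra_end: u32) u32 {
    var i = extra_end;
    if (ea.zir_index == .none) i += 1; // owner_union: Index of the union this tag belongs to
    if (ea.captures_len == std.math.maxInt(u32)) {
        i += 2; // reified: type_hash stored as a PackedU64 (two u32 elements)
    } else {
        i += ea.captures_len; // declared: one CaptureValue per capture
    }
    return i;
}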
@@ -4011,6 +4340,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
const item = ip.items.get(@intFromEnum(index));
const data = item.data;
return switch (item.tag) {
+ .removed => unreachable,
.type_int_signed => .{
.int_type = .{
.signedness = .signed,
@@ -4072,68 +4402,124 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.inferred_error_set_type = @enumFromInt(data),
},
- .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) },
-
- .type_struct => .{ .struct_type = if (data == 0) .{
- .extra_index = 0,
- .namespace = .none,
- .decl = .none,
- .zir_index = undefined,
- .layout = .Auto,
- .field_names = .{ .start = 0, .len = 0 },
- .field_types = .{ .start = 0, .len = 0 },
- .field_inits = .{ .start = 0, .len = 0 },
- .field_aligns = .{ .start = 0, .len = 0 },
- .runtime_order = .{ .start = 0, .len = 0 },
- .comptime_bits = .{ .start = 0, .len = 0 },
- .offsets = .{ .start = 0, .len = 0 },
- .names_map = undefined,
- } else extraStructType(ip, data) },
-
- .type_struct_ns => .{ .struct_type = .{
- .extra_index = 0,
- .namespace = @as(NamespaceIndex, @enumFromInt(data)).toOptional(),
- .decl = .none,
- .zir_index = undefined,
- .layout = .Auto,
- .field_names = .{ .start = 0, .len = 0 },
- .field_types = .{ .start = 0, .len = 0 },
- .field_inits = .{ .start = 0, .len = 0 },
- .field_aligns = .{ .start = 0, .len = 0 },
- .runtime_order = .{ .start = 0, .len = 0 },
- .comptime_bits = .{ .start = 0, .len = 0 },
- .offsets = .{ .start = 0, .len = 0 },
- .names_map = undefined,
+ .type_opaque => .{ .opaque_type = ns: {
+ const extra = ip.extraDataTrail(Tag.TypeOpaque, data);
+ if (extra.data.captures_len == std.math.maxInt(u32)) {
+ break :ns .{ .reified = .{
+ .zir_index = extra.data.zir_index,
+ .type_hash = 0,
+ } };
+ }
+ break :ns .{ .declared = .{
+ .zir_index = extra.data.zir_index,
+ .captures = .{ .owned = .{
+ .start = extra.end,
+ .len = extra.data.captures_len,
+ } },
+ } };
+ } },
+
+ .type_struct => .{ .struct_type = ns: {
+ if (data == 0) break :ns .empty_struct;
+ const extra = ip.extraDataTrail(Tag.TypeStruct, data);
+ if (extra.data.flags.is_reified) {
+ assert(!extra.data.flags.any_captures);
+ break :ns .{ .reified = .{
+ .zir_index = extra.data.zir_index,
+ .type_hash = ip.extraData(PackedU64, extra.end).get(),
+ } };
+ }
+ break :ns .{ .declared = .{
+ .zir_index = extra.data.zir_index,
+ .captures = .{ .owned = if (extra.data.flags.any_captures) .{
+ .start = extra.end + 1,
+ .len = ip.extra.items[extra.end],
+ } else .{ .start = 0, .len = 0 } },
+ } };
+ } },
+
+ .type_struct_packed, .type_struct_packed_inits => .{ .struct_type = ns: {
+ const extra = ip.extraDataTrail(Tag.TypeStructPacked, data);
+ if (extra.data.flags.is_reified) {
+ assert(!extra.data.flags.any_captures);
+ break :ns .{ .reified = .{
+ .zir_index = extra.data.zir_index,
+ .type_hash = ip.extraData(PackedU64, extra.end).get(),
+ } };
+ }
+ break :ns .{ .declared = .{
+ .zir_index = extra.data.zir_index,
+ .captures = .{ .owned = if (extra.data.flags.any_captures) .{
+ .start = extra.end + 1,
+ .len = ip.extra.items[extra.end],
+ } else .{ .start = 0, .len = 0 } },
+ } };
} },
.type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(ip, data) },
.type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(ip, data) },
- .type_struct_packed => .{ .struct_type = extraPackedStructType(ip, data, false) },
- .type_struct_packed_inits => .{ .struct_type = extraPackedStructType(ip, data, true) },
- .type_union => .{ .union_type = extraUnionType(ip, data) },
+ .type_union => .{ .union_type = ns: {
+ const extra = ip.extraDataTrail(Tag.TypeUnion, data);
+ if (extra.data.flags.is_reified) {
+ assert(!extra.data.flags.any_captures);
+ break :ns .{ .reified = .{
+ .zir_index = extra.data.zir_index,
+ .type_hash = ip.extraData(PackedU64, extra.end).get(),
+ } };
+ }
+ break :ns .{ .declared = .{
+ .zir_index = extra.data.zir_index,
+ .captures = .{ .owned = if (extra.data.flags.any_captures) .{
+ .start = extra.end + 1,
+ .len = ip.extra.items[extra.end],
+ } else .{ .start = 0, .len = 0 } },
+ } };
+ } },
- .type_enum_auto => {
- const enum_auto = ip.extraDataTrail(EnumAuto, data);
- return .{ .enum_type = .{
- .decl = enum_auto.data.decl,
- .namespace = enum_auto.data.namespace,
- .tag_ty = enum_auto.data.int_tag_type,
- .names = .{
- .start = @intCast(enum_auto.end),
- .len = enum_auto.data.fields_len,
- },
- .values = .{
- .start = 0,
- .len = 0,
- },
- .tag_mode = .auto,
- .names_map = enum_auto.data.names_map.toOptional(),
- .values_map = .none,
- .zir_index = enum_auto.data.zir_index,
+ .type_enum_auto => .{ .enum_type = ns: {
+ const extra = ip.extraDataTrail(EnumAuto, data);
+ const zir_index = extra.data.zir_index.unwrap() orelse {
+ assert(extra.data.captures_len == 0);
+ break :ns .{ .generated_tag = .{
+ .union_type = @enumFromInt(ip.extra.items[extra.end]),
+ } };
+ };
+ if (extra.data.captures_len == std.math.maxInt(u32)) {
+ break :ns .{ .reified = .{
+ .zir_index = zir_index,
+ .type_hash = ip.extraData(PackedU64, extra.end).get(),
+ } };
+ }
+ break :ns .{ .declared = .{
+ .zir_index = zir_index,
+ .captures = .{ .owned = .{
+ .start = extra.end,
+ .len = extra.data.captures_len,
+ } },
} };
- },
- .type_enum_explicit => ip.indexToKeyEnum(data, .explicit),
- .type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive),
+ } },
+ .type_enum_explicit, .type_enum_nonexhaustive => .{ .enum_type = ns: {
+ const extra = ip.extraDataTrail(EnumExplicit, data);
+ const zir_index = extra.data.zir_index.unwrap() orelse {
+ assert(extra.data.captures_len == 0);
+ break :ns .{ .generated_tag = .{
+ .union_type = @enumFromInt(ip.extra.items[extra.end]),
+ } };
+ };
+ if (extra.data.captures_len == std.math.maxInt(u32)) {
+ break :ns .{ .reified = .{
+ .zir_index = zir_index,
+ .type_hash = ip.extraData(PackedU64, extra.end).get(),
+ } };
+ }
+ break :ns .{ .declared = .{
+ .zir_index = zir_index,
+ .captures = .{ .owned = .{
+ .start = extra.end,
+ .len = extra.data.captures_len,
+ } },
+ } };
+ } },
.type_function => .{ .func_type = ip.extraFuncType(data) },
.undef => .{ .undef = @as(Index, @enumFromInt(data)) },
@@ -4366,7 +4752,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.type_array_small,
.type_vector,
- .type_struct_ns,
.type_struct_packed,
=> .{ .aggregate = .{
.ty = ty,
@@ -4375,16 +4760,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
// There is only one possible value precisely due to the
// fact that this values slice is fully populated!
- .type_struct => {
- const info = extraStructType(ip, ty_item.data);
- return .{ .aggregate = .{
- .ty = ty,
- .storage = .{ .elems = @ptrCast(info.field_inits.get(ip)) },
- } };
- },
-
- .type_struct_packed_inits => {
- const info = extraPackedStructType(ip, ty_item.data, true);
+ .type_struct, .type_struct_packed_inits => {
+ const info = loadStructType(ip, ty);
return .{ .aggregate = .{
.ty = ty,
.storage = .{ .elems = @ptrCast(info.field_inits.get(ip)) },
@@ -4476,18 +4853,6 @@ fn extraErrorSet(ip: *const InternPool, extra_index: u32) Key.ErrorSetType {
};
}
-fn extraUnionType(ip: *const InternPool, extra_index: u32) Key.UnionType {
- const type_union = ip.extraData(Tag.TypeUnion, extra_index);
- return .{
- .decl = type_union.decl,
- .namespace = type_union.namespace,
- .flags = type_union.flags,
- .enum_tag_ty = type_union.tag_ty,
- .zir_index = type_union.zir_index,
- .extra_index = extra_index,
- };
-}
-
fn extraTypeStructAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructType {
const type_struct_anon = ip.extraDataTrail(TypeStructAnon, extra_index);
const fields_len = type_struct_anon.data.fields_len;
@@ -4526,109 +4891,6 @@ fn extraTypeTupleAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructTyp
};
}
-fn extraStructType(ip: *const InternPool, extra_index: u32) Key.StructType {
- const s = ip.extraDataTrail(Tag.TypeStruct, extra_index);
- const fields_len = s.data.fields_len;
-
- var index = s.end;
-
- const field_types = t: {
- const types: Index.Slice = .{ .start = index, .len = fields_len };
- index += fields_len;
- break :t types;
- };
- const names_map, const field_names: NullTerminatedString.Slice = t: {
- if (s.data.flags.is_tuple) break :t .{ .none, .{ .start = 0, .len = 0 } };
- const names_map: MapIndex = @enumFromInt(ip.extra.items[index]);
- index += 1;
- const names: NullTerminatedString.Slice = .{ .start = index, .len = fields_len };
- index += fields_len;
- break :t .{ names_map.toOptional(), names };
- };
- const field_inits: Index.Slice = t: {
- if (!s.data.flags.any_default_inits) break :t .{ .start = 0, .len = 0 };
- const inits: Index.Slice = .{ .start = index, .len = fields_len };
- index += fields_len;
- break :t inits;
- };
- const namespace = t: {
- if (!s.data.flags.has_namespace) break :t .none;
- const namespace: NamespaceIndex = @enumFromInt(ip.extra.items[index]);
- index += 1;
- break :t namespace.toOptional();
- };
- const field_aligns: Alignment.Slice = t: {
- if (!s.data.flags.any_aligned_fields) break :t .{ .start = 0, .len = 0 };
- const aligns: Alignment.Slice = .{ .start = index, .len = fields_len };
- index += (fields_len + 3) / 4;
- break :t aligns;
- };
- const comptime_bits: Key.StructType.ComptimeBits = t: {
- if (!s.data.flags.any_comptime_fields) break :t .{ .start = 0, .len = 0 };
- const comptime_bits: Key.StructType.ComptimeBits = .{ .start = index, .len = fields_len };
- index += (fields_len + 31) / 32;
- break :t comptime_bits;
- };
- const runtime_order: Key.StructType.RuntimeOrder.Slice = t: {
- if (s.data.flags.is_extern) break :t .{ .start = 0, .len = 0 };
- const ro: Key.StructType.RuntimeOrder.Slice = .{ .start = index, .len = fields_len };
- index += fields_len;
- break :t ro;
- };
- const offsets = t: {
- const offsets: Key.StructType.Offsets = .{ .start = index, .len = fields_len };
- index += fields_len;
- break :t offsets;
- };
- return .{
- .extra_index = extra_index,
- .decl = s.data.decl.toOptional(),
- .zir_index = s.data.zir_index,
- .layout = if (s.data.flags.is_extern) .Extern else .Auto,
- .field_types = field_types,
- .names_map = names_map,
- .field_names = field_names,
- .field_inits = field_inits,
- .namespace = namespace,
- .field_aligns = field_aligns,
- .comptime_bits = comptime_bits,
- .runtime_order = runtime_order,
- .offsets = offsets,
- };
-}
-
-fn extraPackedStructType(ip: *const InternPool, extra_index: u32, inits: bool) Key.StructType {
- const type_struct_packed = ip.extraDataTrail(Tag.TypeStructPacked, extra_index);
- const fields_len = type_struct_packed.data.fields_len;
- return .{
- .extra_index = extra_index,
- .decl = type_struct_packed.data.decl.toOptional(),
- .namespace = type_struct_packed.data.namespace,
- .zir_index = type_struct_packed.data.zir_index,
- .layout = .Packed,
- .field_types = .{
- .start = type_struct_packed.end,
- .len = fields_len,
- },
- .field_names = .{
- .start = type_struct_packed.end + fields_len,
- .len = fields_len,
- },
- .field_inits = if (inits) .{
- .start = type_struct_packed.end + fields_len * 2,
- .len = fields_len,
- } else .{
- .start = 0,
- .len = 0,
- },
- .field_aligns = .{ .start = 0, .len = 0 },
- .runtime_order = .{ .start = 0, .len = 0 },
- .comptime_bits = .{ .start = 0, .len = 0 },
- .offsets = .{ .start = 0, .len = 0 },
- .names_map = type_struct_packed.data.names_map.toOptional(),
- };
-}
-
fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType {
const type_function = ip.extraDataTrail(Tag.TypeFunction, extra_index);
var index: usize = type_function.end;
@@ -4720,28 +4982,6 @@ fn extraFuncCoerced(ip: *const InternPool, extra_index: u32) Key.Func {
return func;
}
-fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key {
- const enum_explicit = ip.extraDataTrail(EnumExplicit, data);
- const fields_len = enum_explicit.data.fields_len;
- return .{ .enum_type = .{
- .decl = enum_explicit.data.decl,
- .namespace = enum_explicit.data.namespace,
- .tag_ty = enum_explicit.data.int_tag_type,
- .names = .{
- .start = @intCast(enum_explicit.end),
- .len = fields_len,
- },
- .values = .{
- .start = @intCast(enum_explicit.end + fields_len),
- .len = if (enum_explicit.data.values_map != .none) fields_len else 0,
- },
- .tag_mode = tag_mode,
- .names_map = enum_explicit.data.names_map.toOptional(),
- .values_map = enum_explicit.data.values_map,
- .zir_index = enum_explicit.data.zir_index,
- } };
-}
-
fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key {
const int_info = ip.limbData(Int, limb_index);
return .{ .int = .{
@@ -4901,15 +5141,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.struct_type => unreachable, // use getStructType() instead
.anon_struct_type => unreachable, // use getAnonStructType() instead
.union_type => unreachable, // use getUnionType() instead
+ .opaque_type => unreachable, // use getOpaqueType() instead
- .opaque_type => |opaque_type| {
- ip.items.appendAssumeCapacity(.{
- .tag = .type_opaque,
- .data = try ip.addExtra(gpa, opaque_type),
- });
- },
-
- .enum_type => unreachable, // use getEnum() or getIncompleteEnum() instead
+ .enum_type => unreachable, // use getEnumType() instead
.func_type => unreachable, // use getFuncType() instead
.extern_func => unreachable, // use getExternFunc() instead
.func => unreachable, // use getFuncInstance() or getFuncDecl() instead
@@ -5027,14 +5261,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
assert(ptr.addr == .field);
assert(base_index.index < anon_struct_type.types.len);
},
- .struct_type => |struct_type| {
+ .struct_type => {
assert(ptr.addr == .field);
- assert(base_index.index < struct_type.field_types.len);
+ assert(base_index.index < ip.loadStructType(base_ptr_type.child).field_types.len);
},
- .union_type => |union_key| {
- const union_type = ip.loadUnionType(union_key);
+ .union_type => {
+ const union_type = ip.loadUnionType(base_ptr_type.child);
assert(ptr.addr == .field);
- assert(base_index.index < union_type.field_names.len);
+ assert(base_index.index < union_type.field_types.len);
},
.ptr_type => |slice_type| {
assert(ptr.addr == .field);
@@ -5305,7 +5539,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
assert(ip.isEnumType(enum_tag.ty));
switch (ip.indexToKey(enum_tag.ty)) {
.simple_type => assert(ip.isIntegerType(ip.typeOf(enum_tag.int))),
- .enum_type => |enum_type| assert(ip.typeOf(enum_tag.int) == enum_type.tag_ty),
+ .enum_type => assert(ip.typeOf(enum_tag.int) == ip.loadEnumType(enum_tag.ty).tag_ty),
else => unreachable,
}
ip.items.appendAssumeCapacity(.{
@@ -5398,8 +5632,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
assert(ip.typeOf(elem) == child);
}
},
- .struct_type => |t| {
- for (aggregate.storage.values(), t.field_types.get(ip)) |elem, field_ty| {
+ .struct_type => {
+ for (aggregate.storage.values(), ip.loadStructType(aggregate.ty).field_types.get(ip)) |elem, field_ty| {
assert(ip.typeOf(elem) == field_ty);
}
},
@@ -5572,10 +5806,17 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}
pub const UnionTypeInit = struct {
- flags: Tag.TypeUnion.Flags,
- decl: DeclIndex,
- namespace: NamespaceIndex,
- zir_index: TrackedInst.Index.Optional,
+ flags: packed struct {
+ runtime_tag: LoadedUnionType.RuntimeTag,
+ any_aligned_fields: bool,
+ layout: std.builtin.Type.ContainerLayout,
+ status: LoadedUnionType.Status,
+ requires_comptime: RequiresComptime,
+ assumed_runtime_bits: bool,
+ assumed_pointer_aligned: bool,
+ alignment: Alignment,
+ },
+ has_namespace: bool,
fields_len: u32,
enum_tag_ty: Index,
/// May have length 0 which leaves the values unset until later.
@@ -5584,27 +5825,84 @@ pub const UnionTypeInit = struct {
/// The logic for `any_aligned_fields` is asserted to have been done before
/// calling this function.
field_aligns: []const Alignment,
+ key: union(enum) {
+ declared: struct {
+ zir_index: TrackedInst.Index,
+ captures: []const CaptureValue,
+ },
+ reified: struct {
+ zir_index: TrackedInst.Index,
+ type_hash: u64,
+ },
+ },
};
-pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocator.Error!Index {
- const prev_extra_len = ip.extra.items.len;
+pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocator.Error!WipNamespaceType.Result {
+ const adapter: KeyAdapter = .{ .intern_pool = ip };
+ const gop = try ip.map.getOrPutAdapted(gpa, Key{ .union_type = switch (ini.key) {
+ .declared => |d| .{ .declared = .{
+ .zir_index = d.zir_index,
+ .captures = .{ .external = d.captures },
+ } },
+ .reified => |r| .{ .reified = .{
+ .zir_index = r.zir_index,
+ .type_hash = r.type_hash,
+ } },
+ } }, adapter);
+ if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) };
+ errdefer _ = ip.map.pop();
+
const align_elements_len = if (ini.flags.any_aligned_fields) (ini.fields_len + 3) / 4 else 0;
const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4);
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeUnion).Struct.fields.len +
+ // TODO: fmt bug
+ // zig fmt: off
+ switch (ini.key) {
+ .declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
+ .reified => 2, // type_hash: PackedU64
+ } +
+ // zig fmt: on
ini.fields_len + // field types
align_elements_len);
try ip.items.ensureUnusedCapacity(gpa, 1);
- const union_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeUnion{
- .flags = ini.flags,
+ const extra_index = ip.addExtraAssumeCapacity(Tag.TypeUnion{
+ .flags = .{
+ .any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
+ .runtime_tag = ini.flags.runtime_tag,
+ .any_aligned_fields = ini.flags.any_aligned_fields,
+ .layout = ini.flags.layout,
+ .status = ini.flags.status,
+ .requires_comptime = ini.flags.requires_comptime,
+ .assumed_runtime_bits = ini.flags.assumed_runtime_bits,
+ .assumed_pointer_aligned = ini.flags.assumed_pointer_aligned,
+ .alignment = ini.flags.alignment,
+ .is_reified = ini.key == .reified,
+ },
+ .fields_len = ini.fields_len,
.size = std.math.maxInt(u32),
.padding = std.math.maxInt(u32),
- .decl = ini.decl,
- .namespace = ini.namespace,
+ .decl = undefined, // set by `finish`
+ .namespace = .none, // set by `finish`
.tag_ty = ini.enum_tag_ty,
- .zir_index = ini.zir_index,
+ .zir_index = switch (ini.key) {
+ inline else => |x| x.zir_index,
+ },
});
+ ip.items.appendAssumeCapacity(.{
+ .tag = .type_union,
+ .data = extra_index,
+ });
+
+ switch (ini.key) {
+ .declared => |d| if (d.captures.len != 0) {
+ ip.extra.appendAssumeCapacity(@intCast(d.captures.len));
+ ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures));
+ },
+ .reified => |r| _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)),
+ }
+
// field types
if (ini.field_types.len > 0) {
assert(ini.field_types.len == ini.fields_len);
@@ -5627,27 +5925,41 @@ pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocat
assert(ini.field_aligns.len == 0);
}
- const adapter: KeyAdapter = .{ .intern_pool = ip };
- const gop = try ip.map.getOrPutAdapted(gpa, Key{
- .union_type = extraUnionType(ip, union_type_extra_index),
- }, adapter);
- if (gop.found_existing) {
- ip.extra.items.len = prev_extra_len;
- return @enumFromInt(gop.index);
+ return .{ .wip = .{
+ .index = @enumFromInt(ip.items.len - 1),
+ .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "decl").?,
+ .namespace_extra_index = if (ini.has_namespace)
+ extra_index + std.meta.fieldIndex(Tag.TypeUnion, "namespace").?
+ else
+ null,
+ } };
+}
+
+pub const WipNamespaceType = struct {
+ index: Index,
+ decl_extra_index: u32,
+ namespace_extra_index: ?u32,
+ pub fn finish(wip: WipNamespaceType, ip: *InternPool, decl: DeclIndex, namespace: OptionalNamespaceIndex) Index {
+ ip.extra.items[wip.decl_extra_index] = @intFromEnum(decl);
+ if (wip.namespace_extra_index) |i| {
+ ip.extra.items[i] = @intFromEnum(namespace.unwrap().?);
+ } else {
+ assert(namespace == .none);
+ }
+ return wip.index;
+ }
+ pub fn cancel(wip: WipNamespaceType, ip: *InternPool) void {
+ ip.remove(wip.index);
}
- ip.items.appendAssumeCapacity(.{
- .tag = .type_union,
- .data = union_type_extra_index,
- });
- return @enumFromInt(ip.items.len - 1);
-}
+ pub const Result = union(enum) {
+ wip: WipNamespaceType,
+ existing: Index,
+ };
+};
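// Illustrative usage sketch, not part of this change: the two-phase flow for interning a
// namespace type. `decl` and `namespace` are assumed to be created by the caller (Sema)
// between reserving the type and finishing it; `getOrCreateUnionType` is hypothetical.
fn getOrCreateUnionType(
    ip: *InternPool,
    gpa: Allocator,
    ini: UnionTypeInit,
    decl: DeclIndex,
    namespace: OptionalNamespaceIndex,
) Allocator.Error!Index {
    const wip = switch (try ip.getUnionType(gpa, ini)) {
        .existing => |index| return index, // an equivalent type was already interned
        .wip => |wip| wip,
    };
    // If setting up the Decl/Namespace could fail, `cancel` would undo the reservation
    // by calling `InternPool.remove` on the new item.
    errdefer wip.cancel(ip);
    return wip.finish(ip, decl, namespace);
}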
pub const StructTypeInit = struct {
- decl: DeclIndex,
- namespace: OptionalNamespaceIndex,
layout: std.builtin.Type.ContainerLayout,
- zir_index: TrackedInst.Index.Optional,
fields_len: u32,
known_non_opv: bool,
requires_comptime: RequiresComptime,
@@ -5656,69 +5968,101 @@ pub const StructTypeInit = struct {
any_default_inits: bool,
inits_resolved: bool,
any_aligned_fields: bool,
+ has_namespace: bool,
+ key: union(enum) {
+ declared: struct {
+ zir_index: TrackedInst.Index,
+ captures: []const CaptureValue,
+ },
+ reified: struct {
+ zir_index: TrackedInst.Index,
+ type_hash: u64,
+ },
+ },
};
pub fn getStructType(
ip: *InternPool,
gpa: Allocator,
ini: StructTypeInit,
-) Allocator.Error!Index {
+) Allocator.Error!WipNamespaceType.Result {
const adapter: KeyAdapter = .{ .intern_pool = ip };
- const key: Key = .{
- .struct_type = .{
- // Only the decl matters for hashing and equality purposes.
- .decl = ini.decl.toOptional(),
-
- .extra_index = undefined,
- .namespace = undefined,
- .zir_index = undefined,
- .layout = undefined,
- .field_names = undefined,
- .field_types = undefined,
- .field_inits = undefined,
- .field_aligns = undefined,
- .runtime_order = undefined,
- .comptime_bits = undefined,
- .offsets = undefined,
- .names_map = undefined,
- },
- };
+ const key: Key = .{ .struct_type = switch (ini.key) {
+ .declared => |d| .{ .declared = .{
+ .zir_index = d.zir_index,
+ .captures = .{ .external = d.captures },
+ } },
+ .reified => |r| .{ .reified = .{
+ .zir_index = r.zir_index,
+ .type_hash = r.type_hash,
+ } },
+ } };
const gop = try ip.map.getOrPutAdapted(gpa, key, adapter);
- if (gop.found_existing) return @enumFromInt(gop.index);
+ if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) };
errdefer _ = ip.map.pop();
const names_map = try ip.addMap(gpa, ini.fields_len);
errdefer _ = ip.maps.pop();
+ const zir_index = switch (ini.key) {
+ inline else => |x| x.zir_index,
+ };
+
const is_extern = switch (ini.layout) {
.Auto => false,
.Extern => true,
.Packed => {
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStructPacked).Struct.fields.len +
+ // TODO: fmt bug
+ // zig fmt: off
+ switch (ini.key) {
+ .declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
+ .reified => 2, // type_hash: PackedU64
+ } +
+ // zig fmt: on
ini.fields_len + // types
ini.fields_len + // names
ini.fields_len); // inits
+ const extra_index = ip.addExtraAssumeCapacity(Tag.TypeStructPacked{
+ .decl = undefined, // set by `finish`
+ .zir_index = zir_index,
+ .fields_len = ini.fields_len,
+ .namespace = .none,
+ .backing_int_ty = .none,
+ .names_map = names_map,
+ .flags = .{
+ .any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
+ .field_inits_wip = false,
+ .inits_resolved = ini.inits_resolved,
+ .is_reified = ini.key == .reified,
+ },
+ });
try ip.items.append(gpa, .{
.tag = if (ini.any_default_inits) .type_struct_packed_inits else .type_struct_packed,
- .data = ip.addExtraAssumeCapacity(Tag.TypeStructPacked{
- .decl = ini.decl,
- .zir_index = ini.zir_index,
- .fields_len = ini.fields_len,
- .namespace = ini.namespace,
- .backing_int_ty = .none,
- .names_map = names_map,
- .flags = .{
- .field_inits_wip = false,
- .inits_resolved = ini.inits_resolved,
- },
- }),
+ .data = extra_index,
});
+ switch (ini.key) {
+ .declared => |d| if (d.captures.len != 0) {
+ ip.extra.appendAssumeCapacity(@intCast(d.captures.len));
+ ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures));
+ },
+ .reified => |r| {
+ _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash));
+ },
+ }
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len);
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(OptionalNullTerminatedString.none), ini.fields_len);
if (ini.any_default_inits) {
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len);
}
- return @enumFromInt(ip.items.len - 1);
+ return .{ .wip = .{
+ .index = @enumFromInt(ip.items.len - 1),
+ .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "decl").?,
+ .namespace_extra_index = if (ini.has_namespace)
+ extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?
+ else
+ null,
+ } };
},
};
@@ -5727,38 +6071,57 @@ pub fn getStructType(
const comptime_elements_len = if (ini.any_comptime_fields) (ini.fields_len + 31) / 32 else 0;
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStruct).Struct.fields.len +
+ // TODO: fmt bug
+ // zig fmt: off
+ switch (ini.key) {
+ .declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
+ .reified => 2, // type_hash: PackedU64
+ } +
+ // zig fmt: on
(ini.fields_len * 5) + // types, names, inits, runtime order, offsets
align_elements_len + comptime_elements_len +
2); // names_map + namespace
+ const extra_index = ip.addExtraAssumeCapacity(Tag.TypeStruct{
+ .decl = undefined, // set by `finish`
+ .zir_index = zir_index,
+ .fields_len = ini.fields_len,
+ .size = std.math.maxInt(u32),
+ .flags = .{
+ .any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
+ .is_extern = is_extern,
+ .known_non_opv = ini.known_non_opv,
+ .requires_comptime = ini.requires_comptime,
+ .is_tuple = ini.is_tuple,
+ .assumed_runtime_bits = false,
+ .assumed_pointer_aligned = false,
+ .has_namespace = ini.has_namespace,
+ .any_comptime_fields = ini.any_comptime_fields,
+ .any_default_inits = ini.any_default_inits,
+ .any_aligned_fields = ini.any_aligned_fields,
+ .alignment = .none,
+ .alignment_wip = false,
+ .field_types_wip = false,
+ .layout_wip = false,
+ .layout_resolved = false,
+ .field_inits_wip = false,
+ .inits_resolved = ini.inits_resolved,
+ .fully_resolved = false,
+ .is_reified = ini.key == .reified,
+ },
+ });
try ip.items.append(gpa, .{
.tag = .type_struct,
- .data = ip.addExtraAssumeCapacity(Tag.TypeStruct{
- .decl = ini.decl,
- .zir_index = ini.zir_index,
- .fields_len = ini.fields_len,
- .size = std.math.maxInt(u32),
- .flags = .{
- .is_extern = is_extern,
- .known_non_opv = ini.known_non_opv,
- .requires_comptime = ini.requires_comptime,
- .is_tuple = ini.is_tuple,
- .assumed_runtime_bits = false,
- .assumed_pointer_aligned = false,
- .has_namespace = ini.namespace != .none,
- .any_comptime_fields = ini.any_comptime_fields,
- .any_default_inits = ini.any_default_inits,
- .any_aligned_fields = ini.any_aligned_fields,
- .alignment = .none,
- .alignment_wip = false,
- .field_types_wip = false,
- .layout_wip = false,
- .layout_resolved = false,
- .field_inits_wip = false,
- .inits_resolved = ini.inits_resolved,
- .fully_resolved = false,
- },
- }),
+ .data = extra_index,
});
+ switch (ini.key) {
+ .declared => |d| if (d.captures.len != 0) {
+ ip.extra.appendAssumeCapacity(@intCast(d.captures.len));
+ ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures));
+ },
+ .reified => |r| {
+ _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash));
+ },
+ }
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len);
if (!ini.is_tuple) {
ip.extra.appendAssumeCapacity(@intFromEnum(names_map));
@@ -5767,9 +6130,10 @@ pub fn getStructType(
if (ini.any_default_inits) {
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len);
}
- if (ini.namespace.unwrap()) |namespace| {
- ip.extra.appendAssumeCapacity(@intFromEnum(namespace));
- }
+ const namespace_extra_index: ?u32 = if (ini.has_namespace) i: {
+ ip.extra.appendAssumeCapacity(undefined); // set by `finish`
+ break :i @intCast(ip.extra.items.len - 1);
+ } else null;
if (ini.any_aligned_fields) {
ip.extra.appendNTimesAssumeCapacity(align_element, align_elements_len);
}
@@ -5777,10 +6141,14 @@ pub fn getStructType(
ip.extra.appendNTimesAssumeCapacity(0, comptime_elements_len);
}
if (ini.layout == .Auto) {
- ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Key.StructType.RuntimeOrder.unresolved), ini.fields_len);
+ ip.extra.appendNTimesAssumeCapacity(@intFromEnum(LoadedStructType.RuntimeOrder.unresolved), ini.fields_len);
}
ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len);
- return @enumFromInt(ip.items.len - 1);
+ return .{ .wip = .{
+ .index = @enumFromInt(ip.items.len - 1),
+ .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "decl").?,
+ .namespace_extra_index = namespace_extra_index,
+ } };
}
pub const AnonStructTypeInit = struct {
@@ -6395,7 +6763,6 @@ fn finishFuncInstance(
.@"addrspace" = fn_owner_decl.@"addrspace",
.analysis = .complete,
.zir_decl_index = fn_owner_decl.zir_decl_index,
- .src_scope = fn_owner_decl.src_scope,
.is_pub = fn_owner_decl.is_pub,
.is_exported = fn_owner_decl.is_exported,
.alive = true,
@@ -6417,257 +6784,386 @@ fn finishFuncInstance(
return func_index;
}
-/// Provides API for completing an enum type after calling `getIncompleteEnum`.
-pub const IncompleteEnumType = struct {
+pub const EnumTypeInit = struct {
+ has_namespace: bool,
+ has_values: bool,
+ tag_mode: LoadedEnumType.TagMode,
+ fields_len: u32,
+ key: union(enum) {
+ declared: struct {
+ zir_index: TrackedInst.Index,
+ captures: []const CaptureValue,
+ },
+ reified: struct {
+ zir_index: TrackedInst.Index,
+ type_hash: u64,
+ },
+ },
+};
+
+pub const WipEnumType = struct {
index: Index,
tag_ty_index: u32,
+ decl_index: u32,
+ namespace_index: ?u32,
names_map: MapIndex,
names_start: u32,
values_map: OptionalMapIndex,
values_start: u32,
- pub fn setTagType(self: @This(), ip: *InternPool, tag_ty: Index) void {
- assert(tag_ty == .noreturn_type or ip.isIntegerType(tag_ty));
- ip.extra.items[self.tag_ty_index] = @intFromEnum(tag_ty);
+ pub fn prepare(
+ wip: WipEnumType,
+ ip: *InternPool,
+ decl: DeclIndex,
+ namespace: OptionalNamespaceIndex,
+ ) void {
+ ip.extra.items[wip.decl_index] = @intFromEnum(decl);
+ if (wip.namespace_index) |i| {
+ ip.extra.items[i] = @intFromEnum(namespace.unwrap().?);
+ } else {
+ assert(namespace == .none);
+ }
}
- /// Returns the already-existing field with the same name, if any.
- pub fn addFieldName(
- self: @This(),
- ip: *InternPool,
- name: NullTerminatedString,
- ) ?u32 {
- return ip.addFieldName(self.names_map, self.names_start, name);
+ pub fn setTagTy(wip: WipEnumType, ip: *InternPool, tag_ty: Index) void {
+ assert(ip.isIntegerType(tag_ty));
+ ip.extra.items[wip.tag_ty_index] = @intFromEnum(tag_ty);
}
- /// Returns the already-existing field with the same value, if any.
- /// Make sure the type of the value has the integer tag type of the enum.
- pub fn addFieldValue(
- self: @This(),
- ip: *InternPool,
- value: Index,
- ) ?u32 {
- assert(ip.typeOf(value) == @as(Index, @enumFromInt(ip.extra.items[self.tag_ty_index])));
- const map = &ip.maps.items[@intFromEnum(self.values_map.unwrap().?)];
+ pub const FieldConflict = struct {
+ kind: enum { name, value },
+ prev_field_idx: u32,
+ };
+
+ /// Returns the already-existing field with the same name or value, if any.
+ /// If the enum is automatically numbered, `value` must be `.none`.
+ /// Otherwise, the type of `value` must be the integer tag type of the enum.
+ pub fn nextField(wip: WipEnumType, ip: *InternPool, name: NullTerminatedString, value: Index) ?FieldConflict {
+ if (ip.addFieldName(wip.names_map, wip.names_start, name)) |conflict| {
+ return .{ .kind = .name, .prev_field_idx = conflict };
+ }
+ if (value == .none) {
+ assert(wip.values_map == .none);
+ return null;
+ }
+ assert(ip.typeOf(value) == @as(Index, @enumFromInt(ip.extra.items[wip.tag_ty_index])));
+ const map = &ip.maps.items[@intFromEnum(wip.values_map.unwrap().?)];
const field_index = map.count();
- const indexes = ip.extra.items[self.values_start..][0..field_index];
+ const indexes = ip.extra.items[wip.values_start..][0..field_index];
const adapter: Index.Adapter = .{ .indexes = @ptrCast(indexes) };
const gop = map.getOrPutAssumeCapacityAdapted(value, adapter);
- if (gop.found_existing) return @intCast(gop.index);
- ip.extra.items[self.values_start + field_index] = @intFromEnum(value);
+ if (gop.found_existing) {
+ return .{ .kind = .value, .prev_field_idx = @intCast(gop.index) };
+ }
+ ip.extra.items[wip.values_start + field_index] = @intFromEnum(value);
return null;
}
-};
-/// This is used to create an enum type in the `InternPool`, with the ability
-/// to update the tag type, field names, and field values later.
-pub fn getIncompleteEnum(
- ip: *InternPool,
- gpa: Allocator,
- enum_type: Key.IncompleteEnumType,
-) Allocator.Error!IncompleteEnumType {
- switch (enum_type.tag_mode) {
- .auto => return getIncompleteEnumAuto(ip, gpa, enum_type),
- .explicit => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_explicit),
- .nonexhaustive => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_nonexhaustive),
+ pub fn cancel(wip: WipEnumType, ip: *InternPool) void {
+ ip.remove(wip.index);
}
-}
-
-fn getIncompleteEnumAuto(
- ip: *InternPool,
- gpa: Allocator,
- enum_type: Key.IncompleteEnumType,
-) Allocator.Error!IncompleteEnumType {
- const int_tag_type = if (enum_type.tag_ty != .none)
- enum_type.tag_ty
- else
- try ip.get(gpa, .{ .int_type = .{
- .bits = if (enum_type.fields_len == 0) 0 else std.math.log2_int_ceil(u32, enum_type.fields_len),
- .signedness = .unsigned,
- } });
-
- // We must keep the map in sync with `items`. The hash and equality functions
- // for enum types only look at the decl field, which is present even in
- // an `IncompleteEnumType`.
- const adapter: KeyAdapter = .{ .intern_pool = ip };
- const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter);
- assert(!gop.found_existing);
-
- const names_map = try ip.addMap(gpa, enum_type.fields_len);
-
- const extra_fields_len: u32 = @typeInfo(EnumAuto).Struct.fields.len;
- try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + enum_type.fields_len);
- try ip.items.ensureUnusedCapacity(gpa, 1);
-
- const extra_index = ip.addExtraAssumeCapacity(EnumAuto{
- .decl = enum_type.decl,
- .namespace = enum_type.namespace,
- .int_tag_type = int_tag_type,
- .names_map = names_map,
- .fields_len = enum_type.fields_len,
- .zir_index = enum_type.zir_index,
- });
- ip.items.appendAssumeCapacity(.{
- .tag = .type_enum_auto,
- .data = extra_index,
- });
- ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), enum_type.fields_len);
- return .{
- .index = @enumFromInt(ip.items.len - 1),
- .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?,
- .names_map = names_map,
- .names_start = extra_index + extra_fields_len,
- .values_map = .none,
- .values_start = undefined,
+ pub const Result = union(enum) {
+ wip: WipEnumType,
+ existing: Index,
};
-}
+};
-fn getIncompleteEnumExplicit(
+pub fn getEnumType(
ip: *InternPool,
gpa: Allocator,
- enum_type: Key.IncompleteEnumType,
- tag: Tag,
-) Allocator.Error!IncompleteEnumType {
- // We must keep the map in sync with `items`. The hash and equality functions
- // for enum types only look at the decl field, which is present even in
- // an `IncompleteEnumType`.
+ ini: EnumTypeInit,
+) Allocator.Error!WipEnumType.Result {
const adapter: KeyAdapter = .{ .intern_pool = ip };
- const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter);
- assert(!gop.found_existing);
-
- const names_map = try ip.addMap(gpa, enum_type.fields_len);
- const values_map: OptionalMapIndex = if (!enum_type.has_values) .none else m: {
- const values_map = try ip.addMap(gpa, enum_type.fields_len);
- break :m values_map.toOptional();
- };
-
- const reserved_len = enum_type.fields_len +
- if (enum_type.has_values) enum_type.fields_len else 0;
+ const gop = try ip.map.getOrPutAdapted(gpa, Key{ .enum_type = switch (ini.key) {
+ .declared => |d| .{ .declared = .{
+ .zir_index = d.zir_index,
+ .captures = .{ .external = d.captures },
+ } },
+ .reified => |r| .{ .reified = .{
+ .zir_index = r.zir_index,
+ .type_hash = r.type_hash,
+ } },
+ } }, adapter);
+ if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) };
+ assert(gop.index == ip.items.len);
+ errdefer _ = ip.map.pop();
- const extra_fields_len: u32 = @typeInfo(EnumExplicit).Struct.fields.len;
- try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + reserved_len);
try ip.items.ensureUnusedCapacity(gpa, 1);
- const extra_index = ip.addExtraAssumeCapacity(EnumExplicit{
- .decl = enum_type.decl,
- .namespace = enum_type.namespace,
- .int_tag_type = enum_type.tag_ty,
- .fields_len = enum_type.fields_len,
- .names_map = names_map,
- .values_map = values_map,
- .zir_index = enum_type.zir_index,
- });
+ const names_map = try ip.addMap(gpa, ini.fields_len);
+ errdefer _ = ip.maps.pop();
- ip.items.appendAssumeCapacity(.{
- .tag = tag,
- .data = extra_index,
- });
- // This is both fields and values (if present).
- ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), reserved_len);
- return .{
- .index = @enumFromInt(ip.items.len - 1),
- .tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?,
- .names_map = names_map,
- .names_start = extra_index + extra_fields_len,
- .values_map = values_map,
- .values_start = extra_index + extra_fields_len + enum_type.fields_len,
- };
+ switch (ini.tag_mode) {
+ .auto => {
+ assert(!ini.has_values);
+ try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len +
+ // TODO: fmt bug
+ // zig fmt: off
+ switch (ini.key) {
+ .declared => |d| d.captures.len,
+ .reified => 2, // type_hash: PackedU64
+ } +
+ // zig fmt: on
+ ini.fields_len); // field names
+
+ const extra_index = ip.addExtraAssumeCapacity(EnumAuto{
+ .decl = undefined, // set by `prepare`
+ .captures_len = switch (ini.key) {
+ .declared => |d| @intCast(d.captures.len),
+ .reified => std.math.maxInt(u32),
+ },
+ .namespace = .none,
+ .int_tag_type = .none, // set by `setTagTy`
+ .fields_len = ini.fields_len,
+ .names_map = names_map,
+ .zir_index = switch (ini.key) {
+ inline else => |x| x.zir_index,
+ }.toOptional(),
+ });
+ ip.items.appendAssumeCapacity(.{
+ .tag = .type_enum_auto,
+ .data = extra_index,
+ });
+ switch (ini.key) {
+ .declared => |d| ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)),
+ .reified => |r| _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)),
+ }
+ const names_start = ip.extra.items.len;
+ ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len);
+ return .{ .wip = .{
+ .index = @enumFromInt(gop.index),
+ .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?,
+ .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?,
+ .namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? else null,
+ .names_map = names_map,
+ .names_start = @intCast(names_start),
+ .values_map = .none,
+ .values_start = undefined,
+ } };
+ },
+ .explicit, .nonexhaustive => {
+ const values_map: OptionalMapIndex = if (!ini.has_values) .none else m: {
+ const values_map = try ip.addMap(gpa, ini.fields_len);
+ break :m values_map.toOptional();
+ };
+ errdefer if (ini.has_values) {
+ _ = ip.maps.pop();
+ };
+
+ try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len +
+ // TODO: fmt bug
+ // zig fmt: off
+ switch (ini.key) {
+ .declared => |d| d.captures.len,
+ .reified => 2, // type_hash: PackedU64
+ } +
+ // zig fmt: on
+ ini.fields_len + // field names
+ ini.fields_len * @intFromBool(ini.has_values)); // field values
+
+ const extra_index = ip.addExtraAssumeCapacity(EnumExplicit{
+ .decl = undefined, // set by `prepare`
+ .captures_len = switch (ini.key) {
+ .declared => |d| @intCast(d.captures.len),
+ .reified => std.math.maxInt(u32),
+ },
+ .namespace = .none,
+ .int_tag_type = .none, // set by `setTagTy`
+ .fields_len = ini.fields_len,
+ .names_map = names_map,
+ .values_map = values_map,
+ .zir_index = switch (ini.key) {
+ inline else => |x| x.zir_index,
+ }.toOptional(),
+ });
+ ip.items.appendAssumeCapacity(.{
+ .tag = switch (ini.tag_mode) {
+ .auto => unreachable,
+ .explicit => .type_enum_explicit,
+ .nonexhaustive => .type_enum_nonexhaustive,
+ },
+ .data = extra_index,
+ });
+ switch (ini.key) {
+ .declared => |d| ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)),
+ .reified => |r| _ = ip.addExtraAssumeCapacity(PackedU64.init(r.type_hash)),
+ }
+ const names_start = ip.extra.items.len;
+ ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len);
+ const values_start = ip.extra.items.len;
+ if (ini.has_values) {
+ ip.extra.appendNTimesAssumeCapacity(undefined, ini.fields_len);
+ }
+ return .{ .wip = .{
+ .index = @enumFromInt(gop.index),
+ .tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?,
+ .decl_index = extra_index + std.meta.fieldIndex(EnumExplicit, "decl").?,
+ .namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumExplicit, "namespace").? else null,
+ .names_map = names_map,
+ .names_start = @intCast(names_start),
+ .values_map = values_map,
+ .values_start = @intCast(values_start),
+ } };
+ },
+ }
}
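// Illustrative usage sketch, not part of this change: reserving and then populating a
// small automatically numbered enum. The field names, tag type, and `fields_len` here
// are hypothetical; a real caller would surface a `FieldConflict` as a compile error.
fn createSmallEnum(ip: *InternPool, gpa: Allocator, decl: DeclIndex, zir_index: TrackedInst.Index) Allocator.Error!Index {
    const wip = switch (try ip.getEnumType(gpa, .{
        .has_namespace = false,
        .has_values = false,
        .tag_mode = .auto,
        .fields_len = 2,
        .key = .{ .declared = .{ .zir_index = zir_index, .captures = &.{} } },
    })) {
        .existing => |index| return index,
        .wip => |wip| wip,
    };
    errdefer wip.cancel(ip);
    wip.prepare(ip, decl, .none); // this sketch assumes the enum has no namespace
    wip.setTagTy(ip, .u8_type);
    assert(wip.nextField(ip, try ip.getOrPutString(gpa, "a"), .none) == null);
    assert(wip.nextField(ip, try ip.getOrPutString(gpa, "b"), .none) == null);
    return wip.index;
}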
-pub const GetEnumInit = struct {
+const GeneratedTagEnumTypeInit = struct {
decl: DeclIndex,
- namespace: OptionalNamespaceIndex,
+ owner_union_ty: Index,
tag_ty: Index,
names: []const NullTerminatedString,
values: []const Index,
- tag_mode: Key.EnumType.TagMode,
- zir_index: TrackedInst.Index.Optional,
+ tag_mode: LoadedEnumType.TagMode,
};
-pub fn getEnum(ip: *InternPool, gpa: Allocator, ini: GetEnumInit) Allocator.Error!Index {
- const adapter: KeyAdapter = .{ .intern_pool = ip };
- const gop = try ip.map.getOrPutAdapted(gpa, Key{
- .enum_type = .{
- // Only the decl is used for hashing and equality.
- .decl = ini.decl,
-
- .namespace = undefined,
- .tag_ty = undefined,
- .names = undefined,
- .values = undefined,
- .tag_mode = undefined,
- .names_map = undefined,
- .values_map = undefined,
- .zir_index = undefined,
- },
- }, adapter);
- if (gop.found_existing) return @enumFromInt(gop.index);
- errdefer _ = ip.map.pop();
+/// Creates an enum type which was automatically generated as the tag type of a
+/// `union` with no explicit tag type. Since this is only called once per union
+/// type, it asserts that no matching type yet exists.
+pub fn getGeneratedTagEnumType(ip: *InternPool, gpa: Allocator, ini: GeneratedTagEnumTypeInit) Allocator.Error!Index {
+ assert(ip.isUnion(ini.owner_union_ty));
+ assert(ip.isIntegerType(ini.tag_ty));
+ for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty);
+
+ try ip.map.ensureUnusedCapacity(gpa, 1);
try ip.items.ensureUnusedCapacity(gpa, 1);
- assert(ini.tag_ty == .noreturn_type or ip.isIntegerType(ini.tag_ty));
- for (ini.values) |value| assert(ip.typeOf(value) == ini.tag_ty);
+ const names_map = try ip.addMap(gpa, ini.names.len);
+ errdefer _ = ip.maps.pop();
+ ip.addStringsToMap(names_map, ini.names);
+
+ const fields_len: u32 = @intCast(ini.names.len);
switch (ini.tag_mode) {
.auto => {
- const names_map = try ip.addMap(gpa, ini.names.len);
- addStringsToMap(ip, names_map, ini.names);
-
- const fields_len: u32 = @intCast(ini.names.len);
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len +
- fields_len);
+ 1 + // owner_union
+ fields_len); // field names
ip.items.appendAssumeCapacity(.{
.tag = .type_enum_auto,
.data = ip.addExtraAssumeCapacity(EnumAuto{
.decl = ini.decl,
- .namespace = ini.namespace,
+ .captures_len = 0,
+ .namespace = .none,
.int_tag_type = ini.tag_ty,
+ .fields_len = fields_len,
.names_map = names_map,
+ .zir_index = .none,
+ }),
+ });
+ ip.extra.appendAssumeCapacity(@intFromEnum(ini.owner_union_ty));
+ ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names));
+ },
+ .explicit, .nonexhaustive => {
+ try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len +
+ 1 + // owner_union
+ fields_len + // field names
+ ini.values.len); // field values
+
+ const values_map: OptionalMapIndex = if (ini.values.len != 0) m: {
+ const map = try ip.addMap(gpa, ini.values.len);
+ addIndexesToMap(ip, map, ini.values);
+ break :m map.toOptional();
+ } else .none;
+ // The values map is not cleaned up on error, so assert at compile time that no error paths exist past this point.
+ errdefer @compileError("error path leaks values_map");
+
+ ip.items.appendAssumeCapacity(.{
+ .tag = switch (ini.tag_mode) {
+ .explicit => .type_enum_explicit,
+ .nonexhaustive => .type_enum_nonexhaustive,
+ .auto => unreachable,
+ },
+ .data = ip.addExtraAssumeCapacity(EnumExplicit{
+ .decl = ini.decl,
+ .captures_len = 0,
+ .namespace = .none,
+ .int_tag_type = ini.tag_ty,
.fields_len = fields_len,
- .zir_index = ini.zir_index,
+ .names_map = names_map,
+ .values_map = values_map,
+ .zir_index = .none,
}),
});
+ ip.extra.appendAssumeCapacity(@intFromEnum(ini.owner_union_ty));
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names));
- return @enumFromInt(ip.items.len - 1);
+ ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values));
},
- .explicit => return finishGetEnum(ip, gpa, ini, .type_enum_explicit),
- .nonexhaustive => return finishGetEnum(ip, gpa, ini, .type_enum_nonexhaustive),
}
+ // Same as above
+ errdefer @compileError("error path leaks values_map and extra data");
+
+ // Capacity for this was ensured earlier
+ const adapter: KeyAdapter = .{ .intern_pool = ip };
+ const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ .enum_type = .{
+ .generated_tag = .{ .union_type = ini.owner_union_ty },
+ } }, adapter);
+ assert(!gop.found_existing);
+ assert(gop.index == ip.items.len - 1);
+ return @enumFromInt(gop.index);
}
-pub fn finishGetEnum(
- ip: *InternPool,
- gpa: Allocator,
- ini: GetEnumInit,
- tag: Tag,
-) Allocator.Error!Index {
- const names_map = try ip.addMap(gpa, ini.names.len);
- addStringsToMap(ip, names_map, ini.names);
+pub const OpaqueTypeInit = struct {
+ has_namespace: bool,
+ key: union(enum) {
+ declared: struct {
+ zir_index: TrackedInst.Index,
+ captures: []const CaptureValue,
+ },
+ reified: struct {
+ zir_index: TrackedInst.Index,
+ // No type hash since reified opaques have no data other than the `@Type` location.
+ },
+ },
+};
- const values_map: OptionalMapIndex = if (ini.values.len == 0) .none else m: {
- const values_map = try ip.addMap(gpa, ini.values.len);
- addIndexesToMap(ip, values_map, ini.values);
- break :m values_map.toOptional();
- };
- const fields_len: u32 = @intCast(ini.names.len);
- try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len +
- fields_len);
+pub fn getOpaqueType(ip: *InternPool, gpa: Allocator, ini: OpaqueTypeInit) Allocator.Error!WipNamespaceType.Result {
+ const adapter: KeyAdapter = .{ .intern_pool = ip };
+ const gop = try ip.map.getOrPutAdapted(gpa, Key{ .opaque_type = switch (ini.key) {
+ .declared => |d| .{ .declared = .{
+ .zir_index = d.zir_index,
+ .captures = .{ .external = d.captures },
+ } },
+ .reified => |r| .{ .reified = .{
+ .zir_index = r.zir_index,
+ .type_hash = 0,
+ } },
+ } }, adapter);
+ if (gop.found_existing) return .{ .existing = @enumFromInt(gop.index) };
+ errdefer _ = ip.map.pop();
+ try ip.items.ensureUnusedCapacity(gpa, 1);
+ try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeOpaque).Struct.fields.len + switch (ini.key) {
+ .declared => |d| d.captures.len,
+ .reified => 0,
+ });
+ const extra_index = ip.addExtraAssumeCapacity(Tag.TypeOpaque{
+ .decl = undefined, // set by `finish`
+ .namespace = .none,
+ .zir_index = switch (ini.key) {
+ inline else => |x| x.zir_index,
+ },
+ .captures_len = switch (ini.key) {
+ .declared => |d| @intCast(d.captures.len),
+ .reified => std.math.maxInt(u32),
+ },
+ });
ip.items.appendAssumeCapacity(.{
- .tag = tag,
- .data = ip.addExtraAssumeCapacity(EnumExplicit{
- .decl = ini.decl,
- .namespace = ini.namespace,
- .int_tag_type = ini.tag_ty,
- .fields_len = fields_len,
- .names_map = names_map,
- .values_map = values_map,
- .zir_index = ini.zir_index,
- }),
+ .tag = .type_opaque,
+ .data = extra_index,
});
- ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names));
- ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values));
- return @enumFromInt(ip.items.len - 1);
+ switch (ini.key) {
+ .declared => |d| ip.extra.appendSliceAssumeCapacity(@ptrCast(d.captures)),
+ .reified => {},
+ }
+ return .{ .wip = .{
+ .index = @enumFromInt(gop.index),
+ .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "decl").?,
+ .namespace_extra_index = if (ini.has_namespace)
+ extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "namespace").?
+ else
+ null,
+ } };
}
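+// Sketch of consuming the result above (illustrative; assumes the
+// `WipNamespaceType.finish` method takes the new decl and optional namespace):
+//
+//     const ty: InternPool.Index = switch (try ip.getOpaqueType(gpa, ini)) {
+//         .existing => |existing| existing,
+//         .wip => |wip| wip.finish(ip, decl, namespace),
+//     };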
pub fn getIfExists(ip: *const InternPool, key: Key) ?Index {
@@ -6716,8 +7212,34 @@ fn addMap(ip: *InternPool, gpa: Allocator, cap: usize) Allocator.Error!MapIndex
/// This operation only happens under compile error conditions.
/// Leak the index until the next garbage collection.
-/// TODO: this is a bit problematic to implement, can we get away without it?
-pub const remove = @compileError("InternPool.remove is not currently a supported operation; put a TODO there instead");
+/// Invalidates all references to this index.
+pub fn remove(ip: *InternPool, index: Index) void {
+ if (@intFromEnum(index) < static_keys.len) {
+ // The item being removed replaced a special index via `InternPool.resolveBuiltinType`.
+ // Restore the original item at this index.
+ switch (static_keys[@intFromEnum(index)]) {
+ .simple_type => |s| {
+ ip.items.set(@intFromEnum(index), .{
+ .tag = .simple_type,
+ .data = @intFromEnum(s),
+ });
+ },
+ else => unreachable,
+ }
+ return;
+ }
+
+ if (@intFromEnum(index) == ip.items.len - 1) {
+ // Happy case - we can just drop the item without affecting any other indices.
+ ip.items.len -= 1;
+ _ = ip.map.pop();
+ } else {
+ // We must preserve the item so that indices following it remain valid.
+ // Thus, we rewrite the tag to `removed`, leaking the item until the next
+ // GC but causing `KeyAdapter` to ignore it.
+ ip.items.set(@intFromEnum(index), .{ .tag = .removed, .data = undefined });
+ }
+}
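+// Illustrative usage (a sketch, not part of this change): analysis code that
+// interns an index and later fails can drop it again via `errdefer`:
+//
+//     const new_index = try ip.get(gpa, key);
+//     errdefer ip.remove(new_index);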
fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void {
const limbs_len = @as(u32, @intCast(limbs.len));
@@ -7077,9 +7599,9 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.func => unreachable,
.int => |int| switch (ip.indexToKey(new_ty)) {
- .enum_type => |enum_type| return ip.get(gpa, .{ .enum_tag = .{
+ .enum_type => return ip.get(gpa, .{ .enum_tag = .{
.ty = new_ty,
- .int = try ip.getCoerced(gpa, val, enum_type.tag_ty),
+ .int = try ip.getCoerced(gpa, val, ip.loadEnumType(new_ty).tag_ty),
} }),
.ptr_type => return ip.get(gpa, .{ .ptr = .{
.ty = new_ty,
@@ -7108,7 +7630,8 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.enum_tag => |enum_tag| if (ip.isIntegerType(new_ty))
return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty),
.enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) {
- .enum_type => |enum_type| {
+ .enum_type => {
+ const enum_type = ip.loadEnumType(new_ty);
const index = enum_type.nameIndex(ip, enum_literal).?;
return ip.get(gpa, .{ .enum_tag = .{
.ty = new_ty,
@@ -7249,7 +7772,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
const new_elem_ty = switch (ip.indexToKey(new_ty)) {
inline .array_type, .vector_type => |seq_type| seq_type.child,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[i],
- .struct_type => |struct_type| struct_type.field_types.get(ip)[i],
+ .struct_type => ip.loadStructType(new_ty).field_types.get(ip)[i],
else => unreachable,
};
elem.* = try ip.getCoerced(gpa, elem.*, new_elem_ty);
@@ -7513,6 +8036,10 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
if (!gop.found_existing) gop.value_ptr.* = .{};
gop.value_ptr.count += 1;
gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) {
+ // Note that in this case, we have technically leaked some extra data
+ // bytes which we do not account for here.
+ .removed => 0,
+
.type_int_signed => 0,
.type_int_unsigned => 0,
.type_array_small => @sizeOf(Vector),
@@ -7529,12 +8056,31 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len);
},
.type_inferred_error_set => 0,
- .type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit),
- .type_enum_auto => @sizeOf(EnumAuto),
- .type_opaque => @sizeOf(Key.OpaqueType),
+ .type_enum_explicit, .type_enum_nonexhaustive => b: {
+ const info = ip.extraData(EnumExplicit, data);
+ var ints = @typeInfo(EnumExplicit).Struct.fields.len + info.captures_len + info.fields_len;
+ if (info.values_map != .none) ints += info.fields_len;
+ break :b @sizeOf(u32) * ints;
+ },
+ .type_enum_auto => b: {
+ const info = ip.extraData(EnumAuto, data);
+ const ints = @typeInfo(EnumAuto).Struct.fields.len + info.captures_len + info.fields_len;
+ break :b @sizeOf(u32) * ints;
+ },
+ .type_opaque => b: {
+ const info = ip.extraData(Tag.TypeOpaque, data);
+ const ints = @typeInfo(Tag.TypeOpaque).Struct.fields.len + info.captures_len;
+ break :b @sizeOf(u32) * ints;
+ },
.type_struct => b: {
- const info = ip.extraData(Tag.TypeStruct, data);
+ if (data == 0) break :b 0;
+ const extra = ip.extraDataTrail(Tag.TypeStruct, data);
+ const info = extra.data;
var ints: usize = @typeInfo(Tag.TypeStruct).Struct.fields.len;
+ if (info.flags.any_captures) {
+ const captures_len = ip.extra.items[extra.end];
+ ints += 1 + captures_len;
+ }
ints += info.fields_len; // types
if (!info.flags.is_tuple) {
ints += 1; // names_map
@@ -7552,20 +8098,29 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
ints += info.fields_len; // offsets
break :b @sizeOf(u32) * ints;
},
- .type_struct_ns => @sizeOf(Module.Namespace),
.type_struct_anon => b: {
const info = ip.extraData(TypeStructAnon, data);
break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len);
},
.type_struct_packed => b: {
- const info = ip.extraData(Tag.TypeStructPacked, data);
+ const extra = ip.extraDataTrail(Tag.TypeStructPacked, data);
+ const captures_len = if (extra.data.flags.any_captures)
+ ip.extra.items[extra.end]
+ else
+ 0;
break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len +
- info.fields_len + info.fields_len);
+ @intFromBool(extra.data.flags.any_captures) + captures_len +
+ extra.data.fields_len * 2);
},
.type_struct_packed_inits => b: {
- const info = ip.extraData(Tag.TypeStructPacked, data);
+ const extra = ip.extraDataTrail(Tag.TypeStructPacked, data);
+ const captures_len = if (extra.data.flags.any_captures)
+ ip.extra.items[extra.end]
+ else
+ 0;
break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len +
- info.fields_len + info.fields_len + info.fields_len);
+ @intFromBool(extra.data.flags.any_captures) + captures_len +
+ extra.data.fields_len * 3);
},
.type_tuple_anon => b: {
const info = ip.extraData(TypeStructAnon, data);
@@ -7573,16 +8128,20 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
},
.type_union => b: {
- const info = ip.extraData(Tag.TypeUnion, data);
- const enum_info = ip.indexToKey(info.tag_ty).enum_type;
- const fields_len: u32 = @intCast(enum_info.names.len);
+ const extra = ip.extraDataTrail(Tag.TypeUnion, data);
+ const captures_len = if (extra.data.flags.any_captures)
+ ip.extra.items[extra.end]
+ else
+ 0;
const per_field = @sizeOf(u32); // field type
// 1 byte per field for alignment, rounded up to the nearest 4 bytes
- const alignments = if (info.flags.any_aligned_fields)
- ((fields_len + 3) / 4) * 4
+ const alignments = if (extra.data.flags.any_aligned_fields)
+ ((extra.data.fields_len + 3) / 4) * 4
else
0;
- break :b @sizeOf(Tag.TypeUnion) + (fields_len * per_field) + alignments;
+ break :b @sizeOf(Tag.TypeUnion) +
+ 4 * (@intFromBool(extra.data.flags.any_captures) + captures_len) +
+ (extra.data.fields_len * per_field) + alignments;
},
.type_function => b: {
@@ -7698,6 +8257,8 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
for (tags, datas, 0..) |tag, data, i| {
try w.print("${d} = {s}(", .{ i, @tagName(tag) });
switch (tag) {
+ .removed => {},
+
.simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(data)))}),
.simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(data)))}),
@@ -7718,7 +8279,6 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
.type_enum_auto,
.type_opaque,
.type_struct,
- .type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
@@ -8105,6 +8665,8 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
// This optimization on tags is needed so that indexToKey can call
// typeOf without being recursive.
_ => switch (ip.items.items(.tag)[@intFromEnum(index)]) {
+ .removed => unreachable,
+
.type_int_signed,
.type_int_unsigned,
.type_array_big,
@@ -8124,7 +8686,6 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.simple_type,
.type_opaque,
.type_struct,
- .type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
@@ -8218,7 +8779,7 @@ pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E {
pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
return switch (ip.indexToKey(ty)) {
- .struct_type => |struct_type| struct_type.field_types.len,
+ .struct_type => ip.loadStructType(ty).field_types.len,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.array_type => |array_type| array_type.len,
.vector_type => |vector_type| vector_type.len,
@@ -8228,7 +8789,7 @@ pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 {
return switch (ip.indexToKey(ty)) {
- .struct_type => |struct_type| struct_type.field_types.len,
+ .struct_type => ip.loadStructType(ty).field_types.len,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.array_type => |array_type| array_type.len + @intFromBool(array_type.sentinel != .none),
.vector_type => |vector_type| vector_type.len,
@@ -8423,6 +8984,8 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.var_args_param_type => unreachable, // special tag
_ => switch (ip.items.items(.tag)[@intFromEnum(index)]) {
+ .removed => unreachable,
+
.type_int_signed,
.type_int_unsigned,
=> .Int,
@@ -8458,7 +9021,6 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.type_opaque => .Opaque,
.type_struct,
- .type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,