aboutsummaryrefslogtreecommitdiff
path: root/src/Type.zig
diff options
context:
space:
mode:
authormlugg <mlugg@mlugg.co.uk>2024-10-26 23:13:58 +0100
committermlugg <mlugg@mlugg.co.uk>2024-10-31 20:42:53 +0000
commitd11bbde5f9c64ef58405604601d55f88bb5d5f3a (patch)
tree4e05b679cd791e90db68f3f2198294667a6fc57c /src/Type.zig
parenta916bc7fdd3975a9e2ef13c44f814c71ce017193 (diff)
downloadzig-d11bbde5f9c64ef58405604601d55f88bb5d5f3a.tar.gz
zig-d11bbde5f9c64ef58405604601d55f88bb5d5f3a.zip
compiler: remove anonymous struct types, unify all tuples
This commit reworks how anonymous struct literals and tuples work.

Previously, an untyped anonymous struct literal (e.g. `const x = .{ .a = 123 }`) was given an "anonymous struct type", which is a special kind of struct which coerces using structural equivalence. This mechanism was a holdover from before we used RLS / result types as the primary mechanism of type inference. This commit changes the language so that the type assigned here is a "normal" struct type. It uses a form of equivalence based on the AST node and the type's structure, much like a reified (`@Type`) type.

Additionally, tuples have been simplified. The distinction between "simple" and "complex" tuple types is eliminated. All tuples, even those explicitly declared using `struct { ... }` syntax, use structural equivalence, and do not undergo staged type resolution. Tuples are very restricted: they cannot have non-`auto` layouts, cannot have aligned fields, and cannot have default values with the exception of `comptime` fields. Tuples currently do not have optimized layout, but this can be changed in the future.

This change simplifies the language, and fixes some problematic coercions through pointers which led to unintuitive behavior.

Resolves: #16865
Diffstat (limited to 'src/Type.zig')
-rw-r--r--src/Type.zig178
1 file changed, 63 insertions(+), 115 deletions(-)
diff --git a/src/Type.zig b/src/Type.zig
index 1fa526a304..9548a4e5ef 100644
--- a/src/Type.zig
+++ b/src/Type.zig
@@ -320,33 +320,20 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
},
.struct_type => {
const name = ip.loadStructType(ty.toIntern()).name;
- if (name == .empty) {
- try writer.writeAll("@TypeOf(.{})");
- } else {
- try writer.print("{}", .{name.fmt(ip)});
- }
+ try writer.print("{}", .{name.fmt(ip)});
},
- .anon_struct_type => |anon_struct| {
- if (anon_struct.types.len == 0) {
+ .tuple_type => |tuple| {
+ if (tuple.types.len == 0) {
return writer.writeAll("@TypeOf(.{})");
}
- try writer.writeAll("struct{");
- for (anon_struct.types.get(ip), anon_struct.values.get(ip), 0..) |field_ty, val, i| {
- if (i != 0) try writer.writeAll(", ");
- if (val != .none) {
- try writer.writeAll("comptime ");
- }
- if (anon_struct.names.len != 0) {
- try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&zcu.intern_pool)});
- }
-
+ try writer.writeAll("struct {");
+ for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, val, i| {
+ try writer.writeAll(if (i == 0) " " else ", ");
+ if (val != .none) try writer.writeAll("comptime ");
try print(Type.fromInterned(field_ty), writer, pt);
-
- if (val != .none) {
- try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(pt)});
- }
+ if (val != .none) try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(pt)});
}
- try writer.writeAll("}");
+ try writer.writeAll(" }");
},
.union_type => {
@@ -489,8 +476,7 @@ pub fn hasRuntimeBitsInner(
) RuntimeBitsError!bool {
const ip = &zcu.intern_pool;
return switch (ty.toIntern()) {
- // False because it is a comptime-only type.
- .empty_struct_type => false,
+ .empty_tuple_type => false,
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| int_type.bits != 0,
.ptr_type => {
@@ -593,7 +579,7 @@ pub fn hasRuntimeBitsInner(
return false;
}
},
- .anon_struct_type => |tuple| {
+ .tuple_type => |tuple| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
if (try Type.fromInterned(field_ty).hasRuntimeBitsInner(
@@ -691,7 +677,7 @@ pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool {
.error_union_type,
.error_set_type,
.inferred_error_set_type,
- .anon_struct_type,
+ .tuple_type,
.opaque_type,
.anyframe_type,
// These are function bodies, not function pointers.
@@ -966,7 +952,7 @@ pub fn abiAlignmentInner(
const ip = &zcu.intern_pool;
switch (ty.toIntern()) {
- .empty_struct_type => return .{ .scalar = .@"1" },
+ .empty_tuple_type => return .{ .scalar = .@"1" },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
if (int_type.bits == 0) return .{ .scalar = .@"1" };
@@ -1109,7 +1095,7 @@ pub fn abiAlignmentInner(
return .{ .scalar = struct_type.flagsUnordered(ip).alignment };
},
- .anon_struct_type => |tuple| {
+ .tuple_type => |tuple| {
var big_align: Alignment = .@"1";
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
@@ -1295,7 +1281,7 @@ pub fn abiSizeInner(
const ip = &zcu.intern_pool;
switch (ty.toIntern()) {
- .empty_struct_type => return .{ .scalar = 0 },
+ .empty_tuple_type => return .{ .scalar = 0 },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
@@ -1498,7 +1484,7 @@ pub fn abiSizeInner(
},
}
},
- .anon_struct_type => |tuple| {
+ .tuple_type => |tuple| {
switch (strat) {
.sema => try ty.resolveLayout(strat.pt(zcu, tid)),
.lazy, .eager => {},
@@ -1831,8 +1817,7 @@ pub fn bitSizeInner(
return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
},
- .anon_struct_type => {
- if (strat == .sema) try ty.resolveFields(strat.pt(zcu, tid));
+ .tuple_type => {
return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
},
@@ -2176,7 +2161,7 @@ pub fn containerLayout(ty: Type, zcu: *const Zcu) std.builtin.Type.ContainerLayo
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).layout,
- .anon_struct_type => .auto,
+ .tuple_type => .auto,
.union_type => ip.loadUnionType(ty.toIntern()).flagsUnordered(ip).layout,
else => unreachable,
};
@@ -2295,7 +2280,7 @@ pub fn arrayLenIncludingSentinel(ty: Type, zcu: *const Zcu) u64 {
pub fn vectorLen(ty: Type, zcu: *const Zcu) u32 {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.vector_type => |vector_type| vector_type.len,
- .anon_struct_type => |tuple| @intCast(tuple.types.len),
+ .tuple_type => |tuple| @intCast(tuple.types.len),
else => unreachable,
};
}
@@ -2305,7 +2290,7 @@ pub fn sentinel(ty: Type, zcu: *const Zcu) ?Value {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.vector_type,
.struct_type,
- .anon_struct_type,
+ .tuple_type,
=> null,
.array_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null,
@@ -2386,7 +2371,7 @@ pub fn intInfo(starting_ty: Type, zcu: *const Zcu) InternPool.Key.IntType {
return .{ .signedness = .unsigned, .bits = zcu.errorSetBits() };
},
- .anon_struct_type => unreachable,
+ .tuple_type => unreachable,
.ptr_type => unreachable,
.anyframe_type => unreachable,
@@ -2556,7 +2541,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
var ty = starting_type;
const ip = &zcu.intern_pool;
while (true) switch (ty.toIntern()) {
- .empty_struct_type => return Value.empty_struct,
+ .empty_tuple_type => return Value.empty_tuple,
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
@@ -2660,7 +2645,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
} }));
},
- .anon_struct_type => |tuple| {
+ .tuple_type => |tuple| {
for (tuple.values.get(ip)) |val| {
if (val == .none) return null;
}
@@ -2783,7 +2768,7 @@ pub fn comptimeOnlyInner(
) SemaError!bool {
const ip = &zcu.intern_pool;
return switch (ty.toIntern()) {
- .empty_struct_type => false,
+ .empty_tuple_type => false,
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => false,
@@ -2891,7 +2876,7 @@ pub fn comptimeOnlyInner(
};
},
- .anon_struct_type => |tuple| {
+ .tuple_type => |tuple| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
const have_comptime_val = val != .none;
if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) return true;
@@ -3022,7 +3007,7 @@ pub fn getNamespace(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.opaque_type => ip.loadOpaqueType(ty.toIntern()).namespace.toOptional(),
- .struct_type => ip.loadStructType(ty.toIntern()).namespace,
+ .struct_type => ip.loadStructType(ty.toIntern()).namespace.toOptional(),
.union_type => ip.loadUnionType(ty.toIntern()).namespace.toOptional(),
.enum_type => ip.loadEnumType(ty.toIntern()).namespace.toOptional(),
else => .none,
@@ -3181,7 +3166,7 @@ pub fn structFieldName(ty: Type, index: usize, zcu: *const Zcu) InternPool.Optio
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index),
- .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index),
+ .tuple_type => .none,
else => unreachable,
};
}
@@ -3190,7 +3175,7 @@ pub fn structFieldCount(ty: Type, zcu: *const Zcu) u32 {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).field_types.len,
- .anon_struct_type => |anon_struct| anon_struct.types.len,
+ .tuple_type => |tuple| tuple.types.len,
else => unreachable,
};
}
@@ -3204,7 +3189,7 @@ pub fn fieldType(ty: Type, index: usize, zcu: *const Zcu) Type {
const union_obj = ip.loadUnionType(ty.toIntern());
return Type.fromInterned(union_obj.field_types.get(ip)[index]);
},
- .anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]),
+ .tuple_type => |tuple| Type.fromInterned(tuple.types.get(ip)[index]),
else => unreachable,
};
}
@@ -3238,8 +3223,8 @@ pub fn fieldAlignmentInner(
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
return field_ty.structFieldAlignmentInner(explicit_align, struct_type.layout, strat, zcu, tid);
},
- .anon_struct_type => |anon_struct| {
- return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentInner(
+ .tuple_type => |tuple| {
+ return (try Type.fromInterned(tuple.types.get(ip)[index]).abiAlignmentInner(
strat.toLazy(),
zcu,
tid,
@@ -3361,8 +3346,8 @@ pub fn structFieldDefaultValue(ty: Type, index: usize, zcu: *const Zcu) Value {
if (val == .none) return Value.@"unreachable";
return Value.fromInterned(val);
},
- .anon_struct_type => |anon_struct| {
- const val = anon_struct.values.get(ip)[index];
+ .tuple_type => |tuple| {
+ const val = tuple.values.get(ip)[index];
// TODO: avoid using `unreachable` to indicate this.
if (val == .none) return Value.@"unreachable";
return Value.fromInterned(val);
@@ -3384,7 +3369,7 @@ pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Val
return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(pt);
}
},
- .anon_struct_type => |tuple| {
+ .tuple_type => |tuple| {
const val = tuple.values.get(ip)[index];
if (val == .none) {
return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(pt);
@@ -3400,7 +3385,7 @@ pub fn structFieldIsComptime(ty: Type, index: usize, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index),
- .anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none,
+ .tuple_type => |tuple| tuple.values.get(ip)[index] != .none,
else => unreachable,
};
}
@@ -3425,7 +3410,7 @@ pub fn structFieldOffset(
return struct_type.offsets.get(ip)[index];
},
- .anon_struct_type => |tuple| {
+ .tuple_type => |tuple| {
var offset: u64 = 0;
var big_align: Alignment = .none;
@@ -3472,7 +3457,6 @@ pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Zcu.LazySrcLoc {
.declared => |d| d.zir_index,
.reified => |r| r.zir_index,
.generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
- .empty_struct => return null,
},
else => return null,
},
@@ -3491,49 +3475,7 @@ pub fn isGenericPoison(ty: Type) bool {
pub fn isTuple(ty: Type, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => {
- const struct_type = ip.loadStructType(ty.toIntern());
- if (struct_type.layout == .@"packed") return false;
- if (struct_type.cau == .none) return false;
- return struct_type.flagsUnordered(ip).is_tuple;
- },
- .anon_struct_type => |anon_struct| anon_struct.names.len == 0,
- else => false,
- };
-}
-
-pub fn isAnonStruct(ty: Type, zcu: *const Zcu) bool {
- if (ty.toIntern() == .empty_struct_type) return true;
- return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
- .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0,
- else => false,
- };
-}
-
-pub fn isTupleOrAnonStruct(ty: Type, zcu: *const Zcu) bool {
- const ip = &zcu.intern_pool;
- return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => {
- const struct_type = ip.loadStructType(ty.toIntern());
- if (struct_type.layout == .@"packed") return false;
- if (struct_type.cau == .none) return false;
- return struct_type.flagsUnordered(ip).is_tuple;
- },
- .anon_struct_type => true,
- else => false,
- };
-}
-
-pub fn isSimpleTuple(ty: Type, zcu: *const Zcu) bool {
- return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
- .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0,
- else => false,
- };
-}
-
-pub fn isSimpleTupleOrAnonStruct(ty: Type, zcu: *const Zcu) bool {
- return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
- .anon_struct_type => true,
+ .tuple_type => true,
else => false,
};
}
@@ -3564,7 +3506,7 @@ pub fn toUnsigned(ty: Type, pt: Zcu.PerThread) !Type {
pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(),
+ .struct_type => ip.loadStructType(ty.toIntern()).zir_index,
.union_type => ip.loadUnionType(ty.toIntern()).zir_index,
.enum_type => ip.loadEnumType(ty.toIntern()).zir_index.unwrap(),
.opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index,
@@ -3575,12 +3517,11 @@ pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
pub fn typeDeclInstAllowGeneratedTag(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(),
+ .struct_type => ip.loadStructType(ty.toIntern()).zir_index,
.union_type => ip.loadUnionType(ty.toIntern()).zir_index,
.enum_type => |e| switch (e) {
.declared, .reified => ip.loadEnumType(ty.toIntern()).zir_index.unwrap().?,
.generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
- .empty_struct => unreachable,
},
.opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index,
else => null,
@@ -3588,13 +3529,16 @@ pub fn typeDeclInstAllowGeneratedTag(ty: Type, zcu: *const Zcu) ?InternPool.Trac
}
pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 {
+ // Note that changes to ZIR instruction tracking only need to update this code
+ // if a newly-tracked instruction can be a type's owner `zir_index`.
+ comptime assert(Zir.inst_tracking_version == 0);
+
const ip = &zcu.intern_pool;
const tracked = switch (ip.indexToKey(ty.toIntern())) {
.struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) {
.declared => |d| d.zir_index,
.reified => |r| r.zir_index,
.generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
- .empty_struct => return null,
},
else => return null,
};
@@ -3603,13 +3547,17 @@ pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 {
assert(file.zir_loaded);
const zir = file.zir;
const inst = zir.instructions.get(@intFromEnum(info.inst));
- assert(inst.tag == .extended);
- return switch (inst.data.extended.opcode) {
- .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line,
- .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line,
- .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line,
- .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line,
- .reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line,
+ return switch (inst.tag) {
+ .struct_init, .struct_init_ref => zir.extraData(Zir.Inst.StructInit, inst.data.pl_node.payload_index).data.abs_line,
+ .struct_init_anon => zir.extraData(Zir.Inst.StructInitAnon, inst.data.pl_node.payload_index).data.abs_line,
+ .extended => switch (inst.data.extended.opcode) {
+ .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line,
+ .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line,
+ .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line,
+ .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line,
+ .reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line,
+ else => unreachable,
+ },
else => unreachable,
};
}
@@ -3697,8 +3645,8 @@ pub fn resolveLayout(ty: Type, pt: Zcu.PerThread) SemaError!void {
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
.@"struct" => switch (ip.indexToKey(ty.toIntern())) {
- .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
- const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
+ .tuple_type => |tuple_type| for (0..tuple_type.types.len) |i| {
+ const field_ty = Type.fromInterned(tuple_type.types.get(ip)[i]);
try field_ty.resolveLayout(pt);
},
.struct_type => return ty.resolveStructInner(pt, .layout),
@@ -3796,7 +3744,7 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
.optional_noreturn_type,
.anyerror_void_error_union_type,
.generic_poison_type,
- .empty_struct_type,
+ .empty_tuple_type,
=> {},
.undef => unreachable,
@@ -3813,7 +3761,7 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
.null_value => unreachable,
.bool_true => unreachable,
.bool_false => unreachable,
- .empty_struct => unreachable,
+ .empty_tuple => unreachable,
.generic_poison => unreachable,
else => switch (ty_ip.unwrap(ip).getTag(ip)) {
@@ -3868,8 +3816,8 @@ pub fn resolveFully(ty: Type, pt: Zcu.PerThread) SemaError!void {
},
.@"struct" => switch (ip.indexToKey(ty.toIntern())) {
- .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
- const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
+ .tuple_type => |tuple_type| for (0..tuple_type.types.len) |i| {
+ const field_ty = Type.fromInterned(tuple_type.types.get(ip)[i]);
try field_ty.resolveFully(pt);
},
.struct_type => return ty.resolveStructInner(pt, .full),
@@ -3903,7 +3851,7 @@ fn resolveStructInner(
const gpa = zcu.gpa;
const struct_obj = zcu.typeToStruct(ty).?;
- const owner = InternPool.AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap() orelse return });
+ const owner = InternPool.AnalUnit.wrap(.{ .cau = struct_obj.cau });
if (zcu.failed_analysis.contains(owner) or zcu.transitive_failed_analysis.contains(owner)) {
return error.AnalysisFail;
@@ -3915,7 +3863,7 @@ fn resolveStructInner(
var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
defer comptime_err_ret_trace.deinit();
- const zir = zcu.namespacePtr(struct_obj.namespace.unwrap().?).fileScope(zcu).zir;
+ const zir = zcu.namespacePtr(struct_obj.namespace).fileScope(zcu).zir;
var sema: Sema = .{
.pt = pt,
.gpa = gpa,
@@ -4196,7 +4144,7 @@ pub const single_const_pointer_to_comptime_int: Type = .{
.ip_index = .single_const_pointer_to_comptime_int_type,
};
pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type };
-pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type };
+pub const empty_tuple_type: Type = .{ .ip_index = .empty_tuple_type };
pub const generic_poison: Type = .{ .ip_index = .generic_poison_type };