| | | |
|---|---|---|
| author | Andrew Kelley <andrew@ziglang.org> | 2023-08-24 20:43:43 -0700 |
| committer | Andrew Kelley <andrew@ziglang.org> | 2023-09-21 14:48:40 -0700 |
| commit | accd5701c251c2741479fe08e56c8271c444f021 | |
| tree | 78871f150609687a9210063e90f8f4eb53997c38 /src/type.zig | |
| parent | 0345d7866347c9066b0646f9e46be9a068dcfaa3 | |
compiler: move struct types into InternPool proper
Structs were previously assigned indexes via a `SegmentedList`, but
were not actually backed by the InternPool arrays.
After this, the only remaining uses of `SegmentedList` in the compiler
are `Module.Decl` and `Module.Namespace`. Once those last two are
migrated to become backed by InternPool arrays as well, we can introduce
state serialization by writing these arrays to disk all at once
(sketched below).
Unfortunately, a lot of source code locations touch the struct type API,
so this commit is still a work in progress. Once I get it compiling and
passing the test suite, I can provide some interesting data points, such
as how it affected the InternPool memory size, along with a performance
comparison against the master branch.
I also couldn't resist migrating a bunch of the alignment API over to
the log2 `Alignment` type rather than a mishmash of u32 and u64 byte
units, with 0 implicitly meaning something different and special at
every location. It turns out you can do all the math you need directly
on the log2 representation of alignments.
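To see why that works, note that a valid alignment is always a power of
two, so only the exponent needs to be stored: taking the maximum of two
alignments is an integer `max` on the exponents, and rounding an offset
up to an alignment is a shift and a mask. A hedged sketch of such a type
(simplified and hypothetical; the real implementation is
`InternPool.Alignment`, whose API differs):

```zig
const std = @import("std");

// Sketch of a log2 alignment type: the stored integer is the exponent,
// so .@"16" is stored as 4, and `none` means "no explicit alignment".
const Align = enum(u6) {
    @"1" = 0,
    @"2" = 1,
    @"4" = 2,
    @"8" = 3,
    @"16" = 4,
    none = std.math.maxInt(u6),
    _,

    fn fromByteUnits(n: u64) Align {
        if (n == 0) return .none;
        std.debug.assert(std.math.isPowerOfTwo(n));
        return @enumFromInt(std.math.log2_int(u64, n));
    }

    fn toByteUnits(a: Align) u64 {
        std.debug.assert(a != .none);
        return @as(u64, 1) << @intFromEnum(a);
    }

    // The max of two alignments is the integer max of the exponents.
    fn max(a: Align, b: Align) Align {
        return @enumFromInt(@max(@intFromEnum(a), @intFromEnum(b)));
    }

    // Rounding an offset up never needs byte units: build the mask
    // (2^e - 1) straight from the exponent.
    fn forward(a: Align, addr: u64) u64 {
        std.debug.assert(a != .none);
        const mask = (@as(u64, 1) << @intFromEnum(a)) - 1;
        return (addr + mask) & ~mask;
    }
};

test "alignment math on the log2 representation" {
    const a = Align.fromByteUnits(8);
    const b = Align.fromByteUnits(4);
    try std.testing.expectEqual(Align.@"8", a.max(b));
    try std.testing.expectEqual(@as(u64, 16), a.forward(9));
    try std.testing.expectEqual(@as(u64, 8), a.toByteUnits());
}
```

Storing the exponent in a handful of bits also leaves room for a
sentinel like `none`, which replaces the old convention of 0 meaning
"natural alignment".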
Diffstat (limited to 'src/type.zig')
| | | |
|---|---|---|
| -rw-r--r-- | src/type.zig | 651 |

1 file changed, 261 insertions(+), 390 deletions(-)
```diff
diff --git a/src/type.zig b/src/type.zig
index 71548d793a..f6194060e1 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -9,6 +9,7 @@
 const target_util = @import("target.zig");
 const TypedValue = @import("TypedValue.zig");
 const Sema = @import("Sema.zig");
 const InternPool = @import("InternPool.zig");
+const Alignment = InternPool.Alignment;
 
 /// Both types and values are canonically represented by a single 32-bit integer
 /// which is an index into an `InternPool` data structure.
@@ -196,7 +197,9 @@ pub const Type = struct {
                 info.packed_offset.host_size != 0 or
                 info.flags.vector_index != .none)
             {
-                const alignment = info.flags.alignment.toByteUnitsOptional() orelse
+                const alignment = if (info.flags.alignment != .none)
+                    info.flags.alignment
+                else
                     info.child.toType().abiAlignment(mod);
                 try writer.print("align({d}", .{alignment});
@@ -315,8 +318,8 @@ pub const Type = struct {
                 .generic_poison => unreachable,
             },
             .struct_type => |struct_type| {
-                if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
-                    const decl = mod.declPtr(struct_obj.owner_decl);
+                if (struct_type.decl.unwrap()) |decl_index| {
+                    const decl = mod.declPtr(decl_index);
                     try decl.renderFullyQualifiedName(mod, writer);
                 } else if (struct_type.namespace.unwrap()) |namespace_index| {
                     const namespace = mod.namespacePtr(namespace_index);
@@ -561,24 +564,20 @@ pub const Type = struct {
                 .generic_poison => unreachable,
             },
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse {
-                    // This struct has no fields.
-                    return false;
-                };
-                if (struct_obj.status == .field_types_wip) {
+                if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) {
                     // In this case, we guess that hasRuntimeBits() for this type is true,
                     // and then later if our guess was incorrect, we emit a compile error.
-                    struct_obj.assumed_runtime_bits = true;
                     return true;
                 }
                 switch (strat) {
                     .sema => |sema| _ = try sema.resolveTypeFields(ty),
-                    .eager => assert(struct_obj.haveFieldTypes()),
-                    .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy,
+                    .eager => assert(struct_type.haveFieldTypes(ip)),
+                    .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy,
                 }
-                for (struct_obj.fields.values()) |field| {
-                    if (field.is_comptime) continue;
-                    if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
+                for (0..struct_type.field_types.len) |i| {
+                    if (struct_type.comptime_bits.getBit(ip, i)) continue;
+                    const field_ty = struct_type.field_types.get(ip)[i].toType();
+                    if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
                         return true;
                 } else {
                     return false;
@@ -728,11 +727,8 @@ pub const Type = struct {
                 => false,
             },
             .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse {
-                    // Struct with no fields has a well-defined layout of no bits.
-                    return true;
-                };
-                return struct_obj.layout != .Auto;
+                // Struct with no fields have a well-defined layout of no bits.
+                return struct_type.layout != .Auto or struct_type.field_types.len == 0;
             },
             .union_type => |union_type| switch (union_type.flagsPtr(ip).runtime_tag) {
                 .none, .safety => union_type.flagsPtr(ip).layout != .Auto,
@@ -806,22 +802,23 @@ pub const Type = struct {
         return mod.intern_pool.isNoReturn(ty.toIntern());
     }
 
-    /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit.
-    pub fn ptrAlignment(ty: Type, mod: *Module) u32 {
+    /// Returns `none` if the pointer is naturally aligned and the element type is 0-bit.
+    pub fn ptrAlignment(ty: Type, mod: *Module) Alignment {
         return ptrAlignmentAdvanced(ty, mod, null) catch unreachable;
     }
 
-    pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 {
+    pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment {
         return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
             .ptr_type => |ptr_type| {
-                if (ptr_type.flags.alignment.toByteUnitsOptional()) |a| {
-                    return @as(u32, @intCast(a));
-                } else if (opt_sema) |sema| {
+                if (ptr_type.flags.alignment != .none)
+                    return ptr_type.flags.alignment;
+
+                if (opt_sema) |sema| {
                     const res = try ptr_type.child.toType().abiAlignmentAdvanced(mod, .{ .sema = sema });
                     return res.scalar;
-                } else {
-                    return (ptr_type.child.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
                 }
+
+                return (ptr_type.child.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
             },
             .opt_type => |child| child.toType().ptrAlignmentAdvanced(mod, opt_sema),
             else => unreachable,
@@ -836,8 +833,8 @@ pub const Type = struct {
         };
     }
 
-    /// Returns 0 for 0-bit types.
-    pub fn abiAlignment(ty: Type, mod: *Module) u32 {
+    /// Returns `none` for 0-bit types.
+    pub fn abiAlignment(ty: Type, mod: *Module) Alignment {
         return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
     }
@@ -846,12 +843,12 @@ pub const Type = struct {
     pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value {
         switch (try ty.abiAlignmentAdvanced(mod, .lazy)) {
             .val => |val| return val,
-            .scalar => |x| return mod.intValue(Type.comptime_int, x),
+            .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnitsOptional().?),
         }
     }
 
     pub const AbiAlignmentAdvanced = union(enum) {
-        scalar: u32,
+        scalar: Alignment,
         val: Value,
     };
@@ -881,36 +878,36 @@ pub const Type = struct {
         };
 
         switch (ty.toIntern()) {
-            .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 },
+            .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .none },
             else => switch (ip.indexToKey(ty.toIntern())) {
                 .int_type => |int_type| {
-                    if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 };
-                    return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) };
+                    if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .none };
+                    return .{ .scalar = intAbiAlignment(int_type.bits, target) };
                 },
                 .ptr_type, .anyframe_type => {
-                    return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
+                    return .{ .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)) };
                 },
                 .array_type => |array_type| {
                     return array_type.child.toType().abiAlignmentAdvanced(mod, strat);
                 },
                 .vector_type => |vector_type| {
                     const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema);
-                    const bits = @as(u32, @intCast(bits_u64));
+                    const bits: u32 = @intCast(bits_u64);
                     const bytes = ((bits * vector_type.len) + 7) / 8;
                     const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
-                    return AbiAlignmentAdvanced{ .scalar = alignment };
+                    return .{ .scalar = Alignment.fromByteUnits(alignment) };
                 },
 
                 .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat),
                 .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, info.payload_type.toType()),
 
                 // TODO revisit this when we have the concept of the error tag type
-                .error_set_type, .inferred_error_set_type => return AbiAlignmentAdvanced{ .scalar = 2 },
+                .error_set_type, .inferred_error_set_type => return .{ .scalar = .@"2" },
 
                 // represents machine code; not a pointer
-                .func_type => |func_type| return AbiAlignmentAdvanced{
-                    .scalar = if (func_type.alignment.toByteUnitsOptional()) |a|
-                        @as(u32, @intCast(a))
+                .func_type => |func_type| return .{
+                    .scalar = if (func_type.alignment != .none)
+                        func_type.alignment
                     else
                         target_util.defaultFunctionAlignment(target),
                 },
@@ -926,47 +923,49 @@ pub const Type = struct {
                     .call_modifier,
                     .prefetch_options,
                     .anyopaque,
-                    => return AbiAlignmentAdvanced{ .scalar = 1 },
+                    => return .{ .scalar = .@"1" },
 
                     .usize,
                     .isize,
                     .export_options,
                     .extern_options,
-                    => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
-
-                    .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) },
-                    .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) },
-                    .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) },
-                    .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) },
-                    .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) },
-                    .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) },
-                    .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) },
-                    .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) },
-                    .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) },
-                    .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
-
-                    .f16 => return AbiAlignmentAdvanced{ .scalar = 2 },
-                    .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) },
+                    => return .{
+                        .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
+                    },
+
+                    .c_char => return .{ .scalar = cTypeAlign(target, .char) },
+                    .c_short => return .{ .scalar = cTypeAlign(target, .short) },
+                    .c_ushort => return .{ .scalar = cTypeAlign(target, .ushort) },
+                    .c_int => return .{ .scalar = cTypeAlign(target, .int) },
+                    .c_uint => return .{ .scalar = cTypeAlign(target, .uint) },
+                    .c_long => return .{ .scalar = cTypeAlign(target, .long) },
+                    .c_ulong => return .{ .scalar = cTypeAlign(target, .ulong) },
+                    .c_longlong => return .{ .scalar = cTypeAlign(target, .longlong) },
+                    .c_ulonglong => return .{ .scalar = cTypeAlign(target, .ulonglong) },
+                    .c_longdouble => return .{ .scalar = cTypeAlign(target, .longdouble) },
+
+                    .f16 => return .{ .scalar = .@"2" },
+                    .f32 => return .{ .scalar = cTypeAlign(target, .float) },
                     .f64 => switch (target.c_type_bit_size(.double)) {
-                        64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) },
-                        else => return AbiAlignmentAdvanced{ .scalar = 8 },
+                        64 => return .{ .scalar = cTypeAlign(target, .double) },
+                        else => return .{ .scalar = .@"8" },
                     },
                     .f80 => switch (target.c_type_bit_size(.longdouble)) {
-                        80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
+                        80 => return .{ .scalar = cTypeAlign(target, .longdouble) },
                         else => {
                             const u80_ty: Type = .{ .ip_index = .u80_type };
-                            return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) };
+                            return .{ .scalar = abiAlignment(u80_ty, mod) };
                         },
                     },
                     .f128 => switch (target.c_type_bit_size(.longdouble)) {
-                        128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
-                        else => return AbiAlignmentAdvanced{ .scalar = 16 },
+                        128 => return .{ .scalar = cTypeAlign(target, .longdouble) },
+                        else => return .{ .scalar = .@"16" },
                     },
 
                     // TODO revisit this when we have the concept of the error tag type
                     .anyerror,
                    .adhoc_inferred_error_set,
-                    => return AbiAlignmentAdvanced{ .scalar = 2 },
+                    => return .{ .scalar = .@"2" },
 
                    .void,
                    .type,
                    .comptime_int,
                    .comptime_float,
                    .null,
                    .undefined,
                    .enum_literal,
                    .type_info,
-                    => return AbiAlignmentAdvanced{ .scalar = 0 },
+                    => return .{ .scalar = .none },
 
                    .noreturn => unreachable,
                    .generic_poison => unreachable,
                },
                .struct_type => |struct_type| {
-                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
-                        return AbiAlignmentAdvanced{ .scalar = 0 };
-
-                    if (opt_sema) |sema| {
-                        if (struct_obj.status == .field_types_wip) {
-                            // We'll guess "pointer-aligned", if the struct has an
-                            // underaligned pointer field then some allocations
-                            // might require explicit alignment.
-                            return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
-                        }
-                        _ = try sema.resolveTypeFields(ty);
-                    }
-                    if (!struct_obj.haveFieldTypes()) switch (strat) {
-                        .eager => unreachable, // struct layout not resolved
-                        .sema => unreachable, // handled above
-                        .lazy => return .{ .val = (try mod.intern(.{ .int = .{
-                            .ty = .comptime_int_type,
-                            .storage = .{ .lazy_align = ty.toIntern() },
-                        } })).toValue() },
-                    };
-                    if (struct_obj.layout == .Packed) {
+                    if (struct_type.layout == .Packed) {
                        switch (strat) {
                            .sema => |sema| try sema.resolveTypeLayout(ty),
-                            .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
-                                .ty = .comptime_int_type,
-                                .storage = .{ .lazy_align = ty.toIntern() },
-                            } })).toValue() },
+                            .lazy => if (struct_type.backingIntType(ip).* == .none) return .{
+                                .val = (try mod.intern(.{ .int = .{
+                                    .ty = .comptime_int_type,
+                                    .storage = .{ .lazy_align = ty.toIntern() },
+                                } })).toValue(),
+                            },
                            .eager => {},
                        }
-                        assert(struct_obj.haveLayout());
-                        return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) };
+                        assert(struct_type.backingIntType(ip).* != .none);
+                        return .{ .scalar = struct_type.backingIntType(ip).toType().abiAlignment(mod) };
                    }
 
-                    const fields = ty.structFields(mod);
-                    var big_align: u32 = 0;
-                    for (fields.values()) |field| {
-                        if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
-                            error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
-                                .ty = .comptime_int_type,
-                                .storage = .{ .lazy_align = ty.toIntern() },
-                            } })).toValue() },
-                            else => |e| return e,
-                        })) continue;
+                    const flags = struct_type.flagsPtr(ip).*;
+                    if (flags.layout_resolved)
+                        return .{ .scalar = flags.alignment };
 
-                        const field_align = @as(u32, @intCast(field.abi_align.toByteUnitsOptional() orelse
-                            switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
-                                .scalar => |a| a,
-                                .val => switch (strat) {
-                                    .eager => unreachable, // struct layout not resolved
-                                    .sema => unreachable, // handled above
-                                    .lazy => return .{ .val = (try mod.intern(.{ .int = .{
-                                        .ty = .comptime_int_type,
-                                        .storage = .{ .lazy_align = ty.toIntern() },
-                                    } })).toValue() },
-                                },
-                            }));
-                        big_align = @max(big_align, field_align);
-
-                        // This logic is duplicated in Module.Struct.Field.alignment.
-                        if (struct_obj.layout == .Extern or target.ofmt == .c) {
-                            if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
-                                // The C ABI requires 128 bit integer fields of structs
-                                // to be 16-bytes aligned.
-                                big_align = @max(big_align, 16);
+                    switch (strat) {
+                        .eager => unreachable, // struct layout not resolved
+                        .sema => |sema| {
+                            if (flags.field_types_wip) {
+                                // We'll guess "pointer-aligned", if the struct has an
+                                // underaligned pointer field then some allocations
+                                // might require explicit alignment.
+                                return .{ .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)) };
                            }
-                        }
+                            try sema.resolveTypeLayout(ty);
+                            return .{ .scalar = struct_type.flagsPtr(ip).alignment };
+                        },
+                        .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+                            .ty = .comptime_int_type,
+                            .storage = .{ .lazy_align = ty.toIntern() },
+                        } })).toValue() },
                    }
-                    return AbiAlignmentAdvanced{ .scalar = big_align };
                },
                .anon_struct_type => |tuple| {
-                    var big_align: u32 = 0;
+                    var big_align: Alignment = .none;
                    for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
                        if (val != .none) continue; // comptime field
                        if (!(field_ty.toType().hasRuntimeBits(mod))) continue;
 
                        switch (try field_ty.toType().abiAlignmentAdvanced(mod, strat)) {
-                            .scalar => |field_align| big_align = @max(big_align, field_align),
+                            .scalar => |field_align| big_align = big_align.max(field_align),
                            .val => switch (strat) {
                                .eager => unreachable, // field type alignment not resolved
                                .sema => unreachable, // passed to abiAlignmentAdvanced above
@@ -1069,7 +1036,7 @@ pub const Type = struct {
                            },
                        }
                    }
-                    return AbiAlignmentAdvanced{ .scalar = big_align };
+                    return .{ .scalar = big_align };
                },
 
                .union_type => |union_type| {
@@ -1078,7 +1045,7 @@ pub const Type = struct {
                            // We'll guess "pointer-aligned", if the union has an
                            // underaligned pointer field then some allocations
                            // might require explicit alignment.
-                            return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
+                            return .{ .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)) };
                        }
                        _ = try sema.resolveTypeFields(ty);
                    }
@@ -1095,13 +1062,13 @@ pub const Type = struct {
                    if (union_obj.hasTag(ip)) {
                        return abiAlignmentAdvanced(union_obj.enum_tag_ty.toType(), mod, strat);
                    } else {
-                        return AbiAlignmentAdvanced{
-                            .scalar = @intFromBool(union_obj.flagsPtr(ip).layout == .Extern),
+                        return .{
+                            .scalar = Alignment.fromByteUnits(@intFromBool(union_obj.flagsPtr(ip).layout == .Extern)),
                        };
                    }
                }
 
-                var max_align: u32 = 0;
+                var max_align: Alignment = .none;
                if (union_obj.hasTag(ip)) max_align = union_obj.enum_tag_ty.toType().abiAlignment(mod);
                for (0..union_obj.field_names.len) |field_index| {
                    const field_ty = union_obj.field_types.get(ip)[field_index].toType();
@@ -1117,8 +1084,9 @@ pub const Type = struct {
                        else => |e| return e,
                    })) continue;
 
-                    const field_align_bytes: u32 = @intCast(field_align.toByteUnitsOptional() orelse
-                        switch (try field_ty.abiAlignmentAdvanced(mod, strat)) {
+                    const field_align_bytes: Alignment = if (field_align != .none)
+                        field_align
+                    else switch (try field_ty.abiAlignmentAdvanced(mod, strat)) {
                        .scalar => |a| a,
                        .val => switch (strat) {
                            .eager => unreachable, // struct layout not resolved
@@ -1128,13 +1096,15 @@ pub const Type = struct {
                                .storage = .{ .lazy_align = ty.toIntern() },
                            } })).toValue() },
                        },
-                    });
-                    max_align = @max(max_align, field_align_bytes);
+                    };
+                    max_align = max_align.max(field_align_bytes);
                }
-                return AbiAlignmentAdvanced{ .scalar = max_align };
+                return .{ .scalar = max_align };
+            },
+            .opaque_type => return .{ .scalar = .@"1" },
+            .enum_type => |enum_type| return .{
+                .scalar = enum_type.tag_ty.toType().abiAlignment(mod),
            },
-            .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 },
-            .enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) },
 
            // values, not types
            .undef,
@@ -1179,20 +1149,15 @@ pub const Type = struct {
                } })).toValue() },
                else => |e| return e,
            })) {
-                    return AbiAlignmentAdvanced{ .scalar = code_align };
+                    return .{ .scalar = code_align };
                }
-                return AbiAlignmentAdvanced{ .scalar = @max(
-                    code_align,
+                return .{ .scalar = code_align.max(
                    (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar,
                ) };
            },
            .lazy => {
                switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) {
-                    .scalar => |payload_align| {
-                        return AbiAlignmentAdvanced{
-                            .scalar = @max(code_align, payload_align),
-                        };
-                    },
+                    .scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) },
                    .val => {},
                }
                return .{ .val = (try mod.intern(.{ .int = .{
@@ -1212,9 +1177,11 @@ pub const Type = struct {
        const child_type = ty.optionalChild(mod);
        switch (child_type.zigTypeTag(mod)) {
-            .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
+            .Pointer => return .{
+                .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
+            },
            .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat),
-            .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 },
+            .NoReturn => return .{ .scalar = .none },
            else => {},
        }
@@ -1227,12 +1194,12 @@ pub const Type = struct {
                } })).toValue() },
                else => |e| return e,
            })) {
-                    return AbiAlignmentAdvanced{ .scalar = 1 };
+                    return .{ .scalar = .@"1" };
                }
                return child_type.abiAlignmentAdvanced(mod, strat);
            },
            .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) {
-                .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) },
+                .scalar => |x| return .{ .scalar = x.max(.@"1") },
                .val => return .{ .val = (try mod.intern(.{ .int = .{
                    .ty = .comptime_int_type,
                    .storage = .{ .lazy_align = ty.toIntern() },
@@ -1310,8 +1277,7 @@ pub const Type = struct {
                        .storage = .{ .lazy_size = ty.toIntern() },
                    } })).toValue() },
                };
-                const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema);
-                const elem_bits = @as(u32, @intCast(elem_bits_u64));
+                const elem_bits = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema);
                const total_bits = elem_bits * vector_type.len;
                const total_bytes = (total_bits + 7) / 8;
                const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) {
@@ -1321,8 +1287,7 @@ pub const Type = struct {
                        .storage = .{ .lazy_size = ty.toIntern() },
                    } })).toValue() },
                };
-                const result = std.mem.alignForward(u32, total_bytes, alignment);
-                return AbiSizeAdvanced{ .scalar = result };
+                return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) };
            },
 
            .opt_type => return ty.abiSizeAdvancedOptional(mod, strat),
@@ -1360,16 +1325,16 @@ pub const Type = struct {
                };
 
                var size: u64 = 0;
-                if (code_align > payload_align) {
+                if (code_align.compare(.gt, payload_align)) {
                    size += code_size;
-                    size = std.mem.alignForward(u64, size, payload_align);
+                    size = payload_align.forward(size);
                    size += payload_size;
-                    size = std.mem.alignForward(u64, size, code_align);
+                    size = code_align.forward(size);
                } else {
                    size += payload_size;
-                    size = std.mem.alignForward(u64, size, code_align);
+                    size = code_align.forward(size);
                    size += code_size;
-                    size = std.mem.alignForward(u64, size, payload_align);
+                    size = payload_align.forward(size);
                }
                return AbiSizeAdvanced{ .scalar = size };
            },
@@ -1435,41 +1400,43 @@ pub const Type = struct {
            .noreturn => unreachable,
            .generic_poison => unreachable,
        },
-        .struct_type => |struct_type| switch (ty.containerLayout(mod)) {
-            .Packed => {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
-                    return AbiSizeAdvanced{ .scalar = 0 };
-
-                switch (strat) {
-                    .sema => |sema| try sema.resolveTypeLayout(ty),
-                    .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
-                        .ty = .comptime_int_type,
-                        .storage = .{ .lazy_size = ty.toIntern() },
-                    } })).toValue() },
-                    .eager => {},
-                }
-                assert(struct_obj.haveLayout());
-                return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) };
-            },
-            else => {
-                switch (strat) {
-                    .sema => |sema| try sema.resolveTypeLayout(ty),
-                    .lazy => {
-                        const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
-                            return AbiSizeAdvanced{ .scalar = 0 };
-                        if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
-                            .ty = .comptime_int_type,
-                            .storage = .{ .lazy_size = ty.toIntern() },
-                        } })).toValue() };
+        .struct_type => |struct_type| {
+            switch (strat) {
+                .sema => |sema| try sema.resolveTypeLayout(ty),
+                .lazy => switch (struct_type.layout) {
+                    .Packed => {
+                        if (struct_type.backingIntType(ip).* == .none) return .{
+                            .val = (try mod.intern(.{ .int = .{
+                                .ty = .comptime_int_type,
+                                .storage = .{ .lazy_size = ty.toIntern() },
+                            } })).toValue(),
+                        };
                    },
-                    .eager => {},
-                }
-                const field_count = ty.structFieldCount(mod);
-                if (field_count == 0) {
-                    return AbiSizeAdvanced{ .scalar = 0 };
-                }
-                return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
-            },
+                    .Auto, .Extern => {
+                        if (!struct_type.haveLayout(ip)) return .{
+                            .val = (try mod.intern(.{ .int = .{
+                                .ty = .comptime_int_type,
+                                .storage = .{ .lazy_size = ty.toIntern() },
+                            } })).toValue(),
+                        };
+                    },
+                },
+                .eager => {},
+            }
+            switch (struct_type.layout) {
+                .Packed => {
+                    return .{
+                        .scalar = struct_type.backingIntType(ip).toType().abiSize(mod),
+                    };
+                },
+                .Auto, .Extern => {
+                    const field_count = ty.structFieldCount(mod);
+                    if (field_count == 0) {
+                        return .{ .scalar = 0 };
+                    }
+                    return .{ .scalar = ty.structFieldOffset(field_count, mod) };
+                },
+            }
        },
        .anon_struct_type => |tuple| {
            switch (strat) {
@@ -1565,20 +1532,19 @@ pub const Type = struct {
        // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
        // to the child type's ABI alignment.
        return AbiSizeAdvanced{
-            .scalar = child_ty.abiAlignment(mod) + payload_size,
+            .scalar = child_ty.abiAlignment(mod).toByteUnits(0) + payload_size,
        };
    }
 
    fn intAbiSize(bits: u16, target: Target) u64 {
-        const alignment = intAbiAlignment(bits, target);
-        return std.mem.alignForward(u64, @as(u16, @intCast((@as(u17, bits) + 7) / 8)), alignment);
+        return intAbiAlignment(bits, target).forward(@as(u16, @intCast((@as(u17, bits) + 7) / 8)));
    }
 
-    fn intAbiAlignment(bits: u16, target: Target) u32 {
-        return @min(
+    fn intAbiAlignment(bits: u16, target: Target) Alignment {
+        return Alignment.fromByteUnits(@min(
            std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))),
            target.maxIntAlignment(),
-        );
+        ));
    }
 
    pub fn bitSize(ty: Type, mod: *Module) u64 {
@@ -1610,7 +1576,7 @@ pub const Type = struct {
                const len = array_type.len + @intFromBool(array_type.sentinel != .none);
                if (len == 0) return 0;
                const elem_ty = array_type.child.toType();
-                const elem_size = @max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod));
+                const elem_size = @max(elem_ty.abiAlignment(mod).toByteUnits(0), elem_ty.abiSize(mod));
                if (elem_size == 0) return 0;
                const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema);
                return (len - 1) * 8 * elem_size + elem_bit_size;
@@ -1675,26 +1641,24 @@ pub const Type = struct {
                .enum_literal => unreachable,
                .generic_poison => unreachable,
 
-                .atomic_order => unreachable, // missing call to resolveTypeFields
-                .atomic_rmw_op => unreachable, // missing call to resolveTypeFields
-                .calling_convention => unreachable, // missing call to resolveTypeFields
-                .address_space => unreachable, // missing call to resolveTypeFields
-                .float_mode => unreachable, // missing call to resolveTypeFields
-                .reduce_op => unreachable, // missing call to resolveTypeFields
-                .call_modifier => unreachable, // missing call to resolveTypeFields
-                .prefetch_options => unreachable, // missing call to resolveTypeFields
-                .export_options => unreachable, // missing call to resolveTypeFields
-                .extern_options => unreachable, // missing call to resolveTypeFields
-                .type_info => unreachable, // missing call to resolveTypeFields
+                .atomic_order => unreachable,
+                .atomic_rmw_op => unreachable,
+                .calling_convention => unreachable,
+                .address_space => unreachable,
+                .float_mode => unreachable,
+                .reduce_op => unreachable,
+                .call_modifier => unreachable,
+                .prefetch_options => unreachable,
+                .export_options => unreachable,
+                .extern_options => unreachable,
+                .type_info => unreachable,
            },
            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0;
-                if (struct_obj.layout != .Packed) {
-                    return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
+                if (struct_type.layout == .Packed) {
+                    if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty);
+                    return try struct_type.backingIntType(ip).*.toType().bitSizeAdvanced(mod, opt_sema);
                }
-                if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty);
-                assert(struct_obj.haveLayout());
-                return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema);
+                return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
            },
            .anon_struct_type => {
@@ -1749,13 +1713,7 @@ pub const Type = struct {
    pub fn layoutIsResolved(ty: Type, mod: *Module) bool {
        const ip = &mod.intern_pool;
        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
-                    return struct_obj.haveLayout();
-                } else {
-                    return true;
-                }
-            },
+            .struct_type => |struct_type| struct_type.haveLayout(ip),
            .union_type => |union_type| union_type.haveLayout(ip),
            .array_type => |array_type| {
                if ((array_type.len + @intFromBool(array_type.sentinel != .none)) == 0) return true;
@@ -2020,10 +1978,7 @@ pub const Type = struct {
    pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
        const ip = &mod.intern_pool;
        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto;
-                return struct_obj.layout;
-            },
+            .struct_type => |struct_type| struct_type.layout,
            .anon_struct_type => .Auto,
            .union_type => |union_type| union_type.flagsPtr(ip).layout,
            else => unreachable,
@@ -2136,10 +2091,6 @@ pub const Type = struct {
        return switch (ip.indexToKey(ty.toIntern())) {
            .vector_type => |vector_type| vector_type.len,
            .array_type => |array_type| array_type.len,
-            .struct_type => |struct_type| {
-                const struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0;
-                return struct_obj.fields.count();
-            },
            .anon_struct_type => |tuple| tuple.types.len,
 
            else => unreachable,
@@ -2214,6 +2165,7 @@ pub const Type = struct {
    /// Asserts the type is an integer, enum, error set, or vector of one of them.
    pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType {
+        const ip = &mod.intern_pool;
        const target = mod.getTarget();
        var ty = starting_ty;
@@ -2233,13 +2185,9 @@ pub const Type = struct {
            .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) },
            .c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) },
            .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
-            else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            else => switch (ip.indexToKey(ty.toIntern())) {
                .int_type => |int_type| return int_type,
-                .struct_type => |struct_type| {
-                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                    assert(struct_obj.layout == .Packed);
-                    ty = struct_obj.backing_int_ty;
-                },
+                .struct_type => |t| ty = t.backingIntType(ip).*.toType(),
                .enum_type => |enum_type| ty = enum_type.tag_ty.toType(),
                .vector_type => |vector_type| ty = vector_type.child.toType(),
@@ -2503,33 +2451,28 @@ pub const Type = struct {
                .generic_poison => unreachable,
            },
            .struct_type => |struct_type| {
-                if (mod.structPtrUnwrap(struct_type.index)) |s| {
-                    assert(s.haveFieldTypes());
-                    const field_vals = try mod.gpa.alloc(InternPool.Index, s.fields.count());
-                    defer mod.gpa.free(field_vals);
-                    for (field_vals, s.fields.values()) |*field_val, field| {
-                        if (field.is_comptime) {
-                            field_val.* = field.default_val;
-                            continue;
-                        }
-                        if (try field.ty.onePossibleValue(mod)) |field_opv| {
-                            field_val.* = try field_opv.intern(field.ty, mod);
-                        } else return null;
+                assert(struct_type.haveFieldTypes(ip));
+                if (struct_type.knownNonOpv(ip))
+                    return null;
+                const field_vals = try mod.gpa.alloc(InternPool.Index, struct_type.field_types.len);
+                defer mod.gpa.free(field_vals);
+                for (field_vals, 0..) |*field_val, i_usize| {
+                    const i: u32 = @intCast(i_usize);
+                    if (struct_type.fieldIsComptime(ip, i)) {
+                        field_val.* = struct_type.field_inits.get(ip)[i];
+                        continue;
                    }
-
-                    // In this case the struct has no runtime-known fields and
-                    // therefore has one possible value.
-                    return (try mod.intern(.{ .aggregate = .{
-                        .ty = ty.toIntern(),
-                        .storage = .{ .elems = field_vals },
-                    } })).toValue();
+                    const field_ty = struct_type.field_types.get(ip)[i].toType();
+                    if (try field_ty.onePossibleValue(mod)) |field_opv| {
+                        field_val.* = try field_opv.intern(field_ty, mod);
+                    } else return null;
                }
 
-                // In this case the struct has no fields at all and
+                // In this case the struct has no runtime-known fields and
                // therefore has one possible value.
                return (try mod.intern(.{ .aggregate = .{
                    .ty = ty.toIntern(),
-                    .storage = .{ .elems = &.{} },
+                    .storage = .{ .elems = field_vals },
                } })).toValue();
            },
@@ -2715,18 +2658,20 @@ pub const Type = struct {
                => true,
            },
            .struct_type => |struct_type| {
+                // packed structs cannot be comptime-only because they have a well-defined
+                // memory layout and every field has a well-defined bit pattern.
+                if (struct_type.layout == .Packed)
+                    return false;
+
                // A struct with no fields is not comptime-only.
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
-                switch (struct_obj.requires_comptime) {
-                    .wip, .unknown => {
-                        // Return false to avoid incorrect dependency loops.
-                        // This will be handled correctly once merged with
-                        // `Sema.typeRequiresComptime`.
-                        return false;
-                    },
-                    .no => return false,
-                    .yes => return true,
-                }
+                return switch (struct_type.flagsPtr(ip).requires_comptime) {
+                    // Return false to avoid incorrect dependency loops.
+                    // This will be handled correctly once merged with
+                    // `Sema.typeRequiresComptime`.
+                    .wip, .unknown => false,
+                    .no => false,
+                    .yes => true,
+                };
            },
            .anon_struct_type => |tuple| {
@@ -2982,37 +2927,19 @@ pub const Type = struct {
        return enum_type.tagValueIndex(ip, int_tag);
    }
 
-    pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields {
-        switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .{};
-                assert(struct_obj.haveFieldTypes());
-                return struct_obj.fields;
-            },
-            else => unreachable,
-        }
-    }
-
    pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
        const ip = &mod.intern_pool;
        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                assert(struct_obj.haveFieldTypes());
-                return struct_obj.fields.keys()[field_index];
-            },
+            .struct_type => |struct_type| struct_type.field_names.get(ip)[field_index],
            .anon_struct_type => |anon_struct| anon_struct.names.get(ip)[field_index],
            else => unreachable,
        };
    }
 
    pub fn structFieldCount(ty: Type, mod: *Module) usize {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0;
-                assert(struct_obj.haveFieldTypes());
-                return struct_obj.fields.count();
-            },
+        const ip = &mod.intern_pool;
+        return switch (ip.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| struct_type.field_types.len,
            .anon_struct_type => |anon_struct| anon_struct.types.len,
            else => unreachable,
        };
    }
@@ -3022,11 +2949,7 @@ pub const Type = struct {
    pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
        const ip = &mod.intern_pool;
        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                assert(struct_obj.haveFieldTypes());
-                return struct_obj.fields.values()[index].ty;
-            },
+            .struct_type => |struct_type| struct_type.field_types.get(ip)[index].toType(),
            .union_type => |union_type| {
                const union_obj = ip.loadUnionType(union_type);
                return union_obj.field_types.get(ip)[index].toType();
            },
            ...
        };
    }
 
-    pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 {
+    pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) Alignment {
        const ip = &mod.intern_pool;
        switch (ip.indexToKey(ty.toIntern())) {
            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                assert(struct_obj.layout != .Packed);
-                return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout);
+                assert(struct_type.layout != .Packed);
+                const explicit_align = struct_type.field_aligns.get(ip)[index];
+                const field_ty = struct_type.field_types.get(ip)[index].toType();
+                return mod.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
            },
            .anon_struct_type => |anon_struct| {
                return anon_struct.types.get(ip)[index].toType().abiAlignment(mod);
@@ -3059,8 +2983,7 @@ pub const Type = struct {
        const ip = &mod.intern_pool;
        switch (ip.indexToKey(ty.toIntern())) {
            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                const val = struct_obj.fields.values()[index].default_val;
+                const val = struct_type.field_inits.get(ip)[index];
                // TODO: avoid using `unreachable` to indicate this.
                if (val == .none) return Value.@"unreachable";
                return val.toValue();
@@ -3079,12 +3002,10 @@ pub const Type = struct {
        const ip = &mod.intern_pool;
        switch (ip.indexToKey(ty.toIntern())) {
            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                const field = struct_obj.fields.values()[index];
-                if (field.is_comptime) {
-                    return field.default_val.toValue();
+                if (struct_type.comptime_bits.getBit(ip, index)) {
+                    return struct_type.field_inits.get(ip)[index].toValue();
                } else {
-                    return field.ty.onePossibleValue(mod);
+                    return struct_type.field_types.get(ip)[index].toType().onePossibleValue(mod);
                }
            },
            .anon_struct_type => |tuple| {
@@ -3102,30 +3023,25 @@ pub const Type = struct {
    pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
        const ip = &mod.intern_pool;
        return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                if (struct_obj.layout == .Packed) return false;
-                const field = struct_obj.fields.values()[index];
-                return field.is_comptime;
-            },
+            .struct_type => |struct_type| struct_type.fieldIsComptime(ip, index),
            .anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none,
            else => unreachable,
        };
    }
 
    pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
-        const struct_type = mod.intern_pool.indexToKey(ty.toIntern()).struct_type;
-        const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-        assert(struct_obj.layout == .Packed);
+        const ip = &mod.intern_pool;
+        const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
+        assert(struct_type.layout == .Packed);
        comptime assert(Type.packed_struct_layout_version == 2);
 
        var bit_offset: u16 = undefined;
        var elem_size_bits: u16 = undefined;
        var running_bits: u16 = 0;
-        for (struct_obj.fields.values(), 0..) |f, i| {
-            if (!f.ty.hasRuntimeBits(mod)) continue;
+        for (struct_type.field_types.get(ip), 0..) |field_ty, i| {
+            if (!field_ty.toType().hasRuntimeBits(mod)) continue;
 
-            const field_bits = @as(u16, @intCast(f.ty.bitSize(mod)));
+            const field_bits: u16 = @intCast(field_ty.toType().bitSize(mod));
            if (i == field_index) {
                bit_offset = running_bits;
                elem_size_bits = field_bits;
@@ -3141,68 +3057,19 @@ pub const Type = struct {
        offset: u64,
    };
 
-    pub const StructOffsetIterator = struct {
-        field: usize = 0,
-        offset: u64 = 0,
-        big_align: u32 = 0,
-        struct_obj: *Module.Struct,
-        module: *Module,
-
-        pub fn next(it: *StructOffsetIterator) ?FieldOffset {
-            const mod = it.module;
-            var i = it.field;
-            if (it.struct_obj.fields.count() <= i)
-                return null;
-
-            if (it.struct_obj.optimized_order) |some| {
-                i = some[i];
-                if (i == Module.Struct.omitted_field) return null;
-            }
-            const field = it.struct_obj.fields.values()[i];
-            it.field += 1;
-
-            if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) {
-                return FieldOffset{ .field = i, .offset = it.offset };
-            }
-
-            const field_align = field.alignment(mod, it.struct_obj.layout);
-            it.big_align = @max(it.big_align, field_align);
-            const field_offset = std.mem.alignForward(u64, it.offset, field_align);
-            it.offset = field_offset + field.ty.abiSize(mod);
-            return FieldOffset{ .field = i, .offset = field_offset };
-        }
-    };
-
-    /// Get an iterator that iterates over all the struct field, returning the field and
-    /// offset of that field. Asserts that the type is a non-packed struct.
-    pub fn iterateStructOffsets(ty: Type, mod: *Module) StructOffsetIterator {
-        const struct_type = mod.intern_pool.indexToKey(ty.toIntern()).struct_type;
-        const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-        assert(struct_obj.haveLayout());
-        assert(struct_obj.layout != .Packed);
-        return .{ .struct_obj = struct_obj, .module = mod };
-    }
-
    /// Supports structs and unions.
    pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
        const ip = &mod.intern_pool;
        switch (ip.indexToKey(ty.toIntern())) {
            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                assert(struct_obj.haveLayout());
-                assert(struct_obj.layout != .Packed);
-                var it = ty.iterateStructOffsets(mod);
-                while (it.next()) |field_offset| {
-                    if (index == field_offset.field)
-                        return field_offset.offset;
-                }
-
-                return std.mem.alignForward(u64, it.offset, @max(it.big_align, 1));
+                assert(struct_type.haveLayout(ip));
+                assert(struct_type.layout != .Packed);
+                return struct_type.offsets.get(ip)[index];
            },
 
            .anon_struct_type => |tuple| {
                var offset: u64 = 0;
-                var big_align: u32 = 0;
+                var big_align: Alignment = .none;
 
                for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
                    if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) {
@@ -3212,12 +3079,12 @@ pub const Type = struct {
                    }
 
                    const field_align = field_ty.toType().abiAlignment(mod);
-                    big_align = @max(big_align, field_align);
-                    offset = std.mem.alignForward(u64, offset, field_align);
+                    big_align = big_align.max(field_align);
+                    offset = field_align.forward(offset);
                    if (i == index) return offset;
                    offset += field_ty.toType().abiSize(mod);
                }
-                offset = std.mem.alignForward(u64, offset, @max(big_align, 1));
+                offset = big_align.max(.@"1").forward(offset);
                return offset;
            },
@@ -3226,9 +3093,9 @@ pub const Type = struct {
                    return 0;
                const union_obj = ip.loadUnionType(union_type);
                const layout = mod.getUnionLayout(union_obj);
-                if (layout.tag_align >= layout.payload_align) {
+                if (layout.tag_align.compare(.gte, layout.payload_align)) {
                    // {Tag, Payload}
-                    return std.mem.alignForward(u64, layout.tag_size, layout.payload_align);
+                    return layout.payload_align.forward(layout.tag_size);
                } else {
                    // {Payload, Tag}
                    return 0;
@@ -3246,8 +3113,7 @@ pub const Type = struct {
    pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc {
        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-                return struct_obj.srcLoc(mod);
+                return mod.declPtr(struct_type.decl.unwrap() orelse return null).srcLoc(mod);
            },
            .union_type => |union_type| {
                return mod.declPtr(union_type.decl).srcLoc(mod);
@@ -3264,10 +3130,7 @@ pub const Type = struct {
    pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?Module.Decl.Index {
        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
-            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null;
-                return struct_obj.owner_decl;
-            },
+            .struct_type => |struct_type| struct_type.decl.unwrap(),
            .union_type => |union_type| union_type.decl,
            .opaque_type => |opaque_type| opaque_type.decl,
            .enum_type => |enum_type| enum_type.decl,
@@ -3280,10 +3143,12 @@ pub const Type = struct {
    }
 
    pub fn isTuple(ty: Type, mod: *Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+        const ip = &mod.intern_pool;
+        return switch (ip.indexToKey(ty.toIntern())) {
            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
-                return struct_obj.is_tuple;
+                if (struct_type.layout == .Packed) return false;
+                if (struct_type.decl == .none) return false;
+                return struct_type.flagsPtr(ip).is_tuple;
            },
            .anon_struct_type => |anon_struct| anon_struct.names.len == 0,
            else => false,
@@ -3299,10 +3164,12 @@ pub const Type = struct {
    }
 
    pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
-        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+        const ip = &mod.intern_pool;
+        return switch (ip.indexToKey(ty.toIntern())) {
            .struct_type => |struct_type| {
-                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
-                return struct_obj.is_tuple;
+                if (struct_type.layout == .Packed) return false;
+                if (struct_type.decl == .none) return false;
+                return struct_type.flagsPtr(ip).is_tuple;
            },
            .anon_struct_type => true,
            else => false,
@@ -3391,3 +3258,7 @@ pub const Type = struct {
    /// to packed struct layout to find out all the places in the codebase you need to edit!
    pub const packed_struct_layout_version = 2;
 };
+
+fn cTypeAlign(target: Target, c_type: Target.CType) Alignment {
+    return Alignment.fromByteUnits(target.c_type_alignment(c_type));
+}
```
