| author | Andrew Kelley <andrew@ziglang.org> | 2023-08-24 20:43:43 -0700 |
|---|---|---|
| committer | Andrew Kelley <andrew@ziglang.org> | 2023-09-21 14:48:40 -0700 |
| commit | accd5701c251c2741479fe08e56c8271c444f021 | |
| tree | 78871f150609687a9210063e90f8f4eb53997c38 /src/Sema.zig | |
| parent | 0345d7866347c9066b0646f9e46be9a068dcfaa3 | |
compiler: move struct types into InternPool proper
Struct types previously used a `SegmentedList` so that they could be referred
to by index, but they were not actually backed by the InternPool arrays.
After this, the only remaining uses of `SegmentedList` in the compiler
are `Module.Decl` and `Module.Namespace`. Once those last two are
migrated to be backed by InternPool arrays as well, we can introduce
state serialization by writing these arrays to disk all at once.
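
The practical payoff of index-backed storage is that the pool's state lives in a few flat arrays rather than in separately allocated objects. Below is a minimal sketch of that layout, assuming a hypothetical `Pool` type with hypothetical `add`/`serialize` helpers; it is not the compiler's actual `InternPool` API, only an illustration of why contiguous arrays make whole-state serialization a bulk write.

```zig
const std = @import("std");

/// Hypothetical sketch of index-backed storage: every item lives in one
/// contiguous array and is referred to by a stable 32-bit index, so the
/// whole pool can be serialized with a single bulk write.
const Pool = struct {
    pub const Index = enum(u32) { _ };

    items: std.ArrayListUnmanaged(u64) = .{},

    fn add(pool: *Pool, gpa: std.mem.Allocator, item: u64) !Index {
        const index: Index = @enumFromInt(pool.items.items.len);
        try pool.items.append(gpa, item);
        return index;
    }

    /// State serialization reduces to writing the backing array to disk.
    fn serialize(pool: Pool, file: std.fs.File) !void {
        try file.writeAll(std.mem.sliceAsBytes(pool.items.items));
    }
};

test "indexes remain stable as the pool grows" {
    const gpa = std.testing.allocator;
    var pool: Pool = .{};
    defer pool.items.deinit(gpa);

    const a = try pool.add(gpa, 42);
    _ = try pool.add(gpa, 43);
    try std.testing.expectEqual(@as(u64, 42), pool.items.items[@intFromEnum(a)]);
}
```

`Pool`, `add`, and `serialize` are illustrative names only; the real InternPool spreads its items across several such arrays, which is what the commit message means by "writing these arrays to disk all at once".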
Unfortunately there are a lot of source code locations that touch the
struct type API, so this commit is still a work in progress. Once I get it
compiling and passing the test suite, I can provide some interesting
data points, such as how it affects InternPool memory size, along with a
performance comparison against the master branch.
I also couldn't resist migrating a bunch of the alignment API over to the
log2 `Alignment` type rather than a mishmash of u32 and u64 byte units,
with 0 implicitly meaning something different and special at every
location. It turns out you can do all the math you need directly on the
log2 representation of alignments.
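
For example, comparing two alignments, taking the minimum, or checking an address only needs the exponent; byte units appear only at the API boundary (the diff's `toByteUnits(0)` and `fromNonzeroByteUnits` calls). The following is a minimal, self-contained sketch of that idea, using a hypothetical `Alignment` enum and helper methods rather than the compiler's actual `InternPool.Alignment` type.

```zig
const std = @import("std");

/// Hypothetical log2 alignment type: the stored integer is the exponent,
/// so "byte units" only exist at the API boundary.
const Alignment = enum(u6) {
    @"1" = 0,
    @"2" = 1,
    @"4" = 2,
    @"8" = 3,
    /// Sentinel meaning "no explicit alignment"; callers handle it before doing math.
    none = std.math.maxInt(u6),
    _,

    fn fromNonzeroByteUnits(bytes: u64) Alignment {
        std.debug.assert(std.math.isPowerOfTwo(bytes));
        return @enumFromInt(@ctz(bytes));
    }

    fn toByteUnits(a: Alignment) u64 {
        return @as(u64, 1) << @intFromEnum(a);
    }

    /// Comparing or combining alignments is integer math on the exponents.
    fn min(a: Alignment, b: Alignment) Alignment {
        return @enumFromInt(@min(@intFromEnum(a), @intFromEnum(b)));
    }

    /// An address satisfies the alignment if its low `exponent` bits are zero.
    fn check(a: Alignment, addr: u64) bool {
        return (addr & (a.toByteUnits() - 1)) == 0;
    }

    /// Round an address up to the alignment.
    fn forward(a: Alignment, addr: u64) u64 {
        const mask = a.toByteUnits() - 1;
        return (addr + mask) & ~mask;
    }
};

test "alignment math stays in log2 form" {
    const a = Alignment.fromNonzeroByteUnits(8);
    try std.testing.expectEqual(@as(u64, 8), a.toByteUnits());
    try std.testing.expect(a.check(32));
    try std.testing.expect(!a.check(20));
    try std.testing.expectEqual(@as(u64, 24), a.forward(17));
    try std.testing.expectEqual(Alignment.@"4", a.min(.@"4"));
}
```

The diff below uses the same shape of API: `dest_align.compare(.gt, src_align)`, `ptr_align.check(addr)`, and `field_align.min(parent_align)` all operate on the exponent, and `toByteUnits(0)` appears only where a `comptime_int` byte count has to be materialized for `@typeInfo`.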
Diffstat (limited to 'src/Sema.zig')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/Sema.zig | 1411 |

1 file changed, 744 insertions, 667 deletions
diff --git a/src/Sema.zig b/src/Sema.zig index db2a189929..6c114b59ba 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2221,8 +2221,8 @@ fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazyS const msg = try sema.errMsg(block, init_src, "value stored in comptime field does not match the default value of the field", .{}); errdefer msg.destroy(sema.gpa); - const struct_ty = mod.typeToStruct(container_ty) orelse break :msg msg; - const default_value_src = mod.fieldSrcLoc(struct_ty.owner_decl, .{ + const struct_type = mod.typeToStruct(container_ty) orelse break :msg msg; + const default_value_src = mod.fieldSrcLoc(struct_type.decl.unwrap().?, .{ .index = field_index, .range = .value, }); @@ -2504,23 +2504,22 @@ fn analyzeAsAlign( const alignment_big = try sema.analyzeAsInt(block, src, air_ref, align_ty, .{ .needed_comptime_reason = "alignment must be comptime-known", }); - const alignment: u32 = @intCast(alignment_big); // We coerce to u29 in the prev line. - try sema.validateAlign(block, src, alignment); - return Alignment.fromNonzeroByteUnits(alignment); + return sema.validateAlign(block, src, alignment_big); } fn validateAlign( sema: *Sema, block: *Block, src: LazySrcLoc, - alignment: u32, -) !void { + alignment: u64, +) !Alignment { if (alignment == 0) return sema.fail(block, src, "alignment must be >= 1", .{}); if (!std.math.isPowerOfTwo(alignment)) { return sema.fail(block, src, "alignment value '{d}' is not a power of two", .{ alignment, }); } + return Alignment.fromNonzeroByteUnits(alignment); } pub fn resolveAlign( @@ -2801,26 +2800,26 @@ fn coerceResultPtr( } } -pub fn analyzeStructDecl( +pub fn getStructType( sema: *Sema, - new_decl: *Decl, - inst: Zir.Inst.Index, - struct_index: Module.Struct.Index, -) SemaError!void { + decl: Module.Decl.Index, + namespace: Module.Namespace.Index, + zir_index: Zir.Inst.Index, +) !InternPool.Index { const mod = sema.mod; - const struct_obj = mod.structPtr(struct_index); - const extended = sema.code.instructions.items(.data)[inst].extended; + const gpa = sema.gpa; + const ip = &mod.intern_pool; + const extended = sema.code.instructions.items(.data)[zir_index].extended; assert(extended.opcode == .struct_decl); const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); - struct_obj.known_non_opv = small.known_non_opv; - if (small.known_comptime_only) { - struct_obj.requires_comptime = .yes; - } - var extra_index: usize = extended.operand; extra_index += @intFromBool(small.has_src_node); - extra_index += @intFromBool(small.has_fields_len); + const fields_len = if (small.has_fields_len) blk: { + const fields_len = sema.code.extra[extra_index]; + extra_index += 1; + break :blk fields_len; + } else 0; const decls_len = if (small.has_decls_len) blk: { const decls_len = sema.code.extra[extra_index]; extra_index += 1; @@ -2837,7 +2836,20 @@ pub fn analyzeStructDecl( } } - _ = try mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl); + extra_index = try mod.scanNamespace(namespace, extra_index, decls_len, mod.declPtr(decl)); + + const ty = try ip.getStructType(gpa, .{ + .decl = decl, + .namespace = namespace.toOptional(), + .zir_index = zir_index, + .layout = small.layout, + .known_non_opv = small.known_non_opv, + .is_tuple = small.is_tuple, + .fields_len = fields_len, + .requires_comptime = if (small.known_comptime_only) .yes else .unknown, + }); + + return ty; } fn zirStructDecl( @@ -2847,7 +2859,7 @@ fn zirStructDecl( inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const mod = sema.mod; - const gpa 
= sema.gpa; + const ip = &mod.intern_pool; const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset: i32 = @bitCast(sema.code.extra[extended.operand]); @@ -2874,37 +2886,21 @@ fn zirStructDecl( const new_namespace = mod.namespacePtr(new_namespace_index); errdefer mod.destroyNamespace(new_namespace_index); - const struct_index = try mod.createStruct(.{ - .owner_decl = new_decl_index, - .fields = .{}, - .zir_index = inst, - .layout = small.layout, - .status = .none, - .known_non_opv = undefined, - .is_tuple = small.is_tuple, - .namespace = new_namespace_index, - }); - errdefer mod.destroyStruct(struct_index); - const struct_ty = ty: { - const ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ - .index = struct_index.toOptional(), - .namespace = new_namespace_index.toOptional(), - } }); + const ty = try sema.getStructType(new_decl_index, new_namespace_index, inst); if (sema.builtin_type_target_index != .none) { - mod.intern_pool.resolveBuiltinType(sema.builtin_type_target_index, ty); + ip.resolveBuiltinType(sema.builtin_type_target_index, ty); break :ty sema.builtin_type_target_index; } break :ty ty; }; // TODO: figure out InternPool removals for incremental compilation - //errdefer mod.intern_pool.remove(struct_ty); + //errdefer ip.remove(struct_ty); new_decl.ty = Type.type; new_decl.val = struct_ty.toValue(); new_namespace.ty = struct_ty.toType(); - try sema.analyzeStructDecl(new_decl, inst, struct_index); const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); try mod.finalizeAnonDecl(new_decl_index); return decl_val; @@ -3196,7 +3192,7 @@ fn zirEnumDecl( extra_index += 1; const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir); - if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name)) |other_index| { + if (incomplete_enum.addFieldName(&mod.intern_pool, field_name)) |other_index| { const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy; const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { @@ -3227,7 +3223,7 @@ fn zirEnumDecl( }; if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true; last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty); - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| { + if (incomplete_enum.addFieldValue(&mod.intern_pool, last_tag_val.?.toIntern())) |other_index| { const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = .value, @@ -3249,7 +3245,7 @@ fn zirEnumDecl( else try mod.intValue(int_tag_ty, 0); if (overflow != null) break :overflow true; - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| { + if (incomplete_enum.addFieldValue(&mod.intern_pool, last_tag_val.?.toIntern())) |other_index| { const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy; const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { @@ -4723,10 +4719,11 @@ fn validateStructInit( } if (root_msg) |msg| { - if (mod.typeToStruct(struct_ty)) |struct_obj| { - const fqn = try struct_obj.getFullyQualifiedName(mod); + if (mod.typeToStruct(struct_ty)) |struct_type| { + const decl = mod.declPtr(struct_type.decl.unwrap().?); + const fqn = try decl.getFullyQualifiedName(mod); try mod.errNoteNonLazy( - struct_obj.srcLoc(mod), + decl.srcLoc(mod), msg, "struct '{}' 
declared here", .{fqn.fmt(ip)}, @@ -4853,10 +4850,11 @@ fn validateStructInit( } if (root_msg) |msg| { - if (mod.typeToStruct(struct_ty)) |struct_obj| { - const fqn = try struct_obj.getFullyQualifiedName(mod); + if (mod.typeToStruct(struct_ty)) |struct_type| { + const decl = mod.declPtr(struct_type.decl.unwrap().?); + const fqn = try decl.getFullyQualifiedName(mod); try mod.errNoteNonLazy( - struct_obj.srcLoc(mod), + decl.srcLoc(mod), msg, "struct '{}' declared here", .{fqn.fmt(ip)}, @@ -5255,14 +5253,14 @@ fn failWithBadMemberAccess( fn failWithBadStructFieldAccess( sema: *Sema, block: *Block, - struct_obj: *Module.Struct, + struct_type: InternPool.Key.StructType, field_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, ) CompileError { const mod = sema.mod; const gpa = sema.gpa; - - const fqn = try struct_obj.getFullyQualifiedName(mod); + const decl = mod.declPtr(struct_type.decl.unwrap().?); + const fqn = try decl.getFullyQualifiedName(mod); const msg = msg: { const msg = try sema.errMsg( @@ -5272,7 +5270,7 @@ fn failWithBadStructFieldAccess( .{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) }, ); errdefer msg.destroy(gpa); - try mod.errNoteNonLazy(struct_obj.srcLoc(mod), msg, "struct declared here", .{}); + try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "struct declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); @@ -12953,9 +12951,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :hf false; - assert(struct_obj.haveFieldTypes()); - break :hf struct_obj.fields.contains(field_name); + break :hf struct_type.nameIndex(ip, field_name) != null; }, .union_type => |union_type| { const union_obj = ip.loadUnionType(union_type); @@ -16907,7 +16903,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // calling_convention: CallingConvention, (try mod.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).toIntern(), + (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod).toByteUnits(0))).toIntern(), // is_generic: bool, Value.makeBool(func_ty_info.is_generic).toIntern(), // is_var_args: bool, @@ -17461,7 +17457,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const alignment = switch (layout) { .Auto, .Extern => try sema.unionFieldAlignment(union_obj, @intCast(i)), - .Packed => 0, + .Packed => .none, }; const field_ty = union_obj.field_types.get(ip)[i]; @@ -17471,7 +17467,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // type: type, field_ty, // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, alignment)).toIntern(), + (try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(), }; field_val.* = try mod.intern(.{ .aggregate = .{ .ty = union_field_ty.toIntern(), @@ -17578,7 +17574,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; try sema.resolveTypeLayout(ty); // Getting alignment requires type layout - const layout = ty.containerLayout(mod); var struct_field_vals: []InternPool.Index = &.{}; defer gpa.free(struct_field_vals); @@ -17633,7 +17628,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_comptime: bool, Value.makeBool(is_comptime).toIntern(), // alignment: 
comptime_int, - (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).toIntern(), + (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod).toByteUnits(0))).toIntern(), }; struct_field_val.* = try mod.intern(.{ .aggregate = .{ .ty = struct_field_ty.toIntern(), @@ -17645,14 +17640,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .struct_type => |s| s, else => unreachable, }; - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :fv; - struct_field_vals = try gpa.alloc(InternPool.Index, struct_obj.fields.count()); - - for ( - struct_field_vals, - struct_obj.fields.keys(), - struct_obj.fields.values(), - ) |*field_val, name_nts, field| { + struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len); + + for (struct_field_vals, 0..) |*field_val, i| { + const name_nts = struct_type.fieldName(ip, i).unwrap().?; + const field_ty = struct_type.field_types.get(ip)[i].toType(); + const field_init = struct_type.fieldInit(ip, i); + const field_is_comptime = struct_type.fieldIsComptime(ip, i); // TODO: write something like getCoercedInts to avoid needing to dupe const name = try sema.arena.dupe(u8, ip.stringToSlice(name_nts)); const name_val = v: { @@ -17677,24 +17671,25 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }); }; - const opt_default_val = if (field.default_val == .none) - null - else - field.default_val.toValue(); - const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val); - const alignment = field.alignment(mod, layout); + const opt_default_val = if (field_init == .none) null else field_init.toValue(); + const default_val_ptr = try sema.optRefValue(block, field_ty, opt_default_val); + const alignment = mod.structFieldAlignment( + struct_type.field_aligns.get(ip)[i], + field_ty, + struct_type.layout, + ); const struct_field_fields = .{ // name: []const u8, name_val, // type: type, - field.ty.toIntern(), + field_ty.toIntern(), // default_value: ?*const anyopaque, default_val_ptr.toIntern(), // is_comptime: bool, - Value.makeBool(field.is_comptime).toIntern(), + Value.makeBool(field_is_comptime).toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, alignment)).toIntern(), + (try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(), }; field_val.* = try mod.intern(.{ .aggregate = .{ .ty = struct_field_ty.toIntern(), @@ -17733,11 +17728,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const backing_integer_val = try mod.intern(.{ .opt = .{ .ty = (try mod.optionalType(.type_type)).toIntern(), - .val = if (layout == .Packed) val: { - const struct_obj = mod.typeToStruct(ty).?; - assert(struct_obj.haveLayout()); - assert(struct_obj.backing_int_ty.isInt(mod)); - break :val struct_obj.backing_int_ty.toIntern(); + .val = if (mod.typeToPackedStruct(ty)) |packed_struct| val: { + assert(packed_struct.backingIntType(ip).toType().isInt(mod)); + break :val packed_struct.backingIntType(ip).*; } else .none, } }); @@ -17754,6 +17747,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t decl.val.toType(); }; + const layout = ty.containerLayout(mod); + const field_values = [_]InternPool.Index{ // layout: ContainerLayout, (try mod.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(), @@ -18924,9 +18919,8 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, else => {}, 
} - const abi_align: u32 = @intCast((try val.getUnsignedIntAdvanced(mod, sema)).?); - try sema.validateAlign(block, align_src, abi_align); - break :blk Alignment.fromByteUnits(abi_align); + const align_bytes = (try val.getUnsignedIntAdvanced(mod, sema)).?; + break :blk try sema.validateAlign(block, align_src, align_bytes); } else .none; const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: { @@ -19291,12 +19285,12 @@ fn finishStructInit( } }, .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - for (struct_obj.fields.values(), 0..) |field, i| { + for (0..struct_type.field_types.len) |i| { if (field_inits[i] != .none) continue; - if (field.default_val == .none) { - const field_name = struct_obj.fields.keys()[i]; + const field_init = struct_type.field_inits.get(ip)[i]; + if (field_init == .none) { + const field_name = struct_type.field_names.get(ip)[i]; const template = "missing struct field: {}"; const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { @@ -19305,7 +19299,7 @@ fn finishStructInit( root_msg = try sema.errMsg(block, init_src, template, args); } } else { - field_inits[i] = Air.internedToRef(field.default_val); + field_inits[i] = Air.internedToRef(field_init); } } }, @@ -19313,10 +19307,11 @@ fn finishStructInit( } if (root_msg) |msg| { - if (mod.typeToStruct(struct_ty)) |struct_obj| { - const fqn = try struct_obj.getFullyQualifiedName(mod); + if (mod.typeToStruct(struct_ty)) |struct_type| { + const decl = mod.declPtr(struct_type.decl.unwrap().?); + const fqn = try decl.getFullyQualifiedName(mod); try mod.errNoteNonLazy( - struct_obj.srcLoc(mod), + decl.srcLoc(mod), msg, "struct '{}' declared here", .{fqn.fmt(ip)}, @@ -19848,10 +19843,10 @@ fn fieldType( return Air.internedToRef(anon_struct.types.get(ip)[field_index]); }, .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - const field = struct_obj.fields.get(field_name) orelse - return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); - return Air.internedToRef(field.ty.toIntern()); + const field_index = struct_type.nameIndex(ip, field_name) orelse + return sema.failWithBadStructFieldAccess(block, struct_type, field_src, field_name); + const field_ty = struct_type.field_types.get(ip)[field_index]; + return Air.internedToRef(field_ty); }, else => unreachable, }, @@ -20167,14 +20162,14 @@ fn zirReify( .AnyFrame => return sema.failWithUseOfAsync(block, src), .EnumLiteral => return .enum_literal_type, .Int => { - const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type; const signedness_val = try union_val.val.toValue().fieldValue( mod, - fields.getIndex(try ip.getOrPutString(gpa, "signedness")).?, + struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "signedness")).?, ); const bits_val = try union_val.val.toValue().fieldValue( mod, - fields.getIndex(try ip.getOrPutString(gpa, "bits")).?, + struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "bits")).?, ); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); @@ -20183,11 +20178,13 @@ fn zirReify( return Air.internedToRef(ty.toIntern()); }, .Vector => { - const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type; + const len_val = try 
union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "len"), ).?); - const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "child"), ).?); @@ -20203,8 +20200,9 @@ fn zirReify( return Air.internedToRef(ty.toIntern()); }, .Float => { - const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type; + const bits_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "bits"), ).?); @@ -20220,29 +20218,37 @@ fn zirReify( return Air.internedToRef(ty.toIntern()); }, .Pointer => { - const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const size_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type; + const size_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "size"), ).?); - const is_const_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const is_const_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "is_const"), ).?); - const is_volatile_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const is_volatile_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "is_volatile"), ).?); - const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const alignment_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "alignment"), ).?); - const address_space_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const address_space_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "address_space"), ).?); - const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "child"), ).?); - const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "is_allowzero"), ).?); - const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const sentinel_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "sentinel"), ).?); @@ -20322,14 +20328,17 @@ fn zirReify( return Air.internedToRef(ty.toIntern()); }, .Array => { - const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type; + const len_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "len"), ).?); - const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "child"), ).?); - const sentinel_val = try 
union_val.val.toValue().fieldValue(mod, fields.getIndex( + const sentinel_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "sentinel"), ).?); @@ -20348,8 +20357,9 @@ fn zirReify( return Air.internedToRef(ty.toIntern()); }, .Optional => { - const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type; + const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "child"), ).?); @@ -20359,11 +20369,13 @@ fn zirReify( return Air.internedToRef(ty.toIntern()); }, .ErrorUnion => { - const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const error_set_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type; + const error_set_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "error_set"), ).?); - const payload_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const payload_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "payload"), ).?); @@ -20386,8 +20398,9 @@ fn zirReify( try names.ensureUnusedCapacity(sema.arena, len); for (0..len) |i| { const elem_val = try payload_val.elemValue(mod, i); - const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type; + const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "name"), ).?); @@ -20405,20 +20418,25 @@ fn zirReify( return Air.internedToRef(ty.toIntern()); }, .Struct => { - const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type; + const layout_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "layout"), ).?); - const backing_integer_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const backing_integer_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "backing_integer"), ).?); - const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const fields_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "fields"), ).?); - const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "decls"), ).?); - const is_tuple_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const is_tuple_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "is_tuple"), ).?); @@ -20436,17 +20454,21 @@ fn zirReify( return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool()); }, .Enum => { - const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const tag_type_val = try 
union_val.val.toValue().fieldValue(mod, fields.getIndex( + const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type; + const tag_type_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "tag_type"), ).?); - const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const fields_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "fields"), ).?); - const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "decls"), ).?); - const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "is_exhaustive"), ).?); @@ -20496,11 +20518,13 @@ fn zirReify( for (0..fields_len) |field_i| { const elem_val = try fields_val.elemValue(mod, field_i); - const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type; + const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "name"), ).?); - const value_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const value_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "value"), ).?); @@ -20515,7 +20539,7 @@ fn zirReify( }); } - if (try incomplete_enum.addFieldName(ip, gpa, field_name)) |other_index| { + if (incomplete_enum.addFieldName(ip, field_name)) |other_index| { const msg = msg: { const msg = try sema.errMsg(block, src, "duplicate enum field '{}'", .{ field_name.fmt(ip), @@ -20528,7 +20552,7 @@ fn zirReify( return sema.failWithOwnedErrorMsg(block, msg); } - if (try incomplete_enum.addFieldValue(ip, gpa, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| { + if (incomplete_enum.addFieldValue(ip, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| { const msg = msg: { const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)}); errdefer msg.destroy(gpa); @@ -20545,8 +20569,9 @@ fn zirReify( return decl_val; }, .Opaque => { - const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type; + const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "decls"), ).?); @@ -20594,17 +20619,21 @@ fn zirReify( return decl_val; }, .Union => { - const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type; + const layout_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "layout"), ).?); - const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const tag_type_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "tag_type"), ).?); - const fields_val = try 
union_val.val.toValue().fieldValue(mod, fields.getIndex( + const fields_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "fields"), ).?); - const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "decls"), ).?); @@ -20644,14 +20673,17 @@ fn zirReify( for (0..fields_len) |i| { const elem_val = try fields_val.elemValue(mod, i); - const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type; + const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "name"), ).?); - const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "type"), ).?); - const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const alignment_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "alignment"), ).?); @@ -20812,23 +20844,29 @@ fn zirReify( return decl_val; }, .Fn => { - const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const calling_convention_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type; + const calling_convention_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "calling_convention"), ).?); - const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const alignment_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "alignment"), ).?); - const is_generic_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const is_generic_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "is_generic"), ).?); - const is_var_args_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const is_var_args_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "is_var_args"), ).?); - const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const return_type_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "return_type"), ).?); - const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + const params_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "params"), ).?); @@ -20844,15 +20882,9 @@ fn zirReify( } const alignment = alignment: { - if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { - return sema.fail(block, src, "alignment must fit in 'u32'", .{}); - } - const alignment: u29 = @intCast(alignment_val.toUnsignedInt(mod)); - if (alignment == target_util.defaultFunctionAlignment(target)) { - break :alignment .none; - } else { - break :alignment Alignment.fromByteUnits(alignment); - } + const alignment = try sema.validateAlign(block, src, alignment_val.toUnsignedInt(mod)); + const default = target_util.defaultFunctionAlignment(target); + break :alignment if (alignment == default) 
.none else alignment; }; const return_type = return_type_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{}); @@ -20863,14 +20895,17 @@ fn zirReify( var noalias_bits: u32 = 0; for (param_types, 0..) |*param_type, i| { const elem_val = try params_val.elemValue(mod, i); - const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const param_is_generic_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type; + const param_is_generic_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "is_generic"), ).?); - const param_is_noalias_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const param_is_noalias_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "is_noalias"), ).?); - const opt_param_type_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const opt_param_type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "type"), ).?); @@ -20931,6 +20966,8 @@ fn reifyStruct( .Auto => {}, }; + const fields_len: u32 = @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod))); + // Because these three things each reference each other, `undefined` // placeholders are used before being set after the struct type gains an // InternPool index. @@ -20946,58 +20983,45 @@ fn reifyStruct( mod.abortAnonDecl(new_decl_index); } - const new_namespace_index = try mod.createNamespace(.{ - .parent = block.namespace.toOptional(), - .ty = undefined, - .file_scope = block.getFileScope(mod), - }); - const new_namespace = mod.namespacePtr(new_namespace_index); - errdefer mod.destroyNamespace(new_namespace_index); - - const struct_index = try mod.createStruct(.{ - .owner_decl = new_decl_index, - .fields = .{}, + const ty = try ip.getStructType(gpa, .{ + .decl = new_decl_index, + .namespace = .none, .zir_index = inst, .layout = layout, - .status = .have_field_types, .known_non_opv = false, + .fields_len = fields_len, + .requires_comptime = .unknown, .is_tuple = is_tuple, - .namespace = new_namespace_index, }); - const struct_obj = mod.structPtr(struct_index); - errdefer mod.destroyStruct(struct_index); - - const struct_ty = try ip.get(gpa, .{ .struct_type = .{ - .index = struct_index.toOptional(), - .namespace = new_namespace_index.toOptional(), - } }); // TODO: figure out InternPool removals for incremental compilation - //errdefer ip.remove(struct_ty); + //errdefer ip.remove(ty); + const struct_type = ip.indexToKey(ty).struct_type; new_decl.ty = Type.type; - new_decl.val = struct_ty.toValue(); - new_namespace.ty = struct_ty.toType(); + new_decl.val = ty.toValue(); // Fields - const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); - try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); - var i: usize = 0; - while (i < fields_len) : (i += 1) { + for (0..fields_len) |i| { const elem_val = try fields_val.elemValue(mod, i); - const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type; + const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "name"), ).?); - const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + 
const type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "type"), ).?); - const default_value_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const default_value_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "default_value"), ).?); - const is_comptime_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const is_comptime_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "is_comptime"), ).?); - const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + const alignment_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex( + ip, try ip.getOrPutString(gpa, "alignment"), ).?); @@ -21033,9 +21057,8 @@ fn reifyStruct( ); } } - const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); - if (gop.found_existing) { - // TODO: better source location + if (struct_type.addFieldName(ip, field_name)) |prev_index| { + _ = prev_index; // TODO: better source location return sema.fail(block, src, "duplicate struct field {}", .{field_name.fmt(ip)}); } @@ -21051,13 +21074,11 @@ fn reifyStruct( return sema.fail(block, src, "comptime field without default initialization value", .{}); } - gop.value_ptr.* = .{ - .ty = field_ty, - .abi_align = Alignment.fromByteUnits(abi_align), - .default_val = default_val, - .is_comptime = is_comptime_val.toBool(), - .offset = undefined, - }; + struct_type.field_types.get(ip)[i] = field_ty.toIntern(); + struct_type.field_aligns.get(ip)[i] = Alignment.fromByteUnits(abi_align); + struct_type.field_inits.get(ip)[i] = default_val; + if (is_comptime_val.toBool()) + struct_type.setFieldComptime(ip, i); if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { @@ -21079,7 +21100,7 @@ fn reifyStruct( }; return sema.failWithOwnedErrorMsg(block, msg); } - if (struct_obj.layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) { + if (layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) { const msg = msg: { const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); errdefer msg.destroy(gpa); @@ -21091,7 +21112,7 @@ fn reifyStruct( break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); - } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { + } else if (layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); errdefer msg.destroy(gpa); @@ -21107,13 +21128,12 @@ fn reifyStruct( } if (layout == .Packed) { - struct_obj.status = .layout_wip; - - for (struct_obj.fields.values(), 0..) 
|field, index| { - sema.resolveTypeLayout(field.ty) catch |err| switch (err) { + for (0..struct_type.field_types.len) |index| { + const field_ty = struct_type.field_types.get(ip)[index].toType(); + sema.resolveTypeLayout(field_ty) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return err; - try sema.addFieldErrNote(struct_ty.toType(), index, msg, "while checking this field", .{}); + try sema.addFieldErrNote(ty.toType(), index, msg, "while checking this field", .{}); return err; }, else => return err, @@ -21121,19 +21141,18 @@ fn reifyStruct( } var fields_bit_sum: u64 = 0; - for (struct_obj.fields.values()) |field| { - fields_bit_sum += field.ty.bitSize(mod); + for (struct_type.field_types.get(ip)) |field_ty| { + fields_bit_sum += field_ty.toType().bitSize(mod); } - if (backing_int_val.optionalValue(mod)) |payload| { - const backing_int_ty = payload.toType(); + if (backing_int_val.optionalValue(mod)) |backing_int_ty_val| { + const backing_int_ty = backing_int_ty_val.toType(); try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum); - struct_obj.backing_int_ty = backing_int_ty; + struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); } else { - struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum)); + const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum)); + struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); } - - struct_obj.status = .have_layout; } const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); @@ -21439,8 +21458,9 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize); try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null); } - if (ptr_align > 1) { - const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, ptr_align - 1)).toIntern()); + if (ptr_align.compare(.gt, .@"1")) { + const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1; + const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern()); const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment); @@ -21458,8 +21478,9 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const is_non_zero = try block.addBinOp(.cmp_neq, elem_coerced, .zero_usize); try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null); } - if (ptr_align > 1) { - const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, ptr_align - 1)).toIntern()); + if (ptr_align.compare(.gt, .@"1")) { + const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1; + const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern()); const remainder = try block.addBinOp(.bit_and, elem_coerced, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment); @@ -21476,12 +21497,19 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
return block.addAggregateInit(dest_ty, new_elems); } -fn ptrFromIntVal(sema: *Sema, block: *Block, operand_src: LazySrcLoc, operand_val: Value, ptr_ty: Type, ptr_align: u32) !Value { +fn ptrFromIntVal( + sema: *Sema, + block: *Block, + operand_src: LazySrcLoc, + operand_val: Value, + ptr_ty: Type, + ptr_align: Alignment, +) !Value { const mod = sema.mod; const addr = operand_val.toUnsignedInt(mod); if (!ptr_ty.isAllowzeroPtr(mod) and addr == 0) return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)}); - if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0) + if (addr != 0 and ptr_align != .none and !ptr_align.check(addr)) return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)}); return switch (ptr_ty.zigTypeTag(mod)) { @@ -21795,10 +21823,18 @@ fn ptrCastFull( // TODO: vector index? } - const src_align = src_info.flags.alignment.toByteUnitsOptional() orelse src_info.child.toType().abiAlignment(mod); - const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse dest_info.child.toType().abiAlignment(mod); + const src_align = if (src_info.flags.alignment != .none) + src_info.flags.alignment + else + src_info.child.toType().abiAlignment(mod); + + const dest_align = if (dest_info.flags.alignment != .none) + dest_info.flags.alignment + else + dest_info.child.toType().abiAlignment(mod); + if (!flags.align_cast) { - if (dest_align > src_align) { + if (dest_align.compare(.gt, src_align)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{}); errdefer msg.destroy(sema.gpa); @@ -21891,10 +21927,13 @@ fn ptrCastFull( if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isNull(mod)) { return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); } - if (dest_align > src_align) { + if (dest_align.compare(.gt, src_align)) { if (try ptr_val.getUnsignedIntAdvanced(mod, null)) |addr| { - if (addr % dest_align != 0) { - return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align }); + if (!dest_align.check(addr)) { + return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ + addr, + dest_align.toByteUnitsOptional().?, + }); } } } @@ -21928,8 +21967,12 @@ fn ptrCastFull( try sema.addSafetyCheck(block, src, ok, .cast_to_null); } - if (block.wantSafety() and dest_align > src_align and try sema.typeHasRuntimeBits(dest_info.child.toType())) { - const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, dest_align - 1)).toIntern()); + if (block.wantSafety() and + dest_align.compare(.gt, src_align) and + try sema.typeHasRuntimeBits(dest_info.child.toType())) + { + const align_bytes_minus_1 = dest_align.toByteUnitsOptional().? 
- 1; + const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern()); const ptr_int = try block.addUnOp(.int_from_ptr, ptr); const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); @@ -22285,6 +22328,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 }); const mod = sema.mod; + const ip = &mod.intern_pool; try sema.resolveTypeLayout(ty); switch (ty.zigTypeTag(mod)) { .Struct => {}, @@ -22300,7 +22344,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 } const field_index = if (ty.isTuple(mod)) blk: { - if (mod.intern_pool.stringEqlSlice(field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { return sema.fail(block, src, "no offset available for 'len' field of tuple", .{}); } break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src); @@ -22313,12 +22357,13 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 switch (ty.containerLayout(mod)) { .Packed => { var bit_sum: u64 = 0; - const fields = ty.structFields(mod); - for (fields.values(), 0..) |field, i| { + const struct_type = ip.indexToKey(ty.toIntern()).struct_type; + for (0..struct_type.field_types.len) |i| { if (i == field_index) { return bit_sum; } - bit_sum += field.ty.bitSize(mod); + const field_ty = struct_type.field_types.get(ip)[i].toType(); + bit_sum += field_ty.bitSize(mod); } else unreachable; }, else => return ty.structFieldOffset(field_index, mod) * 8, @@ -23717,8 +23762,8 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{}); } else { ptr_ty_data.flags.alignment = blk: { - if (mod.typeToStruct(parent_ty)) |struct_obj| { - break :blk struct_obj.fields.values()[field_index].abi_align; + if (mod.typeToStruct(parent_ty)) |struct_type| { + break :blk struct_type.field_aligns.get(ip)[field_index]; } else if (mod.typeToUnion(parent_ty)) |union_obj| { break :blk union_obj.fieldAlign(ip, field_index); } else { @@ -24528,13 +24573,9 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk null; } - const alignment: u32 = @intCast(val.toUnsignedInt(mod)); - try sema.validateAlign(block, align_src, alignment); - if (alignment == target_util.defaultFunctionAlignment(target)) { - break :blk .none; - } else { - break :blk Alignment.fromNonzeroByteUnits(alignment); - } + const alignment = try sema.validateAlign(block, align_src, val.toUnsignedInt(mod)); + const default = target_util.defaultFunctionAlignment(target); + break :blk if (alignment == default) .none else alignment; } else if (extra.data.bits.has_align_ref) blk: { const align_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; @@ -24546,13 +24587,9 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - const alignment: u32 = @intCast(align_tv.val.toUnsignedInt(mod)); - try sema.validateAlign(block, align_src, alignment); - if (alignment == target_util.defaultFunctionAlignment(target)) { - break :blk .none; - } else { - break :blk Alignment.fromNonzeroByteUnits(alignment); - } + const alignment = try sema.validateAlign(block, align_src, align_tv.val.toUnsignedInt(mod)); + const default = target_util.defaultFunctionAlignment(target); + break :blk if (alignment 
== default) .none else alignment; } else .none; const @"addrspace": ?std.builtin.AddressSpace = if (extra.data.bits.has_addrspace_body) blk: { @@ -25237,16 +25274,17 @@ fn explainWhyTypeIsComptimeInner( .Struct => { if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return; - if (mod.typeToStruct(ty)) |struct_obj| { - for (struct_obj.fields.values(), 0..) |field, i| { - const field_src_loc = mod.fieldSrcLoc(struct_obj.owner_decl, .{ + if (mod.typeToStruct(ty)) |struct_type| { + for (0..struct_type.field_types.len) |i| { + const field_ty = struct_type.field_types.get(ip)[i].toType(); + const field_src_loc = mod.fieldSrcLoc(struct_type.decl.unwrap().?, .{ .index = i, .range = .type, }); - if (try sema.typeRequiresComptime(field.ty)) { + if (try sema.typeRequiresComptime(field_ty)) { try mod.errNoteNonLazy(field_src_loc, msg, "struct requires comptime because of this field", .{}); - try sema.explainWhyTypeIsComptimeInner(msg, field_src_loc, field.ty, type_set); + try sema.explainWhyTypeIsComptimeInner(msg, field_src_loc, field_ty, type_set); } } } @@ -26297,13 +26335,12 @@ fn fieldCallBind( switch (concrete_ty.zigTypeTag(mod)) { .Struct => { try sema.resolveTypeFields(concrete_ty); - if (mod.typeToStruct(concrete_ty)) |struct_obj| { - const field_index_usize = struct_obj.fields.getIndex(field_name) orelse + if (mod.typeToStruct(concrete_ty)) |struct_type| { + const field_index = struct_type.nameIndex(ip, field_name) orelse break :find_field; - const field_index: u32 = @intCast(field_index_usize); - const field = struct_obj.fields.values()[field_index]; + const field_ty = struct_type.field_types.get(ip)[field_index].toType(); - return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr); + return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr); } else if (concrete_ty.isTuple(mod)) { if (ip.stringEqlSlice(field_name, "len")) { return .{ .direct = try mod.intRef(Type.usize, concrete_ty.structFieldCount(mod)) }; @@ -26526,13 +26563,14 @@ fn structFieldPtr( initializing: bool, ) CompileError!Air.Inst.Ref { const mod = sema.mod; + const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); try sema.resolveTypeFields(struct_ty); try sema.resolveStructLayout(struct_ty); if (struct_ty.isTuple(mod)) { - if (mod.intern_pool.stringEqlSlice(field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { const len_inst = try mod.intRef(Type.usize, struct_ty.structFieldCount(mod)); return sema.analyzeRef(block, src, len_inst); } @@ -26543,11 +26581,10 @@ fn structFieldPtr( return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); } - const struct_obj = mod.typeToStruct(struct_ty).?; + const struct_type = mod.typeToStruct(struct_ty).?; - const field_index_big = struct_obj.fields.getIndex(field_name) orelse - return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); - const field_index: u32 = @intCast(field_index_big); + const field_index = struct_type.nameIndex(ip, field_name) orelse + return sema.failWithBadStructFieldAccess(block, struct_type, field_name_src, field_name); return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, field_name_src, struct_ty, initializing); } @@ -26563,17 +26600,18 @@ fn structFieldPtrByIndex( initializing: bool, ) CompileError!Air.Inst.Ref { const mod = sema.mod; + const ip = &mod.intern_pool; if (struct_ty.isAnonStruct(mod)) { return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, 
initializing); } - const struct_obj = mod.typeToStruct(struct_ty).?; - const field = struct_obj.fields.values()[field_index]; + const struct_type = mod.typeToStruct(struct_ty).?; + const field_ty = struct_type.field_types.get(ip)[field_index]; const struct_ptr_ty = sema.typeOf(struct_ptr); const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod); var ptr_ty_data: InternPool.Key.PtrType = .{ - .child = field.ty.toIntern(), + .child = field_ty, .flags = .{ .is_const = struct_ptr_ty_info.flags.is_const, .is_volatile = struct_ptr_ty_info.flags.is_volatile, @@ -26583,20 +26621,23 @@ fn structFieldPtrByIndex( const target = mod.getTarget(); - const parent_align = struct_ptr_ty_info.flags.alignment.toByteUnitsOptional() orelse + const parent_align = if (struct_ptr_ty_info.flags.alignment != .none) + struct_ptr_ty_info.flags.alignment + else try sema.typeAbiAlignment(struct_ptr_ty_info.child.toType()); - if (struct_obj.layout == .Packed) { + if (struct_type.layout == .Packed) { comptime assert(Type.packed_struct_layout_version == 2); var running_bits: u16 = 0; - for (struct_obj.fields.values(), 0..) |f, i| { - if (!(try sema.typeHasRuntimeBits(f.ty))) continue; + for (0..struct_type.field_types.len) |i| { + const f_ty = struct_type.field_types.get(ip)[i].toType(); + if (!(try sema.typeHasRuntimeBits(f_ty))) continue; if (i == field_index) { ptr_ty_data.packed_offset.bit_offset = running_bits; } - running_bits += @intCast(f.ty.bitSize(mod)); + running_bits += @intCast(f_ty.bitSize(mod)); } ptr_ty_data.packed_offset.host_size = (running_bits + 7) / 8; @@ -26607,7 +26648,7 @@ fn structFieldPtrByIndex( ptr_ty_data.packed_offset.bit_offset += struct_ptr_ty_info.packed_offset.bit_offset; } - ptr_ty_data.flags.alignment = Alignment.fromByteUnits(parent_align); + ptr_ty_data.flags.alignment = parent_align; // If the field happens to be byte-aligned, simplify the pointer type. // The pointee type bit size must match its ABI byte size so that loads and stores @@ -26617,38 +26658,43 @@ fn structFieldPtrByIndex( // targets before adding the necessary complications to this code. This will not // cause miscompilations; it only means the field pointer uses bit masking when it // might not be strictly necessary. - if (parent_align != 0 and ptr_ty_data.packed_offset.bit_offset % 8 == 0 and + if (parent_align != .none and ptr_ty_data.packed_offset.bit_offset % 8 == 0 and target.cpu.arch.endian() == .Little) { const elem_size_bytes = ptr_ty_data.child.toType().abiSize(mod); const elem_size_bits = ptr_ty_data.child.toType().bitSize(mod); if (elem_size_bytes * 8 == elem_size_bits) { const byte_offset = ptr_ty_data.packed_offset.bit_offset / 8; - const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align)); + const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align.toByteUnitsOptional().?)); assert(new_align != .none); ptr_ty_data.flags.alignment = new_align; ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 }; } } - } else if (struct_obj.layout == .Extern) { + } else if (struct_type.layout == .Extern) { // For extern structs, field aligment might be bigger than type's natural alignment. Eg, in // `extern struct { x: u32, y: u16 }` the second field is aligned as u32. 
const field_offset = struct_ty.structFieldOffset(field_index, mod); - ptr_ty_data.flags.alignment = Alignment.fromByteUnits( - if (parent_align == 0) 0 else std.math.gcd(field_offset, parent_align), - ); + ptr_ty_data.flags.alignment = if (parent_align == .none) + .none + else + @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset))); } else { // Our alignment is capped at the field alignment - const field_align = try sema.structFieldAlignment(field, struct_obj.layout); - ptr_ty_data.flags.alignment = Alignment.fromByteUnits(@min(field_align, parent_align)); + const field_align = try sema.structFieldAlignment( + struct_type.field_aligns.get(ip)[field_index], + field_ty.toType(), + struct_type.layout, + ); + ptr_ty_data.flags.alignment = field_align.min(parent_align); } const ptr_field_ty = try mod.ptrType(ptr_ty_data); - if (field.is_comptime) { + if (struct_type.comptime_bits.getBit(ip, field_index)) { const val = try mod.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), - .addr = .{ .comptime_field = field.default_val }, + .addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] }, } }); return Air.internedToRef(val); } @@ -26678,33 +26724,33 @@ fn structFieldVal( struct_ty: Type, ) CompileError!Air.Inst.Ref { const mod = sema.mod; + const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); try sema.resolveTypeFields(struct_ty); - switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { + switch (ip.indexToKey(struct_ty.toIntern())) { .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); - - const field_index_usize = struct_obj.fields.getIndex(field_name) orelse - return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); - const field_index: u32 = @intCast(field_index_usize); - const field = struct_obj.fields.values()[field_index]; + if (struct_type.isTuple(ip)) + return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); - if (field.is_comptime) { - return Air.internedToRef(field.default_val); + const field_index = struct_type.nameIndex(ip, field_name) orelse + return sema.failWithBadStructFieldAccess(block, struct_type, field_name_src, field_name); + if (struct_type.comptime_bits.getBit(ip, field_index)) { + return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]); } + const field_ty = struct_type.field_types.get(ip)[field_index].toType(); + if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { - if (struct_val.isUndef(mod)) return mod.undefRef(field.ty); - if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { + if (struct_val.isUndef(mod)) return mod.undefRef(field_ty); + if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| { return Air.internedToRef(opv.toIntern()); } return Air.internedToRef((try struct_val.fieldValue(mod, field_index)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); - return block.addStructFieldVal(struct_byval, field_index, field.ty); + return block.addStructFieldVal(struct_byval, field_index, field_ty); }, .anon_struct_type => |anon_struct| { if (anon_struct.names.len == 0) { @@ -26823,9 +26869,12 @@ fn unionFieldPtr( .is_volatile = union_ptr_info.flags.is_volatile, .address_space = union_ptr_info.flags.address_space, .alignment = if (union_obj.getLayout(ip) == .Auto) blk: { - const union_align = union_ptr_info.flags.alignment.toByteUnitsOptional() 
orelse try sema.typeAbiAlignment(union_ty); + const union_align = if (union_ptr_info.flags.alignment != .none) + union_ptr_info.flags.alignment + else + try sema.typeAbiAlignment(union_ty); const field_align = try sema.unionFieldAlignment(union_obj, field_index); - break :blk InternPool.Alignment.fromByteUnits(@min(union_align, field_align)); + break :blk union_align.min(field_align); } else union_ptr_info.flags.alignment, }, .packed_offset = union_ptr_info.packed_offset, @@ -28266,7 +28315,7 @@ const InMemoryCoercionResult = union(enum) { ptr_qualifiers: Qualifiers, ptr_allowzero: Pair, ptr_bit_range: BitRange, - ptr_alignment: IntPair, + ptr_alignment: AlignPair, double_ptr_to_anyopaque: Pair, slice_to_anyopaque: Pair, @@ -28312,6 +28361,11 @@ const InMemoryCoercionResult = union(enum) { wanted: u64, }; + const AlignPair = struct { + actual: Alignment, + wanted: Alignment, + }; + const Size = struct { actual: std.builtin.Type.Pointer.Size, wanted: std.builtin.Type.Pointer.Size, @@ -29133,13 +29187,17 @@ fn coerceInMemoryAllowedPtrs( if (src_info.flags.alignment != .none or dest_info.flags.alignment != .none or dest_info.child != src_info.child) { - const src_align = src_info.flags.alignment.toByteUnitsOptional() orelse + const src_align = if (src_info.flags.alignment != .none) + src_info.flags.alignment + else src_info.child.toType().abiAlignment(mod); - const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse + const dest_align = if (dest_info.flags.alignment != .none) + dest_info.flags.alignment + else dest_info.child.toType().abiAlignment(mod); - if (dest_align > src_align) { + if (dest_align.compare(.gt, src_align)) { return InMemoryCoercionResult{ .ptr_alignment = .{ .actual = src_align, .wanted = dest_align, @@ -30378,13 +30436,17 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul if (inst_info.flags.alignment == .none and dest_info.flags.alignment == .none) return true; if (len0) return true; - const inst_align = inst_info.flags.alignment.toByteUnitsOptional() orelse + const inst_align = if (inst_info.flags.alignment != .none) + inst_info.flags.alignment + else inst_info.child.toType().abiAlignment(mod); - const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse + const dest_align = if (dest_info.flags.alignment != .none) + dest_info.flags.alignment + else dest_info.child.toType().abiAlignment(mod); - if (dest_align > inst_align) { + if (dest_align.compare(.gt, inst_align)) { in_memory_result.* = .{ .ptr_alignment = .{ .actual = inst_align, .wanted = dest_align, @@ -30598,7 +30660,7 @@ fn coerceAnonStructToUnion( else .{ .count = anon_struct_type.names.len }, .struct_type => |struct_type| name: { - const field_names = mod.structPtrUnwrap(struct_type.index).?.fields.keys(); + const field_names = struct_type.field_names.get(ip); break :name if (field_names.len == 1) .{ .name = field_names[0] } else @@ -30869,8 +30931,8 @@ fn coerceTupleToStruct( return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src); } - const fields = struct_ty.structFields(mod); - const field_vals = try sema.arena.alloc(InternPool.Index, fields.count()); + const struct_type = mod.typeToStruct(struct_ty).?; + const field_vals = try sema.arena.alloc(InternPool.Index, struct_type.field_types.len); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); @@ -30878,10 +30940,7 @@ fn coerceTupleToStruct( var runtime_src: ?LazySrcLoc = null; const field_count = switch (ip.indexToKey(inst_ty.toIntern())) 
{ .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, - .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| - struct_obj.fields.count() - else - 0, + .struct_type => |s| s.field_types.len, else => unreachable, }; for (0..field_count) |field_index_usize| { @@ -30893,22 +30952,23 @@ fn coerceTupleToStruct( anon_struct_type.names.get(ip)[field_i] else try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}), - .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], + .struct_type => |s| s.field_names.get(ip)[field_i], else => unreachable, }; const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src); - const field = fields.values()[field_index]; + const field_ty = struct_type.field_types.get(ip)[field_index].toType(); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); - const coerced = try sema.coerce(block, field.ty, elem_ref, field_src); + const coerced = try sema.coerce(block, field_ty, elem_ref, field_src); field_refs[field_index] = coerced; - if (field.is_comptime) { + if (struct_type.comptime_bits.getBit(ip, field_index)) { const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { return sema.failWithNeededComptime(block, field_src, .{ .needed_comptime_reason = "value stored in comptime field must be comptime-known", }); }; - if (!init_val.eql(field.default_val.toValue(), field.ty, sema.mod)) { + const field_init = struct_type.field_inits.get(ip)[field_index].toValue(); + if (!init_val.eql(field_init, field_ty, sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } @@ -30928,10 +30988,10 @@ fn coerceTupleToStruct( for (field_refs, 0..) |*field_ref, i| { if (field_ref.* != .none) continue; - const field_name = fields.keys()[i]; - const field = fields.values()[i]; + const field_name = struct_type.field_names.get(ip)[i]; + const field_default_val = struct_type.field_inits.get(ip)[i]; const field_src = inst_src; // TODO better source location - if (field.default_val == .none) { + if (field_default_val == .none) { const template = "missing struct field: {}"; const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { @@ -30942,9 +31002,9 @@ fn coerceTupleToStruct( continue; } if (runtime_src == null) { - field_vals[i] = field.default_val; + field_vals[i] = field_default_val; } else { - field_ref.* = Air.internedToRef(field.default_val); + field_ref.* = Air.internedToRef(field_default_val); } } @@ -30980,10 +31040,7 @@ fn coerceTupleToTuple( const ip = &mod.intern_pool; const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, - .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| - struct_obj.fields.count() - else - 0, + .struct_type => |struct_type| struct_type.field_types.len, else => unreachable, }; const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count); @@ -30993,10 +31050,7 @@ fn coerceTupleToTuple( const inst_ty = sema.typeOf(inst); const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, - .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| - struct_obj.fields.count() - else - 0, + .struct_type => |struct_type| struct_type.field_types.len, else => unreachable, }; if (src_field_count > dest_field_count) return error.NotCoercible; @@ -31011,7 
+31065,7 @@ fn coerceTupleToTuple( anon_struct_type.names.get(ip)[field_i] else try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}), - .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], + .struct_type => |struct_type| struct_type.field_names.get(ip)[field_i], else => unreachable, }; @@ -31019,20 +31073,20 @@ fn coerceTupleToTuple( return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{}); const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[field_index_usize].toType(), - .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].ty, + .anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[field_index_usize], + .struct_type => |struct_type| struct_type.field_types.get(ip)[field_index_usize], else => unreachable, }; const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[field_index_usize], - .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val, + .struct_type => |struct_type| struct_type.field_inits.get(ip)[field_index_usize], else => unreachable, }; const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); - const coerced = try sema.coerce(block, field_ty, elem_ref, field_src); + const coerced = try sema.coerce(block, field_ty.toType(), elem_ref, field_src); field_refs[field_index] = coerced; if (default_val != .none) { const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { @@ -31041,7 +31095,7 @@ fn coerceTupleToTuple( }); }; - if (!init_val.eql(default_val.toValue(), field_ty, sema.mod)) { + if (!init_val.eql(default_val.toValue(), field_ty.toType(), sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } @@ -31063,7 +31117,7 @@ fn coerceTupleToTuple( const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[i], - .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val, + .struct_type => |struct_type| struct_type.field_inits.get(ip)[i], else => unreachable, }; @@ -33181,12 +33235,17 @@ fn resolvePeerTypesInner( } // Note that the align can be always non-zero; Module.ptrType will canonicalize it - ptr_info.flags.alignment = Alignment.fromByteUnits(@min( - ptr_info.flags.alignment.toByteUnitsOptional() orelse + ptr_info.flags.alignment = InternPool.Alignment.min( + if (ptr_info.flags.alignment != .none) + ptr_info.flags.alignment + else ptr_info.child.toType().abiAlignment(mod), - peer_info.flags.alignment.toByteUnitsOptional() orelse + + if (peer_info.flags.alignment != .none) + peer_info.flags.alignment + else peer_info.child.toType().abiAlignment(mod), - )); + ); if (ptr_info.flags.address_space != peer_info.flags.address_space) { return .{ .conflict = .{ .peer_idx_a = first_idx, @@ -33260,12 +33319,17 @@ fn resolvePeerTypesInner( } }; // Note that the align can be always non-zero; Type.ptr will canonicalize it - ptr_info.flags.alignment = Alignment.fromByteUnits(@min( - ptr_info.flags.alignment.toByteUnitsOptional() orelse + ptr_info.flags.alignment = Alignment.min( + if (ptr_info.flags.alignment != .none) + ptr_info.flags.alignment + else 
ptr_info.child.toType().abiAlignment(mod), - peer_info.flags.alignment.toByteUnitsOptional() orelse + + if (peer_info.flags.alignment != .none) + peer_info.flags.alignment + else peer_info.child.toType().abiAlignment(mod), - )); + ); if (ptr_info.flags.address_space != peer_info.flags.address_space) { return generic_err; @@ -34191,103 +34255,117 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { } fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; try sema.resolveTypeFields(ty); - if (mod.typeToStruct(ty)) |struct_obj| { - switch (struct_obj.status) { - .none, .have_field_types => {}, - .field_types_wip, .layout_wip => { - const msg = try Module.ErrorMsg.create( - sema.gpa, - struct_obj.srcLoc(mod), - "struct '{}' depends on itself", - .{ty.fmt(mod)}, - ); - return sema.failWithOwnedErrorMsg(null, msg); + + const mod = sema.mod; + const ip = &mod.intern_pool; + const struct_type = mod.typeToStruct(ty) orelse return; + + if (struct_type.haveLayout(ip)) + return; + + if (struct_type.layout == .Packed) { + try semaBackingIntType(mod, struct_type); + return; + } + + if (struct_type.setLayoutWip(ip)) { + const msg = try Module.ErrorMsg.create( + sema.gpa, + mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod), + "struct '{}' depends on itself", + .{ty.fmt(mod)}, + ); + return sema.failWithOwnedErrorMsg(null, msg); + } + + if (try sema.typeRequiresComptime(ty)) + return; + + const aligns = try sema.arena.alloc(Alignment, struct_type.field_types.len); + const sizes = try sema.arena.alloc(u64, struct_type.field_types.len); + + for (aligns, sizes, 0..) |*field_align, *field_size, i| { + const field_ty = struct_type.field_types.get(ip)[i].toType(); + field_size.* = sema.typeAbiSize(field_ty) catch |err| switch (err) { + error.AnalysisFail => { + const msg = sema.err orelse return err; + try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{}); + return err; }, - .have_layout, .fully_resolved_wip, .fully_resolved => return, - } - const prev_status = struct_obj.status; - errdefer if (struct_obj.status == .layout_wip) { - struct_obj.status = prev_status; + else => return err, }; + field_align.* = try sema.structFieldAlignment( + struct_type.fieldAlign(ip, i), + field_ty, + struct_type.layout, + ); + } - struct_obj.status = .layout_wip; - for (struct_obj.fields.values(), 0..) |field, i| { - sema.resolveTypeLayout(field.ty) catch |err| switch (err) { - error.AnalysisFail => { - const msg = sema.err orelse return err; - try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{}); - return err; - }, - else => return err, - }; - } + if (struct_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) { + const msg = try Module.ErrorMsg.create( + sema.gpa, + mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod), + "struct layout depends on it having runtime bits", + .{}, + ); + return sema.failWithOwnedErrorMsg(null, msg); + } - if (struct_obj.layout == .Packed) { - try semaBackingIntType(mod, struct_obj); + if (struct_type.hasReorderedFields(ip)) { + for (sizes, struct_type.runtime_order.get(ip), 0..) 
|size, *ro, i| { + ro.* = if (size != 0) @enumFromInt(i) else .omitted; } - struct_obj.status = .have_layout; - _ = try sema.typeRequiresComptime(ty); + const RuntimeOrder = InternPool.Key.StructType.RuntimeOrder; - if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) { - const msg = try Module.ErrorMsg.create( - sema.gpa, - struct_obj.srcLoc(mod), - "struct layout depends on it having runtime bits", - .{}, - ); - return sema.failWithOwnedErrorMsg(null, msg); - } + const AlignSortContext = struct { + aligns: []const Alignment, - if (struct_obj.layout == .Auto and !struct_obj.is_tuple and - mod.backendSupportsFeature(.field_reordering)) - { - const optimized_order = try mod.tmp_hack_arena.allocator().alloc(u32, struct_obj.fields.count()); - - for (struct_obj.fields.values(), 0..) |field, i| { - optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty)) - @intCast(i) - else - Module.Struct.omitted_field; + fn lessThan(ctx: @This(), a: RuntimeOrder, b: RuntimeOrder) bool { + if (a == .omitted) return false; + if (b == .omitted) return true; + const a_align = ctx.aligns[@intFromEnum(a)]; + const b_align = ctx.aligns[@intFromEnum(b)]; + return a_align.compare(.gt, b_align); } + }; + mem.sortUnstable(RuntimeOrder, struct_type.runtime_order.get(ip), AlignSortContext{ + .aligns = aligns, + }, AlignSortContext.lessThan); + } - const AlignSortContext = struct { - struct_obj: *Module.Struct, - sema: *Sema, - - fn lessThan(ctx: @This(), a: u32, b: u32) bool { - const m = ctx.sema.mod; - if (a == Module.Struct.omitted_field) return false; - if (b == Module.Struct.omitted_field) return true; - return ctx.struct_obj.fields.values()[a].ty.abiAlignment(m) > - ctx.struct_obj.fields.values()[b].ty.abiAlignment(m); - } - }; - mem.sort(u32, optimized_order, AlignSortContext{ - .struct_obj = struct_obj, - .sema = sema, - }, AlignSortContext.lessThan); - struct_obj.optimized_order = optimized_order.ptr; - } + // Calculate size, alignment, and field offsets. 
+ const offsets = struct_type.offsets.get(ip); + var it = struct_type.iterateRuntimeOrder(ip); + var offset: u64 = 0; + var big_align: Alignment = .none; + while (it.next()) |i| { + big_align = big_align.max(aligns[i]); + offsets[i] = @intCast(aligns[i].forward(offset)); + offset = offsets[i] + sizes[i]; } - // otherwise it's a tuple; no need to resolve anything + struct_type.size(ip).* = @intCast(big_align.forward(offset)); + const flags = struct_type.flagsPtr(ip); + flags.alignment = big_align; + flags.layout_resolved = true; } -fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void { +fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) CompileError!void { const gpa = mod.gpa; + const ip = &mod.intern_pool; var fields_bit_sum: u64 = 0; - for (struct_obj.fields.values()) |field| { - fields_bit_sum += field.ty.bitSize(mod); + for (0..struct_type.field_types.len) |i| { + const field_ty = struct_type.field_types.get(ip)[i].toType(); + fields_bit_sum += field_ty.bitSize(mod); } - const decl_index = struct_obj.owner_decl; + const decl_index = struct_type.decl.unwrap().?; const decl = mod.declPtr(decl_index); - const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; - const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; + const zir = mod.namespacePtr(struct_type.namespace.unwrap().?).file_scope.zir; + const extended = zir.instructions.items(.data)[struct_type.zir_index].extended; assert(extended.opcode == .struct_decl); const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); @@ -34326,7 +34404,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = struct_obj.namespace, + .namespace = struct_type.namespace.unwrap() orelse decl.src_namespace, .wip_capture_scope = try mod.createCaptureScope(decl.src_scope), .instructions = .{}, .inlining = null, @@ -34341,13 +34419,13 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref); } else { const body = zir.extra[extra_index..][0..backing_int_body_len]; - const ty_ref = try sema.resolveBody(&block, body, struct_obj.zir_index); + const ty_ref = try sema.resolveBody(&block, body, struct_type.zir_index); break :blk try sema.analyzeAsType(&block, backing_int_src, ty_ref); } }; try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum); - struct_obj.backing_int_ty = backing_int_ty; + struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); for (comptime_mutable_decls.items) |ct_decl_index| { const ct_decl = mod.declPtr(ct_decl_index); _ = try ct_decl.internValue(mod); @@ -34374,7 +34452,8 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = struct_obj.namespace, + .namespace = struct_type.namespace.unwrap() orelse + mod.declPtr(struct_type.decl.unwrap().?).src_namespace, .wip_capture_scope = undefined, .instructions = .{}, .inlining = null, @@ -34382,7 +34461,8 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi }; return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum}); } - struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum)); + const backing_int_ty = try mod.intType(.unsigned, 
@intCast(fields_bit_sum)); + struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); } } @@ -34532,30 +34612,20 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { try sema.resolveStructLayout(ty); const mod = sema.mod; - try sema.resolveTypeFields(ty); - const struct_obj = mod.typeToStruct(ty).?; + const ip = &mod.intern_pool; + const struct_type = mod.typeToStruct(ty).?; - switch (struct_obj.status) { - .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, - .fully_resolved_wip, .fully_resolved => return, - } + if (struct_type.setFullyResolved(ip)) return; + errdefer struct_type.clearFullyResolved(ip); - { - // After we have resolve struct layout we have to go over the fields again to - // make sure pointer fields get their child types resolved as well. - // See also similar code for unions. - const prev_status = struct_obj.status; - errdefer struct_obj.status = prev_status; + // After we have resolve struct layout we have to go over the fields again to + // make sure pointer fields get their child types resolved as well. + // See also similar code for unions. - struct_obj.status = .fully_resolved_wip; - for (struct_obj.fields.values()) |field| { - try sema.resolveTypeFully(field.ty); - } - struct_obj.status = .fully_resolved; + for (0..struct_type.field_types.len) |i| { + const field_ty = struct_type.field_types.get(ip)[i].toType(); + try sema.resolveTypeFully(field_ty); } - - // And let's not forget comptime-only status. - _ = try sema.typeRequiresComptime(ty); } fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { @@ -34591,8 +34661,10 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void { const mod = sema.mod; + const ip = &mod.intern_pool; + const ty_ip = ty.toIntern(); - switch (ty.toIntern()) { + switch (ty_ip) { .var_args_param_type => unreachable, .none => unreachable, @@ -34673,20 +34745,15 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void { .empty_struct => unreachable, .generic_poison => unreachable, - else => switch (mod.intern_pool.items.items(.tag)[@intFromEnum(ty.toIntern())]) { + else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) { .type_struct, .type_struct_ns, - .type_union, - .simple_type, - => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return; - try sema.resolveTypeFieldsStruct(ty, struct_obj); - }, - .union_type => |union_type| try sema.resolveTypeFieldsUnion(ty, union_type), - .simple_type => |simple_type| try sema.resolveSimpleType(simple_type), - else => unreachable, - }, + .type_struct_packed, + .type_struct_packed_inits, + => try sema.resolveTypeFieldsStruct(ty_ip, ip.indexToKey(ty_ip).struct_type), + + .type_union => try sema.resolveTypeFieldsUnion(ty_ip.toType(), ip.indexToKey(ty_ip).union_type), + .simple_type => try sema.resolveSimpleType(ip.indexToKey(ty_ip).simple_type), else => {}, }, } @@ -34716,43 +34783,44 @@ fn resolveSimpleType(sema: *Sema, simple_type: InternPool.SimpleType) CompileErr fn resolveTypeFieldsStruct( sema: *Sema, - ty: Type, - struct_obj: *Module.Struct, + ty: InternPool.Index, + struct_type: InternPool.Key.StructType, ) CompileError!void { - switch (sema.mod.declPtr(struct_obj.owner_decl).analysis) { + const mod = sema.mod; + const ip = &mod.intern_pool; + // If there is no owner decl it means the struct has no fields. 
+ const owner_decl = struct_type.decl.unwrap() orelse return; + + switch (mod.declPtr(owner_decl).analysis) { .file_failure, .dependency_failure, .sema_failure, .sema_failure_retryable, => { sema.owner_decl.analysis = .dependency_failure; - sema.owner_decl.generation = sema.mod.generation; + sema.owner_decl.generation = mod.generation; return error.AnalysisFail; }, else => {}, } - switch (struct_obj.status) { - .none => {}, - .field_types_wip => { - const msg = try Module.ErrorMsg.create( - sema.gpa, - struct_obj.srcLoc(sema.mod), - "struct '{}' depends on itself", - .{ty.fmt(sema.mod)}, - ); - return sema.failWithOwnedErrorMsg(null, msg); - }, - .have_field_types, - .have_layout, - .layout_wip, - .fully_resolved_wip, - .fully_resolved, - => return, + + if (struct_type.haveFieldTypes(ip)) + return; + + if (struct_type.flagsPtr(ip).field_types_wip) { + const msg = try Module.ErrorMsg.create( + sema.gpa, + mod.declPtr(owner_decl).srcLoc(mod), + "struct '{}' depends on itself", + .{ty.toType().fmt(mod)}, + ); + return sema.failWithOwnedErrorMsg(null, msg); } - struct_obj.status = .field_types_wip; - errdefer struct_obj.status = .none; - try semaStructFields(sema.mod, struct_obj); + struct_type.flagsPtr(ip).field_types_wip = true; + errdefer struct_type.flagsPtr(ip).field_types_wip = false; + + try semaStructFields(mod, sema.arena, struct_type); } fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Key.UnionType) CompileError!void { @@ -34936,12 +35004,19 @@ fn resolveInferredErrorSetTy( } } -fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void { +fn semaStructFields( + mod: *Module, + arena: Allocator, + struct_type: InternPool.Key.StructType, +) CompileError!void { const gpa = mod.gpa; const ip = &mod.intern_pool; - const decl_index = struct_obj.owner_decl; - const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; - const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; + const decl_index = struct_type.decl.unwrap() orelse return; + const decl = mod.declPtr(decl_index); + const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace; + const zir = mod.namespacePtr(namespace_index).file_scope.zir; + const zir_index = struct_type.zir_index; + const extended = zir.instructions.items(.data)[zir_index].extended; assert(extended.opcode == .struct_decl); const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); var extra_index: usize = extended.operand; @@ -34977,18 +35052,16 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void while (decls_it.next()) |_| {} extra_index = decls_it.extra_index; - if (fields_len == 0) { - if (struct_obj.layout == .Packed) { - try semaBackingIntType(mod, struct_obj); - } - struct_obj.status = .have_layout; - return; - } - - const decl = mod.declPtr(decl_index); - - var analysis_arena = std.heap.ArenaAllocator.init(gpa); - defer analysis_arena.deinit(); + if (fields_len == 0) switch (struct_type.layout) { + .Packed => { + try semaBackingIntType(mod, struct_type); + return; + }, + .Auto, .Extern => { + struct_type.flagsPtr(ip).layout_resolved = true; + return; + }, + }; var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); defer comptime_mutable_decls.deinit(); @@ -34996,7 +35069,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void var sema: Sema = .{ .mod = mod, .gpa = gpa, - .arena = analysis_arena.allocator(), + .arena = arena, .code = zir, .owner_decl = decl, .owner_decl_index = 
decl_index, @@ -35013,7 +35086,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = struct_obj.namespace, + .namespace = namespace_index, .wip_capture_scope = try mod.createCaptureScope(decl.src_scope), .instructions = .{}, .inlining = null, @@ -35021,9 +35094,6 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void }; defer assert(block_scope.instructions.items.len == 0); - struct_obj.fields = .{}; - try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); - const Field = struct { type_body_len: u32 = 0, align_body_len: u32 = 0, @@ -35031,7 +35101,9 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void type_ref: Zir.Inst.Ref = .none, }; const fields = try sema.arena.alloc(Field, fields_len); + var any_inits = false; + var any_aligned = false; { const bits_per_field = 4; @@ -35056,9 +35128,11 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - var field_name_zir: ?[:0]const u8 = null; + if (is_comptime) struct_type.setFieldComptime(ip, field_i); + + var opt_field_name_zir: ?[:0]const u8 = null; if (!small.is_tuple) { - field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]); + opt_field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]); extra_index += 1; } extra_index += 1; // doc_comment @@ -35073,37 +35147,27 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void extra_index += 1; // This string needs to outlive the ZIR code. - const field_name = try ip.getOrPutString(gpa, if (field_name_zir) |s| - s - else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i})); - - const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); - if (gop.found_existing) { - const msg = msg: { - const field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i }).lazy; - const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{}'", .{field_name.fmt(ip)}); - errdefer msg.destroy(gpa); + if (opt_field_name_zir) |field_name_zir| { + const field_name = try ip.getOrPutString(gpa, field_name_zir); + if (struct_type.addFieldName(ip, field_name)) |other_index| { + const msg = msg: { + const field_src = mod.fieldSrcLoc(decl_index, .{ .index = field_i }).lazy; + const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{}'", .{field_name.fmt(ip)}); + errdefer msg.destroy(gpa); - const prev_field_index = struct_obj.fields.getIndex(field_name).?; - const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index }); - try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); - try sema.errNote(&block_scope, src, msg, "struct declared here", .{}); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(&block_scope, msg); + const prev_field_src = mod.fieldSrcLoc(decl_index, .{ .index = other_index }); + try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); + try sema.errNote(&block_scope, src, msg, "struct declared here", .{}); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(&block_scope, msg); + } } - gop.value_ptr.* = .{ - .ty = Type.noreturn, - .abi_align = .none, - .default_val = .none, - .is_comptime = is_comptime, - .offset = undefined, - }; if (has_align) { fields[field_i].align_body_len = zir.extra[extra_index]; extra_index += 1; + 
any_aligned = true; } if (has_init) { fields[field_i].init_body_len = zir.extra[extra_index]; @@ -35122,7 +35186,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void if (zir_field.type_ref != .none) { break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ + const ty_src = mod.fieldSrcLoc(decl_index, .{ .index = field_i, .range = .type, }).lazy; @@ -35135,10 +35199,10 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void assert(zir_field.type_body_len != 0); const body = zir.extra[extra_index..][0..zir_field.type_body_len]; extra_index += body.len; - const ty_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index); + const ty_ref = try sema.resolveBody(&block_scope, body, zir_index); break :ty sema.analyzeAsType(&block_scope, .unneeded, ty_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ + const ty_src = mod.fieldSrcLoc(decl_index, .{ .index = field_i, .range = .type, }).lazy; @@ -35152,12 +35216,11 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void return error.GenericPoison; } - const field = &struct_obj.fields.values()[field_i]; - field.ty = field_ty; + struct_type.field_types.get(ip)[field_i] = field_ty.toIntern(); if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { - const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ + const ty_src = mod.fieldSrcLoc(decl_index, .{ .index = field_i, .range = .type, }).lazy; @@ -35171,7 +35234,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { - const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ + const ty_src = mod.fieldSrcLoc(decl_index, .{ .index = field_i, .range = .type, }).lazy; @@ -35183,45 +35246,49 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } - if (struct_obj.layout == .Extern and !try sema.validateExternType(field.ty, .struct_field)) { - const msg = msg: { - const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ - .index = field_i, - .range = .type, - }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)}); - errdefer msg.destroy(sema.gpa); + switch (struct_type.layout) { + .Extern => if (!try sema.validateExternType(field_ty, .struct_field)) { + const msg = msg: { + const ty_src = mod.fieldSrcLoc(decl_index, .{ + .index = field_i, + .range = .type, + }); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); + errdefer msg.destroy(sema.gpa); - try sema.explainWhyTypeIsNotExtern(msg, ty_src, field.ty, .struct_field); + try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .struct_field); - try sema.addDeclaredHereNote(msg, field.ty); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(&block_scope, msg); - } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) { - const msg = msg: { - const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ - .index = field_i, - .range = .type, - }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)}); - errdefer 
msg.destroy(sema.gpa); + try sema.addDeclaredHereNote(msg, field_ty); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(&block_scope, msg); + }, + .Packed => if (!validatePackedType(field_ty, mod)) { + const msg = msg: { + const ty_src = mod.fieldSrcLoc(decl_index, .{ + .index = field_i, + .range = .type, + }); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); + errdefer msg.destroy(sema.gpa); - try sema.explainWhyTypeIsNotPacked(msg, ty_src, field.ty); + try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty); - try sema.addDeclaredHereNote(msg, field.ty); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(&block_scope, msg); + try sema.addDeclaredHereNote(msg, field_ty); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(&block_scope, msg); + }, + else => {}, } if (zir_field.align_body_len > 0) { const body = zir.extra[extra_index..][0..zir_field.align_body_len]; extra_index += body.len; - const align_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index); - field.abi_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) { + const align_ref = try sema.resolveBody(&block_scope, body, zir_index); + const field_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const align_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ + const align_src = mod.fieldSrcLoc(decl_index, .{ .index = field_i, .range = .alignment, }).lazy; @@ -35230,36 +35297,38 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void }, else => |e| return e, }; + struct_type.field_aligns.get(ip)[field_i] = field_align; } extra_index += zir_field.init_body_len; } - struct_obj.status = .have_field_types; + // TODO: there seems to be no mechanism to catch when an init depends on + // another init that hasn't been resolved. if (any_inits) { extra_index = bodies_index; for (fields, 0..) 
|zir_field, field_i| { + const field_ty = struct_type.field_types.get(ip)[field_i].toType(); extra_index += zir_field.type_body_len; extra_index += zir_field.align_body_len; if (zir_field.init_body_len > 0) { const body = zir.extra[extra_index..][0..zir_field.init_body_len]; extra_index += body.len; - const init = try sema.resolveBody(&block_scope, body, struct_obj.zir_index); - const field = &struct_obj.fields.values()[field_i]; - const coerced = sema.coerce(&block_scope, field.ty, init, .unneeded) catch |err| switch (err) { + const init = try sema.resolveBody(&block_scope, body, zir_index); + const coerced = sema.coerce(&block_scope, field_ty, init, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ + const init_src = mod.fieldSrcLoc(decl_index, .{ .index = field_i, .range = .value, }).lazy; - _ = try sema.coerce(&block_scope, field.ty, init, init_src); + _ = try sema.coerce(&block_scope, field_ty, init, init_src); unreachable; }, else => |e| return e, }; const default_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { - const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ + const init_src = mod.fieldSrcLoc(decl_index, .{ .index = field_i, .range = .value, }).lazy; @@ -35267,7 +35336,8 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .needed_comptime_reason = "struct field default value must be comptime-known", }); }; - field.default_val = try default_val.intern(field.ty, mod); + const field_init = try default_val.intern(field_ty, mod); + struct_type.field_inits.get(ip)[field_i] = field_init; } } } @@ -35275,8 +35345,6 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const ct_decl = mod.declPtr(ct_decl_index); _ = try ct_decl.internValue(mod); } - - struct_obj.have_field_inits = true; } fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.UnionType) CompileError!void { @@ -36060,6 +36128,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .type_struct, .type_struct_ns, .type_struct_anon, + .type_struct_packed, + .type_struct_packed_inits, .type_tuple_anon, .type_union, => switch (ip.indexToKey(ty.toIntern())) { @@ -36081,41 +36151,46 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .struct_type => |struct_type| { try sema.resolveTypeFields(ty); - if (mod.structPtrUnwrap(struct_type.index)) |s| { - const field_vals = try sema.arena.alloc(InternPool.Index, s.fields.count()); - for (field_vals, s.fields.values(), 0..) |*field_val, field, i| { - if (field.is_comptime) { - field_val.* = field.default_val; - continue; - } - if (field.ty.eql(ty, mod)) { - const msg = try Module.ErrorMsg.create( - sema.gpa, - s.srcLoc(mod), - "struct '{}' depends on itself", - .{ty.fmt(mod)}, - ); - try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(null, msg); - } - if (try sema.typeHasOnePossibleValue(field.ty)) |field_opv| { - field_val.* = try field_opv.intern(field.ty, mod); - } else return null; - } - // In this case the struct has no runtime-known fields and + if (struct_type.field_types.len == 0) { + // In this case the struct has no fields at all and // therefore has one possible value. 
return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), - .storage = .{ .elems = field_vals }, + .storage = .{ .elems = &.{} }, } })).toValue(); } - // In this case the struct has no fields at all and + const field_vals = try sema.arena.alloc( + InternPool.Index, + struct_type.field_types.len, + ); + for (field_vals, 0..) |*field_val, i| { + if (struct_type.comptime_bits.getBit(ip, i)) { + field_val.* = struct_type.field_inits.get(ip)[i]; + continue; + } + const field_ty = struct_type.field_types.get(ip)[i].toType(); + if (field_ty.eql(ty, mod)) { + const msg = try Module.ErrorMsg.create( + sema.gpa, + mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod), + "struct '{}' depends on itself", + .{ty.fmt(mod)}, + ); + try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{}); + return sema.failWithOwnedErrorMsg(null, msg); + } + if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| { + field_val.* = try field_opv.intern(field_ty, mod); + } else return null; + } + + // In this case the struct has no runtime-known fields and // therefore has one possible value. return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), - .storage = .{ .elems = &.{} }, + .storage = .{ .elems = field_vals }, } })).toValue(); }, @@ -36574,25 +36649,32 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { => true, }, .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; - switch (struct_obj.requires_comptime) { + if (struct_type.layout == .Packed) { + // packed structs cannot be comptime-only because they have a well-defined + // memory layout and every field has a well-defined bit pattern. + return false; + } + switch (struct_type.flagsPtr(ip).requires_comptime) { .no, .wip => return false, .yes => return true, .unknown => { - if (struct_obj.status == .field_types_wip) + if (struct_type.flagsPtr(ip).field_types_wip) return false; - try sema.resolveTypeFieldsStruct(ty, struct_obj); + try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); + + struct_type.flagsPtr(ip).requires_comptime = .wip; - struct_obj.requires_comptime = .wip; - for (struct_obj.fields.values()) |field| { - if (field.is_comptime) continue; - if (try sema.typeRequiresComptime(field.ty)) { - struct_obj.requires_comptime = .yes; + for (0..struct_type.field_types.len) |i_usize| { + const i: u32 = @intCast(i_usize); + if (struct_type.fieldIsComptime(ip, i)) continue; + const field_ty = struct_type.field_types.get(ip)[i]; + if (try sema.typeRequiresComptime(field_ty.toType())) { + struct_type.setRequiresComptime(ip); return true; } } - struct_obj.requires_comptime = .no; + struct_type.flagsPtr(ip).requires_comptime = .no; return false; }, } @@ -36673,40 +36755,41 @@ fn typeAbiSize(sema: *Sema, ty: Type) !u64 { return ty.abiSize(sema.mod); } -fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 { +fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment { return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar; } /// Not valid to call for packed unions. /// Keep implementation in sync with `Module.unionFieldNormalAlignment`. 
-/// TODO: this returns alignment in byte units should should be a u64 -fn unionFieldAlignment(sema: *Sema, u: InternPool.UnionType, field_index: u32) !u32 { +fn unionFieldAlignment(sema: *Sema, u: InternPool.UnionType, field_index: u32) !Alignment { const mod = sema.mod; const ip = &mod.intern_pool; - if (u.fieldAlign(ip, field_index).toByteUnitsOptional()) |a| return @intCast(a); + const field_align = u.fieldAlign(ip, field_index); + if (field_align != .none) return field_align; const field_ty = u.field_types.get(ip)[field_index].toType(); - if (field_ty.isNoReturn(sema.mod)) return 0; - return @intCast(try sema.typeAbiAlignment(field_ty)); + if (field_ty.isNoReturn(sema.mod)) return .none; + return sema.typeAbiAlignment(field_ty); } -/// Keep implementation in sync with `Module.Struct.Field.alignment`. -fn structFieldAlignment(sema: *Sema, field: Module.Struct.Field, layout: std.builtin.Type.ContainerLayout) !u32 { +/// Keep implementation in sync with `Module.structFieldAlignment`. +fn structFieldAlignment( + sema: *Sema, + explicit_alignment: InternPool.Alignment, + field_ty: Type, + layout: std.builtin.Type.ContainerLayout, +) !Alignment { + if (explicit_alignment != .none) + return explicit_alignment; const mod = sema.mod; - if (field.abi_align.toByteUnitsOptional()) |a| { - assert(layout != .Packed); - return @intCast(a); - } switch (layout) { - .Packed => return 0, - .Auto => if (mod.getTarget().ofmt != .c) { - return sema.typeAbiAlignment(field.ty); - }, + .Packed => return .none, + .Auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty), .Extern => {}, } // extern - const ty_abi_align = try sema.typeAbiAlignment(field.ty); - if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { - return @max(ty_abi_align, 16); + const ty_abi_align = try sema.typeAbiAlignment(field_ty); + if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) { + return ty_abi_align.max(.@"16"); } return ty_abi_align; } @@ -36752,14 +36835,14 @@ fn structFieldIndex( field_src: LazySrcLoc, ) !u32 { const mod = sema.mod; + const ip = &mod.intern_pool; try sema.resolveTypeFields(struct_ty); if (struct_ty.isAnonStruct(mod)) { return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); } else { - const struct_obj = mod.typeToStruct(struct_ty).?; - const field_index_usize = struct_obj.fields.getIndex(field_name) orelse - return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); - return @intCast(field_index_usize); + const struct_type = mod.typeToStruct(struct_ty).?; + return struct_type.nameIndex(ip, field_name) orelse + return sema.failWithBadStructFieldAccess(block, struct_type, field_src, field_name); } } @@ -36776,13 +36859,7 @@ fn anonStructFieldIndex( .anon_struct_type => |anon_struct_type| for (anon_struct_type.names.get(ip), 0..) |name, i| { if (name == field_name) return @intCast(i); }, - .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { - for (struct_obj.fields.keys(), 0..) |name, i| { - if (name == field_name) { - return @intCast(i); - } - } - }, + .struct_type => |struct_type| if (struct_type.nameIndex(ip, field_name)) |i| return i, else => unreachable, } return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{ @@ -37167,8 +37244,8 @@ fn intFitsInType( // If it is u16 or bigger we know the alignment fits without resolving it. 
if (info.bits >= max_needed_bits) return true; const x = try sema.typeAbiAlignment(lazy_ty.toType()); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @intFromBool(info.signedness == .signed); + if (x == .none) return true; + const actual_needed_bits = @as(usize, x.toLog2Units()) + 1 + @intFromBool(info.signedness == .signed); return info.bits >= actual_needed_bits; }, .lazy_size => |lazy_ty| { @@ -37381,7 +37458,7 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { const vector_info: struct { host_size: u16 = 0, - alignment: u32 = 0, + alignment: Alignment = .none, vector_index: VI = .none, } = if (parent_ty.isVector(mod) and ptr_info.flags.size == .One) blk: { const elem_bits = elem_ty.bitSize(mod); @@ -37391,7 +37468,7 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { break :blk .{ .host_size = @intCast(parent_ty.arrayLen(mod)), - .alignment = @intCast(parent_ty.abiAlignment(mod)), + .alignment = parent_ty.abiAlignment(mod), .vector_index = if (offset) |some| @enumFromInt(some) else .runtime, }; } else .{}; @@ -37399,9 +37476,9 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { const alignment: Alignment = a: { // Calculate the new pointer alignment. if (ptr_info.flags.alignment == .none) { - if (vector_info.alignment != 0) break :a Alignment.fromNonzeroByteUnits(vector_info.alignment); - // ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness. - break :a .none; + // In case of an ABI-aligned pointer, any pointer arithmetic + // maintains the same ABI-alignedness. + break :a vector_info.alignment; } // If the addend is not a comptime-known value we can still count on // it being a multiple of the type size. @@ -37413,7 +37490,7 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { // non zero). const new_align: Alignment = @enumFromInt(@min( @ctz(addend), - @intFromEnum(ptr_info.flags.alignment), + ptr_info.flags.alignment.toLog2Units(), )); assert(new_align != .none); break :a new_align; |
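Several hunks above move from byte-unit alignment arithmetic (with 0 standing in for "use the ABI alignment") to calls made directly on the log2 Alignment value: field_align.min(parent_align), big_align.max(aligns[i]), aligns[i].forward(offset), dest_align.compare(.gt, src_align). The following is a minimal standalone sketch of such a log2 alignment type, intended only to show why those operations need no conversion back to byte units; it is not the compiler's actual InternPool.Alignment definition, and the tag layout and helper bodies here are illustrative assumptions.

const std = @import("std");

/// Illustrative stand-in for a log2 alignment type: the integer tag is
/// log2(alignment in bytes), and `none` means "use the ABI alignment".
const Alignment = enum(u6) {
    @"1" = 0,
    @"2" = 1,
    @"4" = 2,
    @"8" = 3,
    @"16" = 4,
    none = std.math.maxInt(u6),
    _,

    fn fromByteUnits(n: u64) Alignment {
        if (n == 0) return .none;
        std.debug.assert(std.math.isPowerOfTwo(n));
        return @enumFromInt(@ctz(n));
    }

    fn toByteUnits(a: Alignment) u64 {
        std.debug.assert(a != .none);
        return @as(u64, 1) << @intFromEnum(a);
    }

    // A smaller log2 tag means a smaller alignment, so min/max work directly
    // on the tags. Because `none` uses the largest tag value, `min` prefers
    // any explicit alignment over `none`.
    fn min(a: Alignment, b: Alignment) Alignment {
        return @enumFromInt(@min(@intFromEnum(a), @intFromEnum(b)));
    }

    fn max(a: Alignment, b: Alignment) Alignment {
        std.debug.assert(a != .none and b != .none);
        return @enumFromInt(@max(@intFromEnum(a), @intFromEnum(b)));
    }

    fn compare(a: Alignment, op: std.math.CompareOperator, b: Alignment) bool {
        std.debug.assert(a != .none and b != .none);
        return std.math.compare(@intFromEnum(a), op, @intFromEnum(b));
    }

    /// Round `addr` up to this alignment; this is the `forward` used when
    /// laying out struct field offsets.
    fn forward(a: Alignment, addr: u64) u64 {
        std.debug.assert(a != .none);
        const mask = a.toByteUnits() - 1;
        return (addr + mask) & ~mask;
    }
};

test "log2 alignment arithmetic" {
    // Two fields: a u8 (align 1, size 1) followed by a u32 (align 4, size 4).
    const aligns = [_]Alignment{ .@"1", Alignment.fromByteUnits(4) };
    const sizes = [_]u64{ 1, 4 };

    var offsets: [2]u64 = undefined;
    var offset: u64 = 0;
    var big_align: Alignment = .@"1";
    for (aligns, sizes, 0..) |field_align, size, i| {
        big_align = big_align.max(field_align);
        offsets[i] = field_align.forward(offset);
        offset = offsets[i] + size;
    }

    try std.testing.expectEqual(@as(u64, 0), offsets[0]);
    try std.testing.expectEqual(@as(u64, 4), offsets[1]);
    try std.testing.expectEqual(@as(u64, 8), big_align.forward(offset));
    try std.testing.expect(Alignment.@"4".compare(.gt, .@"1"));
    try std.testing.expectEqual(Alignment.@"4", Alignment.@"4".min(.none));
}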
