From b4bb64ce78bf2dee9437f366a362ef4d8c77b204 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 11 Aug 2024 03:14:12 -0700 Subject: sema: rework type resolution to use Zcu when possible --- src/codegen/spirv.zig | 667 +++++++++++++++++++++++++------------------------- 1 file changed, 334 insertions(+), 333 deletions(-) (limited to 'src/codegen/spirv.zig') diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index a89dd8f10b..44b48efc43 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -436,16 +436,16 @@ const NavGen = struct { /// Fetch the result-id for a previously generated instruction or constant. fn resolve(self: *NavGen, inst: Air.Inst.Ref) !IdRef { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; if (try self.air.value(inst, pt)) |val| { const ty = self.typeOf(inst); - if (ty.zigTypeTag(mod) == .Fn) { - const fn_nav = switch (mod.intern_pool.indexToKey(val.ip_index)) { + if (ty.zigTypeTag(zcu) == .Fn) { + const fn_nav = switch (zcu.intern_pool.indexToKey(val.ip_index)) { .@"extern" => |@"extern"| @"extern".owner_nav, .func => |func| func.owner_nav, else => unreachable, }; - const spv_decl_index = try self.object.resolveNav(mod, fn_nav); + const spv_decl_index = try self.object.resolveNav(zcu, fn_nav); try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {}); return self.spv.declPtr(spv_decl_index).result_id; } @@ -459,8 +459,8 @@ const NavGen = struct { fn resolveUav(self: *NavGen, val: InternPool.Index) !IdRef { // TODO: This cannot be a function at this point, but it should probably be handled anyway. - const mod = self.pt.zcu; - const ty = Type.fromInterned(mod.intern_pool.typeOf(val)); + const zcu = self.pt.zcu; + const ty = Type.fromInterned(zcu.intern_pool.typeOf(val)); const decl_ptr_ty_id = try self.ptrType(ty, .Generic); const spv_decl_index = blk: { @@ -639,15 +639,15 @@ const NavGen = struct { /// Checks whether the type can be directly translated to SPIR-V vectors fn isSpvVector(self: *NavGen, ty: Type) bool { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const target = self.getTarget(); - if (ty.zigTypeTag(mod) != .Vector) return false; + if (ty.zigTypeTag(zcu) != .Vector) return false; // TODO: This check must be expanded for types that can be represented // as integers (enums / packed structs?) and types that are represented // by multiple SPIR-V values. 
- const scalar_ty = ty.scalarType(mod); - switch (scalar_ty.zigTypeTag(mod)) { + const scalar_ty = ty.scalarType(zcu); + switch (scalar_ty.zigTypeTag(zcu)) { .Bool, .Int, .Float, @@ -655,24 +655,24 @@ const NavGen = struct { else => return false, } - const elem_ty = ty.childType(mod); + const elem_ty = ty.childType(zcu); - const len = ty.vectorLen(mod); - const is_scalar = elem_ty.isNumeric(mod) or elem_ty.toIntern() == .bool_type; + const len = ty.vectorLen(zcu); + const is_scalar = elem_ty.isNumeric(zcu) or elem_ty.toIntern() == .bool_type; const spirv_len = len > 1 and len <= 4; const opencl_len = if (target.os.tag == .opencl) (len == 8 or len == 16) else false; return is_scalar and (spirv_len or opencl_len); } fn arithmeticTypeInfo(self: *NavGen, ty: Type) ArithmeticTypeInfo { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const target = self.getTarget(); - var scalar_ty = ty.scalarType(mod); - if (scalar_ty.zigTypeTag(mod) == .Enum) { - scalar_ty = scalar_ty.intTagType(mod); + var scalar_ty = ty.scalarType(zcu); + if (scalar_ty.zigTypeTag(zcu) == .Enum) { + scalar_ty = scalar_ty.intTagType(zcu); } - const vector_len = if (ty.isVector(mod)) ty.vectorLen(mod) else null; - return switch (scalar_ty.zigTypeTag(mod)) { + const vector_len = if (ty.isVector(zcu)) ty.vectorLen(zcu) else null; + return switch (scalar_ty.zigTypeTag(zcu)) { .Bool => ArithmeticTypeInfo{ .bits = 1, // Doesn't matter for this class. .backing_bits = self.backingIntBits(1).?, @@ -688,7 +688,7 @@ const NavGen = struct { .class = .float, }, .Int => blk: { - const int_info = scalar_ty.intInfo(mod); + const int_info = scalar_ty.intInfo(zcu); // TODO: Maybe it's useful to also return this value. const maybe_backing_bits = self.backingIntBits(int_info.bits); break :blk ArithmeticTypeInfo{ @@ -741,9 +741,9 @@ const NavGen = struct { /// the value to an unsigned int first for Kernels. fn constInt(self: *NavGen, ty: Type, value: anytype, repr: Repr) !IdRef { // TODO: Cache? - const mod = self.pt.zcu; - const scalar_ty = ty.scalarType(mod); - const int_info = scalar_ty.intInfo(mod); + const zcu = self.pt.zcu; + const scalar_ty = ty.scalarType(zcu); + const int_info = scalar_ty.intInfo(zcu); // Use backing bits so that negatives are sign extended const backing_bits = self.backingIntBits(int_info.bits).?; // Assertion failure means big int @@ -783,11 +783,11 @@ const NavGen = struct { else => unreachable, // TODO: Large integer constants } - if (!ty.isVector(mod)) { + if (!ty.isVector(zcu)) { return result_id; } - const n = ty.vectorLen(mod); + const n = ty.vectorLen(zcu); const ids = try self.gpa.alloc(IdRef, n); defer self.gpa.free(ids); @memset(ids, result_id); @@ -821,8 +821,8 @@ const NavGen = struct { /// Construct a vector at runtime. /// ty must be an vector type. fn constructVector(self: *NavGen, ty: Type, constituents: []const IdRef) !IdRef { - const mod = self.pt.zcu; - assert(ty.vectorLen(mod) == constituents.len); + const zcu = self.pt.zcu; + assert(ty.vectorLen(zcu) == constituents.len); // Note: older versions of the Khronos SPRIV-LLVM translator crash on this instruction // because it cannot construct structs which' operands are not constant. @@ -845,8 +845,8 @@ const NavGen = struct { /// Construct a vector at runtime with all lanes set to the same value. /// ty must be an vector type. 
fn constructVectorSplat(self: *NavGen, ty: Type, constituent: IdRef) !IdRef { - const mod = self.pt.zcu; - const n = ty.vectorLen(mod); + const zcu = self.pt.zcu; + const n = ty.vectorLen(zcu); const constituents = try self.gpa.alloc(IdRef, n); defer self.gpa.free(constituents); @@ -884,13 +884,13 @@ const NavGen = struct { } const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const target = self.getTarget(); const result_ty_id = try self.resolveType(ty, repr); - const ip = &mod.intern_pool; + const ip = &zcu.intern_pool; log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(pt), val.fmtValue(pt) }); - if (val.isUndefDeep(mod)) { + if (val.isUndefDeep(zcu)) { return self.spv.constUndef(result_ty_id); } @@ -937,17 +937,17 @@ const NavGen = struct { .false, .true => break :cache try self.constBool(val.toBool(), repr), }, .int => { - if (ty.isSignedInt(mod)) { - break :cache try self.constInt(ty, val.toSignedInt(pt), repr); + if (ty.isSignedInt(zcu)) { + break :cache try self.constInt(ty, val.toSignedInt(zcu), repr); } else { - break :cache try self.constInt(ty, val.toUnsignedInt(pt), repr); + break :cache try self.constInt(ty, val.toUnsignedInt(zcu), repr); } }, .float => { const lit: spec.LiteralContextDependentNumber = switch (ty.floatBits(target)) { - 16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, pt))) }, - 32 => .{ .float32 = val.toFloat(f32, pt) }, - 64 => .{ .float64 = val.toFloat(f64, pt) }, + 16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, zcu))) }, + 32 => .{ .float32 = val.toFloat(f32, zcu) }, + 64 => .{ .float64 = val.toFloat(f64, zcu) }, 80, 128 => unreachable, // TODO else => unreachable, }; @@ -968,17 +968,17 @@ const NavGen = struct { // allows it. For now, just generate it here regardless. const err_int_ty = try pt.errorIntType(); const err_ty = switch (error_union.val) { - .err_name => ty.errorUnionSet(mod), + .err_name => ty.errorUnionSet(zcu), .payload => err_int_ty, }; const err_val = switch (error_union.val) { .err_name => |err_name| Value.fromInterned(try pt.intern(.{ .err = .{ - .ty = ty.errorUnionSet(mod).toIntern(), + .ty = ty.errorUnionSet(zcu).toIntern(), .name = err_name, } })), .payload => try pt.intValue(err_int_ty, 0), }; - const payload_ty = ty.errorUnionPayload(mod); + const payload_ty = ty.errorUnionPayload(zcu); const eu_layout = self.errorUnionLayout(payload_ty); if (!eu_layout.payload_has_bits) { // We use the error type directly as the type. 
@@ -1006,12 +1006,12 @@ const NavGen = struct { }, .enum_tag => { const int_val = try val.intFromEnum(ty, pt); - const int_ty = ty.intTagType(mod); + const int_ty = ty.intTagType(zcu); break :cache try self.constant(int_ty, int_val, repr); }, .ptr => return self.constantPtr(val), .slice => |slice| { - const ptr_ty = ty.slicePtrFieldType(mod); + const ptr_ty = ty.slicePtrFieldType(zcu); const ptr_id = try self.constantPtr(Value.fromInterned(slice.ptr)); const len_id = try self.constant(Type.usize, Value.fromInterned(slice.len), .indirect); return self.constructStruct( @@ -1021,12 +1021,12 @@ const NavGen = struct { ); }, .opt => { - const payload_ty = ty.optionalChild(mod); - const maybe_payload_val = val.optionalValue(mod); + const payload_ty = ty.optionalChild(zcu); + const maybe_payload_val = val.optionalValue(zcu); - if (!payload_ty.hasRuntimeBits(pt)) { + if (!payload_ty.hasRuntimeBits(zcu)) { break :cache try self.constBool(maybe_payload_val != null, .indirect); - } else if (ty.optionalReprIsPayload(mod)) { + } else if (ty.optionalReprIsPayload(zcu)) { // Optional representation is a nullable pointer or slice. if (maybe_payload_val) |payload_val| { return try self.constant(payload_ty, payload_val, .indirect); @@ -1054,7 +1054,7 @@ const NavGen = struct { inline .array_type, .vector_type => |array_type, tag| { const elem_ty = Type.fromInterned(array_type.child); - const constituents = try self.gpa.alloc(IdRef, @intCast(ty.arrayLenIncludingSentinel(mod))); + const constituents = try self.gpa.alloc(IdRef, @intCast(ty.arrayLenIncludingSentinel(zcu))); defer self.gpa.free(constituents); const child_repr: Repr = switch (tag) { @@ -1088,7 +1088,7 @@ const NavGen = struct { } }, .struct_type => { - const struct_type = mod.typeToStruct(ty).?; + const struct_type = zcu.typeToStruct(ty).?; if (struct_type.layout == .@"packed") { return self.todo("packed struct constants", .{}); } @@ -1102,7 +1102,7 @@ const NavGen = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) { // This is a zero-bit field - we only needed it for the alignment. continue; } @@ -1121,10 +1121,10 @@ const NavGen = struct { else => unreachable, }, .un => |un| { - const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?; - const union_obj = mod.typeToUnion(ty).?; + const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?; + const union_obj = zcu.typeToUnion(ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]); - const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(pt)) + const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(zcu)) try self.constant(field_ty, Value.fromInterned(un.val), .direct) else null; @@ -1232,8 +1232,8 @@ const NavGen = struct { // TODO: Merge this function with constantDeclRef. 
const pt = self.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const ty_id = try self.resolveType(ty, .direct); const uav_ty = Type.fromInterned(ip.typeOf(uav.val)); @@ -1243,14 +1243,14 @@ const NavGen = struct { else => {}, } - // const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { + // const is_fn_body = decl_ty.zigTypeTag(zcu) == .Fn; + if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { // Pointer to nothing - return undefined return self.spv.constUndef(ty_id); } // Uav refs are always generic. - assert(ty.ptrAddressSpace(mod) == .generic); + assert(ty.ptrAddressSpace(zcu) == .generic); const decl_ptr_ty_id = try self.ptrType(uav_ty, .Generic); const ptr_id = try self.resolveUav(uav.val); @@ -1270,12 +1270,12 @@ const NavGen = struct { fn constantNavRef(self: *NavGen, ty: Type, nav_index: InternPool.Nav.Index) !IdRef { const pt = self.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const ty_id = try self.resolveType(ty, .direct); const nav = ip.getNav(nav_index); - const nav_val = mod.navValue(nav_index); - const nav_ty = nav_val.typeOf(mod); + const nav_val = zcu.navValue(nav_index); + const nav_ty = nav_val.typeOf(zcu); switch (ip.indexToKey(nav_val.toIntern())) { .func => { @@ -1287,12 +1287,12 @@ const NavGen = struct { else => {}, } - if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { + if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { // Pointer to nothing - return undefined. return self.spv.constUndef(ty_id); } - const spv_decl_index = try self.object.resolveNav(mod, nav_index); + const spv_decl_index = try self.object.resolveNav(zcu, nav_index); const spv_decl = self.spv.declPtr(spv_decl_index); const decl_id = switch (spv_decl.kind) { @@ -1452,9 +1452,9 @@ const NavGen = struct { /// } /// If any of the fields' size is 0, it will be omitted. fn resolveUnionType(self: *NavGen, ty: Type) !IdRef { - const mod = self.pt.zcu; - const ip = &mod.intern_pool; - const union_obj = mod.typeToUnion(ty).?; + const zcu = self.pt.zcu; + const ip = &zcu.intern_pool; + const union_obj = zcu.typeToUnion(ty).?; if (union_obj.flagsUnordered(ip).layout == .@"packed") { return self.todo("packed union types", .{}); @@ -1503,12 +1503,12 @@ const NavGen = struct { } fn resolveFnReturnType(self: *NavGen, ret_ty: Type) !IdRef { - const pt = self.pt; - if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { + const zcu = self.pt.zcu; + if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) { // If the return type is an error set or an error union, then we make this // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. 
- if (ret_ty.isError(pt.zcu)) { + if (ret_ty.isError(zcu)) { return self.resolveType(Type.anyerror, .direct); } else { return self.resolveType(Type.void, .direct); @@ -1531,14 +1531,14 @@ const NavGen = struct { fn resolveTypeInner(self: *NavGen, ty: Type, repr: Repr) Error!IdRef { const pt = self.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; log.debug("resolveType: ty = {}", .{ty.fmt(pt)}); const target = self.getTarget(); const section = &self.spv.sections.types_globals_constants; - switch (ty.zigTypeTag(mod)) { + switch (ty.zigTypeTag(zcu)) { .NoReturn => { assert(repr == .direct); return try self.spv.voidType(); @@ -1562,7 +1562,7 @@ const NavGen = struct { .indirect => return try self.resolveType(Type.u1, .indirect), }, .Int => { - const int_info = ty.intInfo(mod); + const int_info = ty.intInfo(zcu); if (int_info.bits == 0) { // Some times, the backend will be asked to generate a pointer to i0. OpTypeInt // with 0 bits is invalid, so return an opaque type in this case. @@ -1577,7 +1577,7 @@ const NavGen = struct { return try self.intType(int_info.signedness, int_info.bits); }, .Enum => { - const tag_ty = ty.intTagType(mod); + const tag_ty = ty.intTagType(zcu); return try self.resolveType(tag_ty, repr); }, .Float => { @@ -1599,13 +1599,13 @@ const NavGen = struct { return try self.spv.floatType(bits); }, .Array => { - const elem_ty = ty.childType(mod); + const elem_ty = ty.childType(zcu); const elem_ty_id = try self.resolveType(elem_ty, .indirect); - const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse { - return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)}); + const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(zcu)) orelse { + return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(zcu)}); }; - if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) { // The size of the array would be 0, but that is not allowed in SPIR-V. // This path can be reached when the backend is asked to generate a pointer to // an array of some zero-bit type. This should always be an indirect path. 
@@ -1635,7 +1635,7 @@ const NavGen = struct { }, .Fn => switch (repr) { .direct => { - const fn_info = mod.typeToFunc(ty).?; + const fn_info = zcu.typeToFunc(ty).?; comptime assert(zig_call_abi_ver == 3); switch (fn_info.cc) { @@ -1653,7 +1653,7 @@ const NavGen = struct { var param_index: usize = 0; for (fn_info.param_types.get(ip)) |param_ty_index| { const param_ty = Type.fromInterned(param_ty_index); - if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; param_ty_ids[param_index] = try self.resolveType(param_ty, .direct); param_index += 1; @@ -1677,7 +1677,7 @@ const NavGen = struct { }, }, .Pointer => { - const ptr_info = ty.ptrInfo(mod); + const ptr_info = ty.ptrInfo(zcu); const storage_class = self.spvStorageClass(ptr_info.flags.address_space); const ptr_ty_id = try self.ptrType(Type.fromInterned(ptr_info.child), storage_class); @@ -1693,9 +1693,9 @@ const NavGen = struct { ); }, .Vector => { - const elem_ty = ty.childType(mod); + const elem_ty = ty.childType(zcu); const elem_ty_id = try self.resolveType(elem_ty, repr); - const len = ty.vectorLen(mod); + const len = ty.vectorLen(zcu); if (self.isSpvVector(ty)) { return try self.spv.vectorType(len, elem_ty_id); @@ -1711,7 +1711,7 @@ const NavGen = struct { var member_index: usize = 0; for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue; member_types[member_index] = try self.resolveType(Type.fromInterned(field_ty), .indirect); member_index += 1; @@ -1740,13 +1740,13 @@ const NavGen = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) { // This is a zero-bit field - we only needed it for the alignment. continue; } const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse - try ip.getOrPutStringFmt(mod.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); + try ip.getOrPutStringFmt(zcu.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); try member_types.append(try self.resolveType(field_ty, .indirect)); try member_names.append(field_name.toSlice(ip)); } @@ -1758,8 +1758,8 @@ const NavGen = struct { return result_id; }, .Optional => { - const payload_ty = ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { + const payload_ty = ty.optionalChild(zcu); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { // Just use a bool. // Note: Always generate the bool with indirect format, to save on some sanity // Perform the conversion to a direct bool when the field is extracted. @@ -1767,7 +1767,7 @@ const NavGen = struct { } const payload_ty_id = try self.resolveType(payload_ty, .indirect); - if (ty.optionalReprIsPayload(mod)) { + if (ty.optionalReprIsPayload(zcu)) { // Optional is actually a pointer or a slice. 
return payload_ty_id; } @@ -1782,7 +1782,7 @@ const NavGen = struct { .Union => return try self.resolveUnionType(ty), .ErrorSet => return try self.resolveType(Type.u16, repr), .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); + const payload_ty = ty.errorUnionPayload(zcu); const error_ty_id = try self.resolveType(Type.anyerror, .indirect); const eu_layout = self.errorUnionLayout(payload_ty); @@ -1877,13 +1877,14 @@ const NavGen = struct { fn errorUnionLayout(self: *NavGen, payload_ty: Type) ErrorUnionLayout { const pt = self.pt; + const zcu = pt.zcu; - const error_align = Type.anyerror.abiAlignment(pt); - const payload_align = payload_ty.abiAlignment(pt); + const error_align = Type.anyerror.abiAlignment(zcu); + const payload_align = payload_ty.abiAlignment(zcu); const error_first = error_align.compare(.gt, payload_align); return .{ - .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt), + .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu), .error_first = error_first, }; } @@ -1908,10 +1909,10 @@ const NavGen = struct { fn unionLayout(self: *NavGen, ty: Type) UnionLayout { const pt = self.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; - const layout = ty.unionGetLayout(pt); - const union_obj = mod.typeToUnion(ty).?; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const layout = ty.unionGetLayout(zcu); + const union_obj = zcu.typeToUnion(ty).?; var union_layout = UnionLayout{ .has_payload = layout.payload_size != 0, @@ -1931,7 +1932,7 @@ const NavGen = struct { const most_aligned_field = layout.most_aligned_field; const most_aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[most_aligned_field]); union_layout.payload_ty = most_aligned_field_ty; - union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(pt)); + union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(zcu)); } else { union_layout.payload_size = 0; } @@ -1998,12 +1999,12 @@ const NavGen = struct { } fn materialize(self: Temporary, ng: *NavGen) !IdResult { - const mod = ng.pt.zcu; + const zcu = ng.pt.zcu; switch (self.value) { .singleton => |id| return id, .exploded_vector => |range| { - assert(self.ty.isVector(mod)); - assert(self.ty.vectorLen(mod) == range.len); + assert(self.ty.isVector(zcu)); + assert(self.ty.vectorLen(zcu) == range.len); const consituents = try ng.gpa.alloc(IdRef, range.len); defer ng.gpa.free(consituents); for (consituents, 0..range.len) |*id, i| { @@ -2028,18 +2029,18 @@ const NavGen = struct { /// 'Explode' a temporary into separate elements. This turns a vector /// into a bag of elements. fn explode(self: Temporary, ng: *NavGen) !IdRange { - const mod = ng.pt.zcu; + const zcu = ng.pt.zcu; // If the value is a scalar, then this is a no-op. - if (!self.ty.isVector(mod)) { + if (!self.ty.isVector(zcu)) { return switch (self.value) { .singleton => |id| .{ .base = @intFromEnum(id), .len = 1 }, .exploded_vector => |range| range, }; } - const ty_id = try ng.resolveType(self.ty.scalarType(mod), .direct); - const n = self.ty.vectorLen(mod); + const ty_id = try ng.resolveType(self.ty.scalarType(zcu), .direct); + const n = self.ty.vectorLen(zcu); const results = ng.spv.allocIds(n); const id = switch (self.value) { @@ -2087,13 +2088,13 @@ const NavGen = struct { /// only checks the size, but the source-of-truth is implemented /// by `isSpvVector()`. 
fn fromType(ty: Type, ng: *NavGen) Vectorization { - const mod = ng.pt.zcu; - if (!ty.isVector(mod)) { + const zcu = ng.pt.zcu; + if (!ty.isVector(zcu)) { return .scalar; } else if (ng.isSpvVector(ty)) { - return .{ .spv_vectorized = ty.vectorLen(mod) }; + return .{ .spv_vectorized = ty.vectorLen(zcu) }; } else { - return .{ .unrolled = ty.vectorLen(mod) }; + return .{ .unrolled = ty.vectorLen(zcu) }; } } @@ -2339,10 +2340,10 @@ const NavGen = struct { /// This function builds an OpSConvert of OpUConvert depending on the /// signedness of the types. fn buildIntConvert(self: *NavGen, dst_ty: Type, src: Temporary) !Temporary { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; - const dst_ty_id = try self.resolveType(dst_ty.scalarType(mod), .direct); - const src_ty_id = try self.resolveType(src.ty.scalarType(mod), .direct); + const dst_ty_id = try self.resolveType(dst_ty.scalarType(zcu), .direct); + const src_ty_id = try self.resolveType(src.ty.scalarType(zcu), .direct); const v = self.vectorization(.{ dst_ty, src }); const result_ty = try v.resultType(self, dst_ty); @@ -2363,7 +2364,7 @@ const NavGen = struct { const op_result_ty = try v.operationType(self, dst_ty); const op_result_ty_id = try self.resolveType(op_result_ty, .direct); - const opcode: Opcode = if (dst_ty.isSignedInt(mod)) .OpSConvert else .OpUConvert; + const opcode: Opcode = if (dst_ty.isSignedInt(zcu)) .OpSConvert else .OpUConvert; const op_src = try v.prepare(self, src); @@ -2418,7 +2419,7 @@ const NavGen = struct { } fn buildSelect(self: *NavGen, condition: Temporary, lhs: Temporary, rhs: Temporary) !Temporary { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const v = self.vectorization(.{ condition, lhs, rhs }); const ops = v.operations(); @@ -2428,7 +2429,7 @@ const NavGen = struct { const op_result_ty_id = try self.resolveType(op_result_ty, .direct); const result_ty = try v.resultType(self, lhs.ty); - assert(condition.ty.scalarType(mod).zigTypeTag(mod) == .Bool); + assert(condition.ty.scalarType(zcu).zigTypeTag(zcu) == .Bool); const cond = try v.prepare(self, condition); const object_1 = try v.prepare(self, lhs); @@ -2764,9 +2765,9 @@ const NavGen = struct { rhs: Temporary, ) !struct { Temporary, Temporary } { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const target = self.getTarget(); - const ip = &mod.intern_pool; + const ip = &zcu.intern_pool; const v = lhs.vectorization(self).unify(rhs.vectorization(self)); const ops = v.operations(); @@ -2814,7 +2815,7 @@ const NavGen = struct { // where T is maybe vectorized. 
const types = [2]InternPool.Index{ arith_op_ty.toIntern(), arith_op_ty.toIntern() }; const values = [2]InternPool.Index{ .none, .none }; - const index = try ip.getAnonStructType(mod.gpa, pt.tid, .{ + const index = try ip.getAnonStructType(zcu.gpa, pt.tid, .{ .types = &types, .values = &values, .names = &.{}, @@ -2941,17 +2942,17 @@ const NavGen = struct { fn genNav(self: *NavGen) !void { const pt = self.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; - const spv_decl_index = try self.object.resolveNav(mod, self.owner_nav); + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const spv_decl_index = try self.object.resolveNav(zcu, self.owner_nav); const result_id = self.spv.declPtr(spv_decl_index).result_id; const nav = ip.getNav(self.owner_nav); - const val = mod.navValue(self.owner_nav); - const ty = val.typeOf(mod); + const val = zcu.navValue(self.owner_nav); + const ty = val.typeOf(zcu); switch (self.spv.declPtr(spv_decl_index).kind) { .func => { - const fn_info = mod.typeToFunc(ty).?; + const fn_info = zcu.typeToFunc(ty).?; const return_ty_id = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type)); const prototype_ty_id = try self.resolveType(ty, .direct); @@ -2969,7 +2970,7 @@ const NavGen = struct { try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len); for (fn_info.param_types.get(ip)) |param_ty_index| { const param_ty = Type.fromInterned(param_ty_index); - if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; const param_type_id = try self.resolveType(param_ty, .direct); const arg_result_id = self.spv.allocId(); @@ -3116,8 +3117,8 @@ const NavGen = struct { /// Convert representation from indirect (in memory) to direct (in 'register') /// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct). fn convertToDirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef { - const mod = self.pt.zcu; - switch (ty.scalarType(mod).zigTypeTag(mod)) { + const zcu = self.pt.zcu; + switch (ty.scalarType(zcu).zigTypeTag(zcu)) { .Bool => { const false_id = try self.constBool(false, .indirect); // The operation below requires inputs in direct representation, but the operand @@ -3142,8 +3143,8 @@ const NavGen = struct { /// Convert representation from direct (in 'register) to direct (in memory) /// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect). 
fn convertToIndirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef { - const mod = self.pt.zcu; - switch (ty.scalarType(mod).zigTypeTag(mod)) { + const zcu = self.pt.zcu; + switch (ty.scalarType(zcu).zigTypeTag(zcu)) { .Bool => { const result = try self.intFromBool(Temporary.init(ty, operand_id)); return try result.materialize(self); @@ -3219,8 +3220,8 @@ const NavGen = struct { } fn genInst(self: *NavGen, inst: Air.Inst.Index) !void { - const mod = self.pt.zcu; - const ip = &mod.intern_pool; + const zcu = self.pt.zcu; + const ip = &zcu.intern_pool; if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) return; @@ -3399,7 +3400,7 @@ const NavGen = struct { } fn airShift(self: *NavGen, inst: Air.Inst.Index, unsigned: BinaryOp, signed: BinaryOp) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const base = try self.temporary(bin_op.lhs); @@ -3420,7 +3421,7 @@ const NavGen = struct { // Note: The sign may differ here between the shift and the base type, in case // of an arithmetic right shift. SPIR-V still expects the same type, // so in that case we have to cast convert to signed. - const casted_shift = try self.buildIntConvert(base.ty.scalarType(mod), shift); + const casted_shift = try self.buildIntConvert(base.ty.scalarType(zcu), shift); const shifted = switch (info.signedness) { .unsigned => try self.buildBinary(unsigned, base, casted_shift), @@ -3477,7 +3478,7 @@ const NavGen = struct { /// All other values are returned unmodified (this makes strange integer /// wrapping easier to use in generic operations). fn normalize(self: *NavGen, value: Temporary, info: ArithmeticTypeInfo) !Temporary { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const ty = value.ty; switch (info.class) { .integer, .bool, .float => return value, @@ -3485,13 +3486,13 @@ const NavGen = struct { .strange_integer => switch (info.signedness) { .unsigned => { const mask_value = if (info.bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @as(u6, @intCast(info.bits))) - 1; - const mask_id = try self.constInt(ty.scalarType(mod), mask_value, .direct); - return try self.buildBinary(.bit_and, value, Temporary.init(ty.scalarType(mod), mask_id)); + const mask_id = try self.constInt(ty.scalarType(zcu), mask_value, .direct); + return try self.buildBinary(.bit_and, value, Temporary.init(ty.scalarType(zcu), mask_id)); }, .signed => { // Shift left and right so that we can copy the sight bit that way. - const shift_amt_id = try self.constInt(ty.scalarType(mod), info.backing_bits - info.bits, .direct); - const shift_amt = Temporary.init(ty.scalarType(mod), shift_amt_id); + const shift_amt_id = try self.constInt(ty.scalarType(zcu), info.backing_bits - info.bits, .direct); + const shift_amt = Temporary.init(ty.scalarType(zcu), shift_amt_id); const left = try self.buildBinary(.sll, value, shift_amt); return try self.buildBinary(.sra, left, shift_amt); }, @@ -3897,7 +3898,7 @@ const NavGen = struct { } fn airShlOverflow(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3916,7 +3917,7 @@ const NavGen = struct { // Sometimes Zig doesn't make both of the arguments the same types here. SPIR-V expects that, // so just manually upcast it if required. 
- const casted_shift = try self.buildIntConvert(base.ty.scalarType(mod), shift); + const casted_shift = try self.buildIntConvert(base.ty.scalarType(zcu), shift); const left = try self.buildBinary(.sll, base, casted_shift); const result = try self.normalize(left, info); @@ -3955,12 +3956,12 @@ const NavGen = struct { fn airClzCtz(self: *NavGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef { if (self.liveness.isUnused(inst)) return null; - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const target = self.getTarget(); const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.temporary(ty_op.operand); - const scalar_result_ty = self.typeOfIndex(inst).scalarType(mod); + const scalar_result_ty = self.typeOfIndex(inst).scalarType(zcu); const info = self.arithmeticTypeInfo(operand.ty); switch (info.class) { @@ -4004,16 +4005,16 @@ const NavGen = struct { } fn airReduce(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce; const operand = try self.resolve(reduce.operand); const operand_ty = self.typeOf(reduce.operand); - const scalar_ty = operand_ty.scalarType(mod); + const scalar_ty = operand_ty.scalarType(zcu); const scalar_ty_id = try self.resolveType(scalar_ty, .direct); const info = self.arithmeticTypeInfo(operand_ty); - const len = operand_ty.vectorLen(mod); + const len = operand_ty.vectorLen(zcu); const first = try self.extractVectorComponent(scalar_ty, operand, 0); @@ -4080,7 +4081,7 @@ const NavGen = struct { fn airShuffle(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolve(extra.a); @@ -4092,7 +4093,7 @@ const NavGen = struct { const a_ty = self.typeOf(extra.a); const b_ty = self.typeOf(extra.b); - const scalar_ty = result_ty.scalarType(mod); + const scalar_ty = result_ty.scalarType(zcu); const scalar_ty_id = try self.resolveType(scalar_ty, .direct); // If all of the types are SPIR-V vectors, we can use OpVectorShuffle. @@ -4100,20 +4101,20 @@ const NavGen = struct { // The SPIR-V shuffle instruction is similar to the Air instruction, except that the elements are // numbered consecutively instead of using negatives. - const components = try self.gpa.alloc(Word, result_ty.vectorLen(mod)); + const components = try self.gpa.alloc(Word, result_ty.vectorLen(zcu)); defer self.gpa.free(components); - const a_len = a_ty.vectorLen(mod); + const a_len = a_ty.vectorLen(zcu); for (components, 0..) |*component, i| { const elem = try mask.elemValue(pt, i); - if (elem.isUndef(mod)) { + if (elem.isUndef(zcu)) { // This is explicitly valid for OpVectorShuffle, it indicates undefined. component.* = 0xFFFF_FFFF; continue; } - const index = elem.toSignedInt(pt); + const index = elem.toSignedInt(zcu); if (index >= 0) { component.* = @intCast(index); } else { @@ -4134,17 +4135,17 @@ const NavGen = struct { // Fall back to manually extracting and inserting components. - const components = try self.gpa.alloc(IdRef, result_ty.vectorLen(mod)); + const components = try self.gpa.alloc(IdRef, result_ty.vectorLen(zcu)); defer self.gpa.free(components); for (components, 0..) 
|*id, i| { const elem = try mask.elemValue(pt, i); - if (elem.isUndef(mod)) { + if (elem.isUndef(zcu)) { id.* = try self.spv.constUndef(scalar_ty_id); continue; } - const index = elem.toSignedInt(pt); + const index = elem.toSignedInt(zcu); if (index >= 0) { id.* = try self.extractVectorComponent(scalar_ty, a, @intCast(index)); } else { @@ -4218,10 +4219,10 @@ const NavGen = struct { } fn ptrAdd(self: *NavGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const result_ty_id = try self.resolveType(result_ty, .direct); - switch (ptr_ty.ptrSize(mod)) { + switch (ptr_ty.ptrSize(zcu)) { .One => { // Pointer to array // TODO: Is this correct? @@ -4275,15 +4276,15 @@ const NavGen = struct { rhs: Temporary, ) !Temporary { const pt = self.pt; - const mod = pt.zcu; - const scalar_ty = lhs.ty.scalarType(mod); - const is_vector = lhs.ty.isVector(mod); + const zcu = pt.zcu; + const scalar_ty = lhs.ty.scalarType(zcu); + const is_vector = lhs.ty.isVector(zcu); - switch (scalar_ty.zigTypeTag(mod)) { + switch (scalar_ty.zigTypeTag(zcu)) { .Int, .Bool, .Float => {}, .Enum => { assert(!is_vector); - const ty = lhs.ty.intTagType(mod); + const ty = lhs.ty.intTagType(zcu); return try self.cmp(op, lhs.pun(ty), rhs.pun(ty)); }, .ErrorSet => { @@ -4321,10 +4322,10 @@ const NavGen = struct { const ty = lhs.ty; - const payload_ty = ty.optionalChild(mod); - if (ty.optionalReprIsPayload(mod)) { - assert(payload_ty.hasRuntimeBitsIgnoreComptime(pt)); - assert(!payload_ty.isSlice(mod)); + const payload_ty = ty.optionalChild(zcu); + if (ty.optionalReprIsPayload(zcu)) { + assert(payload_ty.hasRuntimeBitsIgnoreComptime(zcu)); + assert(!payload_ty.isSlice(zcu)); return try self.cmp(op, lhs.pun(payload_ty), rhs.pun(payload_ty)); } @@ -4332,12 +4333,12 @@ const NavGen = struct { const lhs_id = try lhs.materialize(self); const rhs_id = try rhs.materialize(self); - const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) + const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) try self.extractField(Type.bool, lhs_id, 1) else try self.convertToDirect(Type.bool, lhs_id); - const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) + const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) try self.extractField(Type.bool, rhs_id, 1) else try self.convertToDirect(Type.bool, rhs_id); @@ -4345,7 +4346,7 @@ const NavGen = struct { const lhs_valid = Temporary.init(Type.bool, lhs_valid_id); const rhs_valid = Temporary.init(Type.bool, rhs_valid_id); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { return try self.cmp(op, lhs_valid, rhs_valid); } @@ -4465,7 +4466,7 @@ const NavGen = struct { src_ty: Type, src_id: IdRef, ) !IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const src_ty_id = try self.resolveType(src_ty, .direct); const dst_ty_id = try self.resolveType(dst_ty, .direct); @@ -4477,7 +4478,7 @@ const NavGen = struct { // TODO: Some more cases are missing here // See fn bitCast in llvm.zig - if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) { + if (src_ty.zigTypeTag(zcu) == .Int and dst_ty.isPtrAtRuntime(zcu)) { const result_id = self.spv.allocId(); try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{ .id_result_type = dst_ty_id, @@ -4490,7 +4491,7 @@ const NavGen = struct { // We can only use OpBitcast for specific conversions: between numerical types, and // between pointers. 
If the resolved spir-v types fall into this category then emit OpBitcast, // otherwise use a temporary and perform a pointer cast. - const can_bitcast = (src_ty.isNumeric(mod) and dst_ty.isNumeric(mod)) or (src_ty.isPtrAtRuntime(mod) and dst_ty.isPtrAtRuntime(mod)); + const can_bitcast = (src_ty.isNumeric(zcu) and dst_ty.isNumeric(zcu)) or (src_ty.isPtrAtRuntime(zcu) and dst_ty.isPtrAtRuntime(zcu)); if (can_bitcast) { const result_id = self.spv.allocId(); try self.func.body.emit(self.spv.gpa, .OpBitcast, .{ @@ -4519,7 +4520,7 @@ const NavGen = struct { // the result here. // TODO: This detail could cause stuff like @as(*const i1, @ptrCast(&@as(u1, 1))) to break // should we change the representation of strange integers? - if (dst_ty.zigTypeTag(mod) == .Int) { + if (dst_ty.zigTypeTag(zcu) == .Int) { const info = self.arithmeticTypeInfo(dst_ty); const result = try self.normalize(Temporary.init(dst_ty, result_id), info); return try result.materialize(self); @@ -4675,19 +4676,19 @@ const NavGen = struct { fn airArrayToSlice(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const array_ptr_ty = self.typeOf(ty_op.operand); - const array_ty = array_ptr_ty.childType(mod); + const array_ty = array_ptr_ty.childType(zcu); const slice_ty = self.typeOfIndex(inst); - const elem_ptr_ty = slice_ty.slicePtrFieldType(mod); + const elem_ptr_ty = slice_ty.slicePtrFieldType(zcu); const elem_ptr_ty_id = try self.resolveType(elem_ptr_ty, .direct); const array_ptr_id = try self.resolve(ty_op.operand); - const len_id = try self.constInt(Type.usize, array_ty.arrayLen(mod), .direct); + const len_id = try self.constInt(Type.usize, array_ty.arrayLen(zcu), .direct); - const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(pt)) + const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(zcu)) // Note: The pointer is something like *opaque{}, so we need to bitcast it to the element type. try self.bitCast(elem_ptr_ty, array_ptr_ty, array_ptr_id) else @@ -4720,16 +4721,16 @@ const NavGen = struct { fn airAggregateInit(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const result_ty = self.typeOfIndex(inst); - const len: usize = @intCast(result_ty.arrayLen(mod)); + const len: usize = @intCast(result_ty.arrayLen(zcu)); const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]); - switch (result_ty.zigTypeTag(mod)) { + switch (result_ty.zigTypeTag(zcu)) { .Struct => { - if (mod.typeToPackedStruct(result_ty)) |struct_type| { + if (zcu.typeToPackedStruct(result_ty)) |struct_type| { _ = struct_type; unreachable; // TODO } @@ -4744,7 +4745,7 @@ const NavGen = struct { .anon_struct_type => |tuple| { for (tuple.types.get(ip), elements, 0..) 
|field_ty, element, i| { if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; - assert(Type.fromInterned(field_ty).hasRuntimeBits(pt)); + assert(Type.fromInterned(field_ty).hasRuntimeBits(zcu)); const id = try self.resolve(element); types[index] = Type.fromInterned(field_ty); @@ -4759,7 +4760,7 @@ const NavGen = struct { const field_index = it.next().?; if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - assert(field_ty.hasRuntimeBitsIgnoreComptime(pt)); + assert(field_ty.hasRuntimeBitsIgnoreComptime(zcu)); const id = try self.resolve(element); types[index] = field_ty; @@ -4777,7 +4778,7 @@ const NavGen = struct { ); }, .Vector => { - const n_elems = result_ty.vectorLen(mod); + const n_elems = result_ty.vectorLen(zcu); const elem_ids = try self.gpa.alloc(IdRef, n_elems); defer self.gpa.free(elem_ids); @@ -4788,8 +4789,8 @@ const NavGen = struct { return try self.constructVector(result_ty, elem_ids); }, .Array => { - const array_info = result_ty.arrayInfo(mod); - const n_elems: usize = @intCast(result_ty.arrayLenIncludingSentinel(mod)); + const array_info = result_ty.arrayInfo(zcu); + const n_elems: usize = @intCast(result_ty.arrayLenIncludingSentinel(zcu)); const elem_ids = try self.gpa.alloc(IdRef, n_elems); defer self.gpa.free(elem_ids); @@ -4810,14 +4811,14 @@ const NavGen = struct { fn sliceOrArrayLen(self: *NavGen, operand_id: IdRef, ty: Type) !IdRef { const pt = self.pt; - const mod = pt.zcu; - switch (ty.ptrSize(mod)) { + const zcu = pt.zcu; + switch (ty.ptrSize(zcu)) { .Slice => return self.extractField(Type.usize, operand_id, 1), .One => { - const array_ty = ty.childType(mod); - const elem_ty = array_ty.childType(mod); - const abi_size = elem_ty.abiSize(pt); - const size = array_ty.arrayLenIncludingSentinel(mod) * abi_size; + const array_ty = ty.childType(zcu); + const elem_ty = array_ty.childType(zcu); + const abi_size = elem_ty.abiSize(zcu); + const size = array_ty.arrayLenIncludingSentinel(zcu) * abi_size; return try self.constInt(Type.usize, size, .direct); }, .Many, .C => unreachable, @@ -4825,9 +4826,9 @@ const NavGen = struct { } fn sliceOrArrayPtr(self: *NavGen, operand_id: IdRef, ty: Type) !IdRef { - const mod = self.pt.zcu; - if (ty.isSlice(mod)) { - const ptr_ty = ty.slicePtrFieldType(mod); + const zcu = self.pt.zcu; + if (ty.isSlice(zcu)) { + const ptr_ty = ty.slicePtrFieldType(zcu); return self.extractField(ptr_ty, operand_id, 0); } return operand_id; @@ -4857,11 +4858,11 @@ const NavGen = struct { } fn airSliceElemPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const slice_ty = self.typeOf(bin_op.lhs); - if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; + if (!slice_ty.isVolatilePtr(zcu) and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -4874,28 +4875,28 @@ const NavGen = struct { } fn airSliceElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; + if (!slice_ty.isVolatilePtr(zcu) 
and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); - const ptr_ty = slice_ty.slicePtrFieldType(mod); + const ptr_ty = slice_ty.slicePtrFieldType(zcu); const ptr_ty_id = try self.resolveType(ptr_ty, .direct); const slice_ptr = try self.extractField(ptr_ty, slice_id, 0); const elem_ptr = try self.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{}); - return try self.load(slice_ty.childType(mod), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(mod) }); + return try self.load(slice_ty.childType(zcu), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(zcu) }); } fn ptrElemPtr(self: *NavGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; // Construct new pointer type for the resulting pointer - const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. - const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(mod))); - if (ptr_ty.isSinglePointer(mod)) { + const elem_ty = ptr_ty.elemType2(zcu); // use elemType() so that we get T for *[N]T. + const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(zcu))); + if (ptr_ty.isSinglePointer(zcu)) { // Pointer-to-array. In this case, the resulting pointer is not of the same type // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain. return try self.accessChainId(elem_ptr_ty_id, ptr_id, &.{index_id}); @@ -4907,14 +4908,14 @@ const NavGen = struct { fn airPtrElemPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const src_ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = src_ptr_ty.childType(mod); + const elem_ty = src_ptr_ty.childType(zcu); const ptr_id = try self.resolve(bin_op.lhs); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) { const dst_ptr_ty = self.typeOfIndex(inst); return try self.bitCast(dst_ptr_ty, src_ptr_ty, ptr_id); } @@ -4924,10 +4925,10 @@ const NavGen = struct { } fn airArrayElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const array_ty = self.typeOf(bin_op.lhs); - const elem_ty = array_ty.childType(mod); + const elem_ty = array_ty.childType(zcu); const array_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -4946,7 +4947,7 @@ const NavGen = struct { // For now, just generate a temporary and use that. // TODO: This backend probably also should use isByRef from llvm... 
- const is_vector = array_ty.isVector(mod); + const is_vector = array_ty.isVector(zcu); const elem_repr: Repr = if (is_vector) .direct else .indirect; const ptr_array_ty_id = try self.ptrType2(array_ty, .Function, .direct); @@ -4985,26 +4986,26 @@ const NavGen = struct { } fn airPtrElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = self.typeOfIndex(inst); const ptr_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); const elem_ptr_id = try self.ptrElemPtr(ptr_ty, ptr_id, index_id); - return try self.load(elem_ty, elem_ptr_id, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) }); + return try self.load(elem_ty, elem_ptr_id, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) }); } fn airVectorStoreElem(self: *NavGen, inst: Air.Inst.Index) !void { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem; const extra = self.air.extraData(Air.Bin, data.payload).data; const vector_ptr_ty = self.typeOf(data.vector_ptr); - const vector_ty = vector_ptr_ty.childType(mod); - const scalar_ty = vector_ty.scalarType(mod); + const vector_ty = vector_ptr_ty.childType(zcu); + const scalar_ty = vector_ty.scalarType(zcu); - const storage_class = self.spvStorageClass(vector_ptr_ty.ptrAddressSpace(mod)); + const storage_class = self.spvStorageClass(vector_ptr_ty.ptrAddressSpace(zcu)); const scalar_ptr_ty_id = try self.ptrType(scalar_ty, storage_class); const vector_ptr = try self.resolve(data.vector_ptr); @@ -5013,30 +5014,30 @@ const NavGen = struct { const elem_ptr_id = try self.accessChainId(scalar_ptr_ty_id, vector_ptr, &.{index}); try self.store(scalar_ty, elem_ptr_id, operand, .{ - .is_volatile = vector_ptr_ty.isVolatilePtr(mod), + .is_volatile = vector_ptr_ty.isVolatilePtr(zcu), }); } fn airSetUnionTag(self: *NavGen, inst: Air.Inst.Index) !void { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const un_ptr_ty = self.typeOf(bin_op.lhs); - const un_ty = un_ptr_ty.childType(mod); + const un_ty = un_ptr_ty.childType(zcu); const layout = self.unionLayout(un_ty); if (layout.tag_size == 0) return; - const tag_ty = un_ty.unionTagTypeSafety(mod).?; - const tag_ptr_ty_id = try self.ptrType(tag_ty, self.spvStorageClass(un_ptr_ty.ptrAddressSpace(mod))); + const tag_ty = un_ty.unionTagTypeSafety(zcu).?; + const tag_ptr_ty_id = try self.ptrType(tag_ty, self.spvStorageClass(un_ptr_ty.ptrAddressSpace(zcu))); const union_ptr_id = try self.resolve(bin_op.lhs); const new_tag_id = try self.resolve(bin_op.rhs); if (!layout.has_payload) { - try self.store(tag_ty, union_ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) }); + try self.store(tag_ty, union_ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(zcu) }); } else { const ptr_id = try self.accessChain(tag_ptr_ty_id, union_ptr_id, &.{layout.tag_index}); - try self.store(tag_ty, ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) }); + try self.store(tag_ty, ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(zcu) }); } } @@ -5044,14 +5045,14 @@ const NavGen = struct { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const un_ty = self.typeOf(ty_op.operand); - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const layout = 
self.unionLayout(un_ty); if (layout.tag_size == 0) return null; const union_handle = try self.resolve(ty_op.operand); if (!layout.has_payload) return union_handle; - const tag_ty = un_ty.unionTagTypeSafety(mod).?; + const tag_ty = un_ty.unionTagTypeSafety(zcu).?; return try self.extractField(tag_ty, union_handle, layout.tag_index); } @@ -5068,9 +5069,9 @@ const NavGen = struct { // Note: The result here is not cached, because it generates runtime code. const pt = self.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; - const union_ty = mod.typeToUnion(ty).?; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const union_ty = zcu.typeToUnion(ty).?; const tag_ty = Type.fromInterned(union_ty.enum_tag_ty); if (union_ty.flagsUnordered(ip).layout == .@"packed") { @@ -5082,7 +5083,7 @@ const NavGen = struct { const tag_int = if (layout.tag_size != 0) blk: { const tag_val = try pt.enumValueFieldIndex(tag_ty, active_field); const tag_int_val = try tag_val.intFromEnum(tag_ty, pt); - break :blk tag_int_val.toUnsignedInt(pt); + break :blk tag_int_val.toUnsignedInt(zcu); } else 0; if (!layout.has_payload) { @@ -5099,7 +5100,7 @@ const NavGen = struct { } const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]); - if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function); const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index}); const active_pl_ptr_ty_id = try self.ptrType(payload_ty, .Function); @@ -5123,15 +5124,15 @@ const NavGen = struct { fn airUnionInit(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const ty = self.typeOfIndex(inst); - const union_obj = mod.typeToUnion(ty).?; + const union_obj = zcu.typeToUnion(ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]); - const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(pt)) + const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(zcu)) try self.resolve(extra.init) else null; @@ -5140,23 +5141,23 @@ const NavGen = struct { fn airStructFieldVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; const object_ty = self.typeOf(struct_field.struct_operand); const object_id = try self.resolve(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = object_ty.structFieldType(field_index, mod); + const field_ty = object_ty.structFieldType(field_index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return null; + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null; - switch (object_ty.zigTypeTag(mod)) { - .Struct => switch (object_ty.containerLayout(mod)) { + switch (object_ty.zigTypeTag(zcu)) { + .Struct => switch (object_ty.containerLayout(zcu)) { .@"packed" => unreachable, // TODO else => return try self.extractField(field_ty, object_id, field_index), }, - .Union => switch (object_ty.containerLayout(mod)) { + .Union => switch (object_ty.containerLayout(zcu)) { .@"packed" => unreachable, // TODO else => 
{ // Store, ptr-elem-ptr, pointer-cast, load @@ -5185,16 +5186,16 @@ const NavGen = struct { fn airFieldParentPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; - const parent_ty = ty_pl.ty.toType().childType(mod); + const parent_ty = ty_pl.ty.toType().childType(zcu); const result_ty_id = try self.resolveType(ty_pl.ty.toType(), .indirect); const field_ptr = try self.resolve(extra.field_ptr); const field_ptr_int = try self.intFromPtr(field_ptr); - const field_offset = parent_ty.structFieldOffset(extra.field_index, pt); + const field_offset = parent_ty.structFieldOffset(extra.field_index, zcu); const base_ptr_int = base_ptr_int: { if (field_offset == 0) break :base_ptr_int field_ptr_int; @@ -5319,10 +5320,10 @@ const NavGen = struct { } fn airAlloc(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const ptr_ty = self.typeOfIndex(inst); - assert(ptr_ty.ptrAddressSpace(mod) == .generic); - const child_ty = ptr_ty.childType(mod); + assert(ptr_ty.ptrAddressSpace(zcu) == .generic); + const child_ty = ptr_ty.childType(zcu); return try self.alloc(child_ty, .{}); } @@ -5494,9 +5495,9 @@ const NavGen = struct { // ir.Block in a different SPIR-V block. const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const ty = self.typeOfIndex(inst); - const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(pt); + const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu); const cf = switch (self.control_flow) { .structured => |*cf| cf, @@ -5570,7 +5571,7 @@ const NavGen = struct { const sblock = cf.block_stack.getLast(); - if (ty.isNoReturn(mod)) { + if (ty.isNoReturn(zcu)) { // If this block is noreturn, this instruction is the last of a block, // and we must simply jump to the block's merge unconditionally. try self.structuredBreak(next_block); @@ -5626,13 +5627,13 @@ const NavGen = struct { } fn airBr(self: *NavGen, inst: Air.Inst.Index) !void { - const pt = self.pt; + const zcu = self.pt.zcu; const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br; const operand_ty = self.typeOf(br.operand); switch (self.control_flow) { .structured => |*cf| { - if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { + if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { const operand_id = try self.resolve(br.operand); const block_result_var_id = cf.block_results.get(br.block_inst).?; try self.store(operand_ty, block_result_var_id, operand_id, .{}); @@ -5643,7 +5644,7 @@ const NavGen = struct { }, .unstructured => |cf| { const block = cf.blocks.get(br.block_inst).?; - if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { + if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { const operand_id = try self.resolve(br.operand); // current_block_label should not be undefined here, lest there // is a br or br_void in the function's body. 
@@ -5770,35 +5771,35 @@ const NavGen = struct { } fn airLoad(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ptr_ty = self.typeOf(ty_op.operand); const elem_ty = self.typeOfIndex(inst); const operand = try self.resolve(ty_op.operand); - if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; + if (!ptr_ty.isVolatilePtr(zcu) and self.liveness.isUnused(inst)) return null; - return try self.load(elem_ty, operand, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) }); + return try self.load(elem_ty, operand, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) }); } fn airStore(self: *NavGen, inst: Air.Inst.Index) !void { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(mod); + const elem_ty = ptr_ty.childType(zcu); const ptr = try self.resolve(bin_op.lhs); const value = try self.resolve(bin_op.rhs); - try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) }); + try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) }); } fn airRet(self: *NavGen, inst: Air.Inst.Index) !void { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const operand = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ret_ty = self.typeOf(operand); - if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { - const fn_info = mod.typeToFunc(mod.navValue(self.owner_nav).typeOf(mod)).?; - if (Type.fromInterned(fn_info.return_type).isError(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + const fn_info = zcu.typeToFunc(zcu.navValue(self.owner_nav).typeOf(zcu)).?; + if (Type.fromInterned(fn_info.return_type).isError(zcu)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. @@ -5815,14 +5816,14 @@ const NavGen = struct { fn airRetLoad(self: *NavGen, inst: Air.Inst.Index) !void { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ptr_ty = self.typeOf(un_op); - const ret_ty = ptr_ty.childType(mod); + const ret_ty = ptr_ty.childType(zcu); - if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { - const fn_info = mod.typeToFunc(mod.navValue(self.owner_nav).typeOf(mod)).?; - if (Type.fromInterned(fn_info.return_type).isError(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + const fn_info = zcu.typeToFunc(zcu.navValue(self.owner_nav).typeOf(zcu)).?; + if (Type.fromInterned(fn_info.return_type).isError(zcu)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. 
@@ -5834,14 +5835,14 @@ const NavGen = struct { } const ptr = try self.resolve(un_op); - const value = try self.load(ret_ty, ptr, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) }); + const value = try self.load(ret_ty, ptr, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) }); try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = value, }); } fn airTry(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const err_union_id = try self.resolve(pl_op.operand); const extra = self.air.extraData(Air.Try, pl_op.payload); @@ -5854,7 +5855,7 @@ const NavGen = struct { const eu_layout = self.errorUnionLayout(payload_ty); - if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { + if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) { const err_id = if (eu_layout.payload_has_bits) try self.extractField(Type.anyerror, err_union_id, eu_layout.errorFieldIndex()) else @@ -5911,18 +5912,18 @@ const NavGen = struct { } fn airErrUnionErr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const err_union_ty = self.typeOf(ty_op.operand); const err_ty_id = try self.resolveType(Type.anyerror, .direct); - if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { + if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) { // No error possible, so just return undefined. return try self.spv.constUndef(err_ty_id); } - const payload_ty = err_union_ty.errorUnionPayload(mod); + const payload_ty = err_union_ty.errorUnionPayload(zcu); const eu_layout = self.errorUnionLayout(payload_ty); if (!eu_layout.payload_has_bits) { @@ -5947,10 +5948,10 @@ const NavGen = struct { } fn airWrapErrUnionErr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_union_ty = self.typeOfIndex(inst); - const payload_ty = err_union_ty.errorUnionPayload(mod); + const payload_ty = err_union_ty.errorUnionPayload(zcu); const operand_id = try self.resolve(ty_op.operand); const eu_layout = self.errorUnionLayout(payload_ty); @@ -5995,28 +5996,28 @@ const NavGen = struct { fn airIsNull(self: *NavGen, inst: Air.Inst.Index, is_pointer: bool, pred: enum { is_null, is_non_null }) !?IdRef { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand_id = try self.resolve(un_op); const operand_ty = self.typeOf(un_op); - const optional_ty = if (is_pointer) operand_ty.childType(mod) else operand_ty; - const payload_ty = optional_ty.optionalChild(mod); + const optional_ty = if (is_pointer) operand_ty.childType(zcu) else operand_ty; + const payload_ty = optional_ty.optionalChild(zcu); const bool_ty_id = try self.resolveType(Type.bool, .direct); - if (optional_ty.optionalReprIsPayload(mod)) { + if (optional_ty.optionalReprIsPayload(zcu)) { // Pointer payload represents nullability: pointer or slice. 
const loaded_id = if (is_pointer) try self.load(optional_ty, operand_id, .{}) else operand_id; - const ptr_ty = if (payload_ty.isSlice(mod)) - payload_ty.slicePtrFieldType(mod) + const ptr_ty = if (payload_ty.isSlice(zcu)) + payload_ty.slicePtrFieldType(zcu) else payload_ty; - const ptr_id = if (payload_ty.isSlice(mod)) + const ptr_id = if (payload_ty.isSlice(zcu)) try self.extractField(ptr_ty, loaded_id, 0) else loaded_id; @@ -6036,8 +6037,8 @@ const NavGen = struct { const is_non_null_id = blk: { if (is_pointer) { - if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { - const storage_class = self.spvStorageClass(operand_ty.ptrAddressSpace(mod)); + if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + const storage_class = self.spvStorageClass(operand_ty.ptrAddressSpace(zcu)); const bool_ptr_ty_id = try self.ptrType(Type.bool, storage_class); const tag_ptr_id = try self.accessChain(bool_ptr_ty_id, operand_id, &.{1}); break :blk try self.load(Type.bool, tag_ptr_id, .{}); @@ -6046,7 +6047,7 @@ const NavGen = struct { break :blk try self.load(Type.bool, operand_id, .{}); } - break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) + break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) try self.extractField(Type.bool, operand_id, 1) else // Optional representation is bool indicating whether the optional is set @@ -6071,16 +6072,16 @@ const NavGen = struct { } fn airIsErr(self: *NavGen, inst: Air.Inst.Index, pred: enum { is_err, is_non_err }) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand_id = try self.resolve(un_op); const err_union_ty = self.typeOf(un_op); - if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { + if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) { return try self.constBool(pred == .is_non_err, .direct); } - const payload_ty = err_union_ty.errorUnionPayload(mod); + const payload_ty = err_union_ty.errorUnionPayload(zcu); const eu_layout = self.errorUnionLayout(payload_ty); const bool_ty_id = try self.resolveType(Type.bool, .direct); @@ -6105,15 +6106,15 @@ const NavGen = struct { fn airUnwrapOptional(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand); const payload_ty = self.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return null; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null; - if (optional_ty.optionalReprIsPayload(mod)) { + if (optional_ty.optionalReprIsPayload(zcu)) { return operand_id; } @@ -6122,22 +6123,22 @@ const NavGen = struct { fn airUnwrapOptionalPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); - const optional_ty = operand_ty.childType(mod); - const payload_ty = optional_ty.optionalChild(mod); + const optional_ty = operand_ty.childType(zcu); + const payload_ty = optional_ty.optionalChild(zcu); const result_ty = self.typeOfIndex(inst); const result_ty_id = try self.resolveType(result_ty, .direct); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { // There is no 
payload, but we still need to return a valid pointer. // We can just return anything here, so just return a pointer to the operand. return try self.bitCast(result_ty, operand_ty, operand_id); } - if (optional_ty.optionalReprIsPayload(mod)) { + if (optional_ty.optionalReprIsPayload(zcu)) { // They are the same value. return try self.bitCast(result_ty, operand_ty, operand_id); } @@ -6147,18 +6148,18 @@ const NavGen = struct { fn airWrapOptional(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const payload_ty = self.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { return try self.constBool(true, .indirect); } const operand_id = try self.resolve(ty_op.operand); const optional_ty = self.typeOfIndex(inst); - if (optional_ty.optionalReprIsPayload(mod)) { + if (optional_ty.optionalReprIsPayload(zcu)) { return operand_id; } @@ -6170,7 +6171,7 @@ const NavGen = struct { fn airSwitchBr(self: *NavGen, inst: Air.Inst.Index) !void { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const target = self.getTarget(); const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const cond_ty = self.typeOf(pl_op.operand); @@ -6178,18 +6179,18 @@ const NavGen = struct { var cond_indirect = try self.convertToIndirect(cond_ty, cond); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); - const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) { + const cond_words: u32 = switch (cond_ty.zigTypeTag(zcu)) { .Bool, .ErrorSet => 1, .Int => blk: { - const bits = cond_ty.intInfo(mod).bits; + const bits = cond_ty.intInfo(zcu).bits; const backing_bits = self.backingIntBits(bits) orelse { return self.todo("implement composite int switch", .{}); }; break :blk if (backing_bits <= 32) 1 else 2; }, .Enum => blk: { - const int_ty = cond_ty.intTagType(mod); - const int_info = int_ty.intInfo(mod); + const int_ty = cond_ty.intTagType(zcu); + const int_info = int_ty.intInfo(zcu); const backing_bits = self.backingIntBits(int_info.bits) orelse { return self.todo("implement composite int switch", .{}); }; @@ -6200,7 +6201,7 @@ const NavGen = struct { break :blk target.ptrBitWidth() / 32; }, // TODO: Figure out which types apply here, and work around them as we can only do integers. 
- else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}), + else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(zcu))}), }; const num_cases = switch_br.data.cases_len; @@ -6255,14 +6256,14 @@ const NavGen = struct { for (items) |item| { const value = (try self.air.value(item, pt)) orelse unreachable; - const int_val: u64 = switch (cond_ty.zigTypeTag(mod)) { - .Bool, .Int => if (cond_ty.isSignedInt(mod)) @bitCast(value.toSignedInt(pt)) else value.toUnsignedInt(pt), + const int_val: u64 = switch (cond_ty.zigTypeTag(zcu)) { + .Bool, .Int => if (cond_ty.isSignedInt(zcu)) @bitCast(value.toSignedInt(zcu)) else value.toUnsignedInt(zcu), .Enum => blk: { // TODO: figure out of cond_ty is correct (something with enum literals) - break :blk (try value.intFromEnum(cond_ty, pt)).toUnsignedInt(pt); // TODO: composite integer constants + break :blk (try value.intFromEnum(cond_ty, pt)).toUnsignedInt(zcu); // TODO: composite integer constants }, - .ErrorSet => value.getErrorInt(mod), - .Pointer => value.toUnsignedInt(pt), + .ErrorSet => value.getErrorInt(zcu), + .Pointer => value.toUnsignedInt(zcu), else => unreachable, }; const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) { @@ -6343,9 +6344,9 @@ const NavGen = struct { fn airDbgStmt(self: *NavGen, inst: Air.Inst.Index) !void { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; - const path = mod.navFileScope(self.owner_nav).sub_file_path; + const path = zcu.navFileScope(self.owner_nav).sub_file_path; try self.func.body.emit(self.spv.gpa, .OpLine, .{ .file = try self.spv.resolveString(path), .line = self.base_line + dbg_stmt.line + 1, @@ -6354,12 +6355,12 @@ const NavGen = struct { } fn airDbgInlineBlock(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const inst_datas = self.air.instructions.items(.data); const extra = self.air.extraData(Air.DbgInlineBlock, inst_datas[@intFromEnum(inst)].ty_pl.payload); const old_base_line = self.base_line; defer self.base_line = old_base_line; - self.base_line = mod.navSrcLine(mod.funcInfo(extra.data.func).owner_nav); + self.base_line = zcu.navSrcLine(zcu.funcInfo(extra.data.func).owner_nav); return self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); } @@ -6371,7 +6372,7 @@ const NavGen = struct { } fn airAssembly(self: *NavGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.pt.zcu; + const zcu = self.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); @@ -6453,20 +6454,20 @@ const NavGen = struct { // TODO: Translate proper error locations. assert(as.errors.items.len != 0); assert(self.error_msg == null); - const src_loc = mod.navSrcLoc(self.owner_nav); - self.error_msg = try Zcu.ErrorMsg.create(mod.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); - const notes = try mod.gpa.alloc(Zcu.ErrorMsg, as.errors.items.len); + const src_loc = zcu.navSrcLoc(self.owner_nav); + self.error_msg = try Zcu.ErrorMsg.create(zcu.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); + const notes = try zcu.gpa.alloc(Zcu.ErrorMsg, as.errors.items.len); // Sub-scope to prevent `return error.CodegenFail` from running the errdefers. 
{ - errdefer mod.gpa.free(notes); + errdefer zcu.gpa.free(notes); var i: usize = 0; errdefer for (notes[0..i]) |*note| { - note.deinit(mod.gpa); + note.deinit(zcu.gpa); }; while (i < as.errors.items.len) : (i += 1) { - notes[i] = try Zcu.ErrorMsg.init(mod.gpa, src_loc, "{s}", .{as.errors.items[i].msg}); + notes[i] = try Zcu.ErrorMsg.init(zcu.gpa, src_loc, "{s}", .{as.errors.items[i].msg}); } } self.error_msg.?.notes = notes; @@ -6503,17 +6504,17 @@ const NavGen = struct { _ = modifier; const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); const callee_ty = self.typeOf(pl_op.operand); - const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { + const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) { .Fn => callee_ty, .Pointer => return self.fail("cannot call function pointers", .{}), else => unreachable, }; - const fn_info = mod.typeToFunc(zig_fn_ty).?; + const fn_info = zcu.typeToFunc(zig_fn_ty).?; const return_type = fn_info.return_type; const result_type_id = try self.resolveFnReturnType(Type.fromInterned(return_type)); @@ -6529,7 +6530,7 @@ const NavGen = struct { // before starting to emit OpFunctionCall instructions. Hence the // temporary params buffer. const arg_ty = self.typeOf(arg); - if (!arg_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!arg_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; const arg_id = try self.resolve(arg); params[n_params] = arg_id; @@ -6547,7 +6548,7 @@ const NavGen = struct { try self.func.body.emit(self.spv.gpa, .OpUnreachable, {}); } - if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(pt)) { + if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(zcu)) { return null; } @@ -6604,12 +6605,12 @@ const NavGen = struct { } fn typeOf(self: *NavGen, inst: Air.Inst.Ref) Type { - const mod = self.pt.zcu; - return self.air.typeOf(inst, &mod.intern_pool); + const zcu = self.pt.zcu; + return self.air.typeOf(inst, &zcu.intern_pool); } fn typeOfIndex(self: *NavGen, inst: Air.Inst.Index) Type { - const mod = self.pt.zcu; - return self.air.typeOfIndex(inst, &mod.intern_pool); + const zcu = self.pt.zcu; + return self.air.typeOfIndex(inst, &zcu.intern_pool); } }; -- cgit v1.2.3 From 80cd53d3bbf5cdc82715a4400592b40fb93cd5c9 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 11 Aug 2024 19:28:42 -0700 Subject: sema: clean-up `{union,struct}FieldAlignment` and friends My main gripes with this design were that it was incorrectly namespaced and that the naming was inconsistent and a bit wrong (`fooAlign` vs `fooAlignment`). This commit moves all of the logic out of `PerThread.zig` and onto the zcu + tid system that the previous couple of commits introduced. I've organized and merged the functions so that each is more specific to its own purpose. - `fieldAlignment` takes a struct or union type, an index, and a Zcu (or, for the Sema version, a Pt), and gives you the alignment of the field at that index. - `structFieldAlignment` takes the field type itself and provides the logic for handling special cases, such as externs. A design goal was to avoid using the word 'struct' in a function's name when the function also works for things that aren't structs, such as unions.
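For illustration, the new call shapes look roughly like this (a minimal sketch, not lifted verbatim from any call site in this diff; `ty`, `index`, `field_ty`, `explicit_align`, `layout`, `zcu`, and `pt` are assumed to already be in scope):

    const field_align = ty.fieldAlignment(index, zcu); // struct or union type; no type resolution, asserts it was already done
    const field_align_sema = try ty.fieldAlignmentSema(index, pt); // Sema variant; may perform type resolution
    // When only the field type is at hand (e.g. while computing a struct layout):
    const a = field_ty.structFieldAlignment(explicit_align, layout, zcu);
    const b = try field_ty.structFieldAlignmentSema(explicit_align, layout, pt);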
--- src/Sema.zig | 110 ++++++++++++++------------------- src/Sema/bitcast.zig | 6 +- src/Sema/comptime_ptr_access.zig | 4 +- src/Type.zig | 127 +++++++++++++++++++++++++-------------- src/Value.zig | 20 +++--- src/Zcu/PerThread.zig | 32 ---------- src/arch/aarch64/CodeGen.zig | 6 +- src/arch/aarch64/abi.zig | 4 +- src/arch/arm/CodeGen.zig | 6 +- src/arch/arm/abi.zig | 8 +-- src/arch/riscv64/CodeGen.zig | 6 +- src/arch/riscv64/abi.zig | 2 +- src/arch/sparc64/CodeGen.zig | 4 +- src/arch/wasm/CodeGen.zig | 4 +- src/arch/wasm/abi.zig | 2 +- src/arch/x86_64/CodeGen.zig | 30 ++++----- src/codegen/c.zig | 4 +- src/codegen/llvm.zig | 38 ++++-------- src/codegen/spirv.zig | 2 +- src/mutable_value.zig | 2 +- 20 files changed, 194 insertions(+), 223 deletions(-) (limited to 'src/codegen/spirv.zig') diff --git a/src/Sema.zig b/src/Sema.zig index 5e30315233..2ba3450966 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -4887,7 +4887,7 @@ fn validateStructInit( const i: u32 = @intCast(i_usize); if (opt_field_ptr.unwrap()) |field_ptr| { // Determine whether the value stored to this pointer is comptime-known. - const field_ty = struct_ty.structFieldType(i, zcu); + const field_ty = struct_ty.fieldType(i, zcu); if (try sema.typeHasOnePossibleValue(field_ty)) |opv| { field_values[i] = opv.toIntern(); continue; @@ -4999,7 +4999,7 @@ fn validateStructInit( var block_index = first_block_index; for (block.instructions.items[first_block_index..]) |cur_inst| { while (field_ptr_ref == .none and init_index < instrs.len) : (init_index += 1) { - const field_ty = struct_ty.structFieldType(field_indices[init_index], zcu); + const field_ty = struct_ty.fieldType(field_indices[init_index], zcu); if (try field_ty.onePossibleValue(pt)) |_| continue; field_ptr_ref = sema.inst_map.get(instrs[init_index]).?; } @@ -8430,7 +8430,7 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil try indexable_ty.resolveFields(pt); assert(indexable_ty.isIndexable(zcu)); // validated by a previous instruction if (indexable_ty.zigTypeTag(zcu) == .Struct) { - const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), zcu); + const elem_type = indexable_ty.fieldType(@intFromEnum(bin.rhs), zcu); return Air.internedToRef(elem_type.toIntern()); } else { const elem_type = indexable_ty.elemType2(zcu); @@ -14419,7 +14419,7 @@ fn analyzeTupleCat( var runtime_src: ?LazySrcLoc = null; var i: u32 = 0; while (i < lhs_len) : (i += 1) { - types[i] = lhs_ty.structFieldType(i, zcu).toIntern(); + types[i] = lhs_ty.fieldType(i, zcu).toIntern(); const default_val = lhs_ty.structFieldDefaultValue(i, zcu); values[i] = default_val.toIntern(); const operand_src = block.src(.{ .array_cat_lhs = .{ @@ -14433,7 +14433,7 @@ fn analyzeTupleCat( } i = 0; while (i < rhs_len) : (i += 1) { - types[i + lhs_len] = rhs_ty.structFieldType(i, zcu).toIntern(); + types[i + lhs_len] = rhs_ty.fieldType(i, zcu).toIntern(); const default_val = rhs_ty.structFieldDefaultValue(i, zcu); values[i + lhs_len] = default_val.toIntern(); const operand_src = block.src(.{ .array_cat_rhs = .{ @@ -14791,7 +14791,7 @@ fn analyzeTupleMul( const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; for (0..tuple_len) |i| { - types[i] = operand_ty.structFieldType(i, zcu).toIntern(); + types[i] = operand_ty.fieldType(i, zcu).toIntern(); values[i] = operand_ty.structFieldDefaultValue(i, zcu).toIntern(); const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = src_node, @@ -18466,13 +18466,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: 
Zir.Inst.Index) CompileError!Ai }; const alignment = switch (layout) { - .auto, .@"extern" => try Type.unionFieldNormalAlignmentAdvanced( - union_obj, - @intCast(field_index), - .sema, - pt.zcu, - pt.tid, - ), + .auto, .@"extern" => try ty.fieldAlignmentSema(field_index, pt), .@"packed" => .none, }; @@ -18691,12 +18685,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const default_val_ptr = try sema.optRefValue(opt_default_val); const alignment = switch (struct_type.layout) { .@"packed" => .none, - else => try field_ty.structFieldAlignmentAdvanced( + else => try field_ty.structFieldAlignmentSema( struct_type.fieldAlign(ip, field_index), struct_type.layout, - .sema, - pt.zcu, - pt.tid, + pt, ), }; @@ -20327,7 +20319,7 @@ fn zirStructInit( assert(field_inits[field_index] == .none); found_fields[field_index] = item.data.field_type; const uncoerced_init = try sema.resolveInst(item.data.init); - const field_ty = resolved_ty.structFieldType(field_index, zcu); + const field_ty = resolved_ty.fieldType(field_index, zcu); field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src); if (!is_packed) { try resolved_ty.resolveStructFieldInits(pt); @@ -20338,7 +20330,7 @@ fn zirStructInit( }); }; - if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, zcu), zcu)) { + if (!init_val.eql(default_value, resolved_ty.fieldType(field_index, zcu), zcu)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index); } } @@ -20799,7 +20791,7 @@ fn zirArrayInit( const arg = args[i + 1]; const resolved_arg = try sema.resolveInst(arg); const elem_ty = if (is_tuple) - array_ty.structFieldType(i, zcu) + array_ty.fieldType(i, zcu) else array_ty.elemType2(zcu); dest.* = try sema.coerce(block, elem_ty, resolved_arg, elem_src); @@ -20862,7 +20854,7 @@ fn zirArrayInit( if (is_tuple) { for (resolved_args, 0..) 
|arg, i| { const elem_ptr_ty = try pt.ptrTypeSema(.{ - .child = array_ty.structFieldType(i, zcu).toIntern(), + .child = array_ty.fieldType(i, zcu).toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern()); @@ -25234,7 +25226,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins }, .packed_offset = parent_ptr_info.packed_offset, }; - const field_ty = parent_ty.structFieldType(field_index, zcu); + const field_ty = parent_ty.fieldType(field_index, zcu); var actual_field_ptr_info: InternPool.Key.PtrType = .{ .child = field_ty.toIntern(), .flags = .{ @@ -25249,19 +25241,17 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins switch (parent_ty.containerLayout(zcu)) { .auto => { actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict( - if (zcu.typeToStruct(parent_ty)) |struct_obj| try field_ty.structFieldAlignmentAdvanced( - struct_obj.fieldAlign(ip, field_index), - struct_obj.layout, - .sema, - pt.zcu, - pt.tid, - ) else if (zcu.typeToUnion(parent_ty)) |union_obj| - try Type.unionFieldNormalAlignmentAdvanced( - union_obj, - field_index, - .sema, - pt.zcu, - pt.tid, + if (zcu.typeToStruct(parent_ty)) |struct_obj| + try field_ty.structFieldAlignmentSema( + struct_obj.fieldAlign(ip, field_index), + struct_obj.layout, + pt, + ) + else if (zcu.typeToUnion(parent_ty)) |union_obj| + try field_ty.unionFieldAlignmentSema( + union_obj.fieldAlign(ip, field_index), + union_obj.flagsUnordered(ip).layout, + pt, ) else actual_field_ptr_info.flags.alignment, @@ -28035,14 +28025,14 @@ fn fieldCallBind( } if (field_name.toUnsigned(ip)) |field_index| { if (field_index >= concrete_ty.structFieldCount(zcu)) break :find_field; - return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(field_index, zcu), field_index, object_ptr); + return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.fieldType(field_index, zcu), field_index, object_ptr); } } else { const max = concrete_ty.structFieldCount(zcu); for (0..max) |i_usize| { const i: u32 = @intCast(i_usize); if (field_name == concrete_ty.structFieldName(i, zcu).unwrap().?) { - return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(i, zcu), i, object_ptr); + return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.fieldType(i, zcu), i, object_ptr); } } } @@ -28340,12 +28330,10 @@ fn structFieldPtrByIndex( @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset))); } else { // Our alignment is capped at the field alignment. 
- const field_align = try Type.fromInterned(field_ty).structFieldAlignmentAdvanced( + const field_align = try Type.fromInterned(field_ty).structFieldAlignmentSema( struct_type.fieldAlign(ip, field_index), struct_type.layout, - .sema, - pt.zcu, - pt.tid, + pt, ); ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none) field_align @@ -28477,7 +28465,7 @@ fn tupleFieldValByIndex( ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; - const field_ty = tuple_ty.structFieldType(field_index, zcu); + const field_ty = tuple_ty.fieldType(field_index, zcu); if (tuple_ty.structFieldIsComptime(field_index, zcu)) try tuple_ty.resolveStructFieldInits(pt); @@ -28538,13 +28526,7 @@ fn unionFieldPtr( union_ptr_info.flags.alignment else try union_ty.abiAlignmentSema(pt); - const field_align = try Type.unionFieldNormalAlignmentAdvanced( - union_obj, - field_index, - .sema, - pt.zcu, - pt.tid, - ); + const field_align = try union_ty.fieldAlignmentSema(field_index, pt); break :blk union_align.min(field_align); } else union_ptr_info.flags.alignment, }, @@ -28921,7 +28903,7 @@ fn tupleFieldPtr( }); } - const field_ty = tuple_ty.structFieldType(field_index, zcu); + const field_ty = tuple_ty.fieldType(field_index, zcu); const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ @@ -28979,7 +28961,7 @@ fn tupleField( }); } - const field_ty = tuple_ty.structFieldType(field_index, zcu); + const field_ty = tuple_ty.fieldType(field_index, zcu); if (tuple_ty.structFieldIsComptime(field_index, zcu)) try tuple_ty.resolveStructFieldInits(pt); @@ -30615,9 +30597,9 @@ pub fn coerceInMemoryAllowed( const field_count = dest_ty.structFieldCount(zcu); for (0..field_count) |field_idx| { if (dest_ty.structFieldIsComptime(field_idx, zcu) != src_ty.structFieldIsComptime(field_idx, zcu)) break :tuple; - if (dest_ty.structFieldAlign(field_idx, zcu) != src_ty.structFieldAlign(field_idx, zcu)) break :tuple; - const dest_field_ty = dest_ty.structFieldType(field_idx, zcu); - const src_field_ty = src_ty.structFieldType(field_idx, zcu); + if (dest_ty.fieldAlignment(field_idx, zcu) != src_ty.fieldAlignment(field_idx, zcu)) break :tuple; + const dest_field_ty = dest_ty.fieldType(field_idx, zcu); + const src_field_ty = src_ty.fieldType(field_idx, zcu); const field = try sema.coerceInMemoryAllowed(block, dest_field_ty, src_field_ty, dest_is_mut, target, dest_src, src_src, null); if (field != .ok) break :tuple; } @@ -35073,7 +35055,7 @@ fn resolvePeerTypesInner( peer_field_val.* = null; continue; }; - peer_field_ty.* = ty.structFieldType(field_index, zcu); + peer_field_ty.* = ty.fieldType(field_index, zcu); peer_field_val.* = if (opt_val) |val| try val.fieldValue(pt, field_index) else null; } @@ -35095,7 +35077,7 @@ fn resolvePeerTypesInner( // Already-resolved types won't be referenced by the error so it's fine // to leave them undefined. 
const ty = opt_ty orelse continue; - peer_field_ty.* = ty.structFieldType(field_index, zcu); + peer_field_ty.* = ty.fieldType(field_index, zcu); } return .{ .field_error = .{ @@ -35220,9 +35202,9 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike { .elem_ty = Type.noreturn, }; if (!ty.isTuple(zcu)) return null; - const elem_ty = ty.structFieldType(0, zcu); + const elem_ty = ty.fieldType(0, zcu); for (1..field_count) |i| { - if (!ty.structFieldType(i, zcu).eql(elem_ty, zcu)) { + if (!ty.fieldType(i, zcu).eql(elem_ty, zcu)) { return null; } } @@ -35309,12 +35291,10 @@ pub fn resolveStructAlignment( const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt)) continue; - const field_align = try field_ty.structFieldAlignmentAdvanced( + const field_align = try field_ty.structFieldAlignmentSema( struct_type.fieldAlign(ip, i), struct_type.layout, - .sema, - pt.zcu, - pt.tid, + pt, ); alignment = alignment.maxStrict(field_align); } @@ -35375,12 +35355,10 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { }, else => return err, }; - field_align.* = try field_ty.structFieldAlignmentAdvanced( + field_align.* = try field_ty.structFieldAlignmentSema( struct_type.fieldAlign(ip, i), struct_type.layout, - .sema, - pt.zcu, - pt.tid, + pt, ); big_align = big_align.maxStrict(field_align.*); } diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig index 73aa53e5e6..04229532fc 100644 --- a/src/Sema/bitcast.zig +++ b/src/Sema/bitcast.zig @@ -542,7 +542,7 @@ const PackValueBits = struct { while (it.next()) |field_idx| { const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8; try pack.padding(want_bit_off - cur_bit_off); - const field_ty = ty.structFieldType(field_idx, zcu); + const field_ty = ty.fieldType(field_idx, zcu); elems[field_idx] = (try pack.get(field_ty)).toIntern(); cur_bit_off = want_bit_off + field_ty.bitSize(zcu); } @@ -552,7 +552,7 @@ const PackValueBits = struct { var cur_bit_off: u64 = ty.bitSize(zcu); var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip); while (it.next()) |field_idx| { - const field_ty = ty.structFieldType(field_idx, zcu); + const field_ty = ty.fieldType(field_idx, zcu); const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu); try pack.padding(cur_bit_off - want_bit_off); elems[field_idx] = (try pack.get(field_ty)).toIntern(); @@ -578,7 +578,7 @@ const PackValueBits = struct { // This is identical between LE and BE targets. const elems = try arena.alloc(InternPool.Index, ty.structFieldCount(zcu)); for (elems, 0..) 
|*elem, i| { - const field_ty = ty.structFieldType(i, zcu); + const field_ty = ty.fieldType(i, zcu); elem.* = (try pack.get(field_ty)).toIntern(); } return Value.fromInterned(try pt.intern(.{ .aggregate = .{ diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig index 8549e32d2b..893ea6db36 100644 --- a/src/Sema/comptime_ptr_access.zig +++ b/src/Sema/comptime_ptr_access.zig @@ -451,7 +451,7 @@ fn loadComptimePtrInner( .@"packed" => break, // let the bitcast logic handle this .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| { const start_off = cur_ty.structFieldOffset(field_idx, zcu); - const end_off = start_off + try cur_ty.structFieldType(field_idx, zcu).abiSizeSema(pt); + const end_off = start_off + try cur_ty.fieldType(field_idx, zcu).abiSizeSema(pt); if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) { cur_val = try cur_val.getElem(sema.pt, field_idx); cur_offset -= start_off; @@ -873,7 +873,7 @@ fn prepareComptimePtrStore( .@"packed" => break, // let the bitcast logic handle this .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| { const start_off = cur_ty.structFieldOffset(field_idx, zcu); - const end_off = start_off + try cur_ty.structFieldType(field_idx, zcu).abiSizeSema(pt); + const end_off = start_off + try cur_ty.fieldType(field_idx, zcu).abiSizeSema(pt); if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) { cur_val = try cur_val.elem(pt, sema.arena, field_idx); cur_offset -= start_off; diff --git a/src/Type.zig b/src/Type.zig index 5a47fa9527..16806af49d 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -3191,8 +3191,8 @@ pub fn structFieldCount(ty: Type, zcu: *const Zcu) u32 { }; } -/// Supports structs and unions. -pub fn structFieldType(ty: Type, index: usize, zcu: *const Zcu) Type { +/// Returns the field type. Supports structs and unions. +pub fn fieldType(ty: Type, index: usize, zcu: *const Zcu) Type { const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { .struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]), @@ -3205,17 +3205,26 @@ pub fn structFieldType(ty: Type, index: usize, zcu: *const Zcu) Type { }; } -pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment { - return ty.structFieldAlignAdvanced(index, .normal, zcu, {}) catch unreachable; +pub fn fieldAlignment(ty: Type, index: usize, zcu: *Zcu) Alignment { + return ty.fieldAlignmentInner(index, .normal, zcu, {}) catch unreachable; +} + +pub fn fieldAlignmentSema(ty: Type, index: usize, pt: Zcu.PerThread) SemaError!Alignment { + return try ty.fieldAlignmentInner(index, .sema, pt.zcu, pt.tid); } -pub fn structFieldAlignAdvanced( +/// Returns the field alignment. Supports structs and unions. +/// If `strat` is `.sema`, may perform type resolution. +/// Asserts the layout is not packed. +/// +/// Provide the struct field as the `ty`. 
+pub fn fieldAlignmentInner( ty: Type, index: usize, comptime strat: ResolveStrat, zcu: *Zcu, tid: strat.Tid(), -) !Alignment { +) SemaError!Alignment { const ip = &zcu.intern_pool; switch (ip.indexToKey(ty.toIntern())) { .struct_type => { @@ -3223,13 +3232,7 @@ pub fn structFieldAlignAdvanced( assert(struct_type.layout != .@"packed"); const explicit_align = struct_type.fieldAlign(ip, index); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]); - return field_ty.structFieldAlignmentAdvanced( - explicit_align, - struct_type.layout, - strat, - zcu, - tid, - ); + return field_ty.structFieldAlignmentInner(explicit_align, struct_type.layout, strat, zcu, tid); }, .anon_struct_type => |anon_struct| { return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentInner( @@ -3240,28 +3243,62 @@ pub fn structFieldAlignAdvanced( }, .union_type => { const union_obj = ip.loadUnionType(ty.toIntern()); - return unionFieldNormalAlignmentAdvanced( - union_obj, - @intCast(index), - strat, - zcu, - tid, - ); + const layout = union_obj.flagsUnordered(ip).layout; + assert(layout != .@"packed"); + const explicit_align = union_obj.fieldAlign(ip, index); + const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[index]); + return field_ty.unionFieldAlignmentInner(explicit_align, layout, strat, zcu, tid); }, else => unreachable, } } -/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed. -/// If `strat` is `.sema`, may perform type resolution. -pub fn structFieldAlignmentAdvanced( +/// Returns the alignment of a non-packed struct field. Assert the layout is not packed. +/// +/// Asserts that all resolution needed was done. +pub fn structFieldAlignment( field_ty: Type, explicit_alignment: InternPool.Alignment, layout: std.builtin.Type.ContainerLayout, + zcu: *Zcu, +) Alignment { + return field_ty.structFieldAlignmentInner( + explicit_alignment, + layout, + .normal, + zcu, + {}, + ) catch unreachable; +} + +/// Returns the alignment of a non-packed struct field. Assert the layout is not packed. +/// May do type resolution when needed. +/// Asserts that all resolution needed was done. +pub fn structFieldAlignmentSema( + field_ty: Type, + explicit_alignment: InternPool.Alignment, + layout: std.builtin.Type.ContainerLayout, + pt: Zcu.PerThread, +) SemaError!Alignment { + return try field_ty.structFieldAlignmentInner( + explicit_alignment, + layout, + .sema, + pt.zcu, + pt.tid, + ); +} + +/// Returns the alignment of a non-packed struct field. Asserts the layout is not packed. +/// If `strat` is `.sema`, may perform type resolution. +pub fn structFieldAlignmentInner( + field_ty: Type, + explicit_alignment: Alignment, + layout: std.builtin.Type.ContainerLayout, comptime strat: Type.ResolveStrat, zcu: *Zcu, tid: strat.Tid(), -) Zcu.SemaError!InternPool.Alignment { +) SemaError!Alignment { assert(layout != .@"packed"); if (explicit_alignment != .none) return explicit_alignment; const ty_abi_align = (try field_ty.abiAlignmentInner( @@ -3281,29 +3318,31 @@ pub fn structFieldAlignmentAdvanced( return ty_abi_align; } -/// Returns the field alignment of a non-packed union. Asserts the layout is not packed. 
-pub fn unionFieldNormalAlignment( - loaded_union: InternPool.LoadedUnionType, - field_index: u32, - zcu: *Zcu, -) InternPool.Alignment { - return unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal, zcu, {}) catch unreachable; +pub fn unionFieldAlignmentSema( + field_ty: Type, + explicit_alignment: Alignment, + layout: std.builtin.Type.ContainerLayout, + pt: Zcu.PerThread, +) SemaError!Alignment { + return field_ty.unionFieldAlignmentInner( + explicit_alignment, + layout, + .sema, + pt.zcu, + pt.tid, + ); } -/// Returns the field alignment of a non-packed union. Asserts the layout is not packed. -/// If `strat` is `.sema`, may perform type resolution. -pub fn unionFieldNormalAlignmentAdvanced( - loaded_union: InternPool.LoadedUnionType, - field_index: u32, +pub fn unionFieldAlignmentInner( + field_ty: Type, + explicit_alignment: Alignment, + layout: std.builtin.Type.ContainerLayout, comptime strat: Type.ResolveStrat, zcu: *Zcu, tid: strat.Tid(), -) Zcu.SemaError!InternPool.Alignment { - const ip = &zcu.intern_pool; - assert(loaded_union.flagsUnordered(ip).layout != .@"packed"); - const field_align = loaded_union.fieldAlign(ip, field_index); - if (field_align != .none) return field_align; - const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); +) SemaError!Alignment { + assert(layout != .@"packed"); + if (explicit_alignment != .none) return explicit_alignment; if (field_ty.isNoReturn(zcu)) return .none; return (try field_ty.abiAlignmentInner(strat.toLazy(), zcu, tid)).scalar; } @@ -3608,12 +3647,12 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: const zcu = pt.zcu; const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu); - const field_ty = struct_ty.structFieldType(field_idx, zcu); + const field_ty = struct_ty.fieldType(field_idx, zcu); var bit_offset: u16 = 0; var running_bits: u16 = 0; for (0..struct_ty.structFieldCount(zcu)) |i| { - const f_ty = struct_ty.structFieldType(i, zcu); + const f_ty = struct_ty.fieldType(i, zcu); if (i == field_idx) { bit_offset = running_bits; } diff --git a/src/Value.zig b/src/Value.zig index 7dfb83ff65..fd9fc5d51a 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -1414,7 +1414,7 @@ pub fn fieldValue(val: Value, pt: Zcu.PerThread, index: usize) !Value { const zcu = pt.zcu; return switch (zcu.intern_pool.indexToKey(val.toIntern())) { .undef => |ty| Value.fromInterned(try pt.intern(.{ - .undef = Type.fromInterned(ty).structFieldType(index, zcu).toIntern(), + .undef = Type.fromInterned(ty).fieldType(index, zcu).toIntern(), })), .aggregate => |aggregate| Value.fromInterned(switch (aggregate.storage) { .bytes => |bytes| try pt.intern(.{ .int = .{ @@ -3810,9 +3810,9 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value { // `field_align` may be `.none` to represent the natural alignment of `field_ty`, but is not necessarily. const field_ty: Type, const field_align: InternPool.Alignment = switch (aggregate_ty.zigTypeTag(zcu)) { .Struct => field: { - const field_ty = aggregate_ty.structFieldType(field_idx, zcu); + const field_ty = aggregate_ty.fieldType(field_idx, zcu); switch (aggregate_ty.containerLayout(zcu)) { - .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), .sema, zcu, pt.tid) }, + .auto => break :field .{ field_ty, try aggregate_ty.fieldAlignmentSema(field_idx, pt) }, .@"extern" => { // Well-defined layout, so just offset the pointer appropriately. 
const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu); @@ -3863,7 +3863,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value { const union_obj = zcu.typeToUnion(aggregate_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]); switch (aggregate_ty.containerLayout(zcu)) { - .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), .sema, zcu, pt.tid) }, + .auto => break :field .{ field_ty, try aggregate_ty.fieldAlignmentSema(field_idx, pt) }, .@"extern" => { // Point to the same address. const result_ty = try pt.ptrTypeSema(info: { @@ -4198,14 +4198,14 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh const base_ptr_ty = base_ptr.typeOf(zcu); const agg_ty = base_ptr_ty.childType(zcu); const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) { - .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced( - @intCast(field.index), + .Struct => .{ agg_ty.fieldType(field.index, zcu), try agg_ty.fieldAlignmentInner( + field.index, if (have_sema) .sema else .normal, pt.zcu, if (have_sema) pt.tid else {}, ) }, - .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced( - @intCast(field.index), + .Union => .{ agg_ty.unionFieldTypeByIndex(field.index, zcu), try agg_ty.fieldAlignmentInner( + field.index, if (have_sema) .sema else .normal, pt.zcu, if (have_sema) pt.tid else {}, @@ -4344,7 +4344,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh .Struct => switch (cur_ty.containerLayout(zcu)) { .auto, .@"packed" => break, .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| { - const field_ty = cur_ty.structFieldType(field_idx, zcu); + const field_ty = cur_ty.fieldType(field_idx, zcu); const start_off = cur_ty.structFieldOffset(field_idx, zcu); const end_off = start_off + field_ty.abiSize(zcu); if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) { @@ -4401,7 +4401,7 @@ pub fn resolveLazy( .u64, .i64, .big_int => return val, .lazy_align, .lazy_size => return pt.intValue( Type.fromInterned(int.ty), - (try val.getUnsignedIntInner(.sema, pt.zcu, pt.tid)).?, + try val.toUnsignedIntSema(pt), ), }, .slice => |slice| { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index ab9e6bbabb..29de95038c 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -3040,38 +3040,6 @@ pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 { } } -/// Returns 0 if the union is represented with 0 bits at runtime. -pub fn unionAbiAlignment(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) InternPool.Alignment { - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag(); - var max_align: InternPool.Alignment = .none; - if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(zcu); - for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| { - if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue; - - const field_align = zcu.unionFieldNormalAlignment(loaded_union, @intCast(field_index)); - max_align = max_align.max(field_align); - } - return max_align; -} - -/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed. 
-pub fn structFieldAlignment( - pt: Zcu.PerThread, - explicit_alignment: InternPool.Alignment, - field_ty: Type, - layout: std.builtin.Type.ContainerLayout, -) InternPool.Alignment { - return field_ty.structFieldAlignmentAdvanced( - explicit_alignment, - layout, - .normal, - pt.zcu, - {}, - ) catch unreachable; -} - /// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets /// into the packed struct InternPool data rather than computing this on the /// fly, however it was found to perform worse when measured on real world diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index f8d998ebe5..844a3e584a 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4144,7 +4144,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const zcu = pt.zcu; const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); - const struct_field_ty = struct_ty.structFieldType(index, zcu); + const struct_field_ty = struct_ty.fieldType(index, zcu); const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu))); switch (mcv) { @@ -5473,10 +5473,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(rwo.reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0, zcu); + const wrapped_ty = ty.fieldType(0, zcu); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); - const overflow_bit_ty = ty.structFieldType(1, zcu); + const overflow_bit_ty = ty.fieldType(1, zcu); const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, zcu))); const raw_cond_reg = try self.register_manager.allocReg(null, gp); const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty); diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig index ef3f9e7acd..b3926b8cc1 100644 --- a/src/arch/aarch64/abi.zig +++ b/src/arch/aarch64/abi.zig @@ -95,7 +95,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u8 { var count: u8 = 0; var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i, zcu); + const field_ty = ty.fieldType(i, zcu); const field_count = countFloats(field_ty, zcu, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; @@ -130,7 +130,7 @@ pub fn getFloatArrayType(ty: Type, zcu: *Zcu) ?Type { const fields_len = ty.structFieldCount(zcu); var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i, zcu); + const field_ty = ty.fieldType(i, zcu); if (getFloatArrayType(field_ty, zcu)) |some| return some; } return null; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index cea6d7d43e..6549868fa5 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2926,7 +2926,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, zcu)); - const struct_field_ty = struct_ty.structFieldType(index, zcu); + const struct_field_ty = struct_ty.fieldType(index, zcu); switch (mcv) { .dead, .unreach => unreachable, @@ -5434,10 +5434,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(reg); defer if (reg_lock) |locked_reg| 
self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0, zcu); + const wrapped_ty = ty.fieldType(0, zcu); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg }); - const overflow_bit_ty = ty.structFieldType(1, zcu); + const overflow_bit_ty = ty.fieldType(1, zcu); const overflow_bit_offset: u32 = @intCast(ty.structFieldOffset(1, zcu)); const cond_reg = try self.register_manager.allocReg(null, gp); diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index ff3c40cb09..718350164c 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -44,8 +44,8 @@ pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class { const fields = ty.structFieldCount(zcu); var i: u32 = 0; while (i < fields) : (i += 1) { - const field_ty = ty.structFieldType(i, zcu); - const field_alignment = ty.structFieldAlign(i, zcu); + const field_ty = ty.fieldType(i, zcu); + const field_alignment = ty.fieldAlignment(i, zcu); const field_size = field_ty.bitSize(zcu); if (field_size > 32 or field_alignment.compare(.gt, .@"32")) { return Class.arrSize(bit_size, 64); @@ -66,7 +66,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class { for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| { if (Type.fromInterned(field_ty).bitSize(zcu) > 32 or - Type.unionFieldNormalAlignment(union_obj, @intCast(field_index), zcu).compare(.gt, .@"32")) + ty.fieldAlignment(field_index, zcu).compare(.gt, .@"32")) { return Class.arrSize(bit_size, 64); } @@ -141,7 +141,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u32 { var count: u32 = 0; var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i, zcu); + const field_ty = ty.fieldType(i, zcu); const field_count = countFloats(field_ty, zcu, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 2c6535ac4f..7028844779 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -4576,7 +4576,7 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void { const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { const src_mcv = try func.resolveInst(operand); const struct_ty = func.typeOf(operand); - const field_ty = struct_ty.structFieldType(index, zcu); + const field_ty = struct_ty.fieldType(index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none; const field_off: u32 = switch (struct_ty.containerLayout(zcu)) { @@ -7882,7 +7882,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void { const elem_i: u32 = @intCast(elem_i_usize); if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i, zcu); + const elem_ty = result_ty.fieldType(elem_i, zcu); const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu)); if (elem_bit_size > 64) { return func.fail( @@ -7916,7 +7916,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void { } else for (elements, 0..) 
|elem, elem_i| { if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i, zcu); + const elem_ty = result_ty.fieldType(elem_i, zcu); const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu)); const elem_mcv = try func.resolveInst(elem); try func.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, elem_mcv); diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 36e72dd8da..5e8f57cc0b 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -26,7 +26,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class { var any_fp = false; var field_count: usize = 0; for (0..ty.structFieldCount(zcu)) |field_index| { - const field_ty = ty.structFieldType(field_index, zcu); + const field_ty = ty.fieldType(field_index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; if (field_ty.isRuntimeFloat()) any_fp = true diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 0d59814fda..a5bd92a9d6 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -3980,10 +3980,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(rwo.reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0, zcu); + const wrapped_ty = ty.fieldType(0, zcu); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); - const overflow_bit_ty = ty.structFieldType(1, zcu); + const overflow_bit_ty = ty.fieldType(1, zcu); const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, zcu))); const cond_reg = try self.register_manager.allocReg(null, gp); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index e728a99d88..49732a387b 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3954,7 +3954,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const struct_ty = func.typeOf(struct_field.struct_operand); const operand = try func.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index, zcu); + const field_ty = struct_ty.fieldType(field_index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); const result: WValue = switch (struct_ty.containerLayout(zcu)) { @@ -5378,7 +5378,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { for (elements, 0..) 
|elem, elem_index| { if (try result_ty.structFieldValueComptime(pt, elem_index) != null) continue; - const elem_ty = result_ty.structFieldType(elem_index, zcu); + const elem_ty = result_ty.fieldType(elem_index, zcu); const field_offset = result_ty.structFieldOffset(elem_index, zcu); _ = try func.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify); prev_field_offset = field_offset; diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index 9a5bbb0ca2..b6e66ad85e 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -108,7 +108,7 @@ pub fn scalarType(ty: Type, zcu: *Zcu) Type { return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), zcu); } else { assert(ty.structFieldCount(zcu) == 1); - return scalarType(ty.structFieldType(0, zcu), zcu); + return scalarType(ty.fieldType(0, zcu), zcu); } }, .Union => { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 94814b70f6..68e3936d8e 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4352,14 +4352,14 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = frame_index }, @intCast(tuple_ty.structFieldOffset(1, zcu)), - tuple_ty.structFieldType(1, zcu), + tuple_ty.fieldType(1, zcu), .{ .eflags = cc }, .{}, ); try self.genSetMem( .{ .frame = frame_index }, @intCast(tuple_ty.structFieldOffset(0, zcu)), - tuple_ty.structFieldType(0, zcu), + tuple_ty.fieldType(0, zcu), partial_mcv, .{}, ); @@ -4392,7 +4392,7 @@ fn genSetFrameTruncatedOverflowCompare( }; defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const ty = tuple_ty.structFieldType(0, zcu); + const ty = tuple_ty.fieldType(0, zcu); const int_info = ty.intInfo(zcu); const hi_bits = (int_info.bits - 1) % 64 + 1; @@ -4450,7 +4450,7 @@ fn genSetFrameTruncatedOverflowCompare( try self.genSetMem( .{ .frame = frame_index }, @intCast(tuple_ty.structFieldOffset(1, zcu)), - tuple_ty.structFieldType(1, zcu), + tuple_ty.fieldType(1, zcu), if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne }, .{}, ); @@ -4637,7 +4637,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = dst_mcv.load_frame.index }, @intCast(tuple_ty.structFieldOffset(0, zcu)), - tuple_ty.structFieldType(0, zcu), + tuple_ty.fieldType(0, zcu), result, .{}, ); @@ -4649,7 +4649,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = dst_mcv.load_frame.index }, @intCast(tuple_ty.structFieldOffset(1, zcu)), - tuple_ty.structFieldType(1, zcu), + tuple_ty.fieldType(1, zcu), .{ .eflags = .ne }, .{}, ); @@ -4761,14 +4761,14 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = dst_mcv.load_frame.index }, @intCast(tuple_ty.structFieldOffset(0, zcu)), - tuple_ty.structFieldType(0, zcu), + tuple_ty.fieldType(0, zcu), .{ .register_pair = .{ .rax, .rdx } }, .{}, ); try self.genSetMem( .{ .frame = dst_mcv.load_frame.index }, @intCast(tuple_ty.structFieldOffset(1, zcu)), - tuple_ty.structFieldType(1, zcu), + tuple_ty.fieldType(1, zcu), .{ .register = tmp_regs[1] }, .{}, ); @@ -4816,14 +4816,14 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = frame_index }, @intCast(tuple_ty.structFieldOffset(0, zcu)), - tuple_ty.structFieldType(0, zcu), + tuple_ty.fieldType(0, zcu), partial_mcv, .{}, ); try self.genSetMem( .{ .frame = frame_index }, 
                 @intCast(tuple_ty.structFieldOffset(1, zcu)),
-                tuple_ty.structFieldType(1, zcu),
+                tuple_ty.fieldType(1, zcu),
                 .{ .immediate = 0 }, // cc being set is impossible
                 .{},
             );
@@ -8143,7 +8143,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
         const container_ty = self.typeOf(operand);
         const container_rc = self.regClassForType(container_ty);
-        const field_ty = container_ty.structFieldType(index, zcu);
+        const field_ty = container_ty.fieldType(index, zcu);
         if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
         const field_rc = self.regClassForType(field_ty);
         const field_is_gp = field_rc.supersetOf(abi.RegisterClass.gp);
@@ -15273,14 +15273,14 @@ fn genSetMem(
             try self.genSetMem(
                 base,
                 disp + @as(i32, @intCast(ty.structFieldOffset(0, zcu))),
-                ty.structFieldType(0, zcu),
+                ty.fieldType(0, zcu),
                 .{ .register = ro.reg },
                 opts,
             );
             try self.genSetMem(
                 base,
                 disp + @as(i32, @intCast(ty.structFieldOffset(1, zcu))),
-                ty.structFieldType(1, zcu),
+                ty.fieldType(1, zcu),
                 .{ .eflags = ro.eflags },
                 opts,
             );
@@ -18150,7 +18150,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                 const elem_i: u32 = @intCast(elem_i_usize);
                 if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;
-                const elem_ty = result_ty.structFieldType(elem_i, zcu);
+                const elem_ty = result_ty.fieldType(elem_i, zcu);
                 const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu));
                 if (elem_bit_size > 64) {
                     return self.fail(
@@ -18232,7 +18232,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
             } else for (elements, 0..) |elem, elem_i| {
                 if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;
-                const elem_ty = result_ty.structFieldType(elem_i, zcu);
+                const elem_ty = result_ty.fieldType(elem_i, zcu);
                 const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu));
                 const elem_mcv = try self.resolveInst(elem);
                 const mat_elem_mcv = switch (elem_mcv) {
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index d188435c3e..754286d80b 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -7206,7 +7206,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
             var empty = true;
             for (0..elements.len) |field_index| {
                 if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
-                const field_ty = inst_ty.structFieldType(field_index, zcu);
+                const field_ty = inst_ty.fieldType(field_index, zcu);
                 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
                 if (!empty) {
@@ -7219,7 +7219,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
             empty = true;
             for (resolved_elements, 0..) |element, field_index| {
                 if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
-                const field_ty = inst_ty.structFieldType(field_index, zcu);
+                const field_ty = inst_ty.fieldType(field_index, zcu);
                 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
                 if (!empty) try writer.writeAll(", ");
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 5256442561..2d989f81e2 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -2496,16 +2496,10 @@ pub const Object = struct {
                 const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
                 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
                 const field_size = field_ty.abiSize(zcu);
-                const field_align = pt.structFieldAlignment(
-                    struct_type.fieldAlign(ip, field_index),
-                    field_ty,
-                    struct_type.layout,
-                );
+                const field_align = ty.fieldAlignment(field_index, zcu);
                 const field_offset = ty.structFieldOffset(field_index, zcu);
-
                 const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
                     try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
-
                 fields.appendAssumeCapacity(try o.builder.debugMemberType(
                     try o.builder.metadataString(field_name.toSlice(ip)),
                     .none, // File
@@ -2598,7 +2592,7 @@ pub const Object = struct {
                 const field_size = Type.fromInterned(field_ty).abiSize(zcu);
                 const field_align: InternPool.Alignment = switch (union_type.flagsUnordered(ip).layout) {
                     .@"packed" => .none,
-                    .auto, .@"extern" => Type.unionFieldNormalAlignment(union_type, @intCast(field_index), zcu),
+                    .auto, .@"extern" => ty.fieldAlignment(field_index, zcu),
                 };
                 const field_name = tag_type.names.get(ip)[field_index];
@@ -3315,11 +3309,7 @@ pub const Object = struct {
             var it = struct_type.iterateRuntimeOrder(ip);
             while (it.next()) |field_index| {
                 const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
-                const field_align = pt.structFieldAlignment(
-                    struct_type.fieldAlign(ip, field_index),
-                    field_ty,
-                    struct_type.layout,
-                );
+                const field_align = t.fieldAlignment(field_index, zcu);
                 const field_ty_align = field_ty.abiAlignment(zcu);
                 if (field_align.compare(.lt, field_ty_align)) struct_kind = .@"packed";
                 big_align = big_align.max(field_align);
@@ -4127,11 +4117,7 @@ pub const Object = struct {
         var field_it = struct_type.iterateRuntimeOrder(ip);
         while (field_it.next()) |field_index| {
             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
-            const field_align = pt.structFieldAlignment(
-                struct_type.fieldAlign(ip, field_index),
-                field_ty,
-                struct_type.layout,
-            );
+            const field_align = ty.fieldAlignment(field_index, zcu);
             big_align = big_align.max(field_align);
             const prev_offset = offset;
             offset = field_align.forward(offset);
@@ -6528,7 +6514,7 @@ pub const FuncGen = struct {
         const struct_ty = self.typeOf(struct_field.struct_operand);
         const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
         const field_index = struct_field.field_index;
-        const field_ty = struct_ty.structFieldType(field_index, zcu);
+        const field_ty = struct_ty.fieldType(field_index, zcu);
         if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;
         if (!isByRef(struct_ty, zcu)) {
@@ -6590,7 +6576,7 @@ pub const FuncGen = struct {
             const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?;
             const field_ptr = try self.wip.gepStruct(struct_llvm_ty, struct_llvm_val, llvm_field_index, "");
-            const alignment = struct_ty.structFieldAlign(field_index, zcu);
+            const alignment = struct_ty.fieldAlignment(field_index, zcu);
             const field_ptr_ty = try pt.ptrType(.{
                 .child = field_ty.toIntern(),
                 .flags = .{ .alignment = alignment },
@@ -7471,8 +7457,8 @@ pub const FuncGen = struct {
         assert(self.err_ret_trace != .none);
         const field_ptr = try self.wip.gepStruct(struct_llvm_ty, self.err_ret_trace, llvm_field_index, "");
-        const field_alignment = struct_ty.structFieldAlign(field_index, zcu);
-        const field_ty = struct_ty.structFieldType(field_index, zcu);
+        const field_alignment = struct_ty.fieldAlignment(field_index, zcu);
+        const field_ty = struct_ty.fieldType(field_index, zcu);
         const field_ptr_ty = try pt.ptrType(.{
             .child = field_ty.toIntern(),
             .flags = .{ .alignment = field_alignment },
@@ -10080,7 +10066,7 @@ pub const FuncGen = struct {
             const field_ptr_ty = try pt.ptrType(.{
                 .child = self.typeOf(elem).toIntern(),
                 .flags = .{
-                    .alignment = result_ty.structFieldAlign(i, zcu),
+                    .alignment = result_ty.fieldAlignment(i, zcu),
                 },
             });
             try self.store(field_ptr, field_ptr_ty, llvm_elem, .none);
@@ -10185,7 +10171,7 @@ pub const FuncGen = struct {
         const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
         const field_llvm_ty = try o.lowerType(field_ty);
         const field_size = field_ty.abiSize(zcu);
-        const field_align = Type.unionFieldNormalAlignment(union_obj, extra.field_index, zcu);
+        const field_align = union_ty.fieldAlignment(extra.field_index, zcu);
         const llvm_usize = try o.lowerType(Type.usize);
         const usize_zero = try o.builder.intValue(llvm_usize, 0);
@@ -11188,7 +11174,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
             var types_len: usize = 0;
             var types: [8]Builder.Type = undefined;
             for (0..return_type.structFieldCount(zcu)) |field_index| {
-                const field_ty = return_type.structFieldType(field_index, zcu);
+                const field_ty = return_type.fieldType(field_index, zcu);
                 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
                 types[types_len] = try o.lowerType(field_ty);
                 types_len += 1;
@@ -11444,7 +11430,7 @@ const ParamTypeIterator = struct {
                 .fields => {
                     it.types_len = 0;
                     for (0..ty.structFieldCount(zcu)) |field_index| {
-                        const field_ty = ty.structFieldType(field_index, zcu);
+                        const field_ty = ty.fieldType(field_index, zcu);
                         if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
                         it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
                         it.types_len += 1;
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 44b48efc43..adab565508 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -5148,7 +5148,7 @@ const NavGen = struct {
         const object_ty = self.typeOf(struct_field.struct_operand);
         const object_id = try self.resolve(struct_field.struct_operand);
         const field_index = struct_field.field_index;
-        const field_ty = object_ty.structFieldType(field_index, zcu);
+        const field_ty = object_ty.fieldType(field_index, zcu);
         if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null;
diff --git a/src/mutable_value.zig b/src/mutable_value.zig
index 5fe43105f4..9fcac259df 100644
--- a/src/mutable_value.zig
+++ b/src/mutable_value.zig
@@ -223,7 +223,7 @@ pub const MutableValue = union(enum) {
                     @memset(elems[0..@intCast(len_no_sent)], .{ .interned = undef_elem });
                 },
                 .Struct => for (elems[0..@intCast(len_no_sent)], 0..) |*mut_elem, i| {
-                    const field_ty = ty.structFieldType(i, zcu).toIntern();
+                    const field_ty = ty.fieldType(i, zcu).toIntern();
                     mut_elem.* = .{ .interned = try pt.intern(.{ .undef = field_ty }) };
                 },
                 else => unreachable,
--
cgit v1.2.3