From b4bb64ce78bf2dee9437f366a362ef4d8c77b204 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 11 Aug 2024 03:14:12 -0700 Subject: sema: rework type resolution to use Zcu when possible --- src/codegen/c.zig | 237 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 121 insertions(+), 116 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 397cb071b6..d188435c3e 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -334,7 +334,7 @@ pub const Function = struct { const writer = f.object.codeHeaderWriter(); const decl_c_value = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt)), + .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt.zcu)), }); const gpa = f.object.dg.gpa; try f.allocs.put(gpa, decl_c_value.new_local, false); @@ -372,7 +372,7 @@ pub const Function = struct { fn allocLocal(f: *Function, inst: ?Air.Inst.Index, ty: Type) !CValue { return f.allocAlignedLocal(inst, .{ .ctype = try f.ctypeFromType(ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt)), + .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt.zcu)), }); } @@ -648,7 +648,7 @@ pub const DeclGen = struct { // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. const ptr_ty = Type.fromInterned(uav.orig_ty); - if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(pt)) { + if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(zcu)) { return dg.writeCValue(writer, .{ .undef = ptr_ty }); } @@ -688,7 +688,7 @@ pub const DeclGen = struct { // alignment. If there is already an entry, keep the greater alignment. const explicit_alignment = ptr_type.flags.alignment; if (explicit_alignment != .none) { - const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(pt); + const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(zcu); if (explicit_alignment.order(abi_alignment).compare(.gt)) { const aligned_gop = try dg.aligned_uavs.getOrPut(dg.gpa, uav.val); aligned_gop.value_ptr.* = if (aligned_gop.found_existing) @@ -722,7 +722,7 @@ pub const DeclGen = struct { // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. const nav_ty = Type.fromInterned(ip.getNav(owner_nav).typeOf(ip)); const ptr_ty = try pt.navPtrType(owner_nav); - if (!nav_ty.isFnOrHasRuntimeBits(pt)) { + if (!nav_ty.isFnOrHasRuntimeBits(zcu)) { return dg.writeCValue(writer, .{ .undef = ptr_ty }); } @@ -805,7 +805,7 @@ pub const DeclGen = struct { } }, - .elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(pt)) { + .elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(zcu)) { // Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer. 
const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete); try writer.writeByte('('); @@ -923,7 +923,7 @@ pub const DeclGen = struct { try writer.writeAll("(("); try dg.renderCType(writer, ctype); try writer.print("){x})", .{try dg.fmtIntLiteral( - try pt.intValue(Type.usize, val.toUnsignedInt(pt)), + try pt.intValue(Type.usize, val.toUnsignedInt(zcu)), .Other, )}); }, @@ -970,7 +970,7 @@ pub const DeclGen = struct { .enum_tag => |enum_tag| try dg.renderValue(writer, Value.fromInterned(enum_tag.int), location), .float => { const bits = ty.floatBits(target.*); - const f128_val = val.toFloat(f128, pt); + const f128_val = val.toFloat(f128, zcu); // All unsigned ints matching float types are pre-allocated. const repr_ty = pt.intType(.unsigned, bits) catch unreachable; @@ -984,10 +984,10 @@ pub const DeclGen = struct { }; switch (bits) { - 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, pt)))), - 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, pt)))), - 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, pt)))), - 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, pt)))), + 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, zcu)))), + 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, zcu)))), + 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, zcu)))), + 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, zcu)))), 128 => repr_val_big.set(@as(u128, @bitCast(f128_val))), else => unreachable, } @@ -998,10 +998,10 @@ pub const DeclGen = struct { try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { - 16 => try writer.print("{x}", .{val.toFloat(f16, pt)}), - 32 => try writer.print("{x}", .{val.toFloat(f32, pt)}), - 64 => try writer.print("{x}", .{val.toFloat(f64, pt)}), - 80 => try writer.print("{x}", .{val.toFloat(f80, pt)}), + 16 => try writer.print("{x}", .{val.toFloat(f16, zcu)}), + 32 => try writer.print("{x}", .{val.toFloat(f32, zcu)}), + 64 => try writer.print("{x}", .{val.toFloat(f64, zcu)}), + 80 => try writer.print("{x}", .{val.toFloat(f80, zcu)}), 128 => try writer.print("{x}", .{f128_val}), else => unreachable, } @@ -1041,10 +1041,10 @@ pub const DeclGen = struct { if (std.math.isNan(f128_val)) switch (bits) { // We only actually need to pass the significand, but it will get // properly masked anyway, so just pass the whole value. 
- 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, pt)))}), - 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, pt)))}), - 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, pt)))}), - 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, pt)))}), + 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, zcu)))}), + 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, zcu)))}), + 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, zcu)))}), + 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, zcu)))}), 128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}), else => unreachable, }; @@ -1167,11 +1167,11 @@ pub const DeclGen = struct { const elem_val_u8: u8 = if (elem_val.isUndef(zcu)) undefPattern(u8) else - @intCast(elem_val.toUnsignedInt(pt)); + @intCast(elem_val.toUnsignedInt(zcu)); try literal.writeChar(elem_val_u8); } if (ai.sentinel) |s| { - const s_u8: u8 = @intCast(s.toUnsignedInt(pt)); + const s_u8: u8 = @intCast(s.toUnsignedInt(zcu)); if (s_u8 != 0) try literal.writeChar(s_u8); } try literal.end(); @@ -1203,7 +1203,7 @@ pub const DeclGen = struct { const comptime_val = tuple.values.get(ip)[field_index]; if (comptime_val != .none) continue; const field_ty = Type.fromInterned(tuple.types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; if (!empty) try writer.writeByte(','); @@ -1238,7 +1238,7 @@ pub const DeclGen = struct { var need_comma = false; while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; if (need_comma) try writer.writeByte(','); need_comma = true; @@ -1265,7 +1265,7 @@ pub const DeclGen = struct { for (0..loaded_struct.field_types.len) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; eff_num_fields += 1; } @@ -1273,7 +1273,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderUndefValue(writer, ty, location); try writer.writeByte(')'); - } else if (ty.bitSize(pt) > 64) { + } else if (ty.bitSize(zcu) > 64) { // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) var num_or = eff_num_fields - 1; while (num_or > 0) : (num_or -= 1) { @@ -1286,7 +1286,7 @@ pub const DeclGen = struct { var needs_closing_paren = false; for (0..loaded_struct.field_types.len) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) { .bytes => |bytes| try pt.intern(.{ .int = .{ @@ -1312,7 +1312,7 @@ pub const DeclGen = struct { if (needs_closing_paren) try writer.writeByte(')'); if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); - bit_offset += field_ty.bitSize(pt); + bit_offset += field_ty.bitSize(zcu); needs_closing_paren = true; eff_index += 1; } @@ -1322,7 +1322,7 @@ pub const DeclGen = struct { var empty = true; for (0..loaded_struct.field_types.len) 
|field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; if (!empty) try writer.writeAll(" | "); try writer.writeByte('('); @@ -1346,7 +1346,7 @@ pub const DeclGen = struct { try dg.renderValue(writer, Value.fromInterned(field_val), .Other); } - bit_offset += field_ty.bitSize(pt); + bit_offset += field_ty.bitSize(zcu); empty = false; } try writer.writeByte(')'); @@ -1396,7 +1396,7 @@ pub const DeclGen = struct { const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index]; if (loaded_union.flagsUnordered(ip).layout == .@"packed") { - if (field_ty.hasRuntimeBits(pt)) { + if (field_ty.hasRuntimeBits(zcu)) { if (field_ty.isPtrAtRuntime(zcu)) { try writer.writeByte('('); try dg.renderCType(writer, ctype); @@ -1427,7 +1427,7 @@ pub const DeclGen = struct { ), .payload => { try writer.writeByte('{'); - if (field_ty.hasRuntimeBits(pt)) { + if (field_ty.hasRuntimeBits(zcu)) { try writer.print(" .{ } = ", .{fmtIdent(field_name.toSlice(ip))}); try dg.renderValue( writer, @@ -1439,7 +1439,7 @@ pub const DeclGen = struct { const inner_field_ty = Type.fromInterned( loaded_union.field_types.get(ip)[inner_field_index], ); - if (!inner_field_ty.hasRuntimeBits(pt)) continue; + if (!inner_field_ty.hasRuntimeBits(zcu)) continue; try dg.renderUndefValue(writer, inner_field_ty, initializer_type); break; } @@ -1588,7 +1588,7 @@ pub const DeclGen = struct { var need_comma = false; while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; if (need_comma) try writer.writeByte(','); need_comma = true; @@ -1613,7 +1613,7 @@ pub const DeclGen = struct { for (0..anon_struct_info.types.len) |field_index| { if (anon_struct_info.values.get(ip)[field_index] != .none) continue; const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; if (need_comma) try writer.writeByte(','); need_comma = true; @@ -1651,7 +1651,7 @@ pub const DeclGen = struct { const inner_field_ty = Type.fromInterned( loaded_union.field_types.get(ip)[inner_field_index], ); - if (!inner_field_ty.hasRuntimeBits(pt)) continue; + if (!inner_field_ty.hasRuntimeBits(pt.zcu)) continue; try dg.renderUndefValue( writer, inner_field_ty, @@ -1902,7 +1902,8 @@ pub const DeclGen = struct { }; fn intCastIsNoop(dg: *DeclGen, dest_ty: Type, src_ty: Type) bool { const pt = dg.pt; - const dest_bits = dest_ty.bitSize(pt); + const zcu = pt.zcu; + const dest_bits = dest_ty.bitSize(zcu); const dest_int_info = dest_ty.intInfo(pt.zcu); const src_is_ptr = src_ty.isPtrAtRuntime(pt.zcu); @@ -1911,7 +1912,7 @@ pub const DeclGen = struct { .signed => Type.isize, } else src_ty; - const src_bits = src_eff_ty.bitSize(pt); + const src_bits = src_eff_ty.bitSize(zcu); const src_int_info = if (src_eff_ty.isAbiInt(pt.zcu)) src_eff_ty.intInfo(pt.zcu) else null; if (dest_bits <= 64 and src_bits <= 64) { const needs_cast = src_int_info == null or @@ -1943,7 +1944,7 @@ pub const DeclGen = struct { ) !void { const pt = dg.pt; const zcu = pt.zcu; - const dest_bits = dest_ty.bitSize(pt); + const dest_bits = 
dest_ty.bitSize(zcu); const dest_int_info = dest_ty.intInfo(zcu); const src_is_ptr = src_ty.isPtrAtRuntime(zcu); @@ -1952,7 +1953,7 @@ pub const DeclGen = struct { .signed => Type.isize, } else src_ty; - const src_bits = src_eff_ty.bitSize(pt); + const src_bits = src_eff_ty.bitSize(zcu); const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null; if (dest_bits <= 64 and src_bits <= 64) { const needs_cast = src_int_info == null or @@ -2033,7 +2034,7 @@ pub const DeclGen = struct { qualifiers, CType.AlignAs.fromAlignment(.{ .@"align" = alignment, - .abi = ty.abiAlignment(dg.pt), + .abi = ty.abiAlignment(dg.pt.zcu), }), ); } @@ -2239,9 +2240,10 @@ pub const DeclGen = struct { } const pt = dg.pt; - const int_info = if (ty.isAbiInt(pt.zcu)) ty.intInfo(pt.zcu) else std.builtin.Type.Int{ + const zcu = pt.zcu; + const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @as(u16, @intCast(ty.bitSize(pt))), + .bits = @as(u16, @intCast(ty.bitSize(zcu))), }; if (is_big) try writer.print(", {}", .{int_info.signedness == .signed}); @@ -2891,7 +2893,7 @@ pub fn genDecl(o: *Object) !void { const nav = ip.getNav(o.dg.pass.nav); const nav_ty = Type.fromInterned(nav.typeOf(ip)); - if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return; + if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return; switch (ip.indexToKey(nav.status.resolved.val)) { .@"extern" => |@"extern"| { if (!ip.isFunctionType(nav_ty.toIntern())) return o.dg.renderFwdDecl(o.dg.pass.nav, .{ @@ -3420,10 +3422,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ } fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const zcu = f.object.dg.pt.zcu; const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3453,7 +3455,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); const ptr_ty = f.typeOf(bin_op.lhs); - const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(pt); + const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu); const ptr = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3482,10 +3484,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const zcu = f.object.dg.pt.zcu; const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3516,7 +3518,7 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); const slice_ty = f.typeOf(bin_op.lhs); const elem_ty = slice_ty.elemType2(zcu); - const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(pt); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(zcu); const slice = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3539,10 +3541,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { 
- const pt = f.object.dg.pt; + const zcu = f.object.dg.pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const inst_ty = f.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3569,13 +3571,13 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.childType(zcu); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty }; + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(elem_ty, .complete), .alignas = CType.AlignAs.fromAlignment(.{ .@"align" = inst_ty.ptrInfo(zcu).flags.alignment, - .abi = elem_ty.abiAlignment(pt), + .abi = elem_ty.abiAlignment(zcu), }), }); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); @@ -3588,13 +3590,13 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.childType(zcu); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty }; + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(elem_ty, .complete), .alignas = CType.AlignAs.fromAlignment(.{ .@"align" = inst_ty.ptrInfo(zcu).flags.alignment, - .abi = elem_ty.abiAlignment(pt), + .abi = elem_ty.abiAlignment(zcu), }), }); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); @@ -3636,7 +3638,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ptr_info = ptr_scalar_ty.ptrInfo(zcu); const src_ty = Type.fromInterned(ptr_info.child); - if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) { try reap(f, inst, &.{ty_op.operand}); return .none; } @@ -3646,7 +3648,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); const is_aligned = if (ptr_info.flags.alignment != .none) - ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte) + ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte) else true; const is_array = lowersToArray(src_ty, pt); @@ -3674,7 +3676,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); - const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(pt)))); + const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(zcu)))); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -3685,9 +3687,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("(("); try f.renderType(writer, field_ty); try writer.writeByte(')'); - const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64; + const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64; if (cant_cast) { - if (field_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (field_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, 
host_ty); try writer.writeByte('('); @@ -3735,7 +3737,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const ret_val = if (is_array) ret_val: { const array_local = try f.allocAlignedLocal(inst, .{ .ctype = ret_ctype, - .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)), + .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)), }); try writer.writeAll("memcpy("); try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); @@ -3926,7 +3928,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } const is_aligned = if (ptr_info.flags.alignment != .none) - ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte) + ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte) else true; const is_array = lowersToArray(Type.fromInterned(ptr_info.child), pt); @@ -3976,7 +3978,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); - const src_bits = src_ty.bitSize(pt); + const src_bits = src_ty.bitSize(zcu); const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb; var stack align(@alignOf(ExpectedContents)) = @@ -4006,9 +4008,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(mask_val)}); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeByte('('); - const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64; + const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64; if (cant_cast) { - if (src_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (src_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_make_"); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeAll("(0, "); @@ -4130,7 +4132,7 @@ fn airBinOp( const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const operand_ty = f.typeOf(bin_op.lhs); const scalar_ty = operand_ty.scalarType(zcu); - if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(pt) > 64) or scalar_ty.isRuntimeFloat()) + if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(zcu) > 64) or scalar_ty.isRuntimeFloat()) return try airBinBuiltinCall(f, inst, operation, info); const lhs = try f.resolveInst(bin_op.lhs); @@ -4169,7 +4171,7 @@ fn airCmpOp( const lhs_ty = f.typeOf(data.lhs); const scalar_ty = lhs_ty.scalarType(zcu); - const scalar_bits = scalar_ty.bitSize(pt); + const scalar_bits = scalar_ty.bitSize(zcu); if (scalar_ty.isInt(zcu) and scalar_bits > 64) return airCmpBuiltinCall( f, @@ -4219,7 +4221,7 @@ fn airEquality( const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const operand_ty = f.typeOf(bin_op.lhs); - const operand_bits = operand_ty.bitSize(pt); + const operand_bits = operand_ty.bitSize(zcu); if (operand_ty.isAbiInt(zcu) and operand_bits > 64) return airCmpBuiltinCall( f, @@ -4312,7 +4314,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(zcu); const elem_ty = inst_scalar_ty.elemType2(zcu); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return f.moveCValue(inst, inst_ty, lhs); + if 
(!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return f.moveCValue(inst, inst_ty, lhs); const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete); const local = try f.allocLocal(inst, inst_ty); @@ -4351,7 +4353,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(zcu); - if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(pt) > 64) or inst_scalar_ty.isRuntimeFloat()) + if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(zcu) > 64) or inst_scalar_ty.isRuntimeFloat()) return try airBinBuiltinCall(f, inst, operation, .none); const lhs = try f.resolveInst(bin_op.lhs); @@ -4446,7 +4448,7 @@ fn airCall( if (!arg_ctype.eql(try f.ctypeFromType(arg_ty, .complete))) { const array_local = try f.allocAlignedLocal(inst, .{ .ctype = arg_ctype, - .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(pt)), + .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(zcu)), }); try writer.writeAll("memcpy("); try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); @@ -4493,7 +4495,7 @@ fn airCall( } else { const local = try f.allocAlignedLocal(inst, .{ .ctype = ret_ctype, - .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)), + .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)), }); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); @@ -4618,7 +4620,7 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index) const writer = f.object.writer(); const inst_ty = f.typeOfIndex(inst); - const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !f.liveness.isUnused(inst)) + const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu) and !f.liveness.isUnused(inst)) try f.allocLocal(inst, inst_ty) else .none; @@ -4681,7 +4683,7 @@ fn lowerTry( const liveness_condbr = f.liveness.getCondBr(inst); const writer = f.object.writer(); const payload_ty = err_union_ty.errorUnionPayload(zcu); - const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt); + const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu); if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) { try writer.writeAll("if ("); @@ -4820,7 +4822,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal try writer.writeAll(", sizeof("); try f.renderType( writer, - if (dest_ty.abiSize(pt) <= operand_ty.abiSize(pt)) dest_ty else operand_ty, + if (dest_ty.abiSize(zcu) <= operand_ty.abiSize(zcu)) dest_ty else operand_ty, ); try writer.writeAll("));\n"); @@ -5030,7 +5032,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.indent_writer.insertNewline(); try writer.writeAll("case "); const item_value = try f.air.value(item, pt); - if (item_value.?.getUnsignedInt(pt)) |item_int| try writer.print("{}\n", .{ + if (item_value.?.getUnsignedInt(zcu)) |item_int| try writer.print("{}\n", .{ try f.fmtIntLiteral(try pt.intValue(lowered_condition_ty, item_int)), }) else { if (condition_ty.isPtrAtRuntime(zcu)) { @@ -5112,10 +5114,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const result = result: { const writer = f.object.writer(); const inst_ty = f.typeOfIndex(inst); - const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt)) local: { + const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) local: { const inst_local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(inst_ty, .complete), - .alignas = 
CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(pt)), + .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(zcu)), }); if (f.wantSafety()) { try f.writeCValue(writer, inst_local, .Other); @@ -5148,7 +5150,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("register "); const output_local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(output_ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(pt)), + .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(zcu)), }); try f.allocs.put(gpa, output_local.new_local, false); try f.object.dg.renderTypeAndName(writer, output_ty, output_local, .{}, .none, .complete); @@ -5183,7 +5185,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { if (is_reg) try writer.writeAll("register "); const input_local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(input_ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(pt)), + .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(zcu)), }); try f.allocs.put(gpa, input_local.new_local, false); try f.object.dg.renderTypeAndName(writer, input_ty, input_local, Const, .none, .complete); @@ -5526,9 +5528,9 @@ fn fieldLocation( .struct_type => { const loaded_struct = ip.loadStructType(container_ty.toIntern()); return switch (loaded_struct.layout) { - .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(pt)) + .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu)) .begin - else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt)) + else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu)) .{ .byte_offset = loaded_struct.offsets.get(ip)[field_index] } else .{ .field = if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| @@ -5542,10 +5544,10 @@ fn fieldLocation( .begin, }; }, - .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(pt)) + .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu)) .begin - else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt)) - .{ .byte_offset = container_ty.structFieldOffset(field_index, pt) } + else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu)) + .{ .byte_offset = container_ty.structFieldOffset(field_index, zcu) } else .{ .field = if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name| .{ .identifier = field_name.toSlice(ip) } @@ -5556,8 +5558,8 @@ fn fieldLocation( switch (loaded_union.flagsUnordered(ip).layout) { .auto, .@"extern" => { const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) - return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(pt)) + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) + return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(zcu)) .{ .field = .{ .identifier = "payload" } } else .begin; @@ -5706,7 +5708,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; const inst_ty = f.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) { try reap(f, inst, &.{extra.struct_operand}); return .none; } @@ -5738,7 +5740,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { inst_ty.intInfo(zcu).signedness else .unsigned; - 
const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(pt)))); + const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu)))); const temp_local = try f.allocLocal(inst, field_int_ty); try f.writeCValue(writer, temp_local, .Other); @@ -5749,7 +5751,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(')'); const cant_cast = int_info.bits > 64; if (cant_cast) { - if (field_int_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); @@ -5857,7 +5859,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const payload_ty = error_union_ty.errorUnionPayload(zcu); const local = try f.allocLocal(inst, inst_ty); - if (!payload_ty.hasRuntimeBits(pt) and operand == .local and operand.local == local.new_local) { + if (!payload_ty.hasRuntimeBits(zcu) and operand == .local and operand.local == local.new_local) { // The store will be 'x = x'; elide it. return local; } @@ -5866,7 +5868,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (!payload_ty.hasRuntimeBits(pt)) + if (!payload_ty.hasRuntimeBits(zcu)) try f.writeCValue(writer, operand, .Other) else if (error_ty.errorSetIsEmpty(zcu)) try writer.print("{}", .{ @@ -5892,7 +5894,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const error_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty; const writer = f.object.writer(); - if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(pt)) { + if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(zcu)) { if (!is_ptr) return .none; const local = try f.allocLocal(inst, inst_ty); @@ -5963,7 +5965,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(zcu); - const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu); const err_ty = inst_ty.errorUnionSet(zcu); const err = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -6012,7 +6014,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); // First, set the non-error value. 
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { const a = try Assignment.start(f, writer, try f.ctypeFromType(operand_ty, .complete)); try f.writeCValueDeref(writer, operand); try a.assign(f, writer); @@ -6064,7 +6066,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(zcu); const payload = try f.resolveInst(ty_op.operand); - const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu); const err_ty = inst_ty.errorUnionSet(zcu); try reap(f, inst, &.{ty_op.operand}); @@ -6109,7 +6111,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const try a.assign(f, writer); const err_int_ty = try pt.errorIntType(); if (!error_ty.errorSetIsEmpty(zcu)) - if (payload_ty.hasRuntimeBits(pt)) + if (payload_ty.hasRuntimeBits(zcu)) if (is_ptr) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) else @@ -6430,7 +6432,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value }); const repr_ty = if (ty.isRuntimeFloat()) - pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable + pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable else ty; @@ -6534,7 +6536,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const operand_mat = try Materialize.start(f, inst, ty, operand); try reap(f, inst, &.{ pl_op.operand, extra.operand }); - const repr_bits = @as(u16, @intCast(ty.abiSize(pt) * 8)); + const repr_bits = @as(u16, @intCast(ty.abiSize(zcu) * 8)); const is_float = ty.isRuntimeFloat(); const is_128 = repr_bits == 128; const repr_ty = if (is_float) pt.intType(.unsigned, repr_bits) catch unreachable else ty; @@ -6585,7 +6587,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ty = ptr_ty.childType(zcu); const repr_ty = if (ty.isRuntimeFloat()) - pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable + pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable else ty; @@ -6626,7 +6628,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const repr_ty = if (ty.isRuntimeFloat()) - pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable + pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable else ty; @@ -6666,7 +6668,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const dest_slice = try f.resolveInst(bin_op.lhs); const value = try f.resolveInst(bin_op.rhs); const elem_ty = f.typeOf(bin_op.rhs); - const elem_abi_size = elem_ty.abiSize(pt); + const elem_abi_size = elem_ty.abiSize(zcu); const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false; const writer = f.object.writer(); @@ -6831,7 +6833,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const union_ty = f.typeOf(bin_op.lhs).childType(zcu); - const layout = union_ty.unionGetLayout(pt); + const layout = union_ty.unionGetLayout(zcu); if (layout.tag_size == 0) return .none; const tag_ty = union_ty.unionTagTypeSafety(zcu).?; @@ -6846,13 +6848,14 @@ fn airSetUnionTag(f: *Function, inst: 
Air.Inst.Index) !CValue { fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const union_ty = f.typeOf(ty_op.operand); - const layout = union_ty.unionGetLayout(pt); + const layout = union_ty.unionGetLayout(zcu); if (layout.tag_size == 0) return .none; const inst_ty = f.typeOfIndex(inst); @@ -6960,6 +6963,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue { fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data; @@ -6978,7 +6982,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderValue(writer, try pt.intValue(Type.usize, index), .Other); try writer.writeAll("] = "); - const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(pt); + const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(zcu); const src_val = try pt.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63))); try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other); @@ -7001,7 +7005,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { const operand_ty = f.typeOf(reduce.operand); const writer = f.object.writer(); - const use_operator = scalar_ty.bitSize(pt) <= 64; + const use_operator = scalar_ty.bitSize(zcu) <= 64; const op: union(enum) { const Func = struct { operation: []const u8, info: BuiltinInfo = .none }; builtin: Func, @@ -7178,7 +7182,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { var field_it = loaded_struct.iterateRuntimeOrder(ip); while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete)); try f.writeCValueMember(writer, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| @@ -7203,7 +7207,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { for (0..elements.len) |field_index| { if (inst_ty.structFieldIsComptime(field_index, zcu)) continue; const field_ty = inst_ty.structFieldType(field_index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; if (!empty) { try writer.writeAll("zig_or_"); @@ -7216,7 +7220,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { for (resolved_elements, 0..) |element, field_index| { if (inst_ty.structFieldIsComptime(field_index, zcu)) continue; const field_ty = inst_ty.structFieldType(field_index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; if (!empty) try writer.writeAll(", "); // TODO: Skip this entire shift if val is 0? 
@@ -7248,7 +7252,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(')'); if (!empty) try writer.writeByte(')'); - bit_offset += field_ty.bitSize(pt); + bit_offset += field_ty.bitSize(zcu); empty = false; } try writer.writeAll(";\n"); @@ -7258,7 +7262,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { .anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| { if (anon_struct_info.values.get(ip)[field_index] != .none) continue; const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete)); try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name| @@ -7294,7 +7298,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { if (loaded_union.flagsUnordered(ip).layout == .@"packed") return f.moveCValue(inst, union_ty, payload); const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: { - const layout = union_ty.unionGetLayout(pt); + const layout = union_ty.unionGetLayout(zcu); if (layout.tag_size != 0) { const field_index = tag_ty.enumFieldIndex(field_name, zcu).?; const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); @@ -7818,7 +7822,7 @@ fn formatIntLiteral( }; undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits); break :blk undef_int.toConst(); - } else data.val.toBigInt(&int_buf, pt); + } else data.val.toBigInt(&int_buf, zcu); assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits)); const c_bits: usize = @intCast(data.ctype.byteSize(ctype_pool, data.dg.mod) * 8); @@ -8062,9 +8066,10 @@ const Vectorize = struct { }; fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool { - return switch (ty.zigTypeTag(pt.zcu)) { + const zcu = pt.zcu; + return switch (ty.zigTypeTag(zcu)) { .Array, .Vector => return true, - else => return ty.isAbiInt(pt.zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(pt)))) == null, + else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null, }; } -- cgit v1.2.3 From 80cd53d3bbf5cdc82715a4400592b40fb93cd5c9 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 11 Aug 2024 19:28:42 -0700 Subject: sema: clean-up `{union,struct}FieldAlignment` and friends My main gripes with this design were that it was incorrectly namespaced, the naming was inconsistent and a bit wrong (`fooAlign` vs `fooAlignment`). This commit moves all the logic from `PerThread.zig` to use the zcu + tid system that the previous couple commits introduce. I've organized and merged the functions to be a bit more specific to their own purpose. - `fieldAlignment` takes a struct or union type, an index, and a Zcu (or the Sema version which takes a Pt), and gives you the alignment of the field at the index. - `structFieldAlignment` takes the field type itself, and provides the logic to handle special cases, such as externs. A design goal I had in mind was to avoid using the word 'struct' in the function name, when it worked for things that aren't structs, such as unions. 
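For reference, a minimal sketch of what call sites look like after this change, using only the signatures introduced in this diff. Here `ty` (a struct or union type), `field_ty` (the field's own type), `field_index`, `explicit_align`, `layout`, `zcu` (a `*Zcu`), and `pt` (a `Zcu.PerThread`) are placeholder names assumed to be in scope; this is illustrative, not code taken from the commit itself:

    // Alignment of a field looked up by index on the aggregate type (struct or union).
    const backend_align = ty.fieldAlignment(field_index, zcu); // no type resolution; assumes it already happened
    const sema_align = try ty.fieldAlignmentSema(field_index, pt); // Sema variant; may resolve the field type

    // Alignment computed from the field type itself, handling the extern/auto
    // layout special cases (an explicit alignment wins when present).
    const struct_field_align = field_ty.structFieldAlignment(explicit_align, layout, zcu);
    const union_field_align = try field_ty.unionFieldAlignmentSema(explicit_align, layout, pt);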
--- src/Sema.zig | 110 ++++++++++++++------------------- src/Sema/bitcast.zig | 6 +- src/Sema/comptime_ptr_access.zig | 4 +- src/Type.zig | 127 +++++++++++++++++++++++++-------------- src/Value.zig | 20 +++--- src/Zcu/PerThread.zig | 32 ---------- src/arch/aarch64/CodeGen.zig | 6 +- src/arch/aarch64/abi.zig | 4 +- src/arch/arm/CodeGen.zig | 6 +- src/arch/arm/abi.zig | 8 +-- src/arch/riscv64/CodeGen.zig | 6 +- src/arch/riscv64/abi.zig | 2 +- src/arch/sparc64/CodeGen.zig | 4 +- src/arch/wasm/CodeGen.zig | 4 +- src/arch/wasm/abi.zig | 2 +- src/arch/x86_64/CodeGen.zig | 30 ++++----- src/codegen/c.zig | 4 +- src/codegen/llvm.zig | 38 ++++-------- src/codegen/spirv.zig | 2 +- src/mutable_value.zig | 2 +- 20 files changed, 194 insertions(+), 223 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/src/Sema.zig b/src/Sema.zig index 5e30315233..2ba3450966 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -4887,7 +4887,7 @@ fn validateStructInit( const i: u32 = @intCast(i_usize); if (opt_field_ptr.unwrap()) |field_ptr| { // Determine whether the value stored to this pointer is comptime-known. - const field_ty = struct_ty.structFieldType(i, zcu); + const field_ty = struct_ty.fieldType(i, zcu); if (try sema.typeHasOnePossibleValue(field_ty)) |opv| { field_values[i] = opv.toIntern(); continue; @@ -4999,7 +4999,7 @@ fn validateStructInit( var block_index = first_block_index; for (block.instructions.items[first_block_index..]) |cur_inst| { while (field_ptr_ref == .none and init_index < instrs.len) : (init_index += 1) { - const field_ty = struct_ty.structFieldType(field_indices[init_index], zcu); + const field_ty = struct_ty.fieldType(field_indices[init_index], zcu); if (try field_ty.onePossibleValue(pt)) |_| continue; field_ptr_ref = sema.inst_map.get(instrs[init_index]).?; } @@ -8430,7 +8430,7 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil try indexable_ty.resolveFields(pt); assert(indexable_ty.isIndexable(zcu)); // validated by a previous instruction if (indexable_ty.zigTypeTag(zcu) == .Struct) { - const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), zcu); + const elem_type = indexable_ty.fieldType(@intFromEnum(bin.rhs), zcu); return Air.internedToRef(elem_type.toIntern()); } else { const elem_type = indexable_ty.elemType2(zcu); @@ -14419,7 +14419,7 @@ fn analyzeTupleCat( var runtime_src: ?LazySrcLoc = null; var i: u32 = 0; while (i < lhs_len) : (i += 1) { - types[i] = lhs_ty.structFieldType(i, zcu).toIntern(); + types[i] = lhs_ty.fieldType(i, zcu).toIntern(); const default_val = lhs_ty.structFieldDefaultValue(i, zcu); values[i] = default_val.toIntern(); const operand_src = block.src(.{ .array_cat_lhs = .{ @@ -14433,7 +14433,7 @@ fn analyzeTupleCat( } i = 0; while (i < rhs_len) : (i += 1) { - types[i + lhs_len] = rhs_ty.structFieldType(i, zcu).toIntern(); + types[i + lhs_len] = rhs_ty.fieldType(i, zcu).toIntern(); const default_val = rhs_ty.structFieldDefaultValue(i, zcu); values[i + lhs_len] = default_val.toIntern(); const operand_src = block.src(.{ .array_cat_rhs = .{ @@ -14791,7 +14791,7 @@ fn analyzeTupleMul( const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; for (0..tuple_len) |i| { - types[i] = operand_ty.structFieldType(i, zcu).toIntern(); + types[i] = operand_ty.fieldType(i, zcu).toIntern(); values[i] = operand_ty.structFieldDefaultValue(i, zcu).toIntern(); const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = src_node, @@ -18466,13 +18466,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: 
Zir.Inst.Index) CompileError!Ai }; const alignment = switch (layout) { - .auto, .@"extern" => try Type.unionFieldNormalAlignmentAdvanced( - union_obj, - @intCast(field_index), - .sema, - pt.zcu, - pt.tid, - ), + .auto, .@"extern" => try ty.fieldAlignmentSema(field_index, pt), .@"packed" => .none, }; @@ -18691,12 +18685,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const default_val_ptr = try sema.optRefValue(opt_default_val); const alignment = switch (struct_type.layout) { .@"packed" => .none, - else => try field_ty.structFieldAlignmentAdvanced( + else => try field_ty.structFieldAlignmentSema( struct_type.fieldAlign(ip, field_index), struct_type.layout, - .sema, - pt.zcu, - pt.tid, + pt, ), }; @@ -20327,7 +20319,7 @@ fn zirStructInit( assert(field_inits[field_index] == .none); found_fields[field_index] = item.data.field_type; const uncoerced_init = try sema.resolveInst(item.data.init); - const field_ty = resolved_ty.structFieldType(field_index, zcu); + const field_ty = resolved_ty.fieldType(field_index, zcu); field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src); if (!is_packed) { try resolved_ty.resolveStructFieldInits(pt); @@ -20338,7 +20330,7 @@ fn zirStructInit( }); }; - if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, zcu), zcu)) { + if (!init_val.eql(default_value, resolved_ty.fieldType(field_index, zcu), zcu)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index); } } @@ -20799,7 +20791,7 @@ fn zirArrayInit( const arg = args[i + 1]; const resolved_arg = try sema.resolveInst(arg); const elem_ty = if (is_tuple) - array_ty.structFieldType(i, zcu) + array_ty.fieldType(i, zcu) else array_ty.elemType2(zcu); dest.* = try sema.coerce(block, elem_ty, resolved_arg, elem_src); @@ -20862,7 +20854,7 @@ fn zirArrayInit( if (is_tuple) { for (resolved_args, 0..) 
|arg, i| { const elem_ptr_ty = try pt.ptrTypeSema(.{ - .child = array_ty.structFieldType(i, zcu).toIntern(), + .child = array_ty.fieldType(i, zcu).toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern()); @@ -25234,7 +25226,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins }, .packed_offset = parent_ptr_info.packed_offset, }; - const field_ty = parent_ty.structFieldType(field_index, zcu); + const field_ty = parent_ty.fieldType(field_index, zcu); var actual_field_ptr_info: InternPool.Key.PtrType = .{ .child = field_ty.toIntern(), .flags = .{ @@ -25249,19 +25241,17 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins switch (parent_ty.containerLayout(zcu)) { .auto => { actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict( - if (zcu.typeToStruct(parent_ty)) |struct_obj| try field_ty.structFieldAlignmentAdvanced( - struct_obj.fieldAlign(ip, field_index), - struct_obj.layout, - .sema, - pt.zcu, - pt.tid, - ) else if (zcu.typeToUnion(parent_ty)) |union_obj| - try Type.unionFieldNormalAlignmentAdvanced( - union_obj, - field_index, - .sema, - pt.zcu, - pt.tid, + if (zcu.typeToStruct(parent_ty)) |struct_obj| + try field_ty.structFieldAlignmentSema( + struct_obj.fieldAlign(ip, field_index), + struct_obj.layout, + pt, + ) + else if (zcu.typeToUnion(parent_ty)) |union_obj| + try field_ty.unionFieldAlignmentSema( + union_obj.fieldAlign(ip, field_index), + union_obj.flagsUnordered(ip).layout, + pt, ) else actual_field_ptr_info.flags.alignment, @@ -28035,14 +28025,14 @@ fn fieldCallBind( } if (field_name.toUnsigned(ip)) |field_index| { if (field_index >= concrete_ty.structFieldCount(zcu)) break :find_field; - return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(field_index, zcu), field_index, object_ptr); + return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.fieldType(field_index, zcu), field_index, object_ptr); } } else { const max = concrete_ty.structFieldCount(zcu); for (0..max) |i_usize| { const i: u32 = @intCast(i_usize); if (field_name == concrete_ty.structFieldName(i, zcu).unwrap().?) { - return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(i, zcu), i, object_ptr); + return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.fieldType(i, zcu), i, object_ptr); } } } @@ -28340,12 +28330,10 @@ fn structFieldPtrByIndex( @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset))); } else { // Our alignment is capped at the field alignment. 
- const field_align = try Type.fromInterned(field_ty).structFieldAlignmentAdvanced( + const field_align = try Type.fromInterned(field_ty).structFieldAlignmentSema( struct_type.fieldAlign(ip, field_index), struct_type.layout, - .sema, - pt.zcu, - pt.tid, + pt, ); ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none) field_align @@ -28477,7 +28465,7 @@ fn tupleFieldValByIndex( ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; - const field_ty = tuple_ty.structFieldType(field_index, zcu); + const field_ty = tuple_ty.fieldType(field_index, zcu); if (tuple_ty.structFieldIsComptime(field_index, zcu)) try tuple_ty.resolveStructFieldInits(pt); @@ -28538,13 +28526,7 @@ fn unionFieldPtr( union_ptr_info.flags.alignment else try union_ty.abiAlignmentSema(pt); - const field_align = try Type.unionFieldNormalAlignmentAdvanced( - union_obj, - field_index, - .sema, - pt.zcu, - pt.tid, - ); + const field_align = try union_ty.fieldAlignmentSema(field_index, pt); break :blk union_align.min(field_align); } else union_ptr_info.flags.alignment, }, @@ -28921,7 +28903,7 @@ fn tupleFieldPtr( }); } - const field_ty = tuple_ty.structFieldType(field_index, zcu); + const field_ty = tuple_ty.fieldType(field_index, zcu); const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ @@ -28979,7 +28961,7 @@ fn tupleField( }); } - const field_ty = tuple_ty.structFieldType(field_index, zcu); + const field_ty = tuple_ty.fieldType(field_index, zcu); if (tuple_ty.structFieldIsComptime(field_index, zcu)) try tuple_ty.resolveStructFieldInits(pt); @@ -30615,9 +30597,9 @@ pub fn coerceInMemoryAllowed( const field_count = dest_ty.structFieldCount(zcu); for (0..field_count) |field_idx| { if (dest_ty.structFieldIsComptime(field_idx, zcu) != src_ty.structFieldIsComptime(field_idx, zcu)) break :tuple; - if (dest_ty.structFieldAlign(field_idx, zcu) != src_ty.structFieldAlign(field_idx, zcu)) break :tuple; - const dest_field_ty = dest_ty.structFieldType(field_idx, zcu); - const src_field_ty = src_ty.structFieldType(field_idx, zcu); + if (dest_ty.fieldAlignment(field_idx, zcu) != src_ty.fieldAlignment(field_idx, zcu)) break :tuple; + const dest_field_ty = dest_ty.fieldType(field_idx, zcu); + const src_field_ty = src_ty.fieldType(field_idx, zcu); const field = try sema.coerceInMemoryAllowed(block, dest_field_ty, src_field_ty, dest_is_mut, target, dest_src, src_src, null); if (field != .ok) break :tuple; } @@ -35073,7 +35055,7 @@ fn resolvePeerTypesInner( peer_field_val.* = null; continue; }; - peer_field_ty.* = ty.structFieldType(field_index, zcu); + peer_field_ty.* = ty.fieldType(field_index, zcu); peer_field_val.* = if (opt_val) |val| try val.fieldValue(pt, field_index) else null; } @@ -35095,7 +35077,7 @@ fn resolvePeerTypesInner( // Already-resolved types won't be referenced by the error so it's fine // to leave them undefined. 
const ty = opt_ty orelse continue; - peer_field_ty.* = ty.structFieldType(field_index, zcu); + peer_field_ty.* = ty.fieldType(field_index, zcu); } return .{ .field_error = .{ @@ -35220,9 +35202,9 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike { .elem_ty = Type.noreturn, }; if (!ty.isTuple(zcu)) return null; - const elem_ty = ty.structFieldType(0, zcu); + const elem_ty = ty.fieldType(0, zcu); for (1..field_count) |i| { - if (!ty.structFieldType(i, zcu).eql(elem_ty, zcu)) { + if (!ty.fieldType(i, zcu).eql(elem_ty, zcu)) { return null; } } @@ -35309,12 +35291,10 @@ pub fn resolveStructAlignment( const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt)) continue; - const field_align = try field_ty.structFieldAlignmentAdvanced( + const field_align = try field_ty.structFieldAlignmentSema( struct_type.fieldAlign(ip, i), struct_type.layout, - .sema, - pt.zcu, - pt.tid, + pt, ); alignment = alignment.maxStrict(field_align); } @@ -35375,12 +35355,10 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { }, else => return err, }; - field_align.* = try field_ty.structFieldAlignmentAdvanced( + field_align.* = try field_ty.structFieldAlignmentSema( struct_type.fieldAlign(ip, i), struct_type.layout, - .sema, - pt.zcu, - pt.tid, + pt, ); big_align = big_align.maxStrict(field_align.*); } diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig index 73aa53e5e6..04229532fc 100644 --- a/src/Sema/bitcast.zig +++ b/src/Sema/bitcast.zig @@ -542,7 +542,7 @@ const PackValueBits = struct { while (it.next()) |field_idx| { const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8; try pack.padding(want_bit_off - cur_bit_off); - const field_ty = ty.structFieldType(field_idx, zcu); + const field_ty = ty.fieldType(field_idx, zcu); elems[field_idx] = (try pack.get(field_ty)).toIntern(); cur_bit_off = want_bit_off + field_ty.bitSize(zcu); } @@ -552,7 +552,7 @@ const PackValueBits = struct { var cur_bit_off: u64 = ty.bitSize(zcu); var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip); while (it.next()) |field_idx| { - const field_ty = ty.structFieldType(field_idx, zcu); + const field_ty = ty.fieldType(field_idx, zcu); const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu); try pack.padding(cur_bit_off - want_bit_off); elems[field_idx] = (try pack.get(field_ty)).toIntern(); @@ -578,7 +578,7 @@ const PackValueBits = struct { // This is identical between LE and BE targets. const elems = try arena.alloc(InternPool.Index, ty.structFieldCount(zcu)); for (elems, 0..) 
|*elem, i| { - const field_ty = ty.structFieldType(i, zcu); + const field_ty = ty.fieldType(i, zcu); elem.* = (try pack.get(field_ty)).toIntern(); } return Value.fromInterned(try pt.intern(.{ .aggregate = .{ diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig index 8549e32d2b..893ea6db36 100644 --- a/src/Sema/comptime_ptr_access.zig +++ b/src/Sema/comptime_ptr_access.zig @@ -451,7 +451,7 @@ fn loadComptimePtrInner( .@"packed" => break, // let the bitcast logic handle this .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| { const start_off = cur_ty.structFieldOffset(field_idx, zcu); - const end_off = start_off + try cur_ty.structFieldType(field_idx, zcu).abiSizeSema(pt); + const end_off = start_off + try cur_ty.fieldType(field_idx, zcu).abiSizeSema(pt); if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) { cur_val = try cur_val.getElem(sema.pt, field_idx); cur_offset -= start_off; @@ -873,7 +873,7 @@ fn prepareComptimePtrStore( .@"packed" => break, // let the bitcast logic handle this .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| { const start_off = cur_ty.structFieldOffset(field_idx, zcu); - const end_off = start_off + try cur_ty.structFieldType(field_idx, zcu).abiSizeSema(pt); + const end_off = start_off + try cur_ty.fieldType(field_idx, zcu).abiSizeSema(pt); if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) { cur_val = try cur_val.elem(pt, sema.arena, field_idx); cur_offset -= start_off; diff --git a/src/Type.zig b/src/Type.zig index 5a47fa9527..16806af49d 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -3191,8 +3191,8 @@ pub fn structFieldCount(ty: Type, zcu: *const Zcu) u32 { }; } -/// Supports structs and unions. -pub fn structFieldType(ty: Type, index: usize, zcu: *const Zcu) Type { +/// Returns the field type. Supports structs and unions. +pub fn fieldType(ty: Type, index: usize, zcu: *const Zcu) Type { const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { .struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]), @@ -3205,17 +3205,26 @@ pub fn structFieldType(ty: Type, index: usize, zcu: *const Zcu) Type { }; } -pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment { - return ty.structFieldAlignAdvanced(index, .normal, zcu, {}) catch unreachable; +pub fn fieldAlignment(ty: Type, index: usize, zcu: *Zcu) Alignment { + return ty.fieldAlignmentInner(index, .normal, zcu, {}) catch unreachable; +} + +pub fn fieldAlignmentSema(ty: Type, index: usize, pt: Zcu.PerThread) SemaError!Alignment { + return try ty.fieldAlignmentInner(index, .sema, pt.zcu, pt.tid); } -pub fn structFieldAlignAdvanced( +/// Returns the field alignment. Supports structs and unions. +/// If `strat` is `.sema`, may perform type resolution. +/// Asserts the layout is not packed. +/// +/// Provide the struct field as the `ty`. 
+pub fn fieldAlignmentInner(
     ty: Type,
     index: usize,
     comptime strat: ResolveStrat,
     zcu: *Zcu,
     tid: strat.Tid(),
-) !Alignment {
+) SemaError!Alignment {
     const ip = &zcu.intern_pool;
     switch (ip.indexToKey(ty.toIntern())) {
         .struct_type => {
@@ -3223,13 +3232,7 @@ pub fn structFieldAlignAdvanced(
             assert(struct_type.layout != .@"packed");
             const explicit_align = struct_type.fieldAlign(ip, index);
             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
-            return field_ty.structFieldAlignmentAdvanced(
-                explicit_align,
-                struct_type.layout,
-                strat,
-                zcu,
-                tid,
-            );
+            return field_ty.structFieldAlignmentInner(explicit_align, struct_type.layout, strat, zcu, tid);
         },
         .anon_struct_type => |anon_struct| {
             return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentInner(
@@ -3240,28 +3243,62 @@ pub fn structFieldAlignAdvanced(
         },
         .union_type => {
             const union_obj = ip.loadUnionType(ty.toIntern());
-            return unionFieldNormalAlignmentAdvanced(
-                union_obj,
-                @intCast(index),
-                strat,
-                zcu,
-                tid,
-            );
+            const layout = union_obj.flagsUnordered(ip).layout;
+            assert(layout != .@"packed");
+            const explicit_align = union_obj.fieldAlign(ip, index);
+            const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[index]);
+            return field_ty.unionFieldAlignmentInner(explicit_align, layout, strat, zcu, tid);
         },
         else => unreachable,
     }
 }
 
-/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
-/// If `strat` is `.sema`, may perform type resolution.
-pub fn structFieldAlignmentAdvanced(
+/// Returns the alignment of a non-packed struct field. Asserts the layout is not packed.
+///
+/// Asserts that all resolution needed was done.
+pub fn structFieldAlignment(
     field_ty: Type,
     explicit_alignment: InternPool.Alignment,
     layout: std.builtin.Type.ContainerLayout,
+    zcu: *Zcu,
+) Alignment {
+    return field_ty.structFieldAlignmentInner(
+        explicit_alignment,
+        layout,
+        .normal,
+        zcu,
+        {},
+    ) catch unreachable;
+}
+
+/// Returns the alignment of a non-packed struct field. Asserts the layout is not packed.
+/// May perform type resolution when needed.
+/// Returns a `SemaError` if that resolution fails.
+pub fn structFieldAlignmentSema(
+    field_ty: Type,
+    explicit_alignment: InternPool.Alignment,
+    layout: std.builtin.Type.ContainerLayout,
+    pt: Zcu.PerThread,
+) SemaError!Alignment {
+    return try field_ty.structFieldAlignmentInner(
+        explicit_alignment,
+        layout,
+        .sema,
+        pt.zcu,
+        pt.tid,
+    );
+}
+
+/// Returns the alignment of a non-packed struct field. Asserts the layout is not packed.
+/// If `strat` is `.sema`, may perform type resolution.
+pub fn structFieldAlignmentInner(
+    field_ty: Type,
+    explicit_alignment: Alignment,
+    layout: std.builtin.Type.ContainerLayout,
     comptime strat: Type.ResolveStrat,
     zcu: *Zcu,
     tid: strat.Tid(),
-) Zcu.SemaError!InternPool.Alignment {
+) SemaError!Alignment {
     assert(layout != .@"packed");
     if (explicit_alignment != .none) return explicit_alignment;
     const ty_abi_align = (try field_ty.abiAlignmentInner(
@@ -3281,29 +3318,31 @@ pub fn structFieldAlignmentAdvanced(
     return ty_abi_align;
 }
 
-/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
-pub fn unionFieldNormalAlignment( - loaded_union: InternPool.LoadedUnionType, - field_index: u32, - zcu: *Zcu, -) InternPool.Alignment { - return unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal, zcu, {}) catch unreachable; +pub fn unionFieldAlignmentSema( + field_ty: Type, + explicit_alignment: Alignment, + layout: std.builtin.Type.ContainerLayout, + pt: Zcu.PerThread, +) SemaError!Alignment { + return field_ty.unionFieldAlignmentInner( + explicit_alignment, + layout, + .sema, + pt.zcu, + pt.tid, + ); } -/// Returns the field alignment of a non-packed union. Asserts the layout is not packed. -/// If `strat` is `.sema`, may perform type resolution. -pub fn unionFieldNormalAlignmentAdvanced( - loaded_union: InternPool.LoadedUnionType, - field_index: u32, +pub fn unionFieldAlignmentInner( + field_ty: Type, + explicit_alignment: Alignment, + layout: std.builtin.Type.ContainerLayout, comptime strat: Type.ResolveStrat, zcu: *Zcu, tid: strat.Tid(), -) Zcu.SemaError!InternPool.Alignment { - const ip = &zcu.intern_pool; - assert(loaded_union.flagsUnordered(ip).layout != .@"packed"); - const field_align = loaded_union.fieldAlign(ip, field_index); - if (field_align != .none) return field_align; - const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); +) SemaError!Alignment { + assert(layout != .@"packed"); + if (explicit_alignment != .none) return explicit_alignment; if (field_ty.isNoReturn(zcu)) return .none; return (try field_ty.abiAlignmentInner(strat.toLazy(), zcu, tid)).scalar; } @@ -3608,12 +3647,12 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: const zcu = pt.zcu; const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu); - const field_ty = struct_ty.structFieldType(field_idx, zcu); + const field_ty = struct_ty.fieldType(field_idx, zcu); var bit_offset: u16 = 0; var running_bits: u16 = 0; for (0..struct_ty.structFieldCount(zcu)) |i| { - const f_ty = struct_ty.structFieldType(i, zcu); + const f_ty = struct_ty.fieldType(i, zcu); if (i == field_idx) { bit_offset = running_bits; } diff --git a/src/Value.zig b/src/Value.zig index 7dfb83ff65..fd9fc5d51a 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -1414,7 +1414,7 @@ pub fn fieldValue(val: Value, pt: Zcu.PerThread, index: usize) !Value { const zcu = pt.zcu; return switch (zcu.intern_pool.indexToKey(val.toIntern())) { .undef => |ty| Value.fromInterned(try pt.intern(.{ - .undef = Type.fromInterned(ty).structFieldType(index, zcu).toIntern(), + .undef = Type.fromInterned(ty).fieldType(index, zcu).toIntern(), })), .aggregate => |aggregate| Value.fromInterned(switch (aggregate.storage) { .bytes => |bytes| try pt.intern(.{ .int = .{ @@ -3810,9 +3810,9 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value { // `field_align` may be `.none` to represent the natural alignment of `field_ty`, but is not necessarily. const field_ty: Type, const field_align: InternPool.Alignment = switch (aggregate_ty.zigTypeTag(zcu)) { .Struct => field: { - const field_ty = aggregate_ty.structFieldType(field_idx, zcu); + const field_ty = aggregate_ty.fieldType(field_idx, zcu); switch (aggregate_ty.containerLayout(zcu)) { - .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), .sema, zcu, pt.tid) }, + .auto => break :field .{ field_ty, try aggregate_ty.fieldAlignmentSema(field_idx, pt) }, .@"extern" => { // Well-defined layout, so just offset the pointer appropriately. 
const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu); @@ -3863,7 +3863,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value { const union_obj = zcu.typeToUnion(aggregate_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]); switch (aggregate_ty.containerLayout(zcu)) { - .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), .sema, zcu, pt.tid) }, + .auto => break :field .{ field_ty, try aggregate_ty.fieldAlignmentSema(field_idx, pt) }, .@"extern" => { // Point to the same address. const result_ty = try pt.ptrTypeSema(info: { @@ -4198,14 +4198,14 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh const base_ptr_ty = base_ptr.typeOf(zcu); const agg_ty = base_ptr_ty.childType(zcu); const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) { - .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced( - @intCast(field.index), + .Struct => .{ agg_ty.fieldType(field.index, zcu), try agg_ty.fieldAlignmentInner( + field.index, if (have_sema) .sema else .normal, pt.zcu, if (have_sema) pt.tid else {}, ) }, - .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced( - @intCast(field.index), + .Union => .{ agg_ty.unionFieldTypeByIndex(field.index, zcu), try agg_ty.fieldAlignmentInner( + field.index, if (have_sema) .sema else .normal, pt.zcu, if (have_sema) pt.tid else {}, @@ -4344,7 +4344,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh .Struct => switch (cur_ty.containerLayout(zcu)) { .auto, .@"packed" => break, .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| { - const field_ty = cur_ty.structFieldType(field_idx, zcu); + const field_ty = cur_ty.fieldType(field_idx, zcu); const start_off = cur_ty.structFieldOffset(field_idx, zcu); const end_off = start_off + field_ty.abiSize(zcu); if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) { @@ -4401,7 +4401,7 @@ pub fn resolveLazy( .u64, .i64, .big_int => return val, .lazy_align, .lazy_size => return pt.intValue( Type.fromInterned(int.ty), - (try val.getUnsignedIntInner(.sema, pt.zcu, pt.tid)).?, + try val.toUnsignedIntSema(pt), ), }, .slice => |slice| { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index ab9e6bbabb..29de95038c 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -3040,38 +3040,6 @@ pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 { } } -/// Returns 0 if the union is represented with 0 bits at runtime. -pub fn unionAbiAlignment(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) InternPool.Alignment { - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag(); - var max_align: InternPool.Alignment = .none; - if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(zcu); - for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| { - if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue; - - const field_align = zcu.unionFieldNormalAlignment(loaded_union, @intCast(field_index)); - max_align = max_align.max(field_align); - } - return max_align; -} - -/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed. 
-pub fn structFieldAlignment( - pt: Zcu.PerThread, - explicit_alignment: InternPool.Alignment, - field_ty: Type, - layout: std.builtin.Type.ContainerLayout, -) InternPool.Alignment { - return field_ty.structFieldAlignmentAdvanced( - explicit_alignment, - layout, - .normal, - pt.zcu, - {}, - ) catch unreachable; -} - /// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets /// into the packed struct InternPool data rather than computing this on the /// fly, however it was found to perform worse when measured on real world diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index f8d998ebe5..844a3e584a 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4144,7 +4144,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const zcu = pt.zcu; const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); - const struct_field_ty = struct_ty.structFieldType(index, zcu); + const struct_field_ty = struct_ty.fieldType(index, zcu); const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu))); switch (mcv) { @@ -5473,10 +5473,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(rwo.reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0, zcu); + const wrapped_ty = ty.fieldType(0, zcu); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); - const overflow_bit_ty = ty.structFieldType(1, zcu); + const overflow_bit_ty = ty.fieldType(1, zcu); const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, zcu))); const raw_cond_reg = try self.register_manager.allocReg(null, gp); const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty); diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig index ef3f9e7acd..b3926b8cc1 100644 --- a/src/arch/aarch64/abi.zig +++ b/src/arch/aarch64/abi.zig @@ -95,7 +95,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u8 { var count: u8 = 0; var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i, zcu); + const field_ty = ty.fieldType(i, zcu); const field_count = countFloats(field_ty, zcu, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; @@ -130,7 +130,7 @@ pub fn getFloatArrayType(ty: Type, zcu: *Zcu) ?Type { const fields_len = ty.structFieldCount(zcu); var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i, zcu); + const field_ty = ty.fieldType(i, zcu); if (getFloatArrayType(field_ty, zcu)) |some| return some; } return null; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index cea6d7d43e..6549868fa5 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2926,7 +2926,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, zcu)); - const struct_field_ty = struct_ty.structFieldType(index, zcu); + const struct_field_ty = struct_ty.fieldType(index, zcu); switch (mcv) { .dead, .unreach => unreachable, @@ -5434,10 +5434,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(reg); defer if (reg_lock) |locked_reg| 
self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0, zcu); + const wrapped_ty = ty.fieldType(0, zcu); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg }); - const overflow_bit_ty = ty.structFieldType(1, zcu); + const overflow_bit_ty = ty.fieldType(1, zcu); const overflow_bit_offset: u32 = @intCast(ty.structFieldOffset(1, zcu)); const cond_reg = try self.register_manager.allocReg(null, gp); diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index ff3c40cb09..718350164c 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -44,8 +44,8 @@ pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class { const fields = ty.structFieldCount(zcu); var i: u32 = 0; while (i < fields) : (i += 1) { - const field_ty = ty.structFieldType(i, zcu); - const field_alignment = ty.structFieldAlign(i, zcu); + const field_ty = ty.fieldType(i, zcu); + const field_alignment = ty.fieldAlignment(i, zcu); const field_size = field_ty.bitSize(zcu); if (field_size > 32 or field_alignment.compare(.gt, .@"32")) { return Class.arrSize(bit_size, 64); @@ -66,7 +66,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class { for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| { if (Type.fromInterned(field_ty).bitSize(zcu) > 32 or - Type.unionFieldNormalAlignment(union_obj, @intCast(field_index), zcu).compare(.gt, .@"32")) + ty.fieldAlignment(field_index, zcu).compare(.gt, .@"32")) { return Class.arrSize(bit_size, 64); } @@ -141,7 +141,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u32 { var count: u32 = 0; var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i, zcu); + const field_ty = ty.fieldType(i, zcu); const field_count = countFloats(field_ty, zcu, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 2c6535ac4f..7028844779 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -4576,7 +4576,7 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void { const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { const src_mcv = try func.resolveInst(operand); const struct_ty = func.typeOf(operand); - const field_ty = struct_ty.structFieldType(index, zcu); + const field_ty = struct_ty.fieldType(index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none; const field_off: u32 = switch (struct_ty.containerLayout(zcu)) { @@ -7882,7 +7882,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void { const elem_i: u32 = @intCast(elem_i_usize); if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i, zcu); + const elem_ty = result_ty.fieldType(elem_i, zcu); const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu)); if (elem_bit_size > 64) { return func.fail( @@ -7916,7 +7916,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void { } else for (elements, 0..) 
|elem, elem_i| { if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i, zcu); + const elem_ty = result_ty.fieldType(elem_i, zcu); const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu)); const elem_mcv = try func.resolveInst(elem); try func.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, elem_mcv); diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 36e72dd8da..5e8f57cc0b 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -26,7 +26,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class { var any_fp = false; var field_count: usize = 0; for (0..ty.structFieldCount(zcu)) |field_index| { - const field_ty = ty.structFieldType(field_index, zcu); + const field_ty = ty.fieldType(field_index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; if (field_ty.isRuntimeFloat()) any_fp = true diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 0d59814fda..a5bd92a9d6 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -3980,10 +3980,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(rwo.reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0, zcu); + const wrapped_ty = ty.fieldType(0, zcu); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); - const overflow_bit_ty = ty.structFieldType(1, zcu); + const overflow_bit_ty = ty.fieldType(1, zcu); const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, zcu))); const cond_reg = try self.register_manager.allocReg(null, gp); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index e728a99d88..49732a387b 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3954,7 +3954,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const struct_ty = func.typeOf(struct_field.struct_operand); const operand = try func.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index, zcu); + const field_ty = struct_ty.fieldType(field_index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); const result: WValue = switch (struct_ty.containerLayout(zcu)) { @@ -5378,7 +5378,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { for (elements, 0..) 
|elem, elem_index| { if (try result_ty.structFieldValueComptime(pt, elem_index) != null) continue; - const elem_ty = result_ty.structFieldType(elem_index, zcu); + const elem_ty = result_ty.fieldType(elem_index, zcu); const field_offset = result_ty.structFieldOffset(elem_index, zcu); _ = try func.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify); prev_field_offset = field_offset; diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index 9a5bbb0ca2..b6e66ad85e 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -108,7 +108,7 @@ pub fn scalarType(ty: Type, zcu: *Zcu) Type { return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), zcu); } else { assert(ty.structFieldCount(zcu) == 1); - return scalarType(ty.structFieldType(0, zcu), zcu); + return scalarType(ty.fieldType(0, zcu), zcu); } }, .Union => { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 94814b70f6..68e3936d8e 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4352,14 +4352,14 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = frame_index }, @intCast(tuple_ty.structFieldOffset(1, zcu)), - tuple_ty.structFieldType(1, zcu), + tuple_ty.fieldType(1, zcu), .{ .eflags = cc }, .{}, ); try self.genSetMem( .{ .frame = frame_index }, @intCast(tuple_ty.structFieldOffset(0, zcu)), - tuple_ty.structFieldType(0, zcu), + tuple_ty.fieldType(0, zcu), partial_mcv, .{}, ); @@ -4392,7 +4392,7 @@ fn genSetFrameTruncatedOverflowCompare( }; defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const ty = tuple_ty.structFieldType(0, zcu); + const ty = tuple_ty.fieldType(0, zcu); const int_info = ty.intInfo(zcu); const hi_bits = (int_info.bits - 1) % 64 + 1; @@ -4450,7 +4450,7 @@ fn genSetFrameTruncatedOverflowCompare( try self.genSetMem( .{ .frame = frame_index }, @intCast(tuple_ty.structFieldOffset(1, zcu)), - tuple_ty.structFieldType(1, zcu), + tuple_ty.fieldType(1, zcu), if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne }, .{}, ); @@ -4637,7 +4637,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = dst_mcv.load_frame.index }, @intCast(tuple_ty.structFieldOffset(0, zcu)), - tuple_ty.structFieldType(0, zcu), + tuple_ty.fieldType(0, zcu), result, .{}, ); @@ -4649,7 +4649,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = dst_mcv.load_frame.index }, @intCast(tuple_ty.structFieldOffset(1, zcu)), - tuple_ty.structFieldType(1, zcu), + tuple_ty.fieldType(1, zcu), .{ .eflags = .ne }, .{}, ); @@ -4761,14 +4761,14 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = dst_mcv.load_frame.index }, @intCast(tuple_ty.structFieldOffset(0, zcu)), - tuple_ty.structFieldType(0, zcu), + tuple_ty.fieldType(0, zcu), .{ .register_pair = .{ .rax, .rdx } }, .{}, ); try self.genSetMem( .{ .frame = dst_mcv.load_frame.index }, @intCast(tuple_ty.structFieldOffset(1, zcu)), - tuple_ty.structFieldType(1, zcu), + tuple_ty.fieldType(1, zcu), .{ .register = tmp_regs[1] }, .{}, ); @@ -4816,14 +4816,14 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = frame_index }, @intCast(tuple_ty.structFieldOffset(0, zcu)), - tuple_ty.structFieldType(0, zcu), + tuple_ty.fieldType(0, zcu), partial_mcv, .{}, ); try self.genSetMem( .{ .frame = frame_index }, 
@intCast(tuple_ty.structFieldOffset(1, zcu)), - tuple_ty.structFieldType(1, zcu), + tuple_ty.fieldType(1, zcu), .{ .immediate = 0 }, // cc being set is impossible .{}, ); @@ -8143,7 +8143,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const container_ty = self.typeOf(operand); const container_rc = self.regClassForType(container_ty); - const field_ty = container_ty.structFieldType(index, zcu); + const field_ty = container_ty.fieldType(index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none; const field_rc = self.regClassForType(field_ty); const field_is_gp = field_rc.supersetOf(abi.RegisterClass.gp); @@ -15273,14 +15273,14 @@ fn genSetMem( try self.genSetMem( base, disp + @as(i32, @intCast(ty.structFieldOffset(0, zcu))), - ty.structFieldType(0, zcu), + ty.fieldType(0, zcu), .{ .register = ro.reg }, opts, ); try self.genSetMem( base, disp + @as(i32, @intCast(ty.structFieldOffset(1, zcu))), - ty.structFieldType(1, zcu), + ty.fieldType(1, zcu), .{ .eflags = ro.eflags }, opts, ); @@ -18150,7 +18150,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const elem_i: u32 = @intCast(elem_i_usize); if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i, zcu); + const elem_ty = result_ty.fieldType(elem_i, zcu); const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu)); if (elem_bit_size > 64) { return self.fail( @@ -18232,7 +18232,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } else for (elements, 0..) |elem, elem_i| { if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i, zcu); + const elem_ty = result_ty.fieldType(elem_i, zcu); const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu)); const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d188435c3e..754286d80b 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -7206,7 +7206,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { var empty = true; for (0..elements.len) |field_index| { if (inst_ty.structFieldIsComptime(field_index, zcu)) continue; - const field_ty = inst_ty.structFieldType(field_index, zcu); + const field_ty = inst_ty.fieldType(field_index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; if (!empty) { @@ -7219,7 +7219,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { empty = true; for (resolved_elements, 0..) 
|element, field_index| { if (inst_ty.structFieldIsComptime(field_index, zcu)) continue; - const field_ty = inst_ty.structFieldType(field_index, zcu); + const field_ty = inst_ty.fieldType(field_index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; if (!empty) try writer.writeAll(", "); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5256442561..2d989f81e2 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2496,16 +2496,10 @@ pub const Object = struct { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; const field_size = field_ty.abiSize(zcu); - const field_align = pt.structFieldAlignment( - struct_type.fieldAlign(ip, field_index), - field_ty, - struct_type.layout, - ); + const field_align = ty.fieldAlignment(field_index, zcu); const field_offset = ty.structFieldOffset(field_index, zcu); - const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); - fields.appendAssumeCapacity(try o.builder.debugMemberType( try o.builder.metadataString(field_name.toSlice(ip)), .none, // File @@ -2598,7 +2592,7 @@ pub const Object = struct { const field_size = Type.fromInterned(field_ty).abiSize(zcu); const field_align: InternPool.Alignment = switch (union_type.flagsUnordered(ip).layout) { .@"packed" => .none, - .auto, .@"extern" => Type.unionFieldNormalAlignment(union_type, @intCast(field_index), zcu), + .auto, .@"extern" => ty.fieldAlignment(field_index, zcu), }; const field_name = tag_type.names.get(ip)[field_index]; @@ -3315,11 +3309,7 @@ pub const Object = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - const field_align = pt.structFieldAlignment( - struct_type.fieldAlign(ip, field_index), - field_ty, - struct_type.layout, - ); + const field_align = t.fieldAlignment(field_index, zcu); const field_ty_align = field_ty.abiAlignment(zcu); if (field_align.compare(.lt, field_ty_align)) struct_kind = .@"packed"; big_align = big_align.max(field_align); @@ -4127,11 +4117,7 @@ pub const Object = struct { var field_it = struct_type.iterateRuntimeOrder(ip); while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - const field_align = pt.structFieldAlignment( - struct_type.fieldAlign(ip, field_index), - field_ty, - struct_type.layout, - ); + const field_align = ty.fieldAlignment(field_index, zcu); big_align = big_align.max(field_align); const prev_offset = offset; offset = field_align.forward(offset); @@ -6528,7 +6514,7 @@ pub const FuncGen = struct { const struct_ty = self.typeOf(struct_field.struct_operand); const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index, zcu); + const field_ty = struct_ty.fieldType(field_index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none; if (!isByRef(struct_ty, zcu)) { @@ -6590,7 +6576,7 @@ pub const FuncGen = struct { const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?; const field_ptr = try self.wip.gepStruct(struct_llvm_ty, struct_llvm_val, llvm_field_index, ""); - const alignment = struct_ty.structFieldAlign(field_index, zcu); + const alignment = struct_ty.fieldAlignment(field_index, zcu); const 
field_ptr_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = .{ .alignment = alignment }, @@ -7471,8 +7457,8 @@ pub const FuncGen = struct { assert(self.err_ret_trace != .none); const field_ptr = try self.wip.gepStruct(struct_llvm_ty, self.err_ret_trace, llvm_field_index, ""); - const field_alignment = struct_ty.structFieldAlign(field_index, zcu); - const field_ty = struct_ty.structFieldType(field_index, zcu); + const field_alignment = struct_ty.fieldAlignment(field_index, zcu); + const field_ty = struct_ty.fieldType(field_index, zcu); const field_ptr_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = .{ .alignment = field_alignment }, @@ -10080,7 +10066,7 @@ pub const FuncGen = struct { const field_ptr_ty = try pt.ptrType(.{ .child = self.typeOf(elem).toIntern(), .flags = .{ - .alignment = result_ty.structFieldAlign(i, zcu), + .alignment = result_ty.fieldAlignment(i, zcu), }, }); try self.store(field_ptr, field_ptr_ty, llvm_elem, .none); @@ -10185,7 +10171,7 @@ pub const FuncGen = struct { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]); const field_llvm_ty = try o.lowerType(field_ty); const field_size = field_ty.abiSize(zcu); - const field_align = Type.unionFieldNormalAlignment(union_obj, extra.field_index, zcu); + const field_align = union_ty.fieldAlignment(extra.field_index, zcu); const llvm_usize = try o.lowerType(Type.usize); const usize_zero = try o.builder.intValue(llvm_usize, 0); @@ -11188,7 +11174,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu var types_len: usize = 0; var types: [8]Builder.Type = undefined; for (0..return_type.structFieldCount(zcu)) |field_index| { - const field_ty = return_type.structFieldType(field_index, zcu); + const field_ty = return_type.fieldType(field_index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; types[types_len] = try o.lowerType(field_ty); types_len += 1; @@ -11444,7 +11430,7 @@ const ParamTypeIterator = struct { .fields => { it.types_len = 0; for (0..ty.structFieldCount(zcu)) |field_index| { - const field_ty = ty.structFieldType(field_index, zcu); + const field_ty = ty.fieldType(field_index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; it.types_buffer[it.types_len] = try it.object.lowerType(field_ty); it.types_len += 1; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 44b48efc43..adab565508 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -5148,7 +5148,7 @@ const NavGen = struct { const object_ty = self.typeOf(struct_field.struct_operand); const object_id = try self.resolve(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = object_ty.structFieldType(field_index, zcu); + const field_ty = object_ty.fieldType(field_index, zcu); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null; diff --git a/src/mutable_value.zig b/src/mutable_value.zig index 5fe43105f4..9fcac259df 100644 --- a/src/mutable_value.zig +++ b/src/mutable_value.zig @@ -223,7 +223,7 @@ pub const MutableValue = union(enum) { @memset(elems[0..@intCast(len_no_sent)], .{ .interned = undef_elem }); }, .Struct => for (elems[0..@intCast(len_no_sent)], 0..) |*mut_elem, i| { - const field_ty = ty.structFieldType(i, zcu).toIntern(); + const field_ty = ty.fieldType(i, zcu).toIntern(); mut_elem.* = .{ .interned = try pt.intern(.{ .undef = field_ty }) }; }, else => unreachable, -- cgit v1.2.3
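For readers migrating code on top of this patch, the sketch below shows the renamed field queries side by side: `Type.fieldType` replaces `structFieldType`, `Type.fieldAlignment` is the non-resolving variant of the old `structFieldAlign`, and `Type.fieldAlignmentSema` takes a `Zcu.PerThread` and replaces the `.sema` uses of `structFieldAlignAdvanced`. The helper function and its import paths are hypothetical illustrations, not part of the patch.

// Hypothetical helper for illustration only; not added by this patch.
// Assumes it lives in src/ next to Type.zig, Zcu.zig, and InternPool.zig.
const Type = @import("Type.zig");
const Zcu = @import("Zcu.zig");
const InternPool = @import("InternPool.zig");

/// Look up one aggregate field's type and alignment during semantic analysis.
fn fieldTypeAndAlign(
    agg_ty: Type,
    index: usize,
    pt: Zcu.PerThread,
) Zcu.SemaError!struct { Type, InternPool.Alignment } {
    const zcu = pt.zcu;
    // Old: agg_ty.structFieldType(index, zcu)
    const field_ty = agg_ty.fieldType(index, zcu);
    // Old: agg_ty.structFieldAlignAdvanced(index, .sema, zcu, pt.tid)
    // The non-resolving form remains available as agg_ty.fieldAlignment(index, zcu).
    const field_align = try agg_ty.fieldAlignmentSema(index, pt);
    return .{ field_ty, field_align };
}

Callers that can already rely on resolved layouts keep using the plain forms (`fieldAlignment`, `structFieldAlignment`), which `catch unreachable` internally, while Sema-time callers thread a `Zcu.PerThread` through the `*Sema` wrappers so type resolution can still be performed on demand.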