| field | value | date |
|---|---|---|
| author | Jacob Young <jacobly0@users.noreply.github.com> | 2024-06-15 16:10:53 -0400 |
| committer | Jacob Young <jacobly0@users.noreply.github.com> | 2024-07-07 22:59:52 -0400 |
| commit | 525f341f33af9b8aad53931fd5511f00a82cb090 | |
| tree | cec3280498c1122858580946ac5e31f8feb807ce /src/codegen | |
| parent | 8f20e81b8816aadd8ceb1b04bd3727cc1d124464 | |
Zcu: introduce `PerThread` and pass to all the functions
Diffstat (limited to 'src/codegen')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/codegen/c.zig | 745 |
| -rw-r--r-- | src/codegen/c/Type.zig | 66 |
| -rw-r--r-- | src/codegen/llvm.zig | 1431 |
| -rw-r--r-- | src/codegen/spirv.zig | 450 |
4 files changed, 1431 insertions(+), 1261 deletions(-)
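Every hunk in the diff that follows (from src/codegen/c.zig) applies the same mechanical change: state that used to hold a `*Zcu` now holds a `Zcu.PerThread`, the `Zcu` itself is reached through `pt.zcu` for read-only lookups such as `intern_pool`, and queries that may intern new values (`intValue`, `intType`, `intern`, `abiAlignment`, `bitSize`, `hasRuntimeBits`, ...) move from the `Zcu` to the `PerThread` handle. A minimal sketch of that shape, using hypothetical names and assumed import paths rather than the actual declarations from this commit:

```zig
// Illustrative sketch only: `ExampleGen` is a stand-in for the codegen state
// in this commit, and the import paths below are assumptions about the source
// layout, not verbatim from the tree.
const Zcu = @import("../Zcu.zig");
const Type = @import("../Type.zig");

// Before this commit the state carried the Zcu directly:
//
//     zcu: *Zcu,
//     ...
//     const bits = ty.bitSize(zcu);
//     const val = try zcu.intValue(Type.usize, 42);
//
// After it, the same state carries a per-thread handle instead:
const ExampleGen = struct {
    pt: Zcu.PerThread,

    fn example(gen: *ExampleGen, ty: Type) !void {
        const pt = gen.pt;
        const zcu = pt.zcu; // the Zcu is still reachable for read-only lookups
        _ = &zcu.intern_pool;
        _ = ty.bitSize(pt); // size/alignment queries now take the PerThread
        _ = try pt.intValue(Type.usize, 42); // interning goes through pt
    }
};
```

The c.zig hunks below are this substitution applied case by case; the llvm.zig and spirv.zig changes listed in the diffstat are not shown here but presumably follow the same pattern.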
diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 2fd3d2b164..2fa8a98cbb 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -333,15 +333,15 @@ pub const Function = struct { const gop = try f.value_map.getOrPut(ref); if (gop.found_existing) return gop.value_ptr.*; - const zcu = f.object.dg.zcu; - const val = (try f.air.value(ref, zcu)).?; + const pt = f.object.dg.pt; + const val = (try f.air.value(ref, pt)).?; const ty = f.typeOf(ref); - const result: CValue = if (lowersToArray(ty, zcu)) result: { + const result: CValue = if (lowersToArray(ty, pt)) result: { const writer = f.object.codeHeaderWriter(); const decl_c_value = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt)), }); const gpa = f.object.dg.gpa; try f.allocs.put(gpa, decl_c_value.new_local, false); @@ -358,7 +358,7 @@ pub const Function = struct { } fn wantSafety(f: *Function) bool { - return switch (f.object.dg.zcu.optimizeMode()) { + return switch (f.object.dg.pt.zcu.optimizeMode()) { .Debug, .ReleaseSafe => true, .ReleaseFast, .ReleaseSmall => false, }; @@ -379,7 +379,7 @@ pub const Function = struct { fn allocLocal(f: *Function, inst: ?Air.Inst.Index, ty: Type) !CValue { return f.allocAlignedLocal(inst, .{ .ctype = try f.ctypeFromType(ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt)), }); } @@ -500,7 +500,8 @@ pub const Function = struct { fn getLazyFnName(f: *Function, key: LazyFnKey, data: LazyFnValue.Data) ![]const u8 { const gpa = f.object.dg.gpa; - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ctype_pool = &f.object.dg.ctype_pool; const gop = try f.lazy_fns.getOrPut(gpa, key); @@ -539,13 +540,11 @@ pub const Function = struct { } fn typeOf(f: *Function, inst: Air.Inst.Ref) Type { - const zcu = f.object.dg.zcu; - return f.air.typeOf(inst, &zcu.intern_pool); + return f.air.typeOf(inst, &f.object.dg.pt.zcu.intern_pool); } fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type { - const zcu = f.object.dg.zcu; - return f.air.typeOfIndex(inst, &zcu.intern_pool); + return f.air.typeOfIndex(inst, &f.object.dg.pt.zcu.intern_pool); } fn copyCValue(f: *Function, ctype: CType, dst: CValue, src: CValue) !void { @@ -608,7 +607,7 @@ pub const Object = struct { /// This data is available both when outputting .c code and when outputting an .h file. pub const DeclGen = struct { gpa: mem.Allocator, - zcu: *Zcu, + pt: Zcu.PerThread, mod: *Module, pass: Pass, is_naked_fn: bool, @@ -634,7 +633,7 @@ pub const DeclGen = struct { fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); - const zcu = dg.zcu; + const zcu = dg.pt.zcu; const decl_index = dg.pass.decl; const decl = zcu.declPtr(decl_index); const src_loc = decl.navSrcLoc(zcu); @@ -648,7 +647,8 @@ pub const DeclGen = struct { anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const zcu = dg.zcu; + const pt = dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ctype_pool = &dg.ctype_pool; const decl_val = Value.fromInterned(anon_decl.val); @@ -656,7 +656,7 @@ pub const DeclGen = struct { // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. 
const ptr_ty = Type.fromInterned(anon_decl.orig_ty); - if (ptr_ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(zcu)) { + if (ptr_ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(pt)) { return dg.writeCValue(writer, .{ .undef = ptr_ty }); } @@ -696,7 +696,7 @@ pub const DeclGen = struct { // alignment. If there is already an entry, keep the greater alignment. const explicit_alignment = ptr_type.flags.alignment; if (explicit_alignment != .none) { - const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(zcu); + const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(pt); if (explicit_alignment.order(abi_alignment).compare(.gt)) { const aligned_gop = try dg.aligned_anon_decls.getOrPut(dg.gpa, anon_decl.val); aligned_gop.value_ptr.* = if (aligned_gop.found_existing) @@ -713,15 +713,16 @@ pub const DeclGen = struct { decl_index: InternPool.DeclIndex, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const zcu = dg.zcu; + const pt = dg.pt; + const zcu = pt.zcu; const ctype_pool = &dg.ctype_pool; const decl = zcu.declPtr(decl_index); assert(decl.has_tv); // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. const decl_ty = decl.typeOf(zcu); - const ptr_ty = try decl.declPtrType(zcu); - if (!decl_ty.isFnOrHasRuntimeBits(zcu)) { + const ptr_ty = try decl.declPtrType(pt); + if (!decl_ty.isFnOrHasRuntimeBits(pt)) { return dg.writeCValue(writer, .{ .undef = ptr_ty }); } @@ -756,12 +757,13 @@ pub const DeclGen = struct { derivation: Value.PointerDeriveStep, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const zcu = dg.zcu; + const pt = dg.pt; + const zcu = pt.zcu; switch (derivation) { .comptime_alloc_ptr, .comptime_field_ptr => unreachable, .int => |int| { const ptr_ctype = try dg.ctypeFromType(int.ptr_ty, .complete); - const addr_val = try zcu.intValue(Type.usize, int.addr); + const addr_val = try pt.intValue(Type.usize, int.addr); try writer.writeByte('('); try dg.renderCType(writer, ptr_ctype); try writer.print("){x}", .{try dg.fmtIntLiteral(addr_val, .Other)}); @@ -777,12 +779,12 @@ pub const DeclGen = struct { }, .field_ptr => |field| { - const parent_ptr_ty = try field.parent.ptrType(zcu); + const parent_ptr_ty = try field.parent.ptrType(pt); // Ensure complete type definition is available before accessing fields. _ = try dg.ctypeFromType(parent_ptr_ty.childType(zcu), .complete); - switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, zcu)) { + switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, pt)) { .begin => { const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete); try writer.writeByte('('); @@ -801,7 +803,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderCType(writer, ptr_ctype); try writer.writeByte(')'); - const offset_val = try zcu.intValue(Type.usize, byte_offset); + const offset_val = try pt.intValue(Type.usize, byte_offset); try writer.writeAll("((char *)"); try dg.renderPointer(writer, field.parent.*, location); try writer.print(" + {})", .{try dg.fmtIntLiteral(offset_val, .Other)}); @@ -809,7 +811,7 @@ pub const DeclGen = struct { } }, - .elem_ptr => |elem| if (!(try elem.parent.ptrType(zcu)).childType(zcu).hasRuntimeBits(zcu)) { + .elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(pt)) { // Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer. 
const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete); try writer.writeByte('('); @@ -817,11 +819,11 @@ pub const DeclGen = struct { try writer.writeByte(')'); try dg.renderPointer(writer, elem.parent.*, location); } else { - const index_val = try zcu.intValue(Type.usize, elem.elem_idx); + const index_val = try pt.intValue(Type.usize, elem.elem_idx); // We want to do pointer arithmetic on a pointer to the element type. // We might have a pointer-to-array. In this case, we must cast first. const result_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete); - const parent_ctype = try dg.ctypeFromType(try elem.parent.ptrType(zcu), .complete); + const parent_ctype = try dg.ctypeFromType(try elem.parent.ptrType(pt), .complete); if (result_ctype.eql(parent_ctype)) { // The pointer already has an appropriate type - just do the arithmetic. try writer.writeByte('('); @@ -846,7 +848,7 @@ pub const DeclGen = struct { if (oac.byte_offset == 0) { try dg.renderPointer(writer, oac.parent.*, location); } else { - const offset_val = try zcu.intValue(Type.usize, oac.byte_offset); + const offset_val = try pt.intValue(Type.usize, oac.byte_offset); try writer.writeAll("((char *)"); try dg.renderPointer(writer, oac.parent.*, location); try writer.print(" + {})", .{try dg.fmtIntLiteral(offset_val, .Other)}); @@ -856,8 +858,7 @@ pub const DeclGen = struct { } fn renderErrorName(dg: *DeclGen, writer: anytype, err_name: InternPool.NullTerminatedString) !void { - const zcu = dg.zcu; - const ip = &zcu.intern_pool; + const ip = &dg.pt.zcu.intern_pool; try writer.print("zig_error_{}", .{fmtIdent(err_name.toSlice(ip))}); } @@ -867,7 +868,8 @@ pub const DeclGen = struct { val: Value, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const zcu = dg.zcu; + const pt = dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const target = &dg.mod.resolved_target.result; const ctype_pool = &dg.ctype_pool; @@ -927,7 +929,7 @@ pub const DeclGen = struct { try writer.writeAll("(("); try dg.renderCType(writer, ctype); try writer.print("){x})", .{try dg.fmtIntLiteral( - try zcu.intValue(Type.usize, val.toUnsignedInt(zcu)), + try pt.intValue(Type.usize, val.toUnsignedInt(pt)), .Other, )}); }, @@ -974,10 +976,10 @@ pub const DeclGen = struct { .enum_tag => |enum_tag| try dg.renderValue(writer, Value.fromInterned(enum_tag.int), location), .float => { const bits = ty.floatBits(target.*); - const f128_val = val.toFloat(f128, zcu); + const f128_val = val.toFloat(f128, pt); // All unsigned ints matching float types are pre-allocated. 
- const repr_ty = zcu.intType(.unsigned, bits) catch unreachable; + const repr_ty = pt.intType(.unsigned, bits) catch unreachable; assert(bits <= 128); var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined; @@ -988,10 +990,10 @@ pub const DeclGen = struct { }; switch (bits) { - 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, zcu)))), - 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, zcu)))), - 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, zcu)))), - 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, zcu)))), + 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, pt)))), + 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, pt)))), + 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, pt)))), + 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, pt)))), 128 => repr_val_big.set(@as(u128, @bitCast(f128_val))), else => unreachable, } @@ -1002,10 +1004,10 @@ pub const DeclGen = struct { try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { - 16 => try writer.print("{x}", .{val.toFloat(f16, zcu)}), - 32 => try writer.print("{x}", .{val.toFloat(f32, zcu)}), - 64 => try writer.print("{x}", .{val.toFloat(f64, zcu)}), - 80 => try writer.print("{x}", .{val.toFloat(f80, zcu)}), + 16 => try writer.print("{x}", .{val.toFloat(f16, pt)}), + 32 => try writer.print("{x}", .{val.toFloat(f32, pt)}), + 64 => try writer.print("{x}", .{val.toFloat(f64, pt)}), + 80 => try writer.print("{x}", .{val.toFloat(f80, pt)}), 128 => try writer.print("{x}", .{f128_val}), else => unreachable, } @@ -1045,10 +1047,10 @@ pub const DeclGen = struct { if (std.math.isNan(f128_val)) switch (bits) { // We only actually need to pass the significand, but it will get // properly masked anyway, so just pass the whole value. 
- 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, zcu)))}), - 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, zcu)))}), - 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, zcu)))}), - 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, zcu)))}), + 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, pt)))}), + 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, pt)))}), + 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, pt)))}), + 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, pt)))}), 128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}), else => unreachable, }; @@ -1056,7 +1058,7 @@ pub const DeclGen = struct { empty = false; } try writer.print("{x}", .{try dg.fmtIntLiteral( - try zcu.intValue_big(repr_ty, repr_val_big.toConst()), + try pt.intValue_big(repr_ty, repr_val_big.toConst()), location, )}); if (!empty) try writer.writeByte(')'); @@ -1084,7 +1086,7 @@ pub const DeclGen = struct { .ptr => { var arena = std.heap.ArenaAllocator.init(zcu.gpa); defer arena.deinit(); - const derivation = try val.pointerDerivation(arena.allocator(), zcu); + const derivation = try val.pointerDerivation(arena.allocator(), pt); try dg.renderPointer(writer, derivation, location); }, .opt => |opt| switch (ctype.info(ctype_pool)) { @@ -1167,15 +1169,15 @@ pub const DeclGen = struct { try literal.start(); var index: usize = 0; while (index < ai.len) : (index += 1) { - const elem_val = try val.elemValue(zcu, index); + const elem_val = try val.elemValue(pt, index); const elem_val_u8: u8 = if (elem_val.isUndef(zcu)) undefPattern(u8) else - @intCast(elem_val.toUnsignedInt(zcu)); + @intCast(elem_val.toUnsignedInt(pt)); try literal.writeChar(elem_val_u8); } if (ai.sentinel) |s| { - const s_u8: u8 = @intCast(s.toUnsignedInt(zcu)); + const s_u8: u8 = @intCast(s.toUnsignedInt(pt)); if (s_u8 != 0) try literal.writeChar(s_u8); } try literal.end(); @@ -1184,7 +1186,7 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(zcu, index); + const elem_val = try val.elemValue(pt, index); try dg.renderValue(writer, elem_val, initializer_type); } if (ai.sentinel) |s| { @@ -1207,13 +1209,13 @@ pub const DeclGen = struct { const comptime_val = tuple.values.get(ip)[field_index]; if (comptime_val != .none) continue; const field_ty = Type.fromInterned(tuple.types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!empty) try writer.writeByte(','); const field_val = Value.fromInterned( switch (ip.indexToKey(val.toIntern()).aggregate.storage) { - .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = field_ty.toIntern(), .storage = .{ .u64 = bytes.at(field_index, ip) }, } }), @@ -1242,12 +1244,12 @@ pub const DeclGen = struct { var need_comma = false; while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (need_comma) try writer.writeByte(','); need_comma = true; const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) { - .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = 
.{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = field_ty.toIntern(), .storage = .{ .u64 = bytes.at(field_index, ip) }, } }), @@ -1262,14 +1264,14 @@ pub const DeclGen = struct { const int_info = ty.intInfo(zcu); const bits = Type.smallestUnsignedBits(int_info.bits - 1); - const bit_offset_ty = try zcu.intType(.unsigned, bits); + const bit_offset_ty = try pt.intType(.unsigned, bits); var bit_offset: u64 = 0; var eff_num_fields: usize = 0; for (0..loaded_struct.field_types.len) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; eff_num_fields += 1; } @@ -1277,7 +1279,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderUndefValue(writer, ty, location); try writer.writeByte(')'); - } else if (ty.bitSize(zcu) > 64) { + } else if (ty.bitSize(pt) > 64) { // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) var num_or = eff_num_fields - 1; while (num_or > 0) : (num_or -= 1) { @@ -1290,10 +1292,10 @@ pub const DeclGen = struct { var needs_closing_paren = false; for (0..loaded_struct.field_types.len) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) { - .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = field_ty.toIntern(), .storage = .{ .u64 = bytes.at(field_index, ip) }, } }), @@ -1307,7 +1309,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); try writer.writeAll(", "); - try dg.renderValue(writer, try zcu.intValue(bit_offset_ty, bit_offset), .FunctionArgument); + try dg.renderValue(writer, try pt.intValue(bit_offset_ty, bit_offset), .FunctionArgument); try writer.writeByte(')'); } else { try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); @@ -1316,7 +1318,7 @@ pub const DeclGen = struct { if (needs_closing_paren) try writer.writeByte(')'); if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); - bit_offset += field_ty.bitSize(zcu); + bit_offset += field_ty.bitSize(pt); needs_closing_paren = true; eff_index += 1; } @@ -1326,7 +1328,7 @@ pub const DeclGen = struct { var empty = true; for (0..loaded_struct.field_types.len) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!empty) try writer.writeAll(" | "); try writer.writeByte('('); @@ -1334,7 +1336,7 @@ pub const DeclGen = struct { try writer.writeByte(')'); const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) { - .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{ + .bytes => |bytes| try pt.intern(.{ .int = .{ .ty = field_ty.toIntern(), .storage = .{ .u64 = bytes.at(field_index, ip) }, } }), @@ -1345,12 +1347,12 @@ pub const DeclGen = struct { if (bit_offset != 0) { try dg.renderValue(writer, Value.fromInterned(field_val), .Other); try writer.writeAll(" << "); - try dg.renderValue(writer, try zcu.intValue(bit_offset_ty, bit_offset), .FunctionArgument); + try dg.renderValue(writer, try 
pt.intValue(bit_offset_ty, bit_offset), .FunctionArgument); } else { try dg.renderValue(writer, Value.fromInterned(field_val), .Other); } - bit_offset += field_ty.bitSize(zcu); + bit_offset += field_ty.bitSize(pt); empty = false; } try writer.writeByte(')'); @@ -1363,7 +1365,7 @@ pub const DeclGen = struct { .un => |un| { const loaded_union = ip.loadUnionType(ty.toIntern()); if (un.tag == .none) { - const backing_ty = try ty.unionBackingType(zcu); + const backing_ty = try ty.unionBackingType(pt); switch (loaded_union.getLayout(ip)) { .@"packed" => { if (!location.isInitializer()) { @@ -1378,7 +1380,7 @@ pub const DeclGen = struct { return dg.fail("TODO: C backend: implement extern union backing type rendering in static initializers", .{}); } - const ptr_ty = try zcu.singleConstPtrType(ty); + const ptr_ty = try pt.singleConstPtrType(ty); try writer.writeAll("*(("); try dg.renderType(writer, ptr_ty); try writer.writeAll(")("); @@ -1400,7 +1402,7 @@ pub const DeclGen = struct { const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index]; if (loaded_union.getLayout(ip) == .@"packed") { - if (field_ty.hasRuntimeBits(zcu)) { + if (field_ty.hasRuntimeBits(pt)) { if (field_ty.isPtrAtRuntime(zcu)) { try writer.writeByte('('); try dg.renderCType(writer, ctype); @@ -1431,7 +1433,7 @@ pub const DeclGen = struct { ), .payload => { try writer.writeByte('{'); - if (field_ty.hasRuntimeBits(zcu)) { + if (field_ty.hasRuntimeBits(pt)) { try writer.print(" .{ } = ", .{fmtIdent(field_name.toSlice(ip))}); try dg.renderValue( writer, @@ -1443,7 +1445,7 @@ pub const DeclGen = struct { const inner_field_ty = Type.fromInterned( loaded_union.field_types.get(ip)[inner_field_index], ); - if (!inner_field_ty.hasRuntimeBits(zcu)) continue; + if (!inner_field_ty.hasRuntimeBits(pt)) continue; try dg.renderUndefValue(writer, inner_field_ty, initializer_type); break; } @@ -1464,7 +1466,8 @@ pub const DeclGen = struct { ty: Type, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const zcu = dg.zcu; + const pt = dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const target = &dg.mod.resolved_target.result; const ctype_pool = &dg.ctype_pool; @@ -1490,7 +1493,7 @@ pub const DeclGen = struct { => { const bits = ty.floatBits(target.*); // All unsigned ints matching float types are pre-allocated. 
- const repr_ty = zcu.intType(.unsigned, bits) catch unreachable; + const repr_ty = dg.pt.intType(.unsigned, bits) catch unreachable; try writer.writeAll("zig_make_"); try dg.renderTypeForBuiltinFnName(writer, ty); @@ -1515,14 +1518,14 @@ pub const DeclGen = struct { .error_set_type, .inferred_error_set_type, => return writer.print("{x}", .{ - try dg.fmtIntLiteral(try zcu.undefValue(ty), location), + try dg.fmtIntLiteral(try pt.undefValue(ty), location), }), .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .One, .Many, .C => { try writer.writeAll("(("); try dg.renderCType(writer, ctype); return writer.print("){x})", .{ - try dg.fmtIntLiteral(try zcu.undefValue(Type.usize), .Other), + try dg.fmtIntLiteral(try pt.undefValue(Type.usize), .Other), }); }, .Slice => { @@ -1536,7 +1539,7 @@ pub const DeclGen = struct { const ptr_ty = ty.slicePtrFieldType(zcu); try dg.renderType(writer, ptr_ty); return writer.print("){x}, {0x}}}", .{ - try dg.fmtIntLiteral(try zcu.undefValue(Type.usize), .Other), + try dg.fmtIntLiteral(try dg.pt.undefValue(Type.usize), .Other), }); }, }, @@ -1591,7 +1594,7 @@ pub const DeclGen = struct { var need_comma = false; while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (need_comma) try writer.writeByte(','); need_comma = true; @@ -1600,7 +1603,7 @@ pub const DeclGen = struct { return writer.writeByte('}'); }, .@"packed" => return writer.print("{x}", .{ - try dg.fmtIntLiteral(try zcu.undefValue(ty), .Other), + try dg.fmtIntLiteral(try pt.undefValue(ty), .Other), }), } }, @@ -1616,7 +1619,7 @@ pub const DeclGen = struct { for (0..anon_struct_info.types.len) |field_index| { if (anon_struct_info.values.get(ip)[field_index] != .none) continue; const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (need_comma) try writer.writeByte(','); need_comma = true; @@ -1654,7 +1657,7 @@ pub const DeclGen = struct { const inner_field_ty = Type.fromInterned( loaded_union.field_types.get(ip)[inner_field_index], ); - if (!inner_field_ty.hasRuntimeBits(zcu)) continue; + if (!inner_field_ty.hasRuntimeBits(pt)) continue; try dg.renderUndefValue( writer, inner_field_ty, @@ -1670,7 +1673,7 @@ pub const DeclGen = struct { if (has_tag) try writer.writeByte('}'); }, .@"packed" => return writer.print("{x}", .{ - try dg.fmtIntLiteral(try zcu.undefValue(ty), .Other), + try dg.fmtIntLiteral(try pt.undefValue(ty), .Other), }), } }, @@ -1775,7 +1778,7 @@ pub const DeclGen = struct { }, }, ) !void { - const zcu = dg.zcu; + const zcu = dg.pt.zcu; const ip = &zcu.intern_pool; const fn_ty = fn_val.typeOf(zcu); @@ -1856,7 +1859,7 @@ pub const DeclGen = struct { fn ctypeFromType(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType { defer std.debug.assert(dg.scratch.items.len == 0); - return dg.ctype_pool.fromType(dg.gpa, &dg.scratch, ty, dg.zcu, dg.mod, kind); + return dg.ctype_pool.fromType(dg.gpa, &dg.scratch, ty, dg.pt, dg.mod, kind); } fn byteSize(dg: *DeclGen, ctype: CType) u64 { @@ -1879,8 +1882,8 @@ pub const DeclGen = struct { } fn renderCType(dg: *DeclGen, w: anytype, ctype: CType) error{OutOfMemory}!void { - _ = try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{}); - try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, 
.suffix, .{}); + _ = try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.pt.zcu, w, ctype, .suffix, .{}); + try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.pt.zcu, w, ctype, .suffix, .{}); } const IntCastContext = union(enum) { @@ -1904,18 +1907,18 @@ pub const DeclGen = struct { } }; fn intCastIsNoop(dg: *DeclGen, dest_ty: Type, src_ty: Type) bool { - const zcu = dg.zcu; - const dest_bits = dest_ty.bitSize(zcu); - const dest_int_info = dest_ty.intInfo(zcu); + const pt = dg.pt; + const dest_bits = dest_ty.bitSize(pt); + const dest_int_info = dest_ty.intInfo(pt.zcu); - const src_is_ptr = src_ty.isPtrAtRuntime(zcu); + const src_is_ptr = src_ty.isPtrAtRuntime(pt.zcu); const src_eff_ty: Type = if (src_is_ptr) switch (dest_int_info.signedness) { .unsigned => Type.usize, .signed => Type.isize, } else src_ty; - const src_bits = src_eff_ty.bitSize(zcu); - const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null; + const src_bits = src_eff_ty.bitSize(pt); + const src_int_info = if (src_eff_ty.isAbiInt(pt.zcu)) src_eff_ty.intInfo(pt.zcu) else null; if (dest_bits <= 64 and src_bits <= 64) { const needs_cast = src_int_info == null or (toCIntBits(dest_int_info.bits) != toCIntBits(src_int_info.?.bits) or @@ -1944,8 +1947,9 @@ pub const DeclGen = struct { src_ty: Type, location: ValueRenderLocation, ) !void { - const zcu = dg.zcu; - const dest_bits = dest_ty.bitSize(zcu); + const pt = dg.pt; + const zcu = pt.zcu; + const dest_bits = dest_ty.bitSize(pt); const dest_int_info = dest_ty.intInfo(zcu); const src_is_ptr = src_ty.isPtrAtRuntime(zcu); @@ -1954,7 +1958,7 @@ pub const DeclGen = struct { .signed => Type.isize, } else src_ty; - const src_bits = src_eff_ty.bitSize(zcu); + const src_bits = src_eff_ty.bitSize(pt); const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null; if (dest_bits <= 64 and src_bits <= 64) { const needs_cast = src_int_info == null or @@ -2035,7 +2039,7 @@ pub const DeclGen = struct { qualifiers, CType.AlignAs.fromAlignment(.{ .@"align" = alignment, - .abi = ty.abiAlignment(dg.zcu), + .abi = ty.abiAlignment(dg.pt), }), ); } @@ -2048,6 +2052,7 @@ pub const DeclGen = struct { qualifiers: CQualifiers, alignas: CType.AlignAs, ) error{ OutOfMemory, AnalysisFail }!void { + const zcu = dg.pt.zcu; switch (alignas.abiOrder()) { .lt => try w.print("zig_under_align({}) ", .{alignas.toByteUnits()}), .eq => {}, @@ -2055,10 +2060,10 @@ pub const DeclGen = struct { } try w.print("{}", .{ - try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, qualifiers), + try renderTypePrefix(dg.pass, &dg.ctype_pool, zcu, w, ctype, .suffix, qualifiers), }); try dg.writeName(w, name); - try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{}); + try renderTypeSuffix(dg.pass, &dg.ctype_pool, zcu, w, ctype, .suffix, .{}); } fn writeName(dg: *DeclGen, w: anytype, c_value: CValue) !void { @@ -2162,7 +2167,7 @@ pub const DeclGen = struct { decl_index: InternPool.DeclIndex, variable: InternPool.Key.Variable, ) !void { - const zcu = dg.zcu; + const zcu = dg.pt.zcu; const decl = zcu.declPtr(decl_index); const fwd = dg.fwdDeclWriter(); try fwd.writeAll(if (variable.is_extern) "zig_extern " else "static "); @@ -2180,7 +2185,7 @@ pub const DeclGen = struct { } fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex) !void { - const zcu = dg.zcu; + const zcu = dg.pt.zcu; const ip = &zcu.intern_pool; const decl = zcu.declPtr(decl_index); @@ -2236,15 +2241,15 @@ pub const DeclGen = struct { .bits => {}, } - 
const zcu = dg.zcu; - const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{ + const pt = dg.pt; + const int_info = if (ty.isAbiInt(pt.zcu)) ty.intInfo(pt.zcu) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @as(u16, @intCast(ty.bitSize(zcu))), + .bits = @as(u16, @intCast(ty.bitSize(pt))), }; if (is_big) try writer.print(", {}", .{int_info.signedness == .signed}); try writer.print(", {}", .{try dg.fmtIntLiteral( - try zcu.intValue(if (is_big) Type.u16 else Type.u8, int_info.bits), + try pt.intValue(if (is_big) Type.u16 else Type.u8, int_info.bits), .FunctionArgument, )}); } @@ -2254,7 +2259,7 @@ pub const DeclGen = struct { val: Value, loc: ValueRenderLocation, ) !std.fmt.Formatter(formatIntLiteral) { - const zcu = dg.zcu; + const zcu = dg.pt.zcu; const kind = loc.toCTypeKind(); const ty = val.typeOf(zcu); return std.fmt.Formatter(formatIntLiteral){ .data = .{ @@ -2616,7 +2621,8 @@ pub fn genGlobalAsm(zcu: *Zcu, writer: anytype) !void { } pub fn genErrDecls(o: *Object) !void { - const zcu = o.dg.zcu; + const pt = o.dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const writer = o.writer(); @@ -2628,7 +2634,7 @@ pub fn genErrDecls(o: *Object) !void { for (zcu.global_error_set.keys()[1..], 1..) |name_nts, value| { const name = name_nts.toSlice(ip); max_name_len = @max(name.len, max_name_len); - const err_val = try zcu.intern(.{ .err = .{ + const err_val = try pt.intern(.{ .err = .{ .ty = .anyerror_type, .name = name_nts, } }); @@ -2649,12 +2655,12 @@ pub fn genErrDecls(o: *Object) !void { @memcpy(name_buf[name_prefix.len..][0..name_slice.len], name_slice); const identifier = name_buf[0 .. name_prefix.len + name_slice.len]; - const name_ty = try zcu.arrayType(.{ + const name_ty = try pt.arrayType(.{ .len = name_slice.len, .child = .u8_type, .sentinel = .zero_u8, }); - const name_val = try zcu.intern(.{ .aggregate = .{ + const name_val = try pt.intern(.{ .aggregate = .{ .ty = name_ty.toIntern(), .storage = .{ .bytes = name.toString() }, } }); @@ -2673,7 +2679,7 @@ pub fn genErrDecls(o: *Object) !void { try writer.writeAll(";\n"); } - const name_array_ty = try zcu.arrayType(.{ + const name_array_ty = try pt.arrayType(.{ .len = zcu.global_error_set.count(), .child = .slice_const_u8_sentinel_0_type, }); @@ -2693,14 +2699,15 @@ pub fn genErrDecls(o: *Object) !void { if (value != 0) try writer.writeByte(','); try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{ fmtIdent(name), - try o.dg.fmtIntLiteral(try zcu.intValue(Type.usize, name.len), .StaticInitializer), + try o.dg.fmtIntLiteral(try pt.intValue(Type.usize, name.len), .StaticInitializer), }); } try writer.writeAll("};\n"); } pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFnMap.Entry) !void { - const zcu = o.dg.zcu; + const pt = o.dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ctype_pool = &o.dg.ctype_pool; const w = o.writer(); @@ -2721,20 +2728,20 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn for (0..tag_names.len) |tag_index| { const tag_name = tag_names.get(ip)[tag_index]; const tag_name_len = tag_name.length(ip); - const tag_val = try zcu.enumValueFieldIndex(enum_ty, @intCast(tag_index)); + const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index)); - const name_ty = try zcu.arrayType(.{ + const name_ty = try pt.arrayType(.{ .len = tag_name_len, .child = .u8_type, .sentinel = .zero_u8, }); - const name_val = try zcu.intern(.{ .aggregate = .{ + const name_val = try pt.intern(.{ .aggregate = 
.{ .ty = name_ty.toIntern(), .storage = .{ .bytes = tag_name.toString() }, } }); try w.print(" case {}: {{\n static ", .{ - try o.dg.fmtIntLiteral(try tag_val.intFromEnum(enum_ty, zcu), .Other), + try o.dg.fmtIntLiteral(try tag_val.intFromEnum(enum_ty, pt), .Other), }); try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, .none, .complete); try w.writeAll(" = "); @@ -2743,7 +2750,7 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn try o.dg.renderType(w, name_slice_ty); try w.print("){{{}, {}}};\n", .{ fmtIdent("name"), - try o.dg.fmtIntLiteral(try zcu.intValue(Type.usize, tag_name_len), .Other), + try o.dg.fmtIntLiteral(try pt.intValue(Type.usize, tag_name_len), .Other), }); try w.writeAll(" }\n"); @@ -2788,7 +2795,7 @@ pub fn genFunc(f: *Function) !void { defer tracy.end(); const o = &f.object; - const zcu = o.dg.zcu; + const zcu = o.dg.pt.zcu; const gpa = o.dg.gpa; const decl_index = o.dg.pass.decl; const decl = zcu.declPtr(decl_index); @@ -2879,12 +2886,13 @@ pub fn genDecl(o: *Object) !void { const tracy = trace(@src()); defer tracy.end(); - const zcu = o.dg.zcu; + const pt = o.dg.pt; + const zcu = pt.zcu; const decl_index = o.dg.pass.decl; const decl = zcu.declPtr(decl_index); const decl_ty = decl.typeOf(zcu); - if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return; + if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return; if (decl.val.getExternFunc(zcu)) |_| { const fwd = o.dg.fwdDeclWriter(); try fwd.writeAll("zig_extern "); @@ -2928,7 +2936,7 @@ pub fn genDeclValue( alignment: Alignment, @"linksection": InternPool.OptionalNullTerminatedString, ) !void { - const zcu = o.dg.zcu; + const zcu = o.dg.pt.zcu; const ty = val.typeOf(zcu); const fwd = o.dg.fwdDeclWriter(); @@ -2946,7 +2954,7 @@ pub fn genDeclValue( } pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const u32) !void { - const zcu = dg.zcu; + const zcu = dg.pt.zcu; const ip = &zcu.intern_pool; const fwd = dg.fwdDeclWriter(); @@ -3088,7 +3096,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con } fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { - const zcu = f.object.dg.zcu; + const zcu = f.object.dg.pt.zcu; const ip = &zcu.intern_pool; const air_tags = f.air.instructions.items(.tag); const air_datas = f.air.instructions.items(.data); @@ -3388,10 +3396,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ } fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3414,13 +3422,14 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; const inst_ty = f.typeOfIndex(inst); const ptr_ty = f.typeOf(bin_op.lhs); - const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu); + const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(pt); const ptr = try 
f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3449,10 +3458,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3475,14 +3484,15 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; const inst_ty = f.typeOfIndex(inst); const slice_ty = f.typeOf(bin_op.lhs); const elem_ty = slice_ty.elemType2(zcu); - const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(zcu); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(pt); const slice = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3505,10 +3515,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const inst_ty = f.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3531,40 +3541,40 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.childType(zcu); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty }; + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(elem_ty, .complete), .alignas = CType.AlignAs.fromAlignment(.{ .@"align" = inst_ty.ptrInfo(zcu).flags.alignment, - .abi = elem_ty.abiAlignment(zcu), + .abi = elem_ty.abiAlignment(pt), }), }); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); - const gpa = f.object.dg.zcu.gpa; - try f.allocs.put(gpa, local.new_local, true); + try f.allocs.put(zcu.gpa, local.new_local, true); return .{ .local_ref = local.new_local }; } fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.childType(zcu); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty }; + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(elem_ty, .complete), .alignas = CType.AlignAs.fromAlignment(.{ .@"align" = inst_ty.ptrInfo(zcu).flags.alignment, - .abi = elem_ty.abiAlignment(zcu), + .abi = elem_ty.abiAlignment(pt), }), }); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); - const gpa = f.object.dg.zcu.gpa; - try f.allocs.put(gpa, 
local.new_local, true); + try f.allocs.put(zcu.gpa, local.new_local, true); return .{ .local_ref = local.new_local }; } @@ -3593,7 +3603,8 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { } fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ptr_ty = f.typeOf(ty_op.operand); @@ -3601,7 +3612,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ptr_info = ptr_scalar_ty.ptrInfo(zcu); const src_ty = Type.fromInterned(ptr_info.child); - if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) { try reap(f, inst, &.{ty_op.operand}); return .none; } @@ -3611,10 +3622,10 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); const is_aligned = if (ptr_info.flags.alignment != .none) - ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte) + ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte) else true; - const is_array = lowersToArray(src_ty, zcu); + const is_array = lowersToArray(src_ty, pt); const need_memcpy = !is_aligned or is_array; const writer = f.object.writer(); @@ -3634,12 +3645,12 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("))"); } else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) { const host_bits: u16 = ptr_info.packed_offset.host_size * 8; - const host_ty = try zcu.intType(.unsigned, host_bits); + const host_ty = try pt.intType(.unsigned, host_bits); - const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); - const bit_offset_val = try zcu.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); + const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); + const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); - const field_ty = try zcu.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(zcu)))); + const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(pt)))); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -3650,9 +3661,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("(("); try f.renderType(writer, field_ty); try writer.writeByte(')'); - const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64; + const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64; if (cant_cast) { - if (field_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (field_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeByte('('); @@ -3680,7 +3691,8 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { } fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const writer = f.object.writer(); const op_inst = un_op.toIndex(); @@ -3695,11 +3707,11 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); var deref = is_ptr; - const is_array = lowersToArray(ret_ty, zcu); + 
const is_array = lowersToArray(ret_ty, pt); const ret_val = if (is_array) ret_val: { const array_local = try f.allocAlignedLocal(inst, .{ .ctype = ret_ctype, - .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(f.object.dg.zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)), }); try writer.writeAll("memcpy("); try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); @@ -3733,7 +3745,8 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { } fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try f.resolveInst(ty_op.operand); @@ -3760,7 +3773,8 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try f.resolveInst(ty_op.operand); @@ -3809,13 +3823,13 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, operand, .FunctionArgument); try v.elem(f, writer); try writer.print(", {x})", .{ - try f.fmtIntLiteral(try inst_scalar_ty.maxIntScalar(zcu, scalar_ty)), + try f.fmtIntLiteral(try inst_scalar_ty.maxIntScalar(pt, scalar_ty)), }); }, .signed => { const c_bits = toCIntBits(scalar_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); - const shift_val = try zcu.intValue(Type.u8, c_bits - dest_bits); + const shift_val = try pt.intValue(Type.u8, c_bits - dest_bits); try writer.writeAll("zig_shr_"); try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty); @@ -3860,7 +3874,8 @@ fn airIntFromBool(f: *Function, inst: Air.Inst.Index) !CValue { } fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; // *a = b; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -3871,7 +3886,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const ptr_val = try f.resolveInst(bin_op.lhs); const src_ty = f.typeOf(bin_op.rhs); - const val_is_undef = if (try f.air.value(bin_op.rhs, zcu)) |v| v.isUndefDeep(zcu) else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |v| v.isUndefDeep(zcu) else false; if (val_is_undef) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -3887,10 +3902,10 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } const is_aligned = if (ptr_info.flags.alignment != .none) - ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte) + ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte) else true; - const is_array = lowersToArray(Type.fromInterned(ptr_info.child), zcu); + const is_array = lowersToArray(Type.fromInterned(ptr_info.child), pt); const need_memcpy = !is_aligned or is_array; const src_val = try f.resolveInst(bin_op.rhs); @@ -3901,7 +3916,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { if (need_memcpy) { // For this memcpy to safely work we need the rhs to have the same // underlying type as the lhs (i.e. they must both be arrays of the same underlying type). 
- assert(src_ty.eql(Type.fromInterned(ptr_info.child), f.object.dg.zcu)); + assert(src_ty.eql(Type.fromInterned(ptr_info.child), zcu)); // If the source is a constant, writeCValue will emit a brace initialization // so work around this by initializing into new local. @@ -3932,12 +3947,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try v.end(f, inst, writer); } else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) { const host_bits = ptr_info.packed_offset.host_size * 8; - const host_ty = try zcu.intType(.unsigned, host_bits); + const host_ty = try pt.intType(.unsigned, host_bits); - const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); - const bit_offset_val = try zcu.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); + const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); + const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset); - const src_bits = src_ty.bitSize(zcu); + const src_bits = src_ty.bitSize(pt); const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb; var stack align(@alignOf(ExpectedContents)) = @@ -3950,7 +3965,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try mask.shiftLeft(&mask, ptr_info.packed_offset.bit_offset); try mask.bitNotWrap(&mask, .unsigned, host_bits); - const mask_val = try zcu.intValue_big(host_ty, mask.toConst()); + const mask_val = try pt.intValue_big(host_ty, mask.toConst()); const v = try Vectorize.start(f, inst, writer, ptr_ty); const a = try Assignment.start(f, writer, src_scalar_ctype); @@ -3967,9 +3982,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(mask_val)}); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeByte('('); - const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64; + const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64; if (cant_cast) { - if (src_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (src_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_make_"); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeAll("(0, "); @@ -4013,7 +4028,8 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4051,7 +4067,8 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: } fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(zcu); @@ -4084,11 +4101,12 @@ fn airBinOp( operation: []const u8, info: BuiltinInfo, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const operand_ty = f.typeOf(bin_op.lhs); const 
scalar_ty = operand_ty.scalarType(zcu); - if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(zcu) > 64) or scalar_ty.isRuntimeFloat()) + if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(pt) > 64) or scalar_ty.isRuntimeFloat()) return try airBinBuiltinCall(f, inst, operation, info); const lhs = try f.resolveInst(bin_op.lhs); @@ -4122,11 +4140,12 @@ fn airCmpOp( data: anytype, operator: std.math.CompareOperator, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const lhs_ty = f.typeOf(data.lhs); const scalar_ty = lhs_ty.scalarType(zcu); - const scalar_bits = scalar_ty.bitSize(zcu); + const scalar_bits = scalar_ty.bitSize(pt); if (scalar_ty.isInt(zcu) and scalar_bits > 64) return airCmpBuiltinCall( f, @@ -4170,12 +4189,13 @@ fn airEquality( inst: Air.Inst.Index, operator: std.math.CompareOperator, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ctype_pool = &f.object.dg.ctype_pool; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const operand_ty = f.typeOf(bin_op.lhs); - const operand_bits = operand_ty.bitSize(zcu); + const operand_bits = operand_ty.bitSize(pt); if (operand_ty.isAbiInt(zcu) and operand_bits > 64) return airCmpBuiltinCall( f, @@ -4256,7 +4276,8 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4267,7 +4288,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(zcu); const elem_ty = inst_scalar_ty.elemType2(zcu); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return f.moveCValue(inst, inst_ty, lhs); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return f.moveCValue(inst, inst_ty, lhs); const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete); const local = try f.allocLocal(inst, inst_ty); @@ -4299,13 +4320,14 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { } fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []const u8) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(zcu); - if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(zcu) > 64) or inst_scalar_ty.isRuntimeFloat()) + if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(pt) > 64) or inst_scalar_ty.isRuntimeFloat()) return try airBinBuiltinCall(f, inst, operation, .none); const lhs = try f.resolveInst(bin_op.lhs); @@ -4339,7 +4361,8 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons } fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4374,7 +4397,8 @@ fn airCall( inst: Air.Inst.Index, modifier: std.builtin.CallModifier, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; // Not even allowed to call panic in a naked 
function. if (f.object.dg.is_naked_fn) return .none; @@ -4398,7 +4422,7 @@ fn airCall( if (!arg_ctype.eql(try f.ctypeFromType(arg_ty, .complete))) { const array_local = try f.allocAlignedLocal(inst, .{ .ctype = arg_ctype, - .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(pt)), }); try writer.writeAll("memcpy("); try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); @@ -4445,7 +4469,7 @@ fn airCall( } else { const local = try f.allocAlignedLocal(inst, .{ .ctype = ret_ctype, - .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)), }); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); @@ -4456,7 +4480,7 @@ fn airCall( callee: { known: { const fn_decl = fn_decl: { - const callee_val = (try f.air.value(pl_op.operand, zcu)) orelse break :known; + const callee_val = (try f.air.value(pl_op.operand, pt)) orelse break :known; break :fn_decl switch (zcu.intern_pool.indexToKey(callee_val.toIntern())) { .extern_func => |extern_func| extern_func.decl, .func => |func| func.owner_decl, @@ -4499,7 +4523,7 @@ fn airCall( try writer.writeAll(");\n"); const result = result: { - if (result_local == .none or !lowersToArray(ret_ty, zcu)) + if (result_local == .none or !lowersToArray(ret_ty, pt)) break :result result_local; const array_local = try f.allocLocal(inst, ret_ty); @@ -4533,7 +4557,8 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { } fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.DbgInlineBlock, ty_pl.payload); const owner_decl = zcu.funcOwnerDeclPtr(extra.data.func); @@ -4545,10 +4570,11 @@ fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue { } fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const name = f.air.nullTerminatedString(pl_op.payload); - const operand_is_undef = if (try f.air.value(pl_op.operand, zcu)) |v| v.isUndefDeep(zcu) else false; + const operand_is_undef = if (try f.air.value(pl_op.operand, pt)) |v| v.isUndefDeep(zcu) else false; if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4564,7 +4590,8 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { } fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const liveness_block = f.liveness.getBlock(inst); const block_id: usize = f.next_block_index; @@ -4572,7 +4599,7 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index) const writer = f.object.writer(); const inst_ty = f.typeOfIndex(inst); - const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu) and !f.liveness.isUnused(inst)) + const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !f.liveness.isUnused(inst)) try f.allocLocal(inst, inst_ty) else .none; @@ -4611,7 +4638,8 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; 
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.TryPtr, ty_pl.payload); const body: []const Air.Inst.Index = @ptrCast(f.air.extra[extra.end..][0..extra.data.body_len]); @@ -4627,13 +4655,14 @@ fn lowerTry( err_union_ty: Type, is_ptr: bool, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const err_union = try f.resolveInst(operand); const inst_ty = f.typeOfIndex(inst); const liveness_condbr = f.liveness.getCondBr(inst); const writer = f.object.writer(); const payload_ty = err_union_ty.errorUnionPayload(zcu); - const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu); + const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt); if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) { try writer.writeAll("if ("); @@ -4725,7 +4754,8 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { } fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const target = &f.object.dg.mod.resolved_target.result; const ctype_pool = &f.object.dg.ctype_pool; const writer = f.object.writer(); @@ -4771,7 +4801,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal try writer.writeAll(", sizeof("); try f.renderType( writer, - if (dest_ty.abiSize(zcu) <= operand_ty.abiSize(zcu)) dest_ty else operand_ty, + if (dest_ty.abiSize(pt) <= operand_ty.abiSize(pt)) dest_ty else operand_ty, ); try writer.writeAll("));\n"); @@ -4805,7 +4835,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal try writer.writeByte('('); } try writer.writeAll("zig_wrap_"); - const info_ty = try zcu.intType(dest_info.signedness, bits); + const info_ty = try pt.intType(dest_info.signedness, bits); if (wrap_ctype) |ctype| try f.object.dg.renderCTypeForBuiltinFnName(writer, ctype) else @@ -4935,7 +4965,8 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const condition = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4979,16 +5010,16 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { for (items) |item| { try f.object.indent_writer.insertNewline(); try writer.writeAll("case "); - const item_value = try f.air.value(item, zcu); - if (item_value.?.getUnsignedInt(zcu)) |item_int| try writer.print("{}\n", .{ - try f.fmtIntLiteral(try zcu.intValue(lowered_condition_ty, item_int)), + const item_value = try f.air.value(item, pt); + if (item_value.?.getUnsignedInt(pt)) |item_int| try writer.print("{}\n", .{ + try f.fmtIntLiteral(try pt.intValue(lowered_condition_ty, item_int)), }) else { if (condition_ty.isPtrAtRuntime(zcu)) { try writer.writeByte('('); try f.renderType(writer, Type.usize); try writer.writeByte(')'); } - try f.object.dg.renderValue(writer, (try f.air.value(item, zcu)).?, .Other); + try f.object.dg.renderValue(writer, (try f.air.value(item, pt)).?, .Other); } try writer.writeByte(':'); } @@ -5026,13 +5057,14 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { } fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool { - const target = &f.object.dg.mod.resolved_target.result; + const dg = f.object.dg; + const target = 
&dg.mod.resolved_target.result; return switch (constraint[0]) { '{' => true, 'i', 'r' => false, 'I' => !target.cpu.arch.isArmOrThumb(), else => switch (value) { - .constant => |val| switch (f.object.dg.zcu.intern_pool.indexToKey(val.toIntern())) { + .constant => |val| switch (dg.pt.zcu.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { .decl => false, else => true, @@ -5045,7 +5077,8 @@ fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool } fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; @@ -5060,10 +5093,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const result = result: { const writer = f.object.writer(); const inst_ty = f.typeOfIndex(inst); - const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) local: { + const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt)) local: { const inst_local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(inst_ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(pt)), }); if (f.wantSafety()) { try f.writeCValue(writer, inst_local, .Other); @@ -5096,7 +5129,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("register "); const output_local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(output_ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(pt)), }); try f.allocs.put(gpa, output_local.new_local, false); try f.object.dg.renderTypeAndName(writer, output_ty, output_local, .{}, .none, .complete); @@ -5131,7 +5164,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { if (is_reg) try writer.writeAll("register "); const input_local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(input_ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(zcu)), + .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(pt)), }); try f.allocs.put(gpa, input_local.new_local, false); try f.object.dg.renderTypeAndName(writer, input_ty, input_local, Const, .none, .complete); @@ -5314,7 +5347,8 @@ fn airIsNull( operator: std.math.CompareOperator, is_ptr: bool, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ctype_pool = &f.object.dg.ctype_pool; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -5369,7 +5403,8 @@ fn airIsNull( } fn airOptionalPayload(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ctype_pool = &f.object.dg.ctype_pool; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -5404,7 +5439,8 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue } fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); 
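Most hunks in this file repeat one mechanical rewrite: the handler prologue swaps `const zcu = f.object.dg.zcu;` for `const pt = f.object.dg.pt;` plus `const zcu = pt.zcu;`, and queries that may resolve layout or intern something (`abiAlignment`, `abiSize`, `bitSize`, `hasRuntimeBits*`, `intValue`, `intType`, ...) now take `pt`, while purely read-only lookups such as `scalarType` and `childType` keep taking `zcu`. A minimal sketch of the rewritten prologue, using a hypothetical handler name and only calls that appear in the surrounding diff (a fragment, not compilable outside the compiler tree):

```zig
// Illustrative only: airExample is not a real handler in this commit.
fn airExample(f: *Function, inst: Air.Inst.Index) !CValue {
    const pt = f.object.dg.pt; // per-thread handle now stored on DeclGen
    const zcu = pt.zcu;        // read-only queries still go through *Zcu
    const inst_ty = f.typeOfIndex(inst);
    const scalar_ty = inst_ty.scalarType(zcu); // unchanged: takes zcu
    if (scalar_ty.isInt(zcu) and scalar_ty.bitSize(pt) > 64) // layout query: now takes pt
        return f.fail("TODO: C backend: example", .{});
    return .none;
}
```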
@@ -5458,21 +5494,22 @@ fn fieldLocation( container_ptr_ty: Type, field_ptr_ty: Type, field_index: u32, - zcu: *Zcu, + pt: Zcu.PerThread, ) union(enum) { begin: void, field: CValue, byte_offset: u64, } { + const zcu = pt.zcu; const ip = &zcu.intern_pool; const container_ty = Type.fromInterned(ip.indexToKey(container_ptr_ty.toIntern()).ptr_type.child); switch (ip.indexToKey(container_ty.toIntern())) { .struct_type => { const loaded_struct = ip.loadStructType(container_ty.toIntern()); return switch (loaded_struct.layout) { - .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu)) + .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(pt)) .begin - else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu)) + else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt)) .{ .byte_offset = loaded_struct.offsets.get(ip)[field_index] } else .{ .field = if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| @@ -5480,16 +5517,16 @@ fn fieldLocation( else .{ .field = field_index } }, .@"packed" => if (field_ptr_ty.ptrInfo(zcu).packed_offset.host_size == 0) - .{ .byte_offset = @divExact(zcu.structPackedFieldBitOffset(loaded_struct, field_index) + + .{ .byte_offset = @divExact(pt.structPackedFieldBitOffset(loaded_struct, field_index) + container_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset, 8) } else .begin, }; }, - .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu)) + .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(pt)) .begin - else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu)) - .{ .byte_offset = container_ty.structFieldOffset(field_index, zcu) } + else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt)) + .{ .byte_offset = container_ty.structFieldOffset(field_index, pt) } else .{ .field = if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name| .{ .identifier = field_name.toSlice(ip) } @@ -5500,8 +5537,8 @@ fn fieldLocation( switch (loaded_union.getLayout(ip)) { .auto, .@"extern" => { const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) - return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(zcu)) + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) + return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(pt)) .{ .field = .{ .identifier = "payload" } } else .begin; @@ -5546,7 +5583,8 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue } fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; @@ -5564,10 +5602,10 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, container_ptr_ty); try writer.writeByte(')'); - switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, zcu)) { + switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, pt)) { .begin => try f.writeCValue(writer, field_ptr_val, .Initializer), .field => |field| { - const u8_ptr_ty = try zcu.adjustPtrTypeChild(field_ptr_ty, Type.u8); + const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, Type.u8); try writer.writeAll("(("); try f.renderType(writer, 
u8_ptr_ty); @@ -5580,14 +5618,14 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("))"); }, .byte_offset => |byte_offset| { - const u8_ptr_ty = try zcu.adjustPtrTypeChild(field_ptr_ty, Type.u8); + const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, Type.u8); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); try writer.writeByte(')'); try f.writeCValue(writer, field_ptr_val, .Other); try writer.print(" - {})", .{ - try f.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset)), + try f.fmtIntLiteral(try pt.intValue(Type.usize, byte_offset)), }); }, } @@ -5603,7 +5641,8 @@ fn fieldPtr( container_ptr_val: CValue, field_index: u32, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const container_ty = container_ptr_ty.childType(zcu); const field_ptr_ty = f.typeOfIndex(inst); @@ -5617,21 +5656,21 @@ fn fieldPtr( try f.renderType(writer, field_ptr_ty); try writer.writeByte(')'); - switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, zcu)) { + switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, pt)) { .begin => try f.writeCValue(writer, container_ptr_val, .Initializer), .field => |field| { try writer.writeByte('&'); try f.writeCValueDerefMember(writer, container_ptr_val, field); }, .byte_offset => |byte_offset| { - const u8_ptr_ty = try zcu.adjustPtrTypeChild(field_ptr_ty, Type.u8); + const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, Type.u8); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); try writer.writeByte(')'); try f.writeCValue(writer, container_ptr_val, .Other); try writer.print(" + {})", .{ - try f.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset)), + try f.fmtIntLiteral(try pt.intValue(Type.usize, byte_offset)), }); }, } @@ -5641,13 +5680,14 @@ fn fieldPtr( } fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; const inst_ty = f.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) { try reap(f, inst, &.{extra.struct_operand}); return .none; } @@ -5671,15 +5711,15 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { .@"packed" => { const int_info = struct_ty.intInfo(zcu); - const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); + const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - const bit_offset = zcu.structPackedFieldBitOffset(loaded_struct, extra.field_index); + const bit_offset = pt.structPackedFieldBitOffset(loaded_struct, extra.field_index); const field_int_signedness = if (inst_ty.isAbiInt(zcu)) inst_ty.intInfo(zcu).signedness else .unsigned; - const field_int_ty = try zcu.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu)))); + const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(pt)))); const temp_local = try f.allocLocal(inst, field_int_ty); try f.writeCValue(writer, temp_local, .Other); @@ -5690,7 +5730,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(')'); const cant_cast = int_info.bits > 64; if (cant_cast) { - if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: 
implement casting between types > 64 bits", .{}); + if (field_int_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); @@ -5702,12 +5742,12 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { } try f.writeCValue(writer, struct_byval, .Other); if (bit_offset > 0) try writer.print(", {})", .{ - try f.fmtIntLiteral(try zcu.intValue(bit_offset_ty, bit_offset)), + try f.fmtIntLiteral(try pt.intValue(bit_offset_ty, bit_offset)), }); if (cant_cast) try writer.writeByte(')'); try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits); try writer.writeAll(");\n"); - if (inst_ty.eql(field_int_ty, f.object.dg.zcu)) return temp_local; + if (inst_ty.eql(field_int_ty, zcu)) return temp_local; const local = try f.allocLocal(inst, inst_ty); if (local.new_local != temp_local.new_local) { @@ -5783,7 +5823,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { /// *(E!T) -> E /// Note that the result is never a pointer. fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); @@ -5797,7 +5838,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const payload_ty = error_union_ty.errorUnionPayload(zcu); const local = try f.allocLocal(inst, inst_ty); - if (!payload_ty.hasRuntimeBits(zcu) and operand == .local and operand.local == local.new_local) { + if (!payload_ty.hasRuntimeBits(pt) and operand == .local and operand.local == local.new_local) { // The store will be 'x = x'; elide it. 
return local; } @@ -5806,11 +5847,11 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (!payload_ty.hasRuntimeBits(zcu)) + if (!payload_ty.hasRuntimeBits(pt)) try f.writeCValue(writer, operand, .Other) else if (error_ty.errorSetIsEmpty(zcu)) try writer.print("{}", .{ - try f.fmtIntLiteral(try zcu.intValue(try zcu.errorIntType(), 0)), + try f.fmtIntLiteral(try pt.intValue(try pt.errorIntType(), 0)), }) else if (operand_is_ptr) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) @@ -5821,7 +5862,8 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); @@ -5831,7 +5873,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const error_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty; const writer = f.object.writer(); - if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(zcu)) { + if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(pt)) { if (!is_ptr) return .none; const local = try f.allocLocal(inst, inst_ty); @@ -5896,12 +5938,13 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { } fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(zcu); - const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt); const err_ty = inst_ty.errorUnionSet(zcu); const err = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -5935,7 +5978,8 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const writer = f.object.writer(); const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); @@ -5944,12 +5988,12 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const error_union_ty = operand_ty.childType(zcu); const payload_ty = error_union_ty.errorUnionPayload(zcu); - const err_int_ty = try zcu.errorIntType(); - const no_err = try zcu.intValue(err_int_ty, 0); + const err_int_ty = try pt.errorIntType(); + const no_err = try pt.intValue(err_int_ty, 0); try reap(f, inst, &.{ty_op.operand}); // First, set the non-error value. 
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const a = try Assignment.start(f, writer, try f.ctypeFromType(operand_ty, .complete)); try f.writeCValueDeref(writer, operand); try a.assign(f, writer); @@ -5994,13 +6038,14 @@ fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue { } fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(zcu); const payload = try f.resolveInst(ty_op.operand); - const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt); const err_ty = inst_ty.errorUnionSet(zcu); try reap(f, inst, &.{ty_op.operand}); @@ -6020,14 +6065,15 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { else try f.writeCValueMember(writer, local, .{ .identifier = "error" }); try a.assign(f, writer); - try f.object.dg.renderValue(writer, try zcu.intValue(try zcu.errorIntType(), 0), .Other); + try f.object.dg.renderValue(writer, try pt.intValue(try pt.errorIntType(), 0), .Other); try a.end(f, writer); } return local; } fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const u8) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const writer = f.object.writer(); @@ -6042,9 +6088,9 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const const a = try Assignment.start(f, writer, CType.bool); try f.writeCValue(writer, local, .Other); try a.assign(f, writer); - const err_int_ty = try zcu.errorIntType(); + const err_int_ty = try pt.errorIntType(); if (!error_ty.errorSetIsEmpty(zcu)) - if (payload_ty.hasRuntimeBits(zcu)) + if (payload_ty.hasRuntimeBits(pt)) if (is_ptr) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) else @@ -6052,17 +6098,18 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const else try f.writeCValue(writer, operand, .Other) else - try f.object.dg.renderValue(writer, try zcu.intValue(err_int_ty, 0), .Other); + try f.object.dg.renderValue(writer, try pt.intValue(err_int_ty, 0), .Other); try writer.writeByte(' '); try writer.writeAll(operator); try writer.writeByte(' '); - try f.object.dg.renderValue(writer, try zcu.intValue(err_int_ty, 0), .Other); + try f.object.dg.renderValue(writer, try pt.intValue(err_int_ty, 0), .Other); try a.end(f, writer); return local; } fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ctype_pool = &f.object.dg.ctype_pool; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6096,7 +6143,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { if (operand_child_ctype.info(ctype_pool) == .array) { try writer.writeByte('&'); try f.writeCValueDeref(writer, operand); - try writer.print("[{}]", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 0))}); + try writer.print("[{}]", .{try f.fmtIntLiteral(try pt.intValue(Type.usize, 0))}); } else try f.writeCValue(writer, operand, .Initializer); } try a.end(f, writer); @@ -6106,7 +6153,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) 
!CValue { try f.writeCValueMember(writer, local, .{ .identifier = "len" }); try a.assign(f, writer); try writer.print("{}", .{ - try f.fmtIntLiteral(try zcu.intValue(Type.usize, array_ty.arrayLen(zcu))), + try f.fmtIntLiteral(try pt.intValue(Type.usize, array_ty.arrayLen(zcu))), }); try a.end(f, writer); } @@ -6115,7 +6162,8 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); @@ -6165,7 +6213,8 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { } fn airIntFromPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try f.resolveInst(un_op); @@ -6194,7 +6243,8 @@ fn airUnBuiltinCall( operation: []const u8, info: BuiltinInfo, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const operand = try f.resolveInst(operand_ref); try reap(f, inst, &.{operand_ref}); @@ -6237,7 +6287,8 @@ fn airBinBuiltinCall( operation: []const u8, info: BuiltinInfo, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const operand_ty = f.typeOf(bin_op.lhs); @@ -6292,7 +6343,8 @@ fn airCmpBuiltinCall( operation: enum { cmp, operator }, info: BuiltinInfo, ) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const lhs = try f.resolveInst(data.lhs); const rhs = try f.resolveInst(data.rhs); try reap(f, inst, &.{ data.lhs, data.rhs }); @@ -6333,7 +6385,7 @@ fn airCmpBuiltinCall( try writer.writeByte(')'); if (!ref_ret) try writer.print("{s}{}", .{ compareOperatorC(operator), - try f.fmtIntLiteral(try zcu.intValue(Type.i32, 0)), + try f.fmtIntLiteral(try pt.intValue(Type.i32, 0)), }); try writer.writeAll(";\n"); try v.end(f, inst, writer); @@ -6342,7 +6394,8 @@ fn airCmpBuiltinCall( } fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const inst_ty = f.typeOfIndex(inst); @@ -6358,7 +6411,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value }); const repr_ty = if (ty.isRuntimeFloat()) - zcu.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable + pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable else ty; @@ -6448,7 +6501,8 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue } fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data; const inst_ty = f.typeOfIndex(inst); @@ -6461,10 +6515,10 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const operand_mat = try Materialize.start(f, inst, ty, operand); try reap(f, 
inst, &.{ pl_op.operand, extra.operand }); - const repr_bits = @as(u16, @intCast(ty.abiSize(zcu) * 8)); + const repr_bits = @as(u16, @intCast(ty.abiSize(pt) * 8)); const is_float = ty.isRuntimeFloat(); const is_128 = repr_bits == 128; - const repr_ty = if (is_float) zcu.intType(.unsigned, repr_bits) catch unreachable else ty; + const repr_ty = if (is_float) pt.intType(.unsigned, repr_bits) catch unreachable else ty; const local = try f.allocLocal(inst, inst_ty); try writer.print("zig_atomicrmw_{s}", .{toAtomicRmwSuffix(extra.op())}); @@ -6503,7 +6557,8 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const atomic_load = f.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load; const ptr = try f.resolveInst(atomic_load.ptr); try reap(f, inst, &.{atomic_load.ptr}); @@ -6511,7 +6566,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ty = ptr_ty.childType(zcu); const repr_ty = if (ty.isRuntimeFloat()) - zcu.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable + pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable else ty; @@ -6539,7 +6594,8 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = f.typeOf(bin_op.lhs); const ty = ptr_ty.childType(zcu); @@ -6551,7 +6607,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const repr_ty = if (ty.isRuntimeFloat()) - zcu.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable + pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable else ty; @@ -6574,7 +6630,8 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa } fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !void { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; if (ptr_ty.isSlice(zcu)) { try f.writeCValueMember(writer, ptr, .{ .identifier = "ptr" }); } else { @@ -6583,14 +6640,15 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo } fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_ty = f.typeOf(bin_op.lhs); const dest_slice = try f.resolveInst(bin_op.lhs); const value = try f.resolveInst(bin_op.rhs); const elem_ty = f.typeOf(bin_op.rhs); - const elem_abi_size = elem_ty.abiSize(zcu); - const val_is_undef = if (try f.air.value(bin_op.rhs, zcu)) |val| val.isUndefDeep(zcu) else false; + const elem_abi_size = elem_ty.abiSize(pt); + const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false; const writer = f.object.writer(); if (val_is_undef) { @@ -6628,7 +6686,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { // For the assignment in this loop, the array pointer needs to get // casted to a regular pointer, otherwise an error like this occurs: // error: array type 'uint32_t[20]' (aka 'unsigned 
int[20]') is not assignable - const elem_ptr_ty = try zcu.ptrType(.{ + const elem_ptr_ty = try pt.ptrType(.{ .child = elem_ty.toIntern(), .flags = .{ .size = .C, @@ -6640,7 +6698,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeAll("for ("); try f.writeCValue(writer, index, .Other); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, try zcu.intValue(Type.usize, 0), .Initializer); + try f.object.dg.renderValue(writer, try pt.intValue(Type.usize, 0), .Initializer); try writer.writeAll("; "); try f.writeCValue(writer, index, .Other); try writer.writeAll(" != "); @@ -6705,7 +6763,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_ptr = try f.resolveInst(bin_op.lhs); const src_ptr = try f.resolveInst(bin_op.rhs); @@ -6733,10 +6792,11 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { } fn writeArrayLen(f: *Function, writer: ArrayListWriter, dest_ptr: CValue, dest_ty: Type) !void { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; switch (dest_ty.ptrSize(zcu)) { .One => try writer.print("{}", .{ - try f.fmtIntLiteral(try zcu.intValue(Type.usize, dest_ty.childType(zcu).arrayLen(zcu))), + try f.fmtIntLiteral(try pt.intValue(Type.usize, dest_ty.childType(zcu).arrayLen(zcu))), }), .Many, .C => unreachable, .Slice => try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" }), @@ -6744,14 +6804,15 @@ fn writeArrayLen(f: *Function, writer: ArrayListWriter, dest_ptr: CValue, dest_t } fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const union_ptr = try f.resolveInst(bin_op.lhs); const new_tag = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const union_ty = f.typeOf(bin_op.lhs).childType(zcu); - const layout = union_ty.unionGetLayout(zcu); + const layout = union_ty.unionGetLayout(pt); if (layout.tag_size == 0) return .none; const tag_ty = union_ty.unionTagTypeSafety(zcu).?; @@ -6765,14 +6826,14 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { } fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const union_ty = f.typeOf(ty_op.operand); - const layout = union_ty.unionGetLayout(zcu); + const layout = union_ty.unionGetLayout(pt); if (layout.tag_size == 0) return .none; const inst_ty = f.typeOfIndex(inst); @@ -6787,7 +6848,8 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const inst_ty = f.typeOfIndex(inst); @@ -6824,7 +6886,8 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ty_op = 
f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try f.resolveInst(ty_op.operand); @@ -6879,7 +6942,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue { } fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data; @@ -6895,11 +6958,11 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { for (0..extra.mask_len) |index| { try f.writeCValue(writer, local, .Other); try writer.writeByte('['); - try f.object.dg.renderValue(writer, try zcu.intValue(Type.usize, index), .Other); + try f.object.dg.renderValue(writer, try pt.intValue(Type.usize, index), .Other); try writer.writeAll("] = "); - const mask_elem = (try mask.elemValue(zcu, index)).toSignedInt(zcu); - const src_val = try zcu.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63))); + const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(pt); + const src_val = try pt.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63))); try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other); try writer.writeByte('['); @@ -6911,7 +6974,8 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { } fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const reduce = f.air.instructions.items(.data)[@intFromEnum(inst)].reduce; const scalar_ty = f.typeOfIndex(inst); @@ -6920,7 +6984,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { const operand_ty = f.typeOf(reduce.operand); const writer = f.object.writer(); - const use_operator = scalar_ty.bitSize(zcu) <= 64; + const use_operator = scalar_ty.bitSize(pt) <= 64; const op: union(enum) { const Func = struct { operation: []const u8, info: BuiltinInfo = .none }; builtin: Func, @@ -6971,37 +7035,37 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderValue(writer, switch (reduce.operation) { .Or, .Xor => switch (scalar_ty.zigTypeTag(zcu)) { .Bool => Value.false, - .Int => try zcu.intValue(scalar_ty, 0), + .Int => try pt.intValue(scalar_ty, 0), else => unreachable, }, .And => switch (scalar_ty.zigTypeTag(zcu)) { .Bool => Value.true, .Int => switch (scalar_ty.intInfo(zcu).signedness) { - .unsigned => try scalar_ty.maxIntScalar(zcu, scalar_ty), - .signed => try zcu.intValue(scalar_ty, -1), + .unsigned => try scalar_ty.maxIntScalar(pt, scalar_ty), + .signed => try pt.intValue(scalar_ty, -1), }, else => unreachable, }, .Add => switch (scalar_ty.zigTypeTag(zcu)) { - .Int => try zcu.intValue(scalar_ty, 0), - .Float => try zcu.floatValue(scalar_ty, 0.0), + .Int => try pt.intValue(scalar_ty, 0), + .Float => try pt.floatValue(scalar_ty, 0.0), else => unreachable, }, .Mul => switch (scalar_ty.zigTypeTag(zcu)) { - .Int => try zcu.intValue(scalar_ty, 1), - .Float => try zcu.floatValue(scalar_ty, 1.0), + .Int => try pt.intValue(scalar_ty, 1), + .Float => try pt.floatValue(scalar_ty, 1.0), else => unreachable, }, .Min => switch (scalar_ty.zigTypeTag(zcu)) { .Bool => Value.true, - .Int => try scalar_ty.maxIntScalar(zcu, scalar_ty), - .Float => try zcu.floatValue(scalar_ty, std.math.nan(f128)), + .Int => try scalar_ty.maxIntScalar(pt, scalar_ty), + .Float => try pt.floatValue(scalar_ty, std.math.nan(f128)), else => unreachable, }, .Max => switch (scalar_ty.zigTypeTag(zcu)) { .Bool => Value.false, - .Int => try 
scalar_ty.minIntScalar(zcu, scalar_ty), - .Float => try zcu.floatValue(scalar_ty, std.math.nan(f128)), + .Int => try scalar_ty.minIntScalar(pt, scalar_ty), + .Float => try pt.floatValue(scalar_ty, std.math.nan(f128)), else => unreachable, }, }, .Initializer); @@ -7046,7 +7110,8 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const inst_ty = f.typeOfIndex(inst); @@ -7096,7 +7161,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { var field_it = loaded_struct.iterateRuntimeOrder(ip); while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete)); try f.writeCValueMember(writer, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| @@ -7113,7 +7178,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(" = "); const int_info = inst_ty.intInfo(zcu); - const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); + const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); var bit_offset: u64 = 0; @@ -7121,7 +7186,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { for (0..elements.len) |field_index| { if (inst_ty.structFieldIsComptime(field_index, zcu)) continue; const field_ty = inst_ty.structFieldType(field_index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!empty) { try writer.writeAll("zig_or_"); @@ -7134,7 +7199,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { for (resolved_elements, 0..) |element, field_index| { if (inst_ty.structFieldIsComptime(field_index, zcu)) continue; const field_ty = inst_ty.structFieldType(field_index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; if (!empty) try writer.writeAll(", "); // TODO: Skip this entire shift if val is 0? 
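Value and type construction moves in the same direction as the layout queries: anything that may intern a new value or type goes through the per-thread handle rather than the `Zcu`, as in the reduce-seed table above. A hedged fragment using names from the surrounding diff (not compilable on its own):

```zig
// Interning values/types now goes through pt rather than zcu.
const zero = try pt.intValue(Type.usize, 0);
const one_f = try pt.floatValue(scalar_ty, 1.0);
const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
```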
@@ -7160,13 +7225,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } try writer.print(", {}", .{ - try f.fmtIntLiteral(try zcu.intValue(bit_offset_ty, bit_offset)), + try f.fmtIntLiteral(try pt.intValue(bit_offset_ty, bit_offset)), }); try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits); try writer.writeByte(')'); if (!empty) try writer.writeByte(')'); - bit_offset += field_ty.bitSize(zcu); + bit_offset += field_ty.bitSize(pt); empty = false; } try writer.writeAll(";\n"); @@ -7176,7 +7241,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { .anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| { if (anon_struct_info.values.get(ip)[field_index] != .none) continue; const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete)); try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name| @@ -7194,7 +7259,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data; @@ -7211,15 +7277,15 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { if (loaded_union.getLayout(ip) == .@"packed") return f.moveCValue(inst, union_ty, payload); const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: { - const layout = union_ty.unionGetLayout(zcu); + const layout = union_ty.unionGetLayout(pt); if (layout.tag_size != 0) { const field_index = tag_ty.enumFieldIndex(field_name, zcu).?; - const tag_val = try zcu.enumValueFieldIndex(tag_ty, field_index); + const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); const a = try Assignment.start(f, writer, try f.ctypeFromType(tag_ty, .complete)); try f.writeCValueMember(writer, local, .{ .identifier = "tag" }); try a.assign(f, writer); - try writer.print("{}", .{try f.fmtIntLiteral(try tag_val.intFromEnum(tag_ty, zcu))}); + try writer.print("{}", .{try f.fmtIntLiteral(try tag_val.intFromEnum(tag_ty, pt))}); try a.end(f, writer); } break :field .{ .payload_identifier = field_name.toSlice(ip) }; @@ -7234,7 +7300,8 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const prefetch = f.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; const ptr_ty = f.typeOf(prefetch.ptr); @@ -7291,7 +7358,8 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue { } fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data; @@ -7326,7 +7394,8 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { } fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); const 
decl_index = f.object.dg.pass.decl; const decl = zcu.declPtr(decl_index); @@ -7699,7 +7768,8 @@ fn formatIntLiteral( options: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - const zcu = data.dg.zcu; + const pt = data.dg.pt; + const zcu = pt.zcu; const target = &data.dg.mod.resolved_target.result; const ctype_pool = &data.dg.ctype_pool; @@ -7732,7 +7802,7 @@ fn formatIntLiteral( }; undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits); break :blk undef_int.toConst(); - } else data.val.toBigInt(&int_buf, zcu); + } else data.val.toBigInt(&int_buf, pt); assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits)); const c_bits: usize = @intCast(data.ctype.byteSize(ctype_pool, data.dg.mod) * 8); @@ -7866,7 +7936,7 @@ fn formatIntLiteral( .int_info = c_limb_int_info, .kind = data.kind, .ctype = c_limb_ctype, - .val = try zcu.intValue_big(Type.comptime_int, c_limb_mut.toConst()), + .val = try pt.intValue_big(Type.comptime_int, c_limb_mut.toConst()), }, fmt, options, writer); } } @@ -7940,17 +8010,18 @@ const Vectorize = struct { index: CValue = .none, pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize { - const zcu = f.object.dg.zcu; + const pt = f.object.dg.pt; + const zcu = pt.zcu; return if (ty.zigTypeTag(zcu) == .Vector) index: { const local = try f.allocLocal(inst, Type.usize); try writer.writeAll("for ("); try f.writeCValue(writer, local, .Other); - try writer.print(" = {d}; ", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 0))}); + try writer.print(" = {d}; ", .{try f.fmtIntLiteral(try pt.intValue(Type.usize, 0))}); try f.writeCValue(writer, local, .Other); - try writer.print(" < {d}; ", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, ty.vectorLen(zcu)))}); + try writer.print(" < {d}; ", .{try f.fmtIntLiteral(try pt.intValue(Type.usize, ty.vectorLen(zcu)))}); try f.writeCValue(writer, local, .Other); - try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 1))}); + try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(try pt.intValue(Type.usize, 1))}); f.object.indent_writer.pushIndent(); break :index .{ .index = local }; @@ -7974,10 +8045,10 @@ const Vectorize = struct { } }; -fn lowersToArray(ty: Type, zcu: *Zcu) bool { - return switch (ty.zigTypeTag(zcu)) { +fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool { + return switch (ty.zigTypeTag(pt.zcu)) { .Array, .Vector => return true, - else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null, + else => return ty.isAbiInt(pt.zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(pt)))) == null, }; } diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig index 0a0d84f061..6d98aaafcb 100644 --- a/src/codegen/c/Type.zig +++ b/src/codegen/c/Type.zig @@ -1339,11 +1339,11 @@ pub const Pool = struct { allocator: std.mem.Allocator, scratch: *std.ArrayListUnmanaged(u32), ty: Type, - zcu: *Zcu, + pt: Zcu.PerThread, mod: *Module, kind: Kind, ) !CType { - const ip = &zcu.intern_pool; + const ip = &pt.zcu.intern_pool; switch (ty.toIntern()) { .u0_type, .i0_type, @@ -1400,7 +1400,7 @@ pub const Pool = struct { allocator, scratch, Type.fromInterned(ip.loadEnumType(ip_index).tag_ty), - zcu, + pt, mod, kind, ), @@ -1409,7 +1409,7 @@ pub const Pool = struct { .adhoc_inferred_error_set_type, => return pool.fromIntInfo(allocator, .{ .signedness = .unsigned, - .bits = zcu.errorSetBits(), + .bits = pt.zcu.errorSetBits(), }, mod, kind), .manyptr_u8_type, => return 
pool.getPointer(allocator, .{ @@ -1492,13 +1492,13 @@ pub const Pool = struct { allocator, scratch, Type.fromInterned(ptr_info.child), - zcu, + pt, mod, .forward, ), .alignas = AlignAs.fromAlignment(.{ .@"align" = ptr_info.flags.alignment, - .abi = Type.fromInterned(ptr_info.child).abiAlignment(zcu), + .abi = Type.fromInterned(ptr_info.child).abiAlignment(pt), }), }; break :elem_ctype if (elem.alignas.abiOrder().compare(.gte)) @@ -1535,7 +1535,7 @@ pub const Pool = struct { allocator, scratch, Type.fromInterned(ip.slicePtrType(ip_index)), - zcu, + pt, mod, kind, ), @@ -1560,7 +1560,7 @@ pub const Pool = struct { allocator, scratch, elem_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1574,7 +1574,7 @@ pub const Pool = struct { .{ .name = .{ .index = .array }, .ctype = array_ctype, - .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)), + .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)), }, }; return pool.fromFields(allocator, .@"struct", &fields, kind); @@ -1586,7 +1586,7 @@ pub const Pool = struct { allocator, scratch, elem_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1600,7 +1600,7 @@ pub const Pool = struct { .{ .name = .{ .index = .array }, .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)), + .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)), }, }; return pool.fromFields(allocator, .@"struct", &fields, kind); @@ -1611,7 +1611,7 @@ pub const Pool = struct { allocator, scratch, Type.fromInterned(payload_type), - zcu, + pt, mod, kind.noParameter(), ); @@ -1635,7 +1635,7 @@ pub const Pool = struct { .name = .{ .index = .payload }, .ctype = payload_ctype, .alignas = AlignAs.fromAbiAlignment( - Type.fromInterned(payload_type).abiAlignment(zcu), + Type.fromInterned(payload_type).abiAlignment(pt), ), }, }; @@ -1643,7 +1643,7 @@ pub const Pool = struct { }, .anyframe_type => unreachable, .error_union_type => |error_union_info| { - const error_set_bits = zcu.errorSetBits(); + const error_set_bits = pt.zcu.errorSetBits(); const error_set_ctype = try pool.fromIntInfo(allocator, .{ .signedness = .unsigned, .bits = error_set_bits, @@ -1654,7 +1654,7 @@ pub const Pool = struct { allocator, scratch, payload_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1671,7 +1671,7 @@ pub const Pool = struct { .{ .name = .{ .index = .payload }, .ctype = payload_ctype, - .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(zcu)), + .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(pt)), }, }; return pool.fromFields(allocator, .@"struct", &fields, kind); @@ -1685,7 +1685,7 @@ pub const Pool = struct { .tag = .@"struct", .name = .{ .owner_decl = loaded_struct.decl.unwrap().? 
}, }); - if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu)) + if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt)) fwd_decl else CType.void; @@ -1706,7 +1706,7 @@ pub const Pool = struct { allocator, scratch, field_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1718,7 +1718,7 @@ pub const Pool = struct { String.fromUnnamed(@intCast(field_index)); const field_alignas = AlignAs.fromAlignment(.{ .@"align" = loaded_struct.fieldAlign(ip, field_index), - .abi = field_type.abiAlignment(zcu), + .abi = field_type.abiAlignment(pt), }); pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ .name = field_name.index, @@ -1745,7 +1745,7 @@ pub const Pool = struct { allocator, scratch, Type.fromInterned(loaded_struct.backingIntType(ip).*), - zcu, + pt, mod, kind, ), @@ -1766,7 +1766,7 @@ pub const Pool = struct { allocator, scratch, field_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1780,7 +1780,7 @@ pub const Pool = struct { .name = field_name.index, .ctype = field_ctype.index, .flags = .{ .alignas = AlignAs.fromAbiAlignment( - field_type.abiAlignment(zcu), + field_type.abiAlignment(pt), ) }, }); } @@ -1806,7 +1806,7 @@ pub const Pool = struct { extra_index, ); } - const fwd_decl = try pool.fromType(allocator, scratch, ty, zcu, mod, .forward); + const fwd_decl = try pool.fromType(allocator, scratch, ty, pt, mod, .forward); try pool.ensureUnusedCapacity(allocator, 1); const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{ .fwd_decl = fwd_decl.index, @@ -1824,7 +1824,7 @@ pub const Pool = struct { .tag = if (has_tag) .@"struct" else .@"union", .name = .{ .owner_decl = loaded_union.decl }, }); - if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu)) + if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt)) fwd_decl else CType.void; @@ -1847,7 +1847,7 @@ pub const Pool = struct { allocator, scratch, field_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1858,7 +1858,7 @@ pub const Pool = struct { ); const field_alignas = AlignAs.fromAlignment(.{ .@"align" = loaded_union.fieldAlign(ip, field_index), - .abi = field_type.abiAlignment(zcu), + .abi = field_type.abiAlignment(pt), }); pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ .name = field_name.index, @@ -1895,7 +1895,7 @@ pub const Pool = struct { allocator, scratch, tag_type, - zcu, + pt, mod, kind.noParameter(), ); @@ -1903,7 +1903,7 @@ pub const Pool = struct { struct_fields[struct_fields_len] = .{ .name = .{ .index = .tag }, .ctype = tag_ctype, - .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(zcu)), + .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(pt)), }; struct_fields_len += 1; } @@ -1951,7 +1951,7 @@ pub const Pool = struct { }, .@"packed" => return pool.fromIntInfo(allocator, .{ .signedness = .unsigned, - .bits = @intCast(ty.bitSize(zcu)), + .bits = @intCast(ty.bitSize(pt)), }, mod, kind), } }, @@ -1960,7 +1960,7 @@ pub const Pool = struct { allocator, scratch, Type.fromInterned(ip.loadEnumType(ip_index).tag_ty), - zcu, + pt, mod, kind, ), @@ -1975,7 +1975,7 @@ pub const Pool = struct { allocator, scratch, return_type, - zcu, + pt, mod, kind.asParameter(), ) else CType.void; @@ -1987,7 +1987,7 @@ pub const Pool = struct { allocator, scratch, param_type, - zcu, + pt, mod, kind.asParameter(), ); @@ -2011,7 +2011,7 @@ pub const Pool = struct { .inferred_error_set_type, => return pool.fromIntInfo(allocator, .{ .signedness = .unsigned, - .bits = zcu.errorSetBits(), + .bits = pt.zcu.errorSetBits(), }, mod, 
kind), .undef, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 6efef20f22..ca574070bf 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -15,8 +15,6 @@ const link = @import("../link.zig"); const Compilation = @import("../Compilation.zig"); const build_options = @import("build_options"); const Zcu = @import("../Zcu.zig"); -/// Deprecated. -const Module = Zcu; const InternPool = @import("../InternPool.zig"); const Package = @import("../Package.zig"); const Air = @import("../Air.zig"); @@ -810,7 +808,7 @@ pub const Object = struct { gpa: Allocator, builder: Builder, - module: *Module, + pt: Zcu.PerThread, debug_compile_unit: Builder.Metadata, @@ -820,7 +818,7 @@ pub const Object = struct { debug_enums: std.ArrayListUnmanaged(Builder.Metadata), debug_globals: std.ArrayListUnmanaged(Builder.Metadata), - debug_file_map: std.AutoHashMapUnmanaged(*const Module.File, Builder.Metadata), + debug_file_map: std.AutoHashMapUnmanaged(*const Zcu.File, Builder.Metadata), debug_type_map: std.AutoHashMapUnmanaged(Type, Builder.Metadata), debug_unresolved_namespace_scopes: std.AutoArrayHashMapUnmanaged(InternPool.NamespaceIndex, Builder.Metadata), @@ -992,7 +990,10 @@ pub const Object = struct { obj.* = .{ .gpa = gpa, .builder = builder, - .module = comp.module.?, + .pt = .{ + .zcu = comp.module.?, + .tid = .main, + }, .debug_compile_unit = debug_compile_unit, .debug_enums_fwd_ref = debug_enums_fwd_ref, .debug_globals_fwd_ref = debug_globals_fwd_ref, @@ -1033,7 +1034,8 @@ pub const Object = struct { // If o.error_name_table is null, then it was not referenced by any instructions. if (o.error_name_table == .none) return; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const error_name_list = mod.global_error_set.keys(); const llvm_errors = try mod.gpa.alloc(Builder.Constant, error_name_list.len); @@ -1072,7 +1074,7 @@ pub const Object = struct { table_variable_index.setMutability(.constant, &o.builder); table_variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); table_variable_index.setAlignment( - slice_ty.abiAlignment(mod).toLlvm(), + slice_ty.abiAlignment(pt).toLlvm(), &o.builder, ); @@ -1083,8 +1085,7 @@ pub const Object = struct { // If there is no such function in the module, it means the source code does not need it. 
const name = o.builder.strtabStringIfExists(lt_errors_fn_name) orelse return; const llvm_fn = o.builder.getGlobal(name) orelse return; - const mod = o.module; - const errors_len = mod.global_error_set.count(); + const errors_len = o.pt.zcu.global_error_set.count(); var wip = try Builder.WipFunction.init(&o.builder, .{ .function = llvm_fn.ptrConst(&o.builder).kind.function, @@ -1106,10 +1107,8 @@ pub const Object = struct { } fn genModuleLevelAssembly(object: *Object) !void { - const mod = object.module; - const writer = object.builder.setModuleAsm(); - for (mod.global_assembly.values()) |assembly| { + for (object.pt.zcu.global_assembly.values()) |assembly| { try writer.print("{s}\n", .{assembly}); } try object.builder.finishModuleAsm(); @@ -1131,6 +1130,9 @@ pub const Object = struct { }; pub fn emit(self: *Object, options: EmitOptions) !void { + const zcu = self.pt.zcu; + const comp = zcu.comp; + { try self.genErrorNameTable(); try self.genCmpLtErrorsLenFunction(); @@ -1143,8 +1145,8 @@ pub const Object = struct { const namespace_index = self.debug_unresolved_namespace_scopes.keys()[i]; const fwd_ref = self.debug_unresolved_namespace_scopes.values()[i]; - const namespace = self.module.namespacePtr(namespace_index); - const debug_type = try self.lowerDebugType(namespace.getType(self.module)); + const namespace = zcu.namespacePtr(namespace_index); + const debug_type = try self.lowerDebugType(namespace.getType(zcu)); self.builder.debugForwardReferenceSetType(fwd_ref, debug_type); } @@ -1206,12 +1208,12 @@ pub const Object = struct { try file.writeAll(ptr[0..(bitcode.len * 4)]); } - if (!build_options.have_llvm or !self.module.comp.config.use_lib_llvm) { + if (!build_options.have_llvm or !comp.config.use_lib_llvm) { log.err("emitting without libllvm not implemented", .{}); return error.FailedToEmit; } - initializeLLVMTarget(self.module.comp.root_mod.resolved_target.result.cpu.arch); + initializeLLVMTarget(comp.root_mod.resolved_target.result.cpu.arch); const context: *llvm.Context = llvm.Context.create(); errdefer context.dispose(); @@ -1247,8 +1249,8 @@ pub const Object = struct { @panic("Invalid LLVM triple"); } - const optimize_mode = self.module.comp.root_mod.optimize_mode; - const pic = self.module.comp.root_mod.pic; + const optimize_mode = comp.root_mod.optimize_mode; + const pic = comp.root_mod.pic; const opt_level: llvm.CodeGenOptLevel = if (optimize_mode == .Debug) .None @@ -1257,12 +1259,12 @@ pub const Object = struct { const reloc_mode: llvm.RelocMode = if (pic) .PIC - else if (self.module.comp.config.link_mode == .dynamic) + else if (comp.config.link_mode == .dynamic) llvm.RelocMode.DynamicNoPIC else .Static; - const code_model: llvm.CodeModel = switch (self.module.comp.root_mod.code_model) { + const code_model: llvm.CodeModel = switch (comp.root_mod.code_model) { .default => .Default, .tiny => .Tiny, .small => .Small, @@ -1277,24 +1279,24 @@ pub const Object = struct { var target_machine = llvm.TargetMachine.create( target, target_triple_sentinel, - if (self.module.comp.root_mod.resolved_target.result.cpu.model.llvm_name) |s| s.ptr else null, - self.module.comp.root_mod.resolved_target.llvm_cpu_features.?, + if (comp.root_mod.resolved_target.result.cpu.model.llvm_name) |s| s.ptr else null, + comp.root_mod.resolved_target.llvm_cpu_features.?, opt_level, reloc_mode, code_model, - self.module.comp.function_sections, - self.module.comp.data_sections, + comp.function_sections, + comp.data_sections, float_abi, - if 
(target_util.llvmMachineAbi(self.module.comp.root_mod.resolved_target.result)) |s| s.ptr else null, + if (target_util.llvmMachineAbi(comp.root_mod.resolved_target.result)) |s| s.ptr else null, ); errdefer target_machine.dispose(); if (pic) module.setModulePICLevel(); - if (self.module.comp.config.pie) module.setModulePIELevel(); + if (comp.config.pie) module.setModulePIELevel(); if (code_model != .Default) module.setModuleCodeModel(code_model); - if (self.module.comp.llvm_opt_bisect_limit >= 0) { - context.setOptBisectLimit(self.module.comp.llvm_opt_bisect_limit); + if (comp.llvm_opt_bisect_limit >= 0) { + context.setOptBisectLimit(comp.llvm_opt_bisect_limit); } // Unfortunately, LLVM shits the bed when we ask for both binary and assembly. @@ -1352,11 +1354,13 @@ pub const Object = struct { pub fn updateFunc( o: *Object, - zcu: *Module, + pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness, ) !void { + assert(std.meta.eql(pt, o.pt)); + const zcu = pt.zcu; const comp = zcu.comp; const func = zcu.funcInfo(func_index); const decl_index = func.owner_decl; @@ -1437,7 +1441,7 @@ pub const Object = struct { var llvm_arg_i: u32 = 0; // This gets the LLVM values from the function and stores them in `dg.args`. - const sret = firstParamSRet(fn_info, zcu, target); + const sret = firstParamSRet(fn_info, pt, target); const ret_ptr: Builder.Value = if (sret) param: { const param = wip.arg(llvm_arg_i); llvm_arg_i += 1; @@ -1478,8 +1482,8 @@ pub const Object = struct { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]); const param = wip.arg(llvm_arg_i); - if (isByRef(param_ty, zcu)) { - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + if (isByRef(param_ty, pt)) { + const alignment = param_ty.abiAlignment(pt).toLlvm(); const param_llvm_ty = param.typeOfWip(&wip); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, arg_ptr, alignment); @@ -1495,12 +1499,12 @@ pub const Object = struct { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); const param_llvm_ty = try o.lowerType(param_ty); const param = wip.arg(llvm_arg_i); - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); try o.addByRefParamAttrs(&attributes, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty); llvm_arg_i += 1; - if (isByRef(param_ty, zcu)) { + if (isByRef(param_ty, pt)) { args.appendAssumeCapacity(param); } else { args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, "")); @@ -1510,12 +1514,12 @@ pub const Object = struct { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); const param_llvm_ty = try o.lowerType(param_ty); const param = wip.arg(llvm_arg_i); - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); try attributes.addParamAttr(llvm_arg_i, .noundef, &o.builder); llvm_arg_i += 1; - if (isByRef(param_ty, zcu)) { + if (isByRef(param_ty, pt)) { args.appendAssumeCapacity(param); } else { args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, "")); @@ -1528,11 +1532,11 @@ pub const Object = struct { llvm_arg_i += 1; const param_llvm_ty = try o.lowerType(param_ty); - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, 
arg_ptr, alignment); - args.appendAssumeCapacity(if (isByRef(param_ty, zcu)) + args.appendAssumeCapacity(if (isByRef(param_ty, pt)) arg_ptr else try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); @@ -1556,7 +1560,7 @@ pub const Object = struct { const elem_align = (if (ptr_info.flags.alignment != .none) @as(InternPool.Alignment, ptr_info.flags.alignment) else - Type.fromInterned(ptr_info.child).abiAlignment(zcu).max(.@"1")).toLlvm(); + Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1")).toLlvm(); try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder); const ptr_param = wip.arg(llvm_arg_i); llvm_arg_i += 1; @@ -1573,7 +1577,7 @@ pub const Object = struct { const field_types = it.types_buffer[0..it.types_len]; const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); const param_llvm_ty = try o.lowerType(param_ty); - const param_alignment = param_ty.abiAlignment(zcu).toLlvm(); + const param_alignment = param_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, param_alignment, target); const llvm_ty = try o.builder.structType(.normal, field_types); for (0..field_types.len) |field_i| { @@ -1585,7 +1589,7 @@ pub const Object = struct { _ = try wip.store(.normal, param, field_ptr, alignment); } - const is_by_ref = isByRef(param_ty, zcu); + const is_by_ref = isByRef(param_ty, pt); args.appendAssumeCapacity(if (is_by_ref) arg_ptr else @@ -1603,11 +1607,11 @@ pub const Object = struct { const param = wip.arg(llvm_arg_i); llvm_arg_i += 1; - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, arg_ptr, alignment); - args.appendAssumeCapacity(if (isByRef(param_ty, zcu)) + args.appendAssumeCapacity(if (isByRef(param_ty, pt)) arg_ptr else try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); @@ -1618,11 +1622,11 @@ pub const Object = struct { const param = wip.arg(llvm_arg_i); llvm_arg_i += 1; - const alignment = param_ty.abiAlignment(zcu).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, arg_ptr, alignment); - args.appendAssumeCapacity(if (isByRef(param_ty, zcu)) + args.appendAssumeCapacity(if (isByRef(param_ty, pt)) arg_ptr else try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); @@ -1700,8 +1704,9 @@ pub const Object = struct { try fg.wip.finish(); } - pub fn updateDecl(self: *Object, module: *Module, decl_index: InternPool.DeclIndex) !void { - const decl = module.declPtr(decl_index); + pub fn updateDecl(self: *Object, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { + assert(std.meta.eql(pt, self.pt)); + const decl = pt.zcu.declPtr(decl_index); var dg: DeclGen = .{ .object = self, .decl = decl, @@ -1711,7 +1716,7 @@ pub const Object = struct { dg.genDecl() catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; - try module.failed_analysis.put(module.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?); + try pt.zcu.failed_analysis.put(pt.zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?); dg.err_msg = null; return; }, @@ -1721,10 +1726,12 @@ pub const Object = struct { pub fn updateExports( self: *Object, - zcu: *Zcu, - exported: Module.Exported, + pt: Zcu.PerThread, + exported: Zcu.Exported, 
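The parameter-lowering hunks above repeat one conversion: `abiAlignment`, `isByRef`, and `firstParamSRet` now take `pt` instead of `zcu`. Consolidated into its post-change form, the by-ref spill pattern those cases share reads:

```zig
// Post-change form of the repeated parameter spill, taken from the hunks above.
const alignment = param_ty.abiAlignment(pt).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
args.appendAssumeCapacity(if (isByRef(param_ty, pt))
    arg_ptr // ABI says pass by reference: hand the stack slot to the function body
else
    try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
```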
export_indices: []const u32, ) link.File.UpdateExportsError!void { + assert(std.meta.eql(pt, self.pt)); + const zcu = pt.zcu; const decl_index = switch (exported) { .decl_index => |i| i, .value => |val| return updateExportedValue(self, zcu, val, export_indices), @@ -1748,7 +1755,7 @@ pub const Object = struct { fn updateExportedValue( o: *Object, - mod: *Module, + mod: *Zcu, exported_value: InternPool.Index, export_indices: []const u32, ) link.File.UpdateExportsError!void { @@ -1783,7 +1790,7 @@ pub const Object = struct { fn updateExportedGlobal( o: *Object, - mod: *Module, + mod: *Zcu, global_index: Builder.Global.Index, export_indices: []const u32, ) link.File.UpdateExportsError!void { @@ -1879,7 +1886,7 @@ pub const Object = struct { global.delete(&self.builder); } - fn getDebugFile(o: *Object, file: *const Module.File) Allocator.Error!Builder.Metadata { + fn getDebugFile(o: *Object, file: *const Zcu.File) Allocator.Error!Builder.Metadata { const gpa = o.gpa; const gop = try o.debug_file_map.getOrPut(gpa, file); errdefer assert(o.debug_file_map.remove(file)); @@ -1909,7 +1916,8 @@ pub const Object = struct { const gpa = o.gpa; const target = o.target; - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; if (o.debug_type_map.get(ty)) |debug_type| return debug_type; @@ -1931,7 +1939,7 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); const builder_name = try o.builder.metadataString(name); - const debug_bits = ty.abiSize(zcu) * 8; // lldb cannot handle non-byte sized types + const debug_bits = ty.abiSize(pt) * 8; // lldb cannot handle non-byte sized types const debug_int_type = switch (info.signedness) { .signed => try o.builder.debugSignedType(builder_name, debug_bits), .unsigned => try o.builder.debugUnsignedType(builder_name, debug_bits), @@ -1941,9 +1949,9 @@ pub const Object = struct { }, .Enum => { const owner_decl_index = ty.getOwnerDecl(zcu); - const owner_decl = o.module.declPtr(owner_decl_index); + const owner_decl = zcu.declPtr(owner_decl_index); - if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { const debug_enum_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); try o.debug_type_map.put(gpa, ty, debug_enum_type); return debug_enum_type; @@ -1961,7 +1969,7 @@ pub const Object = struct { for (enum_type.names.get(ip), 0..) 
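`updateFunc`, `updateDecl`, and `updateExports` now accept the `Zcu.PerThread` handle from their callers and assert it matches the one stored on the `Object`; presumably this guards the current single-threaded assumption while the per-thread plumbing lands. The post-change entry-point shape, trimmed from the hunk above with the body elided:

```zig
// Trimmed restatement of the signature introduced in the diff above.
pub fn updateDecl(self: *Object, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void {
    assert(std.meta.eql(pt, self.pt)); // caller's handle must match the one captured at init
    const decl = pt.zcu.declPtr(decl_index);
    _ = decl;
    // ... DeclGen proceeds as before, reaching the Zcu through `pt.zcu` ...
}
```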
|field_name_ip, i| { var bigint_space: Value.BigIntSpace = undefined; const bigint = if (enum_type.values.len != 0) - Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, zcu) + Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, pt) else std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst(); @@ -1986,8 +1994,8 @@ pub const Object = struct { scope, owner_decl.typeSrcLine(zcu) + 1, // Line try o.lowerDebugType(int_ty), - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(enumerators), ); @@ -2027,10 +2035,10 @@ pub const Object = struct { ptr_info.flags.is_const or ptr_info.flags.is_volatile or ptr_info.flags.size == .Many or ptr_info.flags.size == .C or - !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu)) + !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(pt)) { - const bland_ptr_ty = try zcu.ptrType(.{ - .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu)) + const bland_ptr_ty = try pt.ptrType(.{ + .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(pt)) .anyopaque_type else ptr_info.child, @@ -2060,10 +2068,10 @@ pub const Object = struct { defer gpa.free(name); const line = 0; - const ptr_size = ptr_ty.abiSize(zcu); - const ptr_align = ptr_ty.abiAlignment(zcu); - const len_size = len_ty.abiSize(zcu); - const len_align = len_ty.abiAlignment(zcu); + const ptr_size = ptr_ty.abiSize(pt); + const ptr_align = ptr_ty.abiAlignment(pt); + const len_size = len_ty.abiSize(pt); + const len_align = len_ty.abiAlignment(pt); const len_offset = len_align.forward(ptr_size); @@ -2095,8 +2103,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope line, .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ debug_ptr_type, debug_len_type, @@ -2124,7 +2132,7 @@ pub const Object = struct { 0, // Line debug_elem_ty, target.ptrBitWidth(), - (ty.ptrAlignment(zcu).toByteUnits() orelse 0) * 8, + (ty.ptrAlignment(pt).toByteUnits() orelse 0) * 8, 0, // Offset ); @@ -2149,7 +2157,7 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); const owner_decl_index = ty.getOwnerDecl(zcu); - const owner_decl = o.module.declPtr(owner_decl_index); + const owner_decl = zcu.declPtr(owner_decl_index); const file_scope = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu); const debug_opaque_type = try o.builder.debugStructType( try o.builder.metadataString(name), @@ -2171,8 +2179,8 @@ pub const Object = struct { .none, // Scope 0, // Line try o.lowerDebugType(ty.childType(zcu)), - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ try o.builder.debugSubrange( try o.builder.debugConstant(try o.builder.intConst(.i64, 0)), @@ -2214,8 +2222,8 @@ pub const Object = struct { .none, // Scope 0, // Line debug_elem_type, - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ try o.builder.debugSubrange( try o.builder.debugConstant(try o.builder.intConst(.i64, 0)), @@ -2231,7 +2239,7 @@ pub const Object = struct { const name = try 
o.allocTypeName(ty); defer gpa.free(name); const child_ty = ty.optionalChild(zcu); - if (!child_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!child_ty.hasRuntimeBitsIgnoreComptime(pt)) { const debug_bool_type = try o.builder.debugBoolType( try o.builder.metadataString(name), 8, @@ -2258,10 +2266,10 @@ pub const Object = struct { } const non_null_ty = Type.u8; - const payload_size = child_ty.abiSize(zcu); - const payload_align = child_ty.abiAlignment(zcu); - const non_null_size = non_null_ty.abiSize(zcu); - const non_null_align = non_null_ty.abiAlignment(zcu); + const payload_size = child_ty.abiSize(pt); + const payload_align = child_ty.abiAlignment(pt); + const non_null_size = non_null_ty.abiSize(pt); + const non_null_align = non_null_ty.abiAlignment(pt); const non_null_offset = non_null_align.forward(payload_size); const debug_data_type = try o.builder.debugMemberType( @@ -2292,8 +2300,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ debug_data_type, debug_some_type, @@ -2310,7 +2318,7 @@ pub const Object = struct { }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(zcu); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // TODO: Maybe remove? const debug_error_union_type = try o.lowerDebugType(Type.anyerror); try o.debug_type_map.put(gpa, ty, debug_error_union_type); @@ -2320,10 +2328,10 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); - const error_size = Type.anyerror.abiSize(zcu); - const error_align = Type.anyerror.abiAlignment(zcu); - const payload_size = payload_ty.abiSize(zcu); - const payload_align = payload_ty.abiAlignment(zcu); + const error_size = Type.anyerror.abiSize(pt); + const error_align = Type.anyerror.abiAlignment(pt); + const payload_size = payload_ty.abiSize(pt); + const payload_align = payload_ty.abiAlignment(pt); var error_index: u32 = undefined; var payload_index: u32 = undefined; @@ -2371,8 +2379,8 @@ pub const Object = struct { o.debug_compile_unit, // Sope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&fields), ); @@ -2399,8 +2407,8 @@ pub const Object = struct { const info = Type.fromInterned(backing_int_ty).intInfo(zcu); const builder_name = try o.builder.metadataString(name); const debug_int_type = switch (info.signedness) { - .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(zcu) * 8), - .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(zcu) * 8), + .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(pt) * 8), + .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(pt) * 8), }; try o.debug_type_map.put(gpa, ty, debug_int_type); return debug_int_type; @@ -2420,10 +2428,10 @@ pub const Object = struct { const debug_fwd_ref = try o.builder.debugForwardReference(); for (tuple.types.get(ip), tuple.values.get(ip), 0..) 
|field_ty, field_val, i| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue; + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; - const field_size = Type.fromInterned(field_ty).abiSize(zcu); - const field_align = Type.fromInterned(field_ty).abiAlignment(zcu); + const field_size = Type.fromInterned(field_ty).abiSize(pt); + const field_align = Type.fromInterned(field_ty).abiAlignment(pt); const field_offset = field_align.forward(offset); offset = field_offset + field_size; @@ -2451,8 +2459,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2479,7 +2487,7 @@ pub const Object = struct { else => {}, } - if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { const owner_decl_index = ty.getOwnerDecl(zcu); const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); try o.debug_type_map.put(gpa, ty, debug_struct_type); @@ -2502,14 +2510,14 @@ pub const Object = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; - const field_size = field_ty.abiSize(zcu); - const field_align = zcu.structFieldAlignment( + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; + const field_size = field_ty.abiSize(pt); + const field_align = pt.structFieldAlignment( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, ); - const field_offset = ty.structFieldOffset(field_index, zcu); + const field_offset = ty.structFieldOffset(field_index, pt); const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls); @@ -2532,8 +2540,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2553,7 +2561,7 @@ pub const Object = struct { const union_type = ip.loadUnionType(ty.toIntern()); if (!union_type.haveFieldTypes(ip) or - !ty.hasRuntimeBitsIgnoreComptime(zcu) or + !ty.hasRuntimeBitsIgnoreComptime(pt) or !union_type.haveLayout(ip)) { const debug_union_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); @@ -2561,7 +2569,7 @@ pub const Object = struct { return debug_union_type; } - const layout = zcu.getUnionLayout(union_type); + const layout = pt.getUnionLayout(union_type); const debug_fwd_ref = try o.builder.debugForwardReference(); @@ -2575,8 +2583,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple( &.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))}, ), @@ -2603,12 +2611,12 @@ pub const Object = struct { for (0..tag_type.names.len) |field_index| { const field_ty = union_type.field_types.get(ip)[field_index]; - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue; + if 
(!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; - const field_size = Type.fromInterned(field_ty).abiSize(zcu); + const field_size = Type.fromInterned(field_ty).abiSize(pt); const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) { .@"packed" => .none, - .auto, .@"extern" => zcu.unionFieldNormalAlignment(union_type, @intCast(field_index)), + .auto, .@"extern" => pt.unionFieldNormalAlignment(union_type, @intCast(field_index)), }; const field_name = tag_type.names.get(ip)[field_index]; @@ -2637,8 +2645,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2696,8 +2704,8 @@ pub const Object = struct { o.debug_compile_unit, // Scope 0, // Line .none, // Underlying type - ty.abiSize(zcu) * 8, - (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8, + ty.abiSize(pt) * 8, + (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&full_fields), ); @@ -2718,13 +2726,13 @@ pub const Object = struct { try debug_param_types.ensureUnusedCapacity(3 + fn_info.param_types.len); // Return type goes first. - if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(zcu)) { - const sret = firstParamSRet(fn_info, zcu, target); + if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(pt)) { + const sret = firstParamSRet(fn_info, pt, target); const ret_ty = if (sret) Type.void else Type.fromInterned(fn_info.return_type); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ret_ty)); if (sret) { - const ptr_ty = try zcu.singleMutPtrType(Type.fromInterned(fn_info.return_type)); + const ptr_ty = try pt.singleMutPtrType(Type.fromInterned(fn_info.return_type)); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty)); } } else { @@ -2732,18 +2740,18 @@ pub const Object = struct { } if (Type.fromInterned(fn_info.return_type).isError(zcu) and - o.module.comp.config.any_error_tracing) + zcu.comp.config.any_error_tracing) { - const ptr_ty = try zcu.singleMutPtrType(try o.getStackTraceType()); + const ptr_ty = try pt.singleMutPtrType(try o.getStackTraceType()); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty)); } for (0..fn_info.param_types.len) |i| { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[i]); - if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; - if (isByRef(param_ty, zcu)) { - const ptr_ty = try zcu.singleMutPtrType(param_ty); + if (isByRef(param_ty, pt)) { + const ptr_ty = try pt.singleMutPtrType(param_ty); debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty)); } else { debug_param_types.appendAssumeCapacity(try o.lowerDebugType(param_ty)); @@ -2770,7 +2778,7 @@ pub const Object = struct { } fn namespaceToDebugScope(o: *Object, namespace_index: InternPool.NamespaceIndex) !Builder.Metadata { - const zcu = o.module; + const zcu = o.pt.zcu; const namespace = zcu.namespacePtr(namespace_index); const file_scope = namespace.fileScope(zcu); if (namespace.parent == .none) return try o.getDebugFile(file_scope); @@ -2783,7 +2791,7 @@ pub const Object = struct { } fn makeEmptyNamespaceDebugType(o: *Object, decl_index: InternPool.DeclIndex) !Builder.Metadata { - const zcu = o.module; + const zcu = o.pt.zcu; const decl = zcu.declPtr(decl_index); const 
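Throughout `lowerDebugType`, the layout queries feeding DWARF move from `zcu` to `pt` (`abiSize`, `abiAlignment`, `structFieldAlignment`, `getUnionLayout`, `unionFieldNormalAlignment`). The recurring bit-width computation, shown here with illustrative local names:

```zig
// Sketch of the recurring debug-info computation after the change; the two
// local names are introduced here for clarity only.
const size_in_bits = ty.abiSize(pt) * 8; // lldb cannot handle non-byte sized types
const align_in_bits = (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8;
```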
file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu); return o.builder.debugStructType( @@ -2799,7 +2807,7 @@ pub const Object = struct { } fn getStackTraceType(o: *Object) Allocator.Error!Type { - const zcu = o.module; + const zcu = o.pt.zcu; const std_mod = zcu.std_mod; const std_file_imported = zcu.importPkg(std_mod) catch unreachable; @@ -2807,13 +2815,13 @@ pub const Object = struct { const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "builtin", .no_embedded_nulls); const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index); const std_namespace = zcu.namespacePtr(zcu.declPtr(std_file_root_decl.unwrap().?).src_namespace); - const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Module.DeclAdapter{ .zcu = zcu }).?; + const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }).?; const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, "StackTrace", .no_embedded_nulls); // buffer is only used for int_type, `builtin` is a struct. const builtin_ty = zcu.declPtr(builtin_decl).val.toType(); const builtin_namespace = zcu.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(zcu)).?; - const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .zcu = zcu }).?; + const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Zcu.DeclAdapter{ .zcu = zcu }).?; const stack_trace_decl = zcu.declPtr(stack_trace_decl_index); // Sema should have ensured that StackTrace was analyzed. @@ -2824,7 +2832,7 @@ pub const Object = struct { fn allocTypeName(o: *Object, ty: Type) Allocator.Error![:0]const u8 { var buffer = std.ArrayList(u8).init(o.gpa); errdefer buffer.deinit(); - try ty.print(buffer.writer(), o.module); + try ty.print(buffer.writer(), o.pt); return buffer.toOwnedSliceSentinel(0); } @@ -2835,7 +2843,8 @@ pub const Object = struct { o: *Object, decl_index: InternPool.DeclIndex, ) Allocator.Error!Builder.Function.Index { - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = o.gpa; const decl = zcu.declPtr(decl_index); @@ -2848,7 +2857,7 @@ pub const Object = struct { assert(decl.has_tv); const fn_info = zcu.typeToFunc(zig_fn_type).?; const target = owner_mod.resolved_target.result; - const sret = firstParamSRet(fn_info, zcu, target); + const sret = firstParamSRet(fn_info, pt, target); const is_extern = decl.isExtern(zcu); const function_index = try o.builder.addFunction( @@ -2929,14 +2938,14 @@ pub const Object = struct { .byval => { const param_index = it.zig_index - 1; const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]); - if (!isByRef(param_ty, zcu)) { + if (!isByRef(param_ty, pt)) { try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); const param_llvm_ty = try o.lowerType(param_ty); - const alignment = param_ty.abiAlignment(zcu); + const alignment = param_ty.abiAlignment(pt); try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment.toLlvm(), it.byval_attr, param_llvm_ty); }, .byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder), @@ -2964,7 +2973,7 @@ pub const Object = struct { attributes: *Builder.FunctionAttributes.Wip, owner_mod: *Package.Module, ) Allocator.Error!void { - const comp = o.module.comp; + const comp = o.pt.zcu.comp; if (!owner_mod.red_zone) { try 
attributes.addFnAttr(.noredzone, &o.builder); @@ -3039,7 +3048,7 @@ pub const Object = struct { } errdefer assert(o.anon_decl_map.remove(decl_val)); - const mod = o.module; + const mod = o.pt.zcu; const decl_ty = mod.intern_pool.typeOf(decl_val); const variable_index = try o.builder.addVariable( @@ -3065,7 +3074,7 @@ pub const Object = struct { if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable; errdefer assert(o.decl_map.remove(decl_index)); - const zcu = o.module; + const zcu = o.pt.zcu; const decl = zcu.declPtr(decl_index); const is_extern = decl.isExtern(zcu); @@ -3100,11 +3109,12 @@ pub const Object = struct { } fn errorIntType(o: *Object) Allocator.Error!Builder.Type { - return o.builder.intType(o.module.errorSetBits()); + return o.builder.intType(o.pt.zcu.errorSetBits()); } fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const target = mod.getTarget(); const ip = &mod.intern_pool; return switch (t.toIntern()) { @@ -3230,7 +3240,7 @@ pub const Object = struct { ), .opt_type => |child_ty| { // Must stay in sync with `opt_payload` logic in `lowerPtr`. - if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(mod)) return .i8; + if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(pt)) return .i8; const payload_ty = try o.lowerType(Type.fromInterned(child_ty)); if (t.optionalReprIsPayload(mod)) return payload_ty; @@ -3238,8 +3248,8 @@ pub const Object = struct { comptime assert(optional_layout_version == 3); var fields: [3]Builder.Type = .{ payload_ty, .i8, undefined }; var fields_len: usize = 2; - const offset = Type.fromInterned(child_ty).abiSize(mod) + 1; - const abi_size = t.abiSize(mod); + const offset = Type.fromInterned(child_ty).abiSize(pt) + 1; + const abi_size = t.abiSize(pt); const padding_len = abi_size - offset; if (padding_len > 0) { fields[2] = try o.builder.arrayType(padding_len, .i8); @@ -3252,16 +3262,16 @@ pub const Object = struct { // Must stay in sync with `codegen.errUnionPayloadOffset`. // See logic in `lowerPtr`. 
const error_type = try o.errorIntType(); - if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(mod)) + if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(pt)) return error_type; const payload_type = try o.lowerType(Type.fromInterned(error_union_type.payload_type)); - const err_int_ty = try mod.errorIntType(); + const err_int_ty = try o.pt.errorIntType(); - const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(mod); - const error_align = err_int_ty.abiAlignment(mod); + const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(pt); + const error_align = err_int_ty.abiAlignment(pt); - const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(mod); - const error_size = err_int_ty.abiSize(mod); + const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(pt); + const error_size = err_int_ty.abiSize(pt); var fields: [3]Builder.Type = undefined; var fields_len: usize = 2; @@ -3317,12 +3327,12 @@ pub const Object = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - const field_align = mod.structFieldAlignment( + const field_align = pt.structFieldAlignment( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, ); - const field_ty_align = field_ty.abiAlignment(mod); + const field_ty_align = field_ty.abiAlignment(pt); if (field_align.compare(.lt, field_ty_align)) struct_kind = .@"packed"; big_align = big_align.max(field_align); const prev_offset = offset; @@ -3334,7 +3344,7 @@ pub const Object = struct { try o.builder.arrayType(padding_len, .i8), ); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field. If there are runtime bits after this field, // map to the next LLVM field (which we know exists): otherwise, don't // map the field, indicating it's at the end of the struct. @@ -3353,7 +3363,7 @@ pub const Object = struct { }, @intCast(llvm_field_types.items.len)); try llvm_field_types.append(o.gpa, try o.lowerType(field_ty)); - offset += field_ty.abiSize(mod); + offset += field_ty.abiSize(pt); } { const prev_offset = offset; @@ -3386,7 +3396,7 @@ pub const Object = struct { var offset: u64 = 0; var big_align: InternPool.Alignment = .none; - const struct_size = t.abiSize(mod); + const struct_size = t.abiSize(pt); for ( anon_struct_type.types.get(ip), @@ -3395,7 +3405,7 @@ pub const Object = struct { ) |field_ty, field_val, field_index| { if (field_val != .none) continue; - const field_align = Type.fromInterned(field_ty).abiAlignment(mod); + const field_align = Type.fromInterned(field_ty).abiAlignment(pt); big_align = big_align.max(field_align); const prev_offset = offset; offset = field_align.forward(offset); @@ -3405,7 +3415,7 @@ pub const Object = struct { o.gpa, try o.builder.arrayType(padding_len, .i8), ); - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) { + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field. If there are runtime bits after this field, // map to the next LLVM field (which we know exists): otherwise, don't // map the field, indicating it's at the end of the struct. 
@@ -3423,7 +3433,7 @@ pub const Object = struct { }, @intCast(llvm_field_types.items.len)); try llvm_field_types.append(o.gpa, try o.lowerType(Type.fromInterned(field_ty))); - offset += Type.fromInterned(field_ty).abiSize(mod); + offset += Type.fromInterned(field_ty).abiSize(pt); } { const prev_offset = offset; @@ -3440,10 +3450,10 @@ pub const Object = struct { if (o.type_map.get(t.toIntern())) |value| return value; const union_obj = ip.loadUnionType(t.toIntern()); - const layout = mod.getUnionLayout(union_obj); + const layout = pt.getUnionLayout(union_obj); if (union_obj.flagsPtr(ip).layout == .@"packed") { - const int_ty = try o.builder.intType(@intCast(t.bitSize(mod))); + const int_ty = try o.builder.intType(@intCast(t.bitSize(pt))); try o.type_map.put(o.gpa, t.toIntern(), int_ty); return int_ty; } @@ -3552,18 +3562,20 @@ pub const Object = struct { /// being a zero bit type, but it should still be lowered as an i8 in such case. /// There are other similar cases handled here as well. fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { .Opaque => true, .Fn => !mod.typeToFunc(elem_ty).?.is_generic, - .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod), - else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), + .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(pt), + else => elem_ty.hasRuntimeBitsIgnoreComptime(pt), }; return if (lower_elem_ty) try o.lowerType(elem_ty) else .i8; } fn lowerTypeFn(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); const ret_ty = try lowerFnRetTy(o, fn_info); @@ -3571,14 +3583,14 @@ pub const Object = struct { var llvm_params = std.ArrayListUnmanaged(Builder.Type){}; defer llvm_params.deinit(o.gpa); - if (firstParamSRet(fn_info, mod, target)) { + if (firstParamSRet(fn_info, pt, target)) { try llvm_params.append(o.gpa, .ptr); } if (Type.fromInterned(fn_info.return_type).isError(mod) and mod.comp.config.any_error_tracing) { - const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType()); + const ptr_ty = try pt.singleMutPtrType(try o.getStackTraceType()); try llvm_params.append(o.gpa, try o.lowerType(ptr_ty)); } @@ -3595,7 +3607,7 @@ pub const Object = struct { .abi_sized_int => { const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]); try llvm_params.append(o.gpa, try o.builder.intType( - @intCast(param_ty.abiSize(mod) * 8), + @intCast(param_ty.abiSize(pt) * 8), )); }, .slice => { @@ -3633,7 +3645,8 @@ pub const Object = struct { } fn lowerValueToInt(o: *Object, llvm_int_ty: Builder.Type, arg_val: InternPool.Index) Error!Builder.Constant { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); @@ -3666,15 +3679,15 @@ pub const Object = struct { var running_int = try o.builder.intConst(llvm_int_ty, 0); var running_bits: u16 = 0; for (struct_type.field_types.get(ip), 0..) 
|field_ty, field_index| { - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; const shift_rhs = try o.builder.intConst(llvm_int_ty, running_bits); - const field_val = try o.lowerValueToInt(llvm_int_ty, (try val.fieldValue(mod, field_index)).toIntern()); + const field_val = try o.lowerValueToInt(llvm_int_ty, (try val.fieldValue(pt, field_index)).toIntern()); const shifted = try o.builder.binConst(.shl, field_val, shift_rhs); running_int = try o.builder.binConst(.xor, running_int, shifted); - const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(mod)); + const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(pt)); running_bits += ty_bit_size; } return running_int; @@ -3683,7 +3696,7 @@ pub const Object = struct { else => unreachable, }, .un => |un| { - const layout = ty.unionGetLayout(mod); + const layout = ty.unionGetLayout(pt); if (layout.payload_size == 0) return o.lowerValue(un.tag); const union_obj = mod.typeToUnion(ty).?; @@ -3701,7 +3714,7 @@ pub const Object = struct { } const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(llvm_int_ty, 0); + if (!field_ty.hasRuntimeBits(pt)) return o.builder.intConst(llvm_int_ty, 0); return o.lowerValueToInt(llvm_int_ty, un.val); }, .simple_value => |simple_value| switch (simple_value) { @@ -3715,7 +3728,7 @@ pub const Object = struct { .opt => {}, // pointer like optional expected else => unreachable, } - const bits = ty.bitSize(mod); + const bits = ty.bitSize(pt); const bytes: usize = @intCast(std.mem.alignForward(u64, bits, 8) / 8); var stack = std.heap.stackFallback(32, o.gpa); @@ -3729,12 +3742,7 @@ pub const Object = struct { defer allocator.free(limbs); @memset(limbs, 0); - val.writeToPackedMemory( - ty, - mod, - std.mem.sliceAsBytes(limbs)[0..bytes], - 0, - ) catch unreachable; + val.writeToPackedMemory(ty, pt, std.mem.sliceAsBytes(limbs)[0..bytes], 0) catch unreachable; if (builtin.target.cpu.arch.endian() == .little) { if (target.cpu.arch.endian() == .big) @@ -3752,7 +3760,8 @@ pub const Object = struct { } fn lowerValue(o: *Object, arg_val: InternPool.Index) Error!Builder.Constant { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); @@ -3811,7 +3820,7 @@ pub const Object = struct { }, .int => { var bigint_space: Value.BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_space, mod); + const bigint = val.toBigInt(&bigint_space, pt); return lowerBigInt(o, ty, bigint); }, .err => |err| { @@ -3821,24 +3830,24 @@ pub const Object = struct { }, .error_union => |error_union| { const err_val = switch (error_union.val) { - .err_name => |err_name| try mod.intern(.{ .err = .{ + .err_name => |err_name| try pt.intern(.{ .err = .{ .ty = ty.errorUnionSet(mod).toIntern(), .name = err_name, } }), - .payload => (try mod.intValue(try mod.errorIntType(), 0)).toIntern(), + .payload => (try pt.intValue(try pt.errorIntType(), 0)).toIntern(), }; - const err_int_ty = try mod.errorIntType(); + const err_int_ty = try pt.errorIntType(); const payload_type = ty.errorUnionPayload(mod); - if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) { // We use the error type directly as the type. 
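The packed-struct branch of `lowerValueToInt` keeps its shift-and-xor accumulation, but field values and bit sizes are now read through `pt`. The post-change loop, consolidated from the hunk above:

```zig
// Post-change form of the packed-struct constant lowering loop from the hunk above:
// each runtime field is lowered to an integer, shifted into position, and folded
// into a running accumulator.
var running_int = try o.builder.intConst(llvm_int_ty, 0);
var running_bits: u16 = 0;
for (struct_type.field_types.get(ip), 0..) |field_ty, field_index| {
    if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
    const shift_rhs = try o.builder.intConst(llvm_int_ty, running_bits);
    const field_val = try o.lowerValueToInt(llvm_int_ty, (try val.fieldValue(pt, field_index)).toIntern());
    const shifted = try o.builder.binConst(.shl, field_val, shift_rhs);
    running_int = try o.builder.binConst(.xor, running_int, shifted);
    running_bits += @intCast(Type.fromInterned(field_ty).bitSize(pt));
}
return running_int;
```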
return o.lowerValue(err_val); } - const payload_align = payload_type.abiAlignment(mod); - const error_align = err_int_ty.abiAlignment(mod); + const payload_align = payload_type.abiAlignment(pt); + const error_align = err_int_ty.abiAlignment(pt); const llvm_error_value = try o.lowerValue(err_val); const llvm_payload_value = try o.lowerValue(switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }), + .err_name => try pt.intern(.{ .undef = payload_type.toIntern() }), .payload => |payload| payload, }); @@ -3869,16 +3878,16 @@ pub const Object = struct { .enum_tag => |enum_tag| o.lowerValue(enum_tag.int), .float => switch (ty.floatBits(target)) { 16 => if (backendSupportsF16(target)) - try o.builder.halfConst(val.toFloat(f16, mod)) + try o.builder.halfConst(val.toFloat(f16, pt)) else - try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, mod)))), - 32 => try o.builder.floatConst(val.toFloat(f32, mod)), - 64 => try o.builder.doubleConst(val.toFloat(f64, mod)), + try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, pt)))), + 32 => try o.builder.floatConst(val.toFloat(f32, pt)), + 64 => try o.builder.doubleConst(val.toFloat(f64, pt)), 80 => if (backendSupportsF80(target)) - try o.builder.x86_fp80Const(val.toFloat(f80, mod)) + try o.builder.x86_fp80Const(val.toFloat(f80, pt)) else - try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, mod)))), - 128 => try o.builder.fp128Const(val.toFloat(f128, mod)), + try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, pt)))), + 128 => try o.builder.fp128Const(val.toFloat(f128, pt)), else => unreachable, }, .ptr => try o.lowerPtr(arg_val, 0), @@ -3891,7 +3900,7 @@ pub const Object = struct { const payload_ty = ty.optionalChild(mod); const non_null_bit = try o.builder.intConst(.i8, @intFromBool(opt.val != .none)); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return non_null_bit; } const llvm_ty = try o.lowerType(ty); @@ -3909,7 +3918,7 @@ pub const Object = struct { var fields: [3]Builder.Type = undefined; var vals: [3]Builder.Constant = undefined; vals[0] = try o.lowerValue(switch (opt.val) { - .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), + .none => try pt.intern(.{ .undef = payload_ty.toIntern() }), else => |payload| payload, }); vals[1] = non_null_bit; @@ -4058,9 +4067,9 @@ pub const Object = struct { 0.., ) |field_ty, field_val, field_index| { if (field_val != .none) continue; - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; - const field_align = Type.fromInterned(field_ty).abiAlignment(mod); + const field_align = Type.fromInterned(field_ty).abiAlignment(pt); big_align = big_align.max(field_align); const prev_offset = offset; offset = field_align.forward(offset); @@ -4076,13 +4085,13 @@ pub const Object = struct { } vals[llvm_index] = - try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern()); + try o.lowerValue((try val.fieldValue(pt, field_index)).toIntern()); fields[llvm_index] = vals[llvm_index].typeOf(&o.builder); if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index]) need_unnamed = true; llvm_index += 1; - offset += Type.fromInterned(field_ty).abiSize(mod); + offset += Type.fromInterned(field_ty).abiSize(pt); } { const prev_offset = offset; @@ -4109,7 +4118,7 @@ pub const Object = struct { if (struct_type.layout == .@"packed") { comptime 
assert(Type.packed_struct_layout_version == 2); - const bits = ty.bitSize(mod); + const bits = ty.bitSize(pt); const llvm_int_ty = try o.builder.intType(@intCast(bits)); return o.lowerValueToInt(llvm_int_ty, arg_val); @@ -4138,7 +4147,7 @@ pub const Object = struct { var field_it = struct_type.iterateRuntimeOrder(ip); while (field_it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - const field_align = mod.structFieldAlignment( + const field_align = pt.structFieldAlignment( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, @@ -4158,20 +4167,20 @@ pub const Object = struct { llvm_index += 1; } - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field - we only needed it for the alignment. continue; } vals[llvm_index] = try o.lowerValue( - (try val.fieldValue(mod, field_index)).toIntern(), + (try val.fieldValue(pt, field_index)).toIntern(), ); fields[llvm_index] = vals[llvm_index].typeOf(&o.builder); if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index]) need_unnamed = true; llvm_index += 1; - offset += field_ty.abiSize(mod); + offset += field_ty.abiSize(pt); } { const prev_offset = offset; @@ -4195,7 +4204,7 @@ pub const Object = struct { }, .un => |un| { const union_ty = try o.lowerType(ty); - const layout = ty.unionGetLayout(mod); + const layout = ty.unionGetLayout(pt); if (layout.payload_size == 0) return o.lowerValue(un.tag); const union_obj = mod.typeToUnion(ty).?; @@ -4206,8 +4215,8 @@ pub const Object = struct { const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); if (container_layout == .@"packed") { - if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(union_ty, 0); - const bits = ty.bitSize(mod); + if (!field_ty.hasRuntimeBits(pt)) return o.builder.intConst(union_ty, 0); + const bits = ty.bitSize(pt); const llvm_int_ty = try o.builder.intType(@intCast(bits)); return o.lowerValueToInt(llvm_int_ty, arg_val); @@ -4219,7 +4228,7 @@ pub const Object = struct { // must pointer cast to the expected type before accessing the union. 
need_unnamed = layout.most_aligned_field != field_index; - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { const padding_len = layout.payload_size; break :p try o.builder.undefConst(try o.builder.arrayType(padding_len, .i8)); } @@ -4228,7 +4237,7 @@ pub const Object = struct { if (payload_ty != union_ty.structFields(&o.builder)[ @intFromBool(layout.tag_align.compare(.gte, layout.payload_align)) ]) need_unnamed = true; - const field_size = field_ty.abiSize(mod); + const field_size = field_ty.abiSize(pt); if (field_size == layout.payload_size) break :p payload; const padding_len = layout.payload_size - field_size; const padding_ty = try o.builder.arrayType(padding_len, .i8); @@ -4239,7 +4248,7 @@ pub const Object = struct { } else p: { assert(layout.tag_size == 0); if (container_layout == .@"packed") { - const bits = ty.bitSize(mod); + const bits = ty.bitSize(pt); const llvm_int_ty = try o.builder.intType(@intCast(bits)); return o.lowerValueToInt(llvm_int_ty, arg_val); @@ -4286,7 +4295,7 @@ pub const Object = struct { ty: Type, bigint: std.math.big.int.Const, ) Allocator.Error!Builder.Constant { - const mod = o.module; + const mod = o.pt.zcu; return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(mod).bits), bigint); } @@ -4295,7 +4304,8 @@ pub const Object = struct { ptr_val: InternPool.Index, prev_offset: u64, ) Error!Builder.Constant { - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr; const offset: u64 = prev_offset + ptr.byte_offset; return switch (ptr.base_addr) { @@ -4320,7 +4330,7 @@ pub const Object = struct { eu_ptr, offset + @import("../codegen.zig").errUnionPayloadOffset( Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu), - zcu, + pt, ), ), .opt_payload => |opt_ptr| try o.lowerPtr(opt_ptr, offset), @@ -4336,7 +4346,7 @@ pub const Object = struct { }; }, .Struct, .Union => switch (agg_ty.containerLayout(zcu)) { - .auto => agg_ty.structFieldOffset(@intCast(field.index), zcu), + .auto => agg_ty.structFieldOffset(@intCast(field.index), pt), .@"extern", .@"packed" => unreachable, }, else => unreachable, @@ -4353,7 +4363,8 @@ pub const Object = struct { o: *Object, anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, ) Error!Builder.Constant { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const decl_val = anon_decl.val; const decl_ty = Type.fromInterned(ip.typeOf(decl_val)); @@ -4370,14 +4381,14 @@ pub const Object = struct { const ptr_ty = Type.fromInterned(anon_decl.orig_ty); const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or + if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty); if (is_fn_body) @panic("TODO"); const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(mod), target); - const alignment = ptr_ty.ptrAlignment(mod); + const alignment = ptr_ty.ptrAlignment(pt); const llvm_global = (try o.resolveGlobalAnonDecl(decl_val, llvm_addr_space, alignment)).ptrConst(&o.builder).global; const llvm_val = try o.builder.convConst( @@ -4389,7 +4400,8 @@ pub const Object = struct { } fn lowerDeclRefValue(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; // In the case of something like: // fn foo() void {} @@ -4408,10 +4420,10 @@ pub const Object = struct { } const 
decl_ty = decl.typeOf(mod); - const ptr_ty = try decl.declPtrType(mod); + const ptr_ty = try decl.declPtrType(pt); const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or + if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) { return o.lowerPtrToVoid(ptr_ty); @@ -4431,7 +4443,7 @@ pub const Object = struct { } fn lowerPtrToVoid(o: *Object, ptr_ty: Type) Allocator.Error!Builder.Constant { - const mod = o.module; + const mod = o.pt.zcu; // Even though we are pointing at something which has zero bits (e.g. `void`), // Pointers are defined to have bits. So we must return something here. // The value cannot be undefined, because we use the `nonnull` annotation @@ -4459,20 +4471,21 @@ pub const Object = struct { /// RMW exchange of floating-point values is bitcasted to same-sized integer /// types to work around a LLVM deficiency when targeting ARM/AArch64. fn getAtomicAbiType(o: *Object, ty: Type, is_rmw_xchg: bool) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const int_ty = switch (ty.zigTypeTag(mod)) { .Int => ty, .Enum => ty.intTagType(mod), .Float => { if (!is_rmw_xchg) return .none; - return o.builder.intType(@intCast(ty.abiSize(mod) * 8)); + return o.builder.intType(@intCast(ty.abiSize(pt) * 8)); }, .Bool => return .i8, else => return .none, }; const bit_count = int_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) { - return o.builder.intType(@intCast(int_ty.abiSize(mod) * 8)); + return o.builder.intType(@intCast(int_ty.abiSize(pt) * 8)); } else { return .none; } @@ -4486,7 +4499,8 @@ pub const Object = struct { fn_info: InternPool.Key.FuncType, llvm_arg_i: u32, ) Allocator.Error!void { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; if (param_ty.isPtrAtRuntime(mod)) { const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, param_index)) |i| { @@ -4507,7 +4521,7 @@ pub const Object = struct { const elem_align = if (ptr_info.flags.alignment != .none) ptr_info.flags.alignment else - Type.fromInterned(ptr_info.child).abiAlignment(mod).max(.@"1"); + Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1"); try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align.toLlvm() }, &o.builder); } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) { .signed => try attributes.addParamAttr(llvm_arg_i, .signext, &o.builder), @@ -4540,7 +4554,7 @@ pub const Object = struct { const name = try o.builder.strtabString(lt_errors_fn_name); if (o.builder.getGlobal(name)) |llvm_fn| return llvm_fn.ptrConst(&o.builder).kind.function; - const zcu = o.module; + const zcu = o.pt.zcu; const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(.i1, &.{try o.errorIntType()}, .normal), @@ -4559,7 +4573,8 @@ pub const Object = struct { } fn getEnumTagNameFunction(o: *Object, enum_ty: Type) !Builder.Function.Index { - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const enum_type = ip.loadEnumType(enum_ty.toIntern()); @@ -4618,7 +4633,7 @@ pub const Object = struct { const return_block = try wip.block(1, "Name"); const this_tag_int_value = try o.lowerValue( - (try zcu.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), + (try pt.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), ); try wip_switch.addCase(this_tag_int_value, return_block, &wip); @@ 
-4636,13 +4651,13 @@ pub const Object = struct { pub const DeclGen = struct { object: *Object, - decl: *Module.Decl, + decl: *Zcu.Decl, decl_index: InternPool.DeclIndex, - err_msg: ?*Module.ErrorMsg, + err_msg: ?*Zcu.ErrorMsg, fn ownerModule(dg: DeclGen) *Package.Module { const o = dg.object; - const zcu = o.module; + const zcu = o.pt.zcu; const namespace = zcu.namespacePtr(dg.decl.src_namespace); const file_scope = namespace.fileScope(zcu); return file_scope.mod; @@ -4653,15 +4668,15 @@ pub const DeclGen = struct { assert(dg.err_msg == null); const o = dg.object; const gpa = o.gpa; - const mod = o.module; - const src_loc = dg.decl.navSrcLoc(mod); - dg.err_msg = try Module.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args); + const src_loc = dg.decl.navSrcLoc(o.pt.zcu); + dg.err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args); return error.CodegenFail; } fn genDecl(dg: *DeclGen) !void { const o = dg.object; - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const ip = &zcu.intern_pool; const decl = dg.decl; const decl_index = dg.decl_index; @@ -4672,7 +4687,7 @@ pub const DeclGen = struct { } else { const variable_index = try o.resolveGlobalDecl(decl_index); variable_index.setAlignment( - decl.getAlignment(zcu).toLlvm(), + decl.getAlignment(pt).toLlvm(), &o.builder, ); if (decl.@"linksection".toSlice(ip)) |section| @@ -4833,23 +4848,21 @@ pub const FuncGen = struct { const gop = try self.func_inst_table.getOrPut(gpa, inst); if (gop.found_existing) return gop.value_ptr.*; - const o = self.dg.object; - const mod = o.module; - const llvm_val = try self.resolveValue((try self.air.value(inst, mod)).?); + const llvm_val = try self.resolveValue((try self.air.value(inst, self.dg.object.pt)).?); gop.value_ptr.* = llvm_val.toValue(); return llvm_val.toValue(); } fn resolveValue(self: *FuncGen, val: Value) Error!Builder.Constant { const o = self.dg.object; - const mod = o.module; - const ty = val.typeOf(mod); + const pt = o.pt; + const ty = val.typeOf(pt.zcu); const llvm_val = try o.lowerValue(val.toIntern()); - if (!isByRef(ty, mod)) return llvm_val; + if (!isByRef(ty, pt)) return llvm_val; // We have an LLVM value but we need to create a global constant and // set the value as its initializer, and then return a pointer to the global. 
- const target = mod.getTarget(); + const target = pt.zcu.getTarget(); const variable_index = try o.builder.addVariable( .empty, llvm_val.typeOf(&o.builder), @@ -4859,7 +4872,7 @@ pub const FuncGen = struct { variable_index.setLinkage(.private, &o.builder); variable_index.setMutability(.constant, &o.builder); variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); - variable_index.setAlignment(ty.abiAlignment(mod).toLlvm(), &o.builder); + variable_index.setAlignment(ty.abiAlignment(pt).toLlvm(), &o.builder); return o.builder.convConst( variable_index.toConst(&o.builder), try o.builder.ptrType(toLlvmAddressSpace(.generic, target)), @@ -4868,10 +4881,10 @@ pub const FuncGen = struct { fn resolveNullOptUsize(self: *FuncGen) Error!Builder.Constant { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; if (o.null_opt_usize == .no_init) { - o.null_opt_usize = try self.resolveValue(Value.fromInterned(try mod.intern(.{ .opt = .{ - .ty = try mod.intern(.{ .opt_type = .usize_type }), + o.null_opt_usize = try self.resolveValue(Value.fromInterned(try pt.intern(.{ .opt = .{ + .ty = try pt.intern(.{ .opt_type = .usize_type }), .val = .none, } }))); } @@ -4880,7 +4893,7 @@ pub const FuncGen = struct { fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body, 0..) |inst, i| { @@ -5145,7 +5158,8 @@ pub const FuncGen = struct { if (maybe_inline_func) |inline_func| { const o = self.dg.object; - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const func = zcu.funcInfo(inline_func); const decl_index = func.owner_decl; @@ -5161,7 +5175,7 @@ pub const FuncGen = struct { const fqn = try decl.fullyQualifiedName(zcu); - const fn_ty = try zcu.funcType(.{ + const fn_ty = try pt.funcType(.{ .param_types = &.{}, .return_type = .void_type, }); @@ -5228,7 +5242,8 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.Call, pl_op.payload); const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const callee_ty = self.typeOf(pl_op.operand); const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { @@ -5240,7 +5255,7 @@ pub const FuncGen = struct { const return_type = Type.fromInterned(fn_info.return_type); const llvm_fn = try self.resolveInst(pl_op.operand); const target = mod.getTarget(); - const sret = firstParamSRet(fn_info, mod, target); + const sret = firstParamSRet(fn_info, pt, target); var llvm_args = std.ArrayList(Builder.Value).init(self.gpa); defer llvm_args.deinit(); @@ -5258,14 +5273,13 @@ pub const FuncGen = struct { const llvm_ret_ty = try o.lowerType(return_type); try attributes.addParamAttr(0, .{ .sret = llvm_ret_ty }, &o.builder); - const alignment = return_type.abiAlignment(mod).toLlvm(); + const alignment = return_type.abiAlignment(pt).toLlvm(); const ret_ptr = try self.buildAllocaWorkaround(return_type, alignment); try llvm_args.append(ret_ptr); break :blk ret_ptr; }; - const err_return_tracing = return_type.isError(mod) and - o.module.comp.config.any_error_tracing; + const err_return_tracing = return_type.isError(mod) and mod.comp.config.any_error_tracing; if (err_return_tracing) { assert(self.err_ret_trace != .none); try llvm_args.append(self.err_ret_trace); @@ -5279,8 +5293,8 @@ pub const FuncGen = struct { const param_ty = 
self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); const llvm_param_ty = try o.lowerType(param_ty); - if (isByRef(param_ty, mod)) { - const alignment = param_ty.abiAlignment(mod).toLlvm(); + if (isByRef(param_ty, pt)) { + const alignment = param_ty.abiAlignment(pt).toLlvm(); const loaded = try self.wip.load(.normal, llvm_param_ty, llvm_arg, alignment, ""); try llvm_args.append(loaded); } else { @@ -5291,10 +5305,10 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - if (isByRef(param_ty, mod)) { + if (isByRef(param_ty, pt)) { try llvm_args.append(llvm_arg); } else { - const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const param_llvm_ty = llvm_arg.typeOfWip(&self.wip); const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment); _ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment); @@ -5306,10 +5320,10 @@ pub const FuncGen = struct { const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const param_llvm_ty = try o.lowerType(param_ty); const arg_ptr = try self.buildAllocaWorkaround(param_ty, alignment); - if (isByRef(param_ty, mod)) { + if (isByRef(param_ty, pt)) { const loaded = try self.wip.load(.normal, param_llvm_ty, llvm_arg, alignment, ""); _ = try self.wip.store(.normal, loaded, arg_ptr, alignment); } else { @@ -5321,16 +5335,16 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8)); + const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(pt) * 8)); - if (isByRef(param_ty, mod)) { - const alignment = param_ty.abiAlignment(mod).toLlvm(); + if (isByRef(param_ty, pt)) { + const alignment = param_ty.abiAlignment(pt).toLlvm(); const loaded = try self.wip.load(.normal, int_llvm_ty, llvm_arg, alignment, ""); try llvm_args.append(loaded); } else { // LLVM does not allow bitcasting structs so we must allocate // a local, store as one type, and then load as another type. 
- const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const int_ptr = try self.buildAllocaWorkaround(param_ty, alignment); _ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment); const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, ""); @@ -5349,9 +5363,9 @@ pub const FuncGen = struct { const param_ty = self.typeOf(arg); const llvm_types = it.types_buffer[0..it.types_len]; const llvm_arg = try self.resolveInst(arg); - const is_by_ref = isByRef(param_ty, mod); + const is_by_ref = isByRef(param_ty, pt); const arg_ptr = if (is_by_ref) llvm_arg else ptr: { - const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); break :ptr ptr; @@ -5377,8 +5391,8 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); - const alignment = arg_ty.abiAlignment(mod).toLlvm(); - if (!isByRef(arg_ty, mod)) { + const alignment = arg_ty.abiAlignment(pt).toLlvm(); + if (!isByRef(arg_ty, pt)) { const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); llvm_arg = ptr; @@ -5395,8 +5409,8 @@ pub const FuncGen = struct { const arg = args[it.zig_index - 1]; const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); - const alignment = arg_ty.abiAlignment(mod).toLlvm(); - if (!isByRef(arg_ty, mod)) { + const alignment = arg_ty.abiAlignment(pt).toLlvm(); + if (!isByRef(arg_ty, pt)) { const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); llvm_arg = ptr; @@ -5418,7 +5432,7 @@ pub const FuncGen = struct { .byval => { const param_index = it.zig_index - 1; const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]); - if (!isByRef(param_ty, mod)) { + if (!isByRef(param_ty, pt)) { try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1); } }, @@ -5426,7 +5440,7 @@ pub const FuncGen = struct { const param_index = it.zig_index - 1; const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]); const param_llvm_ty = try o.lowerType(param_ty); - const alignment = param_ty.abiAlignment(mod).toLlvm(); + const alignment = param_ty.abiAlignment(pt).toLlvm(); try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder), @@ -5460,7 +5474,7 @@ pub const FuncGen = struct { const elem_align = (if (ptr_info.flags.alignment != .none) @as(InternPool.Alignment, ptr_info.flags.alignment) else - Type.fromInterned(ptr_info.child).abiAlignment(mod).max(.@"1")).toLlvm(); + Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1")).toLlvm(); try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder); }, }; @@ -5485,17 +5499,17 @@ pub const FuncGen = struct { return .none; } - if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(pt)) { return .none; } const llvm_ret_ty = try o.lowerType(return_type); if (ret_ptr) |rp| { - if (isByRef(return_type, mod)) { + if (isByRef(return_type, pt)) { return rp; } else 
{ // our by-ref status disagrees with sret so we must load. - const return_alignment = return_type.abiAlignment(mod).toLlvm(); + const return_alignment = return_type.abiAlignment(pt).toLlvm(); return self.wip.load(.normal, llvm_ret_ty, rp, return_alignment, ""); } } @@ -5506,19 +5520,19 @@ pub const FuncGen = struct { // In this case the function return type is honoring the calling convention by having // a different LLVM type than the usual one. We solve this here at the callsite // by using our canonical type, then loading it if necessary. - const alignment = return_type.abiAlignment(mod).toLlvm(); + const alignment = return_type.abiAlignment(pt).toLlvm(); const rp = try self.buildAlloca(abi_ret_ty, alignment); _ = try self.wip.store(.normal, call, rp, alignment); - return if (isByRef(return_type, mod)) + return if (isByRef(return_type, pt)) rp else try self.wip.load(.normal, llvm_ret_ty, rp, alignment, ""); } - if (isByRef(return_type, mod)) { + if (isByRef(return_type, pt)) { // our by-ref status disagrees with sret so we must allocate, store, // and return the allocation pointer. - const alignment = return_type.abiAlignment(mod).toLlvm(); + const alignment = return_type.abiAlignment(pt).toLlvm(); const rp = try self.buildAlloca(llvm_ret_ty, alignment); _ = try self.wip.store(.normal, call, rp, alignment); return rp; @@ -5527,9 +5541,9 @@ pub const FuncGen = struct { } } - fn buildSimplePanic(fg: *FuncGen, panic_id: Module.PanicId) !void { + fn buildSimplePanic(fg: *FuncGen, panic_id: Zcu.PanicId) !void { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const msg_decl_index = mod.panic_messages[@intFromEnum(panic_id)].unwrap().?; const msg_decl = mod.declPtr(msg_decl_index); const msg_len = msg_decl.typeOf(mod).childType(mod).arrayLen(mod); @@ -5567,15 +5581,16 @@ pub const FuncGen = struct { fn airRet(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ret_ty = self.typeOf(un_op); if (self.ret_ptr != .none) { - const ptr_ty = try mod.singleMutPtrType(ret_ty); + const ptr_ty = try pt.singleMutPtrType(ret_ty); const operand = try self.resolveInst(un_op); - const val_is_undef = if (try self.air.value(un_op, mod)) |val| val.isUndefDeep(mod) else false; + const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(mod) else false; if (val_is_undef and safety) undef: { const ptr_info = ptr_ty.ptrInfo(mod); const needs_bitmask = (ptr_info.packed_offset.host_size != 0); @@ -5585,10 +5600,10 @@ pub const FuncGen = struct { // https://github.com/ziglang/zig/issues/15337 break :undef; } - const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(mod)); + const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(pt)); _ = try self.wip.callMemSet( self.ret_ptr, - ptr_ty.ptrAlignment(mod).toLlvm(), + ptr_ty.ptrAlignment(pt).toLlvm(), try o.builder.intValue(.i8, 0xaa), len, if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal, @@ -5615,7 +5630,7 @@ pub const FuncGen = struct { return .none; } const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?; - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (Type.fromInterned(fn_info.return_type).isError(mod)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be 
function pointers coerced @@ -5629,13 +5644,13 @@ pub const FuncGen = struct { const abi_ret_ty = try lowerFnRetTy(o, fn_info); const operand = try self.resolveInst(un_op); - const val_is_undef = if (try self.air.value(un_op, mod)) |val| val.isUndefDeep(mod) else false; - const alignment = ret_ty.abiAlignment(mod).toLlvm(); + const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(mod) else false; + const alignment = ret_ty.abiAlignment(pt).toLlvm(); if (val_is_undef and safety) { const llvm_ret_ty = operand.typeOfWip(&self.wip); const rp = try self.buildAlloca(llvm_ret_ty, alignment); - const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(mod)); + const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(pt)); _ = try self.wip.callMemSet( rp, alignment, @@ -5651,7 +5666,7 @@ pub const FuncGen = struct { return .none; } - if (isByRef(ret_ty, mod)) { + if (isByRef(ret_ty, pt)) { // operand is a pointer however self.ret_ptr is null so that means // we need to return a value. _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, operand, alignment, "")); @@ -5672,12 +5687,13 @@ pub const FuncGen = struct { fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(mod); const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?; - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (Type.fromInterned(fn_info.return_type).isError(mod)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced @@ -5694,7 +5710,7 @@ pub const FuncGen = struct { } const ptr = try self.resolveInst(un_op); const abi_ret_ty = try lowerFnRetTy(o, fn_info); - const alignment = ret_ty.abiAlignment(mod).toLlvm(); + const alignment = ret_ty.abiAlignment(pt).toLlvm(); _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, ptr, alignment, "")); return .none; } @@ -5711,17 +5727,17 @@ pub const FuncGen = struct { fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + const pt = o.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src_list = try self.resolveInst(ty_op.operand); const va_list_ty = ty_op.ty.toType(); const llvm_va_list_ty = try o.lowerType(va_list_ty); - const mod = o.module; - const result_alignment = va_list_ty.abiAlignment(mod).toLlvm(); + const result_alignment = va_list_ty.abiAlignment(pt).toLlvm(); const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment); _ = try self.wip.callIntrinsic(.normal, .none, .va_copy, &.{}, &.{ dest_list, src_list }, ""); - return if (isByRef(va_list_ty, mod)) + return if (isByRef(va_list_ty, pt)) dest_list else try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, ""); @@ -5737,15 +5753,15 @@ pub const FuncGen = struct { fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const va_list_ty = self.typeOfIndex(inst); const llvm_va_list_ty = try o.lowerType(va_list_ty); - const result_alignment = va_list_ty.abiAlignment(mod).toLlvm(); + const result_alignment = va_list_ty.abiAlignment(pt).toLlvm(); const dest_list = try 
self.buildAllocaWorkaround(va_list_ty, result_alignment); _ = try self.wip.callIntrinsic(.normal, .none, .va_start, &.{}, &.{dest_list}, ""); - return if (isByRef(va_list_ty, mod)) + return if (isByRef(va_list_ty, pt)) dest_list else try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, ""); @@ -5802,21 +5818,22 @@ pub const FuncGen = struct { rhs: Builder.Value, ) Allocator.Error!Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const scalar_ty = operand_ty.scalarType(mod); const int_ty = switch (scalar_ty.zigTypeTag(mod)) { .Enum => scalar_ty.intTagType(mod), .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { const payload_ty = operand_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt) or operand_ty.optionalReprIsPayload(mod)) { break :blk operand_ty; } // We need to emit instructions to check for equality/inequality // of optionals that are not pointers. - const is_by_ref = isByRef(scalar_ty, mod); + const is_by_ref = isByRef(scalar_ty, pt); const opt_llvm_ty = try o.lowerType(scalar_ty); const lhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, lhs, is_by_ref); const rhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, rhs, is_by_ref); @@ -5908,7 +5925,8 @@ pub const FuncGen = struct { body: []const Air.Inst.Index, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst_ty = self.typeOfIndex(inst); if (inst_ty.isNoReturn(mod)) { @@ -5916,7 +5934,7 @@ pub const FuncGen = struct { return .none; } - const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod); + const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt); var breaks: BreakList = if (have_block_result) .{ .list = .{} } else .{ .len = 0 }; defer if (have_block_result) breaks.list.deinit(self.gpa); @@ -5940,7 +5958,7 @@ pub const FuncGen = struct { // a pointer to it. LLVM IR allows the call instruction to use function bodies instead // of function pointers, however the phi makes it a runtime value and therefore // the LLVM type has to be wrapped in a pointer. - if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, mod)) { + if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, pt)) { break :ty .ptr; } break :ty raw_llvm_ty; @@ -5958,13 +5976,13 @@ pub const FuncGen = struct { fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + const pt = o.pt; const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br; const block = self.blocks.get(branch.block_inst).?; // Add the values to the lists only if the break provides a value. 
const operand_ty = self.typeOf(branch.operand); - const mod = o.module; - if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { const val = try self.resolveInst(branch.operand); // For the phi node, we need the basic blocks and the values of the @@ -5998,7 +6016,7 @@ pub const FuncGen = struct { fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const inst = body_tail[0]; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const err_union = try self.resolveInst(pl_op.operand); @@ -6006,14 +6024,14 @@ pub const FuncGen = struct { const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]); const err_union_ty = self.typeOf(pl_op.operand); const payload_ty = self.typeOfIndex(inst); - const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; + const can_elide_load = if (isByRef(payload_ty, pt)) self.canElideLoad(body_tail) else false; const is_unused = self.liveness.isUnused(inst); return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused); } fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try self.resolveInst(extra.data.ptr); @@ -6033,9 +6051,10 @@ pub const FuncGen = struct { is_unused: bool, ) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const payload_ty = err_union_ty.errorUnionPayload(mod); - const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); + const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt); const err_union_llvm_ty = try o.lowerType(err_union_ty); const error_type = try o.errorIntType(); @@ -6048,8 +6067,8 @@ pub const FuncGen = struct { else err_union; } - const err_field_index = try errUnionErrorOffset(payload_ty, mod); - if (operand_is_ptr or isByRef(err_union_ty, mod)) { + const err_field_index = try errUnionErrorOffset(payload_ty, pt); + if (operand_is_ptr or isByRef(err_union_ty, pt)) { const err_field_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, err_field_index, ""); // TODO add alignment to this load @@ -6077,13 +6096,13 @@ pub const FuncGen = struct { } if (is_unused) return .none; if (!payload_has_bits) return if (operand_is_ptr) err_union else .none; - const offset = try errUnionPayloadOffset(payload_ty, mod); + const offset = try errUnionPayloadOffset(payload_ty, pt); if (operand_is_ptr) { return fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, ""); - } else if (isByRef(err_union_ty, mod)) { + } else if (isByRef(err_union_ty, pt)) { const payload_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, ""); - const payload_alignment = payload_ty.abiAlignment(mod).toLlvm(); - if (isByRef(payload_ty, mod)) { + const payload_alignment = payload_ty.abiAlignment(pt).toLlvm(); + if (isByRef(payload_ty, pt)) { if (can_elide_load) return payload_ptr; @@ -6161,7 +6180,7 @@ pub const FuncGen = struct { fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const loop = 
self.air.extraData(Air.Block, ty_pl.payload); const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]); @@ -6185,7 +6204,8 @@ pub const FuncGen = struct { fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_ty = self.typeOf(ty_op.operand); const array_ty = operand_ty.childType(mod); @@ -6193,7 +6213,7 @@ pub const FuncGen = struct { const len = try o.builder.intValue(llvm_usize, array_ty.arrayLen(mod)); const slice_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); const operand = try self.resolveInst(ty_op.operand); - if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) + if (!array_ty.hasRuntimeBitsIgnoreComptime(pt)) return self.wip.buildAggregate(slice_llvm_ty, &.{ operand, len }, ""); const ptr = try self.wip.gep(.inbounds, try o.lowerType(array_ty), operand, &.{ try o.builder.intValue(llvm_usize, 0), try o.builder.intValue(llvm_usize, 0), @@ -6203,7 +6223,8 @@ pub const FuncGen = struct { fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const workaround_operand = try self.resolveInst(ty_op.operand); @@ -6213,7 +6234,7 @@ pub const FuncGen = struct { const operand = o: { // Work around LLVM bug. See https://github.com/ziglang/zig/issues/17381. - const bit_size = operand_scalar_ty.bitSize(mod); + const bit_size = operand_scalar_ty.bitSize(pt); for ([_]u8{ 8, 16, 32, 64, 128 }) |b| { if (bit_size < b) { break :o try self.wip.cast( @@ -6241,7 +6262,7 @@ pub const FuncGen = struct { "", ); - const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(mod))); + const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(pt))); const rt_int_ty = try o.builder.intType(rt_int_bits); var extended = try self.wip.conv( if (is_signed_int) .signed else .unsigned, @@ -6287,7 +6308,8 @@ pub const FuncGen = struct { _ = fast; const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const target = mod.getTarget(); const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6309,7 +6331,7 @@ pub const FuncGen = struct { ); } - const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(mod))); + const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(pt))); const ret_ty = try o.builder.intType(rt_int_bits); const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: { // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard @@ -6348,19 +6370,20 @@ pub const FuncGen = struct { fn sliceOrArrayPtr(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; return if (ty.isSlice(mod)) fg.wip.extractValue(ptr, &.{0}, "") else ptr; } fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const llvm_usize = try o.lowerType(Type.usize); switch (ty.ptrSize(mod)) { .Slice => { const len = try fg.wip.extractValue(ptr, &.{1}, ""); const elem_ty = ty.childType(mod); - const abi_size = 
elem_ty.abiSize(mod); + const abi_size = elem_ty.abiSize(pt); if (abi_size == 1) return len; const abi_size_llvm_val = try o.builder.intValue(llvm_usize, abi_size); return fg.wip.bin(.@"mul nuw", len, abi_size_llvm_val, ""); @@ -6368,7 +6391,7 @@ pub const FuncGen = struct { .One => { const array_ty = ty.childType(mod); const elem_ty = array_ty.childType(mod); - const abi_size = elem_ty.abiSize(mod); + const abi_size = elem_ty.abiSize(pt); return o.builder.intValue(llvm_usize, array_ty.arrayLen(mod) * abi_size); }, .Many, .C => unreachable, @@ -6383,7 +6406,7 @@ pub const FuncGen = struct { fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); const slice_ptr_ty = self.typeOf(ty_op.operand); @@ -6394,7 +6417,8 @@ pub const FuncGen = struct { fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const slice_ty = self.typeOf(bin_op.lhs); @@ -6404,11 +6428,11 @@ pub const FuncGen = struct { const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty); const base_ptr = try self.wip.extractValue(slice, &.{0}, ""); const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, ""); - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { if (self.canElideLoad(body_tail)) return ptr; - const elem_alignment = elem_ty.abiAlignment(mod).toLlvm(); + const elem_alignment = elem_ty.abiAlignment(pt).toLlvm(); return self.loadByRef(ptr, elem_ty, elem_alignment, .normal); } @@ -6417,7 +6441,7 @@ pub const FuncGen = struct { fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const slice_ty = self.typeOf(bin_op.lhs); @@ -6431,7 +6455,8 @@ pub const FuncGen = struct { fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -6440,15 +6465,15 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const array_llvm_ty = try o.lowerType(array_ty); const elem_ty = array_ty.childType(mod); - if (isByRef(array_ty, mod)) { + if (isByRef(array_ty, pt)) { const indices: [2]Builder.Value = .{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs, }; - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { const elem_ptr = try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, ""); if (canElideLoad(self, body_tail)) return elem_ptr; - const elem_alignment = elem_ty.abiAlignment(mod).toLlvm(); + const elem_alignment = elem_ty.abiAlignment(pt).toLlvm(); return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal); } else { const elem_ptr = @@ -6463,7 +6488,8 @@ pub const FuncGen = struct { fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + 
const mod = pt.zcu; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); @@ -6477,9 +6503,9 @@ pub const FuncGen = struct { &.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs } else &.{rhs}, ""); - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { if (self.canElideLoad(body_tail)) return ptr; - const elem_alignment = elem_ty.abiAlignment(mod).toLlvm(); + const elem_alignment = elem_ty.abiAlignment(pt).toLlvm(); return self.loadByRef(ptr, elem_ty, elem_alignment, .normal); } @@ -6488,12 +6514,13 @@ pub const FuncGen = struct { fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(mod); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.resolveInst(bin_op.lhs); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return self.resolveInst(bin_op.lhs); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -6530,7 +6557,8 @@ pub const FuncGen = struct { fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; @@ -6538,27 +6566,27 @@ pub const FuncGen = struct { const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; - if (!isByRef(struct_ty, mod)) { - assert(!isByRef(field_ty, mod)); + if (!isByRef(struct_ty, pt)) { + assert(!isByRef(field_ty, pt)); switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout(mod)) { .@"packed" => { const struct_type = mod.typeToStruct(struct_ty).?; - const bit_offset = mod.structPackedFieldBitOffset(struct_type, field_index); + const bit_offset = pt.structPackedFieldBitOffset(struct_type, field_index); const containing_int = struct_llvm_val; const shift_amt = try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset); const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, ""); @@ -6575,12 +6603,12 @@ pub const 
FuncGen = struct { const containing_int = struct_llvm_val; const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const truncated_int = try self.wip.cast(.trunc, containing_int, same_size_int, ""); return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const truncated_int = try self.wip.cast(.trunc, containing_int, same_size_int, ""); return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, ""); @@ -6599,12 +6627,12 @@ pub const FuncGen = struct { const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?; const field_ptr = try self.wip.gepStruct(struct_llvm_ty, struct_llvm_val, llvm_field_index, ""); - const alignment = struct_ty.structFieldAlign(field_index, mod); - const field_ptr_ty = try mod.ptrType(.{ + const alignment = struct_ty.structFieldAlign(field_index, pt); + const field_ptr_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = .{ .alignment = alignment }, }); - if (isByRef(field_ty, mod)) { + if (isByRef(field_ty, pt)) { if (canElideLoad(self, body_tail)) return field_ptr; @@ -6617,12 +6645,12 @@ pub const FuncGen = struct { }, .Union => { const union_llvm_ty = try o.lowerType(struct_ty); - const layout = struct_ty.unionGetLayout(mod); + const layout = struct_ty.unionGetLayout(pt); const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align)); const field_ptr = try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, ""); const payload_alignment = layout.payload_align.toLlvm(); - if (isByRef(field_ty, mod)) { + if (isByRef(field_ty, pt)) { if (canElideLoad(self, body_tail)) return field_ptr; return self.loadByRef(field_ptr, field_ty, payload_alignment, .normal); } else { @@ -6635,14 +6663,15 @@ pub const FuncGen = struct { fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const field_ptr = try self.resolveInst(extra.field_ptr); const parent_ty = ty_pl.ty.toType().childType(mod); - const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); + const field_offset = parent_ty.structFieldOffset(extra.field_index, pt); if (field_offset == 0) return field_ptr; const res_ty = try o.lowerType(ty_pl.ty.toType()); @@ -6696,7 +6725,7 @@ pub const FuncGen = struct { fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const operand = try self.resolveInst(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -6743,9 +6772,9 @@ pub const FuncGen = struct { try o.lowerDebugType(operand_ty), ); - const zcu = o.module; + const pt = o.pt; const owner_mod = self.dg.ownerModule(); - if (isByRef(operand_ty, zcu)) { + if (isByRef(operand_ty, pt)) { _ = try self.wip.callIntrinsic( .normal, .none, @@ -6759,7 +6788,7 @@ pub const FuncGen = 
struct { "", ); } else if (owner_mod.optimize_mode == .Debug) { - const alignment = operand_ty.abiAlignment(zcu).toLlvm(); + const alignment = operand_ty.abiAlignment(pt).toLlvm(); const alloca = try self.buildAlloca(operand.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, operand, alloca, alignment); _ = try self.wip.callIntrinsic( @@ -6830,7 +6859,8 @@ pub const FuncGen = struct { // This stores whether we need to add an elementtype attribute and // if so, the element type itself. const llvm_param_attrs = try arena.alloc(Builder.Type, max_param_count); - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const target = mod.getTarget(); var llvm_ret_i: usize = 0; @@ -6930,13 +6960,13 @@ pub const FuncGen = struct { const arg_llvm_value = try self.resolveInst(input); const arg_ty = self.typeOf(input); - const is_by_ref = isByRef(arg_ty, mod); + const is_by_ref = isByRef(arg_ty, pt); if (is_by_ref) { if (constraintAllowsMemory(constraint)) { llvm_param_values[llvm_param_i] = arg_llvm_value; llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip); } else { - const alignment = arg_ty.abiAlignment(mod).toLlvm(); + const alignment = arg_ty.abiAlignment(pt).toLlvm(); const arg_llvm_ty = try o.lowerType(arg_ty); const load_inst = try self.wip.load(.normal, arg_llvm_ty, arg_llvm_value, alignment, ""); @@ -6948,7 +6978,7 @@ pub const FuncGen = struct { llvm_param_values[llvm_param_i] = arg_llvm_value; llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip); } else { - const alignment = arg_ty.abiAlignment(mod).toLlvm(); + const alignment = arg_ty.abiAlignment(pt).toLlvm(); const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, arg_llvm_value, arg_ptr, alignment); llvm_param_values[llvm_param_i] = arg_ptr; @@ -7000,7 +7030,7 @@ pub const FuncGen = struct { llvm_param_values[llvm_param_i] = llvm_rw_val; llvm_param_types[llvm_param_i] = llvm_rw_val.typeOfWip(&self.wip); } else { - const alignment = rw_ty.abiAlignment(mod).toLlvm(); + const alignment = rw_ty.abiAlignment(pt).toLlvm(); const loaded = try self.wip.load(.normal, llvm_elem_ty, llvm_rw_val, alignment, ""); llvm_param_values[llvm_param_i] = loaded; llvm_param_types[llvm_param_i] = llvm_elem_ty; @@ -7161,7 +7191,7 @@ pub const FuncGen = struct { const output_ptr = try self.resolveInst(output); const output_ptr_ty = self.typeOf(output); - const alignment = output_ptr_ty.ptrAlignment(mod).toLlvm(); + const alignment = output_ptr_ty.ptrAlignment(pt).toLlvm(); _ = try self.wip.store(.normal, output_value, output_ptr, alignment); } else { ret_val = output_value; @@ -7179,7 +7209,8 @@ pub const FuncGen = struct { cond: Builder.IntegerCondition, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); @@ -7204,7 +7235,7 @@ pub const FuncGen = struct { comptime assert(optional_layout_version == 3); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const loaded = if (operand_is_ptr) try self.wip.load(.normal, optional_llvm_ty, operand, .default, "") else @@ -7212,7 +7243,7 @@ pub const FuncGen = struct { return self.wip.icmp(cond, loaded, try o.builder.intValue(.i8, 0), ""); } - const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod); + const is_by_ref = 
operand_is_ptr or isByRef(optional_ty, pt); return self.optCmpNull(cond, optional_llvm_ty, operand, is_by_ref); } @@ -7223,7 +7254,8 @@ pub const FuncGen = struct { operand_is_ptr: bool, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); @@ -7241,7 +7273,7 @@ pub const FuncGen = struct { return val.toValue(); } - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const loaded = if (operand_is_ptr) try self.wip.load(.normal, try o.lowerType(err_union_ty), operand, .default, "") else @@ -7249,9 +7281,9 @@ pub const FuncGen = struct { return self.wip.icmp(cond, loaded, zero, ""); } - const err_field_index = try errUnionErrorOffset(payload_ty, mod); + const err_field_index = try errUnionErrorOffset(payload_ty, pt); - const loaded = if (operand_is_ptr or isByRef(err_union_ty, mod)) loaded: { + const loaded = if (operand_is_ptr or isByRef(err_union_ty, pt)) loaded: { const err_union_llvm_ty = try o.lowerType(err_union_ty); const err_field_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, err_field_index, ""); @@ -7262,12 +7294,13 @@ pub const FuncGen = struct { fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = optional_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // We have a pointer to a zero-bit value and we need to return // a pointer to a zero-bit value. return operand; @@ -7283,13 +7316,14 @@ pub const FuncGen = struct { comptime assert(optional_layout_version == 3); const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = optional_ty.optionalChild(mod); const non_null_bit = try o.builder.intValue(.i8, 1); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // We have a pointer to a i8. We need to set it to 1 and then return the same pointer. _ = try self.wip.store(.normal, non_null_bit, operand, .default); return operand; @@ -7314,13 +7348,14 @@ pub const FuncGen = struct { fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand); const payload_ty = self.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; if (optional_ty.optionalReprIsPayload(mod)) { // Payload value is the same as the optional value. 
@@ -7328,7 +7363,7 @@ pub const FuncGen = struct { } const opt_llvm_ty = try o.lowerType(optional_ty); - const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; + const can_elide_load = if (isByRef(payload_ty, pt)) self.canElideLoad(body_tail) else false; return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load); } @@ -7338,7 +7373,8 @@ pub const FuncGen = struct { operand_is_ptr: bool, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); @@ -7347,17 +7383,17 @@ pub const FuncGen = struct { const result_ty = self.typeOfIndex(inst); const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty; - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return if (operand_is_ptr) operand else .none; } - const offset = try errUnionPayloadOffset(payload_ty, mod); + const offset = try errUnionPayloadOffset(payload_ty, pt); const err_union_llvm_ty = try o.lowerType(err_union_ty); if (operand_is_ptr) { return self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); - } else if (isByRef(err_union_ty, mod)) { - const payload_alignment = payload_ty.abiAlignment(mod).toLlvm(); + } else if (isByRef(err_union_ty, pt)) { + const payload_alignment = payload_ty.abiAlignment(pt).toLlvm(); const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); - if (isByRef(payload_ty, mod)) { + if (isByRef(payload_ty, pt)) { if (self.canElideLoad(body_tail)) return payload_ptr; return self.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal); } @@ -7373,7 +7409,8 @@ pub const FuncGen = struct { operand_is_ptr: bool, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -7388,14 +7425,14 @@ pub const FuncGen = struct { } const payload_ty = err_union_ty.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (!operand_is_ptr) return operand; return self.wip.load(.normal, error_type, operand, .default, ""); } - const offset = try errUnionErrorOffset(payload_ty, mod); + const offset = try errUnionErrorOffset(payload_ty, pt); - if (operand_is_ptr or isByRef(err_union_ty, mod)) { + if (operand_is_ptr or isByRef(err_union_ty, pt)) { const err_union_llvm_ty = try o.lowerType(err_union_ty); const err_field_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); return self.wip.load(.normal, error_type, err_field_ptr, .default, ""); @@ -7406,22 +7443,23 @@ pub const FuncGen = struct { fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(mod); const non_error_val = try o.builder.intValue(try o.errorIntType(), 0); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if 
(!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { _ = try self.wip.store(.normal, non_error_val, operand, .default); return operand; } const err_union_llvm_ty = try o.lowerType(err_union_ty); { - const err_int_ty = try mod.errorIntType(); - const error_alignment = err_int_ty.abiAlignment(mod).toLlvm(); - const error_offset = try errUnionErrorOffset(payload_ty, mod); + const err_int_ty = try pt.errorIntType(); + const error_alignment = err_int_ty.abiAlignment(pt).toLlvm(); + const error_offset = try errUnionErrorOffset(payload_ty, pt); // First set the non-error value. const non_null_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, error_offset, ""); _ = try self.wip.store(.normal, non_error_val, non_null_ptr, error_alignment); @@ -7429,7 +7467,7 @@ pub const FuncGen = struct { // Then return the payload pointer (only if it is used). if (self.liveness.isUnused(inst)) return .none; - const payload_offset = try errUnionPayloadOffset(payload_ty, mod); + const payload_offset = try errUnionPayloadOffset(payload_ty, pt); return self.wip.gepStruct(err_union_llvm_ty, operand, payload_offset, ""); } @@ -7446,19 +7484,21 @@ pub const FuncGen = struct { fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + const pt = o.pt; + const mod = pt.zcu; + const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const struct_ty = ty_pl.ty.toType(); const field_index = ty_pl.payload; - const mod = o.module; const struct_llvm_ty = try o.lowerType(struct_ty); const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?; assert(self.err_ret_trace != .none); const field_ptr = try self.wip.gepStruct(struct_llvm_ty, self.err_ret_trace, llvm_field_index, ""); - const field_alignment = struct_ty.structFieldAlign(field_index, mod); + const field_alignment = struct_ty.structFieldAlign(field_index, pt); const field_ty = struct_ty.structFieldType(field_index, mod); - const field_ptr_ty = try mod.ptrType(.{ + const field_ptr_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = .{ .alignment = field_alignment }, }); @@ -7490,29 +7530,30 @@ pub const FuncGen = struct { fn airWrapOptional(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const payload_ty = self.typeOf(ty_op.operand); const non_null_bit = try o.builder.intValue(.i8, 1); comptime assert(optional_layout_version == 3); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOfIndex(inst); if (optional_ty.optionalReprIsPayload(mod)) return operand; const llvm_optional_ty = try o.lowerType(optional_ty); - if (isByRef(optional_ty, mod)) { + if (isByRef(optional_ty, pt)) { const directReturn = self.isNextRet(body_tail); const optional_ptr = if (directReturn) self.ret_ptr else brk: { - const alignment = optional_ty.abiAlignment(mod).toLlvm(); + const alignment = optional_ty.abiAlignment(pt).toLlvm(); const optional_ptr = try self.buildAllocaWorkaround(optional_ty, alignment); break :brk optional_ptr; }; const payload_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 0, ""); - const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); + const payload_ptr_ty = try 
pt.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .none); const non_null_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 1, ""); _ = try self.wip.store(.normal, non_null_bit, non_null_ptr, .default); @@ -7523,36 +7564,36 @@ pub const FuncGen = struct { fn airWrapErrUnionPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_un_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); const payload_ty = self.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return operand; } const ok_err_code = try o.builder.intValue(try o.errorIntType(), 0); const err_un_llvm_ty = try o.lowerType(err_un_ty); - const payload_offset = try errUnionPayloadOffset(payload_ty, mod); - const error_offset = try errUnionErrorOffset(payload_ty, mod); - if (isByRef(err_un_ty, mod)) { + const payload_offset = try errUnionPayloadOffset(payload_ty, pt); + const error_offset = try errUnionErrorOffset(payload_ty, pt); + if (isByRef(err_un_ty, pt)) { const directReturn = self.isNextRet(body_tail); const result_ptr = if (directReturn) self.ret_ptr else brk: { - const alignment = err_un_ty.abiAlignment(mod).toLlvm(); + const alignment = err_un_ty.abiAlignment(pt).toLlvm(); const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment); break :brk result_ptr; }; const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, ""); - const err_int_ty = try mod.errorIntType(); - const error_alignment = err_int_ty.abiAlignment(mod).toLlvm(); + const err_int_ty = try pt.errorIntType(); + const error_alignment = err_int_ty.abiAlignment(pt).toLlvm(); _ = try self.wip.store(.normal, ok_err_code, err_ptr, error_alignment); const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, ""); - const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); + const payload_ptr_ty = try pt.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .none); return result_ptr; } @@ -7564,33 +7605,34 @@ pub const FuncGen = struct { fn airWrapErrUnionErr(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_un_ty = self.typeOfIndex(inst); const payload_ty = err_un_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return operand; const err_un_llvm_ty = try o.lowerType(err_un_ty); - const payload_offset = try errUnionPayloadOffset(payload_ty, mod); - const error_offset = try errUnionErrorOffset(payload_ty, mod); - if (isByRef(err_un_ty, mod)) { + const payload_offset = try errUnionPayloadOffset(payload_ty, pt); + const error_offset = try errUnionErrorOffset(payload_ty, pt); + if (isByRef(err_un_ty, pt)) { const directReturn = self.isNextRet(body_tail); const result_ptr = if (directReturn) self.ret_ptr else brk: { - const alignment = err_un_ty.abiAlignment(mod).toLlvm(); + const alignment = err_un_ty.abiAlignment(pt).toLlvm(); const result_ptr = try 
self.buildAllocaWorkaround(err_un_ty, alignment); break :brk result_ptr; }; const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, ""); - const err_int_ty = try mod.errorIntType(); - const error_alignment = err_int_ty.abiAlignment(mod).toLlvm(); + const err_int_ty = try pt.errorIntType(); + const error_alignment = err_int_ty.abiAlignment(pt).toLlvm(); _ = try self.wip.store(.normal, operand, err_ptr, error_alignment); const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, ""); - const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); + const payload_ptr_ty = try pt.singleMutPtrType(payload_ty); // TODO store undef to payload_ptr _ = payload_ptr; _ = payload_ptr_ty; @@ -7624,7 +7666,8 @@ pub const FuncGen = struct { fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem; const extra = self.air.extraData(Air.Bin, data.payload).data; @@ -7636,7 +7679,7 @@ pub const FuncGen = struct { const access_kind: Builder.MemoryAccessKind = if (vector_ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod)); - const alignment = vector_ptr_ty.ptrAlignment(mod).toLlvm(); + const alignment = vector_ptr_ty.ptrAlignment(pt).toLlvm(); const loaded = try self.wip.load(access_kind, elem_llvm_ty, vector_ptr, alignment, ""); const new_vector = try self.wip.insertElement(loaded, operand, index, ""); @@ -7646,7 +7689,7 @@ pub const FuncGen = struct { fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7666,7 +7709,7 @@ pub const FuncGen = struct { fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7696,7 +7739,7 @@ pub const FuncGen = struct { fn airAdd(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7714,7 +7757,7 @@ pub const FuncGen = struct { unsigned_intrinsic: Builder.Intrinsic, ) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try fg.resolveInst(bin_op.lhs); @@ -7762,7 +7805,7 @@ pub const FuncGen = struct { fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7782,7 +7825,7 @@ pub const FuncGen = struct { fn airSub(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = 
self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7803,7 +7846,7 @@ pub const FuncGen = struct { fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7823,7 +7866,7 @@ pub const FuncGen = struct { fn airMul(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7844,7 +7887,7 @@ pub const FuncGen = struct { fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7873,7 +7916,7 @@ pub const FuncGen = struct { fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7889,7 +7932,7 @@ pub const FuncGen = struct { fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7921,7 +7964,7 @@ pub const FuncGen = struct { fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7939,7 +7982,7 @@ pub const FuncGen = struct { fn airRem(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7956,7 +7999,7 @@ pub const FuncGen = struct { fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7992,7 +8035,7 @@ pub const FuncGen = struct { fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); @@ -8014,7 +8057,7 @@ pub const FuncGen = struct { fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); @@ -8042,7 +8085,8 @@ pub const FuncGen = struct { unsigned_intrinsic: Builder.Intrinsic, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -8065,8 +8109,8 @@ pub const FuncGen = struct { const result_index = o.llvmFieldIndex(inst_ty, 0).?; const overflow_index = o.llvmFieldIndex(inst_ty, 1).?; - if (isByRef(inst_ty, mod)) { - const result_alignment = inst_ty.abiAlignment(mod).toLlvm(); + if (isByRef(inst_ty, pt)) { + const result_alignment = inst_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(inst_ty, result_alignment); { const field_ptr = try self.wip.gepStruct(llvm_inst_ty, alloca_inst, result_index, ""); @@ -8135,7 +8179,7 @@ pub const FuncGen = struct { return o.builder.addFunction( try o.builder.fnType(return_type, param_types, .normal), fn_name, - toLlvmAddressSpace(.generic, o.module.getTarget()), + toLlvmAddressSpace(.generic, o.pt.zcu.getTarget()), ); } @@ -8149,8 +8193,8 @@ pub const FuncGen = struct { params: [2]Builder.Value, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; - const target = o.module.getTarget(); + const mod = o.pt.zcu; + const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); const scalar_llvm_ty = try o.lowerType(scalar_ty); @@ -8255,7 +8299,7 @@ pub const FuncGen = struct { params: [params_len]Builder.Value, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); const llvm_ty = try o.lowerType(ty); @@ -8396,7 +8440,8 @@ pub const FuncGen = struct { fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -8422,8 +8467,8 @@ pub const FuncGen = struct { const result_index = o.llvmFieldIndex(dest_ty, 0).?; const overflow_index = o.llvmFieldIndex(dest_ty, 1).?; - if (isByRef(dest_ty, mod)) { - const result_alignment = dest_ty.abiAlignment(mod).toLlvm(); + if (isByRef(dest_ty, pt)) { + const result_alignment = dest_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(dest_ty, result_alignment); { const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, ""); @@ -8466,7 +8511,7 @@ pub const FuncGen = struct { fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8497,7 +8542,8 @@ pub const FuncGen = struct { fn airShlSat(self: *FuncGen, 
inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8505,7 +8551,7 @@ pub const FuncGen = struct { const lhs_ty = self.typeOf(bin_op.lhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); - const lhs_bits = lhs_scalar_ty.bitSize(mod); + const lhs_bits = lhs_scalar_ty.bitSize(pt); const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), ""); @@ -8539,7 +8585,7 @@ pub const FuncGen = struct { fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8558,7 +8604,7 @@ pub const FuncGen = struct { fn airAbs(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -8580,7 +8626,7 @@ pub const FuncGen = struct { fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const dest_ty = self.typeOfIndex(inst); const dest_llvm_ty = try o.lowerType(dest_ty); @@ -8604,7 +8650,7 @@ pub const FuncGen = struct { fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -8638,7 +8684,7 @@ pub const FuncGen = struct { fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -8696,9 +8742,10 @@ pub const FuncGen = struct { fn bitCast(self: *FuncGen, operand: Builder.Value, operand_ty: Type, inst_ty: Type) !Builder.Value { const o = self.dg.object; - const mod = o.module; - const operand_is_ref = isByRef(operand_ty, mod); - const result_is_ref = isByRef(inst_ty, mod); + const pt = o.pt; + const mod = pt.zcu; + const operand_is_ref = isByRef(operand_ty, pt); + const result_is_ref = isByRef(inst_ty, pt); const llvm_dest_ty = try o.lowerType(inst_ty); if (operand_is_ref and result_is_ref) { @@ -8721,9 +8768,9 @@ pub const FuncGen = struct { if (!result_is_ref) { return self.dg.todo("implement bitcast vector to non-ref array", .{}); } - const alignment = inst_ty.abiAlignment(mod).toLlvm(); + const alignment = inst_ty.abiAlignment(pt).toLlvm(); const array_ptr = try self.buildAllocaWorkaround(inst_ty, alignment); - const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; + const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8; if (bitcast_ok) { _ = try self.wip.store(.normal, operand, array_ptr, alignment); } else { @@ -8748,11 +8795,11 @@ pub const FuncGen = struct { const llvm_vector_ty = try o.lowerType(inst_ty); if 
(!operand_is_ref) return self.dg.todo("implement bitcast non-ref array to vector", .{}); - const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; + const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8; if (bitcast_ok) { // The array is aligned to the element's alignment, while the vector might have a completely // different alignment. This means we need to enforce the alignment of this load. - const alignment = elem_ty.abiAlignment(mod).toLlvm(); + const alignment = elem_ty.abiAlignment(pt).toLlvm(); return self.wip.load(.normal, llvm_vector_ty, operand, alignment, ""); } else { // If the ABI size of the element type is not evenly divisible by size in bits; @@ -8777,24 +8824,25 @@ pub const FuncGen = struct { } if (operand_is_ref) { - const alignment = operand_ty.abiAlignment(mod).toLlvm(); + const alignment = operand_ty.abiAlignment(pt).toLlvm(); return self.wip.load(.normal, llvm_dest_ty, operand, alignment, ""); } if (result_is_ref) { - const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm(); + const alignment = operand_ty.abiAlignment(pt).max(inst_ty.abiAlignment(pt)).toLlvm(); const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment); _ = try self.wip.store(.normal, operand, result_ptr, alignment); return result_ptr; } if (llvm_dest_ty.isStruct(&o.builder) or - ((operand_ty.zigTypeTag(mod) == .Vector or inst_ty.zigTypeTag(mod) == .Vector) and operand_ty.bitSize(mod) != inst_ty.bitSize(mod))) + ((operand_ty.zigTypeTag(mod) == .Vector or inst_ty.zigTypeTag(mod) == .Vector) and + operand_ty.bitSize(pt) != inst_ty.bitSize(pt))) { // Both our operand and our result are values, not pointers, // but LLVM won't let us bitcast struct values or vectors with padding bits. // Therefore, we store operand to alloca, then load for result. 
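
The bitcast hunks above spill aggregate conversions (structs, and vectors whose bit size differs from the ABI size) to an alloca and reload them, since LLVM has no direct aggregate bitcast. A minimal user-level sketch of conversions that exercise this path — not the backend's own code, just an illustration that the round trip is loss-free:

```zig
const std = @import("std");

test "conversions that may be lowered through a stack temporary" {
    // Vectors and same-length arrays share element layout, but there is no
    // direct aggregate bitcast in LLVM, so the lowering stores to an alloca
    // and reloads with the destination type; the round trip stays loss-free.
    const v: @Vector(4, u8) = .{ 1, 2, 3, 4 };
    const arr: [4]u8 = v;
    try std.testing.expectEqual(@as(u8, 3), arr[2]);

    const word: u32 = @bitCast([4]u8{ 1, 2, 3, 4 });
    const back: [4]u8 = @bitCast(word);
    try std.testing.expect(std.mem.eql(u8, &back, &[_]u8{ 1, 2, 3, 4 }));
}
```
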
- const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm(); + const alignment = operand_ty.abiAlignment(pt).max(inst_ty.abiAlignment(pt)).toLlvm(); const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment); _ = try self.wip.store(.normal, operand, result_ptr, alignment); return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, ""); @@ -8811,7 +8859,8 @@ pub const FuncGen = struct { fn airArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const arg_val = self.args[self.arg_index]; self.arg_index += 1; @@ -8847,7 +8896,7 @@ pub const FuncGen = struct { }; const owner_mod = self.dg.ownerModule(); - if (isByRef(inst_ty, mod)) { + if (isByRef(inst_ty, pt)) { _ = try self.wip.callIntrinsic( .normal, .none, @@ -8861,7 +8910,7 @@ pub const FuncGen = struct { "", ); } else if (owner_mod.optimize_mode == .Debug) { - const alignment = inst_ty.abiAlignment(mod).toLlvm(); + const alignment = inst_ty.abiAlignment(pt).toLlvm(); const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, arg_val, alloca, alignment); _ = try self.wip.callIntrinsic( @@ -8897,27 +8946,29 @@ pub const FuncGen = struct { fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ptr_ty = self.typeOfIndex(inst); const pointee_type = ptr_ty.childType(mod); - if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) + if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return (try o.lowerPtrToVoid(ptr_ty)).toValue(); //const pointee_llvm_ty = try o.lowerType(pointee_type); - const alignment = ptr_ty.ptrAlignment(mod).toLlvm(); + const alignment = ptr_ty.ptrAlignment(pt).toLlvm(); return self.buildAllocaWorkaround(pointee_type, alignment); } fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ptr_ty = self.typeOfIndex(inst); const ret_ty = ptr_ty.childType(mod); - if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) + if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return (try o.lowerPtrToVoid(ptr_ty)).toValue(); if (self.ret_ptr != .none) return self.ret_ptr; //const ret_llvm_ty = try o.lowerType(ret_ty); - const alignment = ptr_ty.ptrAlignment(mod).toLlvm(); + const alignment = ptr_ty.ptrAlignment(pt).toLlvm(); return self.buildAllocaWorkaround(ret_ty, alignment); } @@ -8928,7 +8979,7 @@ pub const FuncGen = struct { llvm_ty: Builder.Type, alignment: Builder.Alignment, ) Allocator.Error!Builder.Value { - const target = self.dg.object.module.getTarget(); + const target = self.dg.object.pt.zcu.getTarget(); return buildAllocaInner(&self.wip, llvm_ty, alignment, target); } @@ -8939,18 +8990,19 @@ pub const FuncGen = struct { alignment: Builder.Alignment, ) Allocator.Error!Builder.Value { const o = self.dg.object; - return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.module), .i8), alignment); + return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt), .i8), alignment); } fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_ptr = try self.resolveInst(bin_op.lhs); const ptr_ty = 
self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(mod) else false; if (val_is_undef) { const ptr_info = ptr_ty.ptrInfo(mod); const needs_bitmask = (ptr_info.packed_offset.host_size != 0); @@ -8964,10 +9016,10 @@ pub const FuncGen = struct { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. - const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(mod)); + const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(pt)); _ = try self.wip.callMemSet( dest_ptr, - ptr_ty.ptrAlignment(mod).toLlvm(), + ptr_ty.ptrAlignment(pt).toLlvm(), if (safety) try o.builder.intValue(.i8, 0xaa) else try o.builder.undefValue(.i8), len, if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal, @@ -8992,7 +9044,7 @@ pub const FuncGen = struct { /// The first instruction of `body_tail` is the one whose copy we want to elide. fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ip = &mod.intern_pool; for (body_tail[1..]) |body_inst| { switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) { @@ -9008,7 +9060,8 @@ pub const FuncGen = struct { fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const inst = body_tail[0]; const ty_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ptr_ty = fg.typeOf(ty_op.operand); @@ -9016,7 +9069,7 @@ pub const FuncGen = struct { const ptr = try fg.resolveInst(ty_op.operand); elide: { - if (!isByRef(Type.fromInterned(ptr_info.child), mod)) break :elide; + if (!isByRef(Type.fromInterned(ptr_info.child), pt)) break :elide; if (!canElideLoad(fg, body_tail)) break :elide; return ptr; } @@ -9040,7 +9093,7 @@ pub const FuncGen = struct { _ = inst; const o = self.dg.object; const llvm_usize = try o.lowerType(Type.usize); - if (!target_util.supportsReturnAddress(o.module.getTarget())) { + if (!target_util.supportsReturnAddress(o.pt.zcu.getTarget())) { // https://github.com/ziglang/zig/issues/11946 return o.builder.intValue(llvm_usize, 0); } @@ -9068,7 +9121,8 @@ pub const FuncGen = struct { kind: Builder.Function.Instruction.CmpXchg.Kind, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr = try self.resolveInst(extra.ptr); @@ -9095,7 +9149,7 @@ pub const FuncGen = struct { self.sync_scope, toLlvmAtomicOrdering(extra.successOrder()), toLlvmAtomicOrdering(extra.failureOrder()), - ptr_ty.ptrAlignment(mod).toLlvm(), + ptr_ty.ptrAlignment(pt).toLlvm(), "", ); @@ -9118,7 +9172,8 @@ pub const FuncGen = struct { fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; const ptr = try 
self.resolveInst(pl_op.operand); @@ -9134,7 +9189,7 @@ pub const FuncGen = struct { const access_kind: Builder.MemoryAccessKind = if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; - const ptr_alignment = ptr_ty.ptrAlignment(mod).toLlvm(); + const ptr_alignment = ptr_ty.ptrAlignment(pt).toLlvm(); if (llvm_abi_ty != .none) { // operand needs widening and truncating or bitcasting. @@ -9181,19 +9236,20 @@ pub const FuncGen = struct { fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); const ptr_ty = self.typeOf(atomic_load.ptr); const info = ptr_ty.ptrInfo(mod); const elem_ty = Type.fromInterned(info.child); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; const ordering = toLlvmAtomicOrdering(atomic_load.order); const llvm_abi_ty = try o.getAtomicAbiType(elem_ty, false); const ptr_alignment = (if (info.flags.alignment != .none) @as(InternPool.Alignment, info.flags.alignment) else - Type.fromInterned(info.child).abiAlignment(mod)).toLlvm(); + Type.fromInterned(info.child).abiAlignment(pt)).toLlvm(); const access_kind: Builder.MemoryAccessKind = if (info.flags.is_volatile) .@"volatile" else .normal; const elem_llvm_ty = try o.lowerType(elem_ty); @@ -9228,11 +9284,12 @@ pub const FuncGen = struct { ordering: Builder.AtomicOrdering, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .none; const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false); @@ -9252,12 +9309,13 @@ pub const FuncGen = struct { fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_slice = try self.resolveInst(bin_op.lhs); const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = self.typeOf(bin_op.rhs); - const dest_ptr_align = ptr_ty.ptrAlignment(mod).toLlvm(); + const dest_ptr_align = ptr_ty.ptrAlignment(pt).toLlvm(); const dest_ptr = try self.sliceOrArrayPtr(dest_slice, ptr_ty); const access_kind: Builder.MemoryAccessKind = if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; @@ -9270,7 +9328,7 @@ pub const FuncGen = struct { ptr_ty.isSlice(mod) and std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory); - if (try self.air.value(bin_op.rhs, mod)) |elem_val| { + if (try self.air.value(bin_op.rhs, pt)) |elem_val| { if (elem_val.isUndefDeep(mod)) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -9296,7 +9354,7 @@ pub const FuncGen = struct { // repeating byte pattern, for example, `@as(u64, 0)` has a // repeating byte pattern of 0 bytes. In such case, the memset // intrinsic can be used. 
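
The comment above describes the repeated-byte shortcut: if every byte of the element's memory representation is identical, the splat store can be lowered to a plain memset. A rough user-level analogue of that check — the hypothetical `repeatedByte` below stands in for the compiler's `hasRepeatedByteRepr`, which works on interned values rather than raw memory:

```zig
const std = @import("std");

/// Hypothetical user-level analogue of `hasRepeatedByteRepr`: if every byte
/// of the value's memory representation is the same, a splat store of it
/// can become a single memset with that byte.
fn repeatedByte(comptime T: type, value: T) ?u8 {
    const bytes = std.mem.asBytes(&value);
    const first = bytes[0];
    for (bytes[1..]) |b| if (b != first) return null;
    return first;
}

test "repeatedByte" {
    try std.testing.expectEqual(@as(?u8, 0), repeatedByte(u64, 0));
    try std.testing.expectEqual(@as(?u8, 0xaa), repeatedByte(u32, 0xaaaaaaaa));
    try std.testing.expectEqual(@as(?u8, null), repeatedByte(u32, 0x12345678));
}
```
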
- if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| { + if (try elem_val.hasRepeatedByteRepr(elem_ty, pt)) |byte_val| { const fill_byte = try o.builder.intValue(.i8, byte_val); const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { @@ -9309,7 +9367,7 @@ pub const FuncGen = struct { } const value = try self.resolveInst(bin_op.rhs); - const elem_abi_size = elem_ty.abiSize(mod); + const elem_abi_size = elem_ty.abiSize(pt); if (elem_abi_size == 1) { // In this case we can take advantage of LLVM's intrinsic. @@ -9361,9 +9419,9 @@ pub const FuncGen = struct { _ = try self.wip.brCond(end, body_block, end_block); self.wip.cursor = .{ .block = body_block }; - const elem_abi_align = elem_ty.abiAlignment(mod); + const elem_abi_align = elem_ty.abiAlignment(pt); const it_ptr_align = InternPool.Alignment.fromLlvm(dest_ptr_align).min(elem_abi_align).toLlvm(); - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { _ = try self.wip.callMemCpy( it_ptr.toValue(), it_ptr_align, @@ -9405,7 +9463,8 @@ pub const FuncGen = struct { fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_slice = try self.resolveInst(bin_op.lhs); const dest_ptr_ty = self.typeOf(bin_op.lhs); @@ -9434,9 +9493,9 @@ pub const FuncGen = struct { self.wip.cursor = .{ .block = memcpy_block }; _ = try self.wip.callMemCpy( dest_ptr, - dest_ptr_ty.ptrAlignment(mod).toLlvm(), + dest_ptr_ty.ptrAlignment(pt).toLlvm(), src_ptr, - src_ptr_ty.ptrAlignment(mod).toLlvm(), + src_ptr_ty.ptrAlignment(pt).toLlvm(), len, access_kind, ); @@ -9447,9 +9506,9 @@ pub const FuncGen = struct { _ = try self.wip.callMemCpy( dest_ptr, - dest_ptr_ty.ptrAlignment(mod).toLlvm(), + dest_ptr_ty.ptrAlignment(pt).toLlvm(), src_ptr, - src_ptr_ty.ptrAlignment(mod).toLlvm(), + src_ptr_ty.ptrAlignment(pt).toLlvm(), len, access_kind, ); @@ -9458,10 +9517,11 @@ pub const FuncGen = struct { fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const un_ty = self.typeOf(bin_op.lhs).childType(mod); - const layout = un_ty.unionGetLayout(mod); + const layout = un_ty.unionGetLayout(pt); if (layout.tag_size == 0) return .none; const union_ptr = try self.resolveInst(bin_op.lhs); const new_tag = try self.resolveInst(bin_op.rhs); @@ -9479,13 +9539,13 @@ pub const FuncGen = struct { fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const un_ty = self.typeOf(ty_op.operand); - const layout = un_ty.unionGetLayout(mod); + const layout = un_ty.unionGetLayout(pt); if (layout.tag_size == 0) return .none; const union_handle = try self.resolveInst(ty_op.operand); - if (isByRef(un_ty, mod)) { + if (isByRef(un_ty, pt)) { const llvm_un_ty = try o.lowerType(un_ty); if (layout.payload_size == 0) return self.wip.load(.normal, llvm_un_ty, union_handle, .default, ""); @@ -9554,7 +9614,7 @@ pub const FuncGen = struct { fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ty_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_ty = self.typeOf(ty_op.operand); var bits = operand_ty.intInfo(mod).bits; @@ -9588,7 +9648,7 @@ pub const FuncGen = struct { fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const ip = &mod.intern_pool; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); @@ -9638,7 +9698,8 @@ pub const FuncGen = struct { fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index { const o = self.dg.object; - const zcu = o.module; + const pt = o.pt; + const zcu = pt.zcu; const enum_type = zcu.intern_pool.loadEnumType(enum_ty.toIntern()); // TODO: detect when the type changes and re-emit this function. @@ -9678,7 +9739,7 @@ pub const FuncGen = struct { for (0..enum_type.names.len) |field_index| { const this_tag_int_value = try o.lowerValue( - (try zcu.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), + (try pt.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), ); try wip_switch.addCase(this_tag_int_value, named_block, &wip); } @@ -9745,7 +9806,8 @@ pub const FuncGen = struct { fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolveInst(extra.a); @@ -9763,11 +9825,11 @@ pub const FuncGen = struct { defer self.gpa.free(values); for (values, 0..) |*val, i| { - const elem = try mask.elemValue(mod, i); + const elem = try mask.elemValue(pt, i); if (elem.isUndef(mod)) { val.* = try o.builder.undefConst(.i32); } else { - const int = elem.toSignedInt(mod); + const int = elem.toSignedInt(pt); const unsigned: u32 = @intCast(if (int >= 0) int else ~int + a_len); val.* = try o.builder.intConst(.i32, unsigned); } @@ -9854,7 +9916,7 @@ pub const FuncGen = struct { fn airReduce(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const mod = o.pt.zcu; const target = mod.getTarget(); const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce; @@ -9964,7 +10026,8 @@ pub const FuncGen = struct { fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const result_ty = self.typeOfIndex(inst); @@ -9986,16 +10049,16 @@ pub const FuncGen = struct { if (mod.typeToPackedStruct(result_ty)) |struct_type| { const backing_int_ty = struct_type.backingIntType(ip).*; assert(backing_int_ty != .none); - const big_bits = Type.fromInterned(backing_int_ty).bitSize(mod); + const big_bits = Type.fromInterned(backing_int_ty).bitSize(pt); const int_ty = try o.builder.intType(@intCast(big_bits)); comptime assert(Type.packed_struct_layout_version == 2); var running_int = try o.builder.intValue(int_ty, 0); var running_bits: u16 = 0; for (elements, struct_type.field_types.get(ip)) |elem, field_ty| { - if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue; const 
non_int_val = try self.resolveInst(elem); - const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(mod)); + const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(pt)); const small_int_ty = try o.builder.intType(ty_bit_size); const small_int_val = if (Type.fromInterned(field_ty).isPtrAtRuntime(mod)) try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "") @@ -10013,23 +10076,23 @@ pub const FuncGen = struct { assert(result_ty.containerLayout(mod) != .@"packed"); - if (isByRef(result_ty, mod)) { + if (isByRef(result_ty, pt)) { // TODO in debug builds init to undef so that the padding will be 0xaa // even if we fully populate the fields. - const alignment = result_ty.abiAlignment(mod).toLlvm(); + const alignment = result_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment); for (elements, 0..) |elem, i| { - if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; + if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = o.llvmFieldIndex(result_ty, i).?; const field_ptr = try self.wip.gepStruct(llvm_result_ty, alloca_inst, llvm_i, ""); - const field_ptr_ty = try mod.ptrType(.{ + const field_ptr_ty = try pt.ptrType(.{ .child = self.typeOf(elem).toIntern(), .flags = .{ - .alignment = result_ty.structFieldAlign(i, mod), + .alignment = result_ty.structFieldAlign(i, pt), }, }); try self.store(field_ptr, field_ptr_ty, llvm_elem, .none); @@ -10039,7 +10102,7 @@ pub const FuncGen = struct { } else { var result = try o.builder.poisonValue(llvm_result_ty); for (elements, 0..) |elem, i| { - if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; + if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = o.llvmFieldIndex(result_ty, i).?; @@ -10049,15 +10112,15 @@ pub const FuncGen = struct { } }, .Array => { - assert(isByRef(result_ty, mod)); + assert(isByRef(result_ty, pt)); const llvm_usize = try o.lowerType(Type.usize); const usize_zero = try o.builder.intValue(llvm_usize, 0); - const alignment = result_ty.abiAlignment(mod).toLlvm(); + const alignment = result_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment); const array_info = result_ty.arrayInfo(mod); - const elem_ptr_ty = try mod.ptrType(.{ + const elem_ptr_ty = try pt.ptrType(.{ .child = array_info.elem_type.toIntern(), }); @@ -10084,21 +10147,22 @@ pub const FuncGen = struct { fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const union_ty = self.typeOfIndex(inst); const union_llvm_ty = try o.lowerType(union_ty); - const layout = union_ty.unionGetLayout(mod); + const layout = union_ty.unionGetLayout(pt); const union_obj = mod.typeToUnion(union_ty).?; if (union_obj.getLayout(ip) == .@"packed") { - const big_bits = union_ty.bitSize(mod); + const big_bits = union_ty.bitSize(pt); const int_llvm_ty = try o.builder.intType(@intCast(big_bits)); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]); const non_int_val = try self.resolveInst(extra.init); - const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(mod))); 
+ const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(pt))); const small_int_val = if (field_ty.isPtrAtRuntime(mod)) try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "") else @@ -10110,19 +10174,19 @@ pub const FuncGen = struct { const tag_ty = union_ty.unionTagTypeHypothetical(mod); const union_field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index]; const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?; - const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); - break :blk try tag_val.intFromEnum(tag_ty, mod); + const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index); + break :blk try tag_val.intFromEnum(tag_ty, pt); }; if (layout.payload_size == 0) { if (layout.tag_size == 0) { return .none; } - assert(!isByRef(union_ty, mod)); + assert(!isByRef(union_ty, pt)); var big_int_space: Value.BigIntSpace = undefined; - const tag_big_int = tag_int_val.toBigInt(&big_int_space, mod); + const tag_big_int = tag_int_val.toBigInt(&big_int_space, pt); return try o.builder.bigIntValue(union_llvm_ty, tag_big_int); } - assert(isByRef(union_ty, mod)); + assert(isByRef(union_ty, pt)); // The llvm type of the alloca will be the named LLVM union type, and will not // necessarily match the format that we need, depending on which tag is active. // We must construct the correct unnamed struct type here, in order to then set @@ -10132,14 +10196,14 @@ pub const FuncGen = struct { const llvm_payload = try self.resolveInst(extra.init); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]); const field_llvm_ty = try o.lowerType(field_ty); - const field_size = field_ty.abiSize(mod); - const field_align = mod.unionFieldNormalAlignment(union_obj, extra.field_index); + const field_size = field_ty.abiSize(pt); + const field_align = pt.unionFieldNormalAlignment(union_obj, extra.field_index); const llvm_usize = try o.lowerType(Type.usize); const usize_zero = try o.builder.intValue(llvm_usize, 0); const llvm_union_ty = t: { const payload_ty = p: { - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { const padding_len = layout.payload_size; break :p try o.builder.arrayType(padding_len, .i8); } @@ -10169,7 +10233,7 @@ pub const FuncGen = struct { // Now we follow the layout as expressed above with GEP instructions to set the // tag and the payload. - const field_ptr_ty = try mod.ptrType(.{ + const field_ptr_ty = try pt.ptrType(.{ .child = field_ty.toIntern(), .flags = .{ .alignment = field_align }, }); @@ -10195,9 +10259,9 @@ pub const FuncGen = struct { const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, &indices, ""); const tag_ty = try o.lowerType(Type.fromInterned(union_obj.enum_tag_ty)); var big_int_space: Value.BigIntSpace = undefined; - const tag_big_int = tag_int_val.toBigInt(&big_int_space, mod); + const tag_big_int = tag_int_val.toBigInt(&big_int_space, pt); const llvm_tag = try o.builder.bigIntValue(tag_ty, tag_big_int); - const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(mod).toLlvm(); + const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(pt).toLlvm(); _ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment); } @@ -10223,7 +10287,7 @@ pub const FuncGen = struct { // by the target. // To work around this, don't emit llvm.prefetch in this case. 
// See https://bugs.llvm.org/show_bug.cgi?id=21037 - const mod = o.module; + const mod = o.pt.zcu; const target = mod.getTarget(); switch (prefetch.cache) { .instruction => switch (target.cpu.arch) { @@ -10279,7 +10343,7 @@ pub const FuncGen = struct { fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const target = o.module.getTarget(); + const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -10289,7 +10353,7 @@ pub const FuncGen = struct { fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const target = o.module.getTarget(); + const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -10312,7 +10376,7 @@ pub const FuncGen = struct { fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const target = o.module.getTarget(); + const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -10322,7 +10386,7 @@ pub const FuncGen = struct { fn getErrorNameTable(self: *FuncGen) Allocator.Error!Builder.Variable.Index { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; const table = o.error_name_table; if (table != .none) return table; @@ -10334,7 +10398,7 @@ pub const FuncGen = struct { variable_index.setMutability(.constant, &o.builder); variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); variable_index.setAlignment( - Type.slice_const_u8_sentinel_0.abiAlignment(mod).toLlvm(), + Type.slice_const_u8_sentinel_0.abiAlignment(pt).toLlvm(), &o.builder, ); @@ -10372,15 +10436,16 @@ pub const FuncGen = struct { can_elide_load: bool, ) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const payload_ty = opt_ty.optionalChild(mod); - if (isByRef(opt_ty, mod)) { + if (isByRef(opt_ty, pt)) { // We have a pointer and we need to return a pointer to the first field. 
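
The GEP to struct field 0 just below relies on how optionals are represented: a non-pointer optional carries its payload alongside a separate non-null byte, while optional pointers reuse the null address and need no extra field. A small sketch of those layout facts at the language level:

```zig
const std = @import("std");

test "optional layout assumptions behind the payload GEP" {
    // A non-pointer optional stores its payload plus a separate non-null
    // flag, which is why the payload lives at struct index 0 in the
    // lowered LLVM type.
    try std.testing.expect(@sizeOf(?u64) > @sizeOf(u64));

    // Optional pointers reuse the null address as "no value"
    // (`optionalReprIsPayload`), so no extra field exists at all.
    try std.testing.expect(@sizeOf(?*u64) == @sizeOf(*u64));
}
```
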
const payload_ptr = try fg.wip.gepStruct(opt_llvm_ty, opt_handle, 0, ""); - const payload_alignment = payload_ty.abiAlignment(mod).toLlvm(); - if (isByRef(payload_ty, mod)) { + const payload_alignment = payload_ty.abiAlignment(pt).toLlvm(); + if (isByRef(payload_ty, pt)) { if (can_elide_load) return payload_ptr; @@ -10389,7 +10454,7 @@ pub const FuncGen = struct { return fg.loadTruncate(.normal, payload_ty, payload_ptr, payload_alignment); } - assert(!isByRef(payload_ty, mod)); + assert(!isByRef(payload_ty, pt)); return fg.wip.extractValue(opt_handle, &.{0}, ""); } @@ -10400,12 +10465,12 @@ pub const FuncGen = struct { non_null_bit: Builder.Value, ) !Builder.Value { const o = self.dg.object; + const pt = o.pt; const optional_llvm_ty = try o.lowerType(optional_ty); const non_null_field = try self.wip.cast(.zext, non_null_bit, .i8, ""); - const mod = o.module; - if (isByRef(optional_ty, mod)) { - const payload_alignment = optional_ty.abiAlignment(mod).toLlvm(); + if (isByRef(optional_ty, pt)) { + const payload_alignment = optional_ty.abiAlignment(pt).toLlvm(); const alloca_inst = try self.buildAllocaWorkaround(optional_ty, payload_alignment); { @@ -10432,7 +10497,8 @@ pub const FuncGen = struct { field_index: u32, ) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const struct_ty = struct_ptr_ty.childType(mod); switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout(mod)) { @@ -10452,7 +10518,7 @@ pub const FuncGen = struct { // We have a pointer to a packed struct field that happens to be byte-aligned. // Offset our operand pointer by the correct number of bytes. - const byte_offset = @divExact(mod.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8); + const byte_offset = @divExact(pt.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8); if (byte_offset == 0) return struct_ptr; const usize_ty = try o.lowerType(Type.usize); const llvm_index = try o.builder.intValue(usize_ty, byte_offset); @@ -10470,14 +10536,14 @@ pub const FuncGen = struct { // the struct. const llvm_index = try o.builder.intValue( try o.lowerType(Type.usize), - @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), + @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(pt)), ); return self.wip.gep(.inbounds, struct_llvm_ty, struct_ptr, &.{llvm_index}, ""); } }, }, .Union => { - const layout = struct_ty.unionGetLayout(mod); + const layout = struct_ty.unionGetLayout(pt); if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .@"packed") return struct_ptr; const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align)); const union_llvm_ty = try o.lowerType(struct_ty); @@ -10500,9 +10566,10 @@ pub const FuncGen = struct { // => so load the byte aligned value and trunc the unwanted bits. 
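
`loadTruncate` exists because a type's bit size can be smaller than its ABI size, so a byte-aligned load picks up padding bits that must be shifted or truncated away (on big-endian targets the value sits in the high bytes, hence the extra `lshr`). A tiny illustration of that size gap, assuming the usual power-of-two ABI sizes:

```zig
const std = @import("std");

test "bit size vs ABI size" {
    // A u24 is meaningful in only 24 bits, but its ABI size is rounded up
    // to a whole storage unit (4 bytes on mainstream targets), so a plain
    // byte-aligned load reads extra bits that must be masked or truncated.
    try std.testing.expectEqual(24, @bitSizeOf(u24));
    try std.testing.expect(@sizeOf(u24) * 8 > @bitSizeOf(u24));
}
```
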
const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const payload_llvm_ty = try o.lowerType(payload_ty); - const abi_size = payload_ty.abiSize(mod); + const abi_size = payload_ty.abiSize(pt); // llvm bug workarounds: const workaround_explicit_mask = o.target.cpu.arch == .powerpc and abi_size >= 4; @@ -10522,7 +10589,7 @@ pub const FuncGen = struct { const shifted = if (payload_llvm_ty != load_llvm_ty and o.target.cpu.arch.endian() == .big) try fg.wip.bin(.lshr, loaded, try o.builder.intValue( load_llvm_ty, - (payload_ty.abiSize(mod) - (std.math.divCeil(u64, payload_ty.bitSize(mod), 8) catch unreachable)) * 8, + (payload_ty.abiSize(pt) - (std.math.divCeil(u64, payload_ty.bitSize(pt), 8) catch unreachable)) * 8, ), "") else loaded; @@ -10546,11 +10613,11 @@ pub const FuncGen = struct { access_kind: Builder.MemoryAccessKind, ) !Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; //const pointee_llvm_ty = try o.lowerType(pointee_type); - const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(mod)).toLlvm(); + const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(pt)).toLlvm(); const result_ptr = try fg.buildAllocaWorkaround(pointee_type, result_align); - const size_bytes = pointee_type.abiSize(mod); + const size_bytes = pointee_type.abiSize(pt); _ = try fg.wip.callMemCpy( result_ptr, result_align, @@ -10567,15 +10634,16 @@ pub const FuncGen = struct { /// For isByRef=false types, it creates a load instruction and returns it. fn load(self: *FuncGen, ptr: Builder.Value, ptr_ty: Type) !Builder.Value { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const info = ptr_ty.ptrInfo(mod); const elem_ty = Type.fromInterned(info.child); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none; const ptr_alignment = (if (info.flags.alignment != .none) @as(InternPool.Alignment, info.flags.alignment) else - elem_ty.abiAlignment(mod)).toLlvm(); + elem_ty.abiAlignment(pt)).toLlvm(); const access_kind: Builder.MemoryAccessKind = if (info.flags.is_volatile) .@"volatile" else .normal; @@ -10591,7 +10659,7 @@ pub const FuncGen = struct { } if (info.packed_offset.host_size == 0) { - if (isByRef(elem_ty, mod)) { + if (isByRef(elem_ty, pt)) { return self.loadByRef(ptr, elem_ty, ptr_alignment, access_kind); } return self.loadTruncate(access_kind, elem_ty, ptr, ptr_alignment); @@ -10601,13 +10669,13 @@ pub const FuncGen = struct { const containing_int = try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, ""); - const elem_bits = ptr_ty.childType(mod).bitSize(mod); + const elem_bits = ptr_ty.childType(mod).bitSize(pt); const shift_amt = try o.builder.intValue(containing_int_ty, info.packed_offset.bit_offset); const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(elem_ty); - if (isByRef(elem_ty, mod)) { - const result_align = elem_ty.abiAlignment(mod).toLlvm(); + if (isByRef(elem_ty, pt)) { + const result_align = elem_ty.abiAlignment(pt).toLlvm(); const result_ptr = try self.buildAllocaWorkaround(elem_ty, result_align); const same_size_int = try o.builder.intType(@intCast(elem_bits)); @@ -10639,13 +10707,14 @@ pub const FuncGen = struct { ordering: Builder.AtomicOrdering, ) !void { const o = self.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const info = 
ptr_ty.ptrInfo(mod); const elem_ty = Type.fromInterned(info.child); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { return; } - const ptr_alignment = ptr_ty.ptrAlignment(mod).toLlvm(); + const ptr_alignment = ptr_ty.ptrAlignment(pt).toLlvm(); const access_kind: Builder.MemoryAccessKind = if (info.flags.is_volatile) .@"volatile" else .normal; @@ -10669,7 +10738,7 @@ pub const FuncGen = struct { assert(ordering == .none); const containing_int = try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, ""); - const elem_bits = ptr_ty.childType(mod).bitSize(mod); + const elem_bits = ptr_ty.childType(mod).bitSize(pt); const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset); // Convert to equally-sized integer type in order to perform the bit // operations on the value to store @@ -10704,7 +10773,7 @@ pub const FuncGen = struct { _ = try self.wip.store(access_kind, ored_value, ptr, ptr_alignment); return; } - if (!isByRef(elem_ty, mod)) { + if (!isByRef(elem_ty, pt)) { _ = try self.wip.storeAtomic( access_kind, elem, @@ -10720,8 +10789,8 @@ pub const FuncGen = struct { ptr, ptr_alignment, elem, - elem_ty.abiAlignment(mod).toLlvm(), - try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(mod)), + elem_ty.abiAlignment(pt).toLlvm(), + try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(pt)), access_kind, ); } @@ -10747,12 +10816,13 @@ pub const FuncGen = struct { a5: Builder.Value, ) Allocator.Error!Builder.Value { const o = fg.dg.object; - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const target = mod.getTarget(); if (!target_util.hasValgrindSupport(target)) return default_value; const llvm_usize = try o.lowerType(Type.usize); - const usize_alignment = Type.usize.abiAlignment(mod).toLlvm(); + const usize_alignment = Type.usize.abiAlignment(pt).toLlvm(); const array_llvm_ty = try o.builder.arrayType(6, llvm_usize); const array_ptr = if (fg.valgrind_client_request_array == .none) a: { @@ -10813,13 +10883,13 @@ pub const FuncGen = struct { fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; return fg.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type { const o = fg.dg.object; - const mod = o.module; + const mod = o.pt.zcu; return fg.air.typeOfIndex(inst, &mod.intern_pool); } }; @@ -10990,12 +11060,12 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ }; } -fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool { - if (isByRef(ty, zcu)) { +fn returnTypeByRef(pt: Zcu.PerThread, target: std.Target, ty: Type) bool { + if (isByRef(ty, pt)) { return true; } else if (target.cpu.arch.isX86() and !std.Target.x86.featureSetHas(target.cpu.features, .evex512) and - ty.totalVectorBits(zcu) >= 512) + ty.totalVectorBits(pt) >= 512) { // As of LLVM 18, passing a vector byval with fastcc that is 512 bits or more returns // "512-bit vector arguments require 'evex512' for AVX512" @@ -11005,38 +11075,38 @@ fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool { } } -fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Target) bool { +fn firstParamSRet(fn_info: InternPool.Key.FuncType, pt: Zcu.PerThread, target: std.Target) bool { const return_type = Type.fromInterned(fn_info.return_type); - if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false; + if 
(!return_type.hasRuntimeBitsIgnoreComptime(pt)) return false; return switch (fn_info.cc) { - .Unspecified, .Inline => returnTypeByRef(zcu, target, return_type), + .Unspecified, .Inline => returnTypeByRef(pt, target, return_type), .C => switch (target.cpu.arch) { .mips, .mipsel => false, - .x86 => isByRef(return_type, zcu), + .x86 => isByRef(return_type, pt), .x86_64 => switch (target.os.tag) { - .windows => x86_64_abi.classifyWindows(return_type, zcu) == .memory, - else => firstParamSRetSystemV(return_type, zcu, target), + .windows => x86_64_abi.classifyWindows(return_type, pt) == .memory, + else => firstParamSRetSystemV(return_type, pt, target), }, - .wasm32 => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect, - .aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, zcu) == .memory, - .arm, .armeb => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) { + .wasm32 => wasm_c_abi.classifyType(return_type, pt)[0] == .indirect, + .aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, pt) == .memory, + .arm, .armeb => switch (arm_c_abi.classifyType(return_type, pt, .ret)) { .memory, .i64_array => true, .i32_array => |size| size != 1, .byval => false, }, - .riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, zcu) == .memory, + .riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, pt) == .memory, else => false, // TODO investigate C ABI for other architectures }, - .SysV => firstParamSRetSystemV(return_type, zcu, target), - .Win64 => x86_64_abi.classifyWindows(return_type, zcu) == .memory, - .Stdcall => !isScalar(zcu, return_type), + .SysV => firstParamSRetSystemV(return_type, pt, target), + .Win64 => x86_64_abi.classifyWindows(return_type, pt) == .memory, + .Stdcall => !isScalar(pt.zcu, return_type), else => false, }; } -fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool { - const class = x86_64_abi.classifySystemV(ty, zcu, target, .ret); +fn firstParamSRetSystemV(ty: Type, pt: Zcu.PerThread, target: std.Target) bool { + const class = x86_64_abi.classifySystemV(ty, pt, target, .ret); if (class[0] == .memory) return true; if (class[0] == .x87 and class[2] != .none) return true; return false; @@ -11046,9 +11116,10 @@ fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool { /// completely differently in the function prototype to honor the C ABI, and then /// be effectively bitcasted to the actual return type. fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const return_type = Type.fromInterned(fn_info.return_type); - if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) { // If the return type is an error set or an error union, then we make this // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. 
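
The comment above concerns function-pointer coercion: an error union whose payload has no runtime bits still lowers to an error-set return, so the function can coerce to a pointer whose return type uses `anyerror`. A short sketch of the language-level behavior this preserves:

```zig
const std = @import("std");

test "zero-bit error union payload still coerces to an anyerror fn pointer" {
    const impl = struct {
        fn f() error{Oops}!void {
            return error.Oops;
        }
    };
    // `error{Oops}!void` has a zero-bit payload, but the function is still
    // lowered to return an error value, so its pointer can widen to a
    // function pointer whose return type uses `anyerror`.
    const ptr: *const fn () anyerror!void = &impl.f;
    try std.testing.expectError(error.Oops, ptr());
}
```
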
@@ -11058,12 +11129,12 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu switch (fn_info.cc) { .Unspecified, .Inline, - => return if (returnTypeByRef(mod, target, return_type)) .void else o.lowerType(return_type), + => return if (returnTypeByRef(pt, target, return_type)) .void else o.lowerType(return_type), .C => { switch (target.cpu.arch) { .mips, .mipsel => return o.lowerType(return_type), - .x86 => return if (isByRef(return_type, mod)) .void else o.lowerType(return_type), + .x86 => return if (isByRef(return_type, pt)) .void else o.lowerType(return_type), .x86_64 => switch (target.os.tag) { .windows => return lowerWin64FnRetTy(o, fn_info), else => return lowerSystemVFnRetTy(o, fn_info), @@ -11072,36 +11143,36 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu if (isScalar(mod, return_type)) { return o.lowerType(return_type); } - const classes = wasm_c_abi.classifyType(return_type, mod); + const classes = wasm_c_abi.classifyType(return_type, pt); if (classes[0] == .indirect or classes[0] == .none) { return .void; } assert(classes[0] == .direct and classes[1] == .none); - const scalar_type = wasm_c_abi.scalarType(return_type, mod); - return o.builder.intType(@intCast(scalar_type.abiSize(mod) * 8)); + const scalar_type = wasm_c_abi.scalarType(return_type, pt); + return o.builder.intType(@intCast(scalar_type.abiSize(pt) * 8)); }, .aarch64, .aarch64_be => { - switch (aarch64_c_abi.classifyType(return_type, mod)) { + switch (aarch64_c_abi.classifyType(return_type, pt)) { .memory => return .void, .float_array => return o.lowerType(return_type), .byval => return o.lowerType(return_type), - .integer => return o.builder.intType(@intCast(return_type.bitSize(mod))), + .integer => return o.builder.intType(@intCast(return_type.bitSize(pt))), .double_integer => return o.builder.arrayType(2, .i64), } }, .arm, .armeb => { - switch (arm_c_abi.classifyType(return_type, mod, .ret)) { + switch (arm_c_abi.classifyType(return_type, pt, .ret)) { .memory, .i64_array => return .void, .i32_array => |len| return if (len == 1) .i32 else .void, .byval => return o.lowerType(return_type), } }, .riscv32, .riscv64 => { - switch (riscv_c_abi.classifyType(return_type, mod)) { + switch (riscv_c_abi.classifyType(return_type, pt)) { .memory => return .void, .integer => { - return o.builder.intType(@intCast(return_type.bitSize(mod))); + return o.builder.intType(@intCast(return_type.bitSize(pt))); }, .double_integer => { return o.builder.structType(.normal, &.{ .i64, .i64 }); @@ -11112,7 +11183,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu var types: [8]Builder.Type = undefined; for (0..return_type.structFieldCount(mod)) |field_index| { const field_ty = return_type.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; types[types_len] = try o.lowerType(field_ty); types_len += 1; } @@ -11132,14 +11203,14 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu } fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; const return_type = Type.fromInterned(fn_info.return_type); - switch (x86_64_abi.classifyWindows(return_type, mod)) { + switch (x86_64_abi.classifyWindows(return_type, pt)) { .integer => { - if (isScalar(mod, return_type)) { + if (isScalar(pt.zcu, return_type)) { return o.lowerType(return_type); } 
else { - return o.builder.intType(@intCast(return_type.abiSize(mod) * 8)); + return o.builder.intType(@intCast(return_type.abiSize(pt) * 8)); } }, .win_i128 => return o.builder.vectorType(.normal, 2, .i64), @@ -11150,14 +11221,15 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Err } fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { - const mod = o.module; + const pt = o.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const return_type = Type.fromInterned(fn_info.return_type); if (isScalar(mod, return_type)) { return o.lowerType(return_type); } const target = mod.getTarget(); - const classes = x86_64_abi.classifySystemV(return_type, mod, target, .ret); + const classes = x86_64_abi.classifySystemV(return_type, pt, target, .ret); if (classes[0] == .memory) return .void; var types_index: u32 = 0; var types_buffer: [8]Builder.Type = undefined; @@ -11249,8 +11321,7 @@ const ParamTypeIterator = struct { pub fn next(it: *ParamTypeIterator) Allocator.Error!?Lowering { if (it.zig_index >= it.fn_info.param_types.len) return null; - const zcu = it.object.module; - const ip = &zcu.intern_pool; + const ip = &it.object.pt.zcu.intern_pool; const ty = it.fn_info.param_types.get(ip)[it.zig_index]; it.byval_attr = false; return nextInner(it, Type.fromInterned(ty)); @@ -11258,8 +11329,7 @@ const ParamTypeIterator = struct { /// `airCall` uses this instead of `next` so that it can take into account variadic functions. pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) Allocator.Error!?Lowering { - const zcu = it.object.module; - const ip = &zcu.intern_pool; + const ip = &it.object.pt.zcu.intern_pool; if (it.zig_index >= it.fn_info.param_types.len) { if (it.zig_index >= args.len) { return null; @@ -11272,10 +11342,11 @@ const ParamTypeIterator = struct { } fn nextInner(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering { - const zcu = it.object.module; + const pt = it.object.pt; + const zcu = pt.zcu; const target = zcu.getTarget(); - if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { + if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { it.zig_index += 1; return .no_bits; } @@ -11288,11 +11359,11 @@ const ParamTypeIterator = struct { { it.llvm_index += 1; return .slice; - } else if (isByRef(ty, zcu)) { + } else if (isByRef(ty, pt)) { return .byref; } else if (target.cpu.arch.isX86() and !std.Target.x86.featureSetHas(target.cpu.features, .evex512) and - ty.totalVectorBits(zcu) >= 512) + ty.totalVectorBits(pt) >= 512) { // As of LLVM 18, passing a vector byval with fastcc that is 512 bits or more returns // "512-bit vector arguments require 'evex512' for AVX512" @@ -11320,7 +11391,7 @@ const ParamTypeIterator = struct { if (isScalar(zcu, ty)) { return .byval; } - const classes = wasm_c_abi.classifyType(ty, zcu); + const classes = wasm_c_abi.classifyType(ty, pt); if (classes[0] == .indirect) { return .byref; } @@ -11329,7 +11400,7 @@ const ParamTypeIterator = struct { .aarch64, .aarch64_be => { it.zig_index += 1; it.llvm_index += 1; - switch (aarch64_c_abi.classifyType(ty, zcu)) { + switch (aarch64_c_abi.classifyType(ty, pt)) { .memory => return .byref_mut, .float_array => |len| return Lowering{ .float_array = len }, .byval => return .byval, @@ -11344,7 +11415,7 @@ const ParamTypeIterator = struct { .arm, .armeb => { it.zig_index += 1; it.llvm_index += 1; - switch (arm_c_abi.classifyType(ty, zcu, .arg)) { + switch (arm_c_abi.classifyType(ty, pt, .arg)) { .memory => { it.byval_attr = true; return 
.byref; @@ -11359,7 +11430,7 @@ const ParamTypeIterator = struct { it.llvm_index += 1; if (ty.toIntern() == .f16_type and !std.Target.riscv.featureSetHas(target.cpu.features, .d)) return .as_u16; - switch (riscv_c_abi.classifyType(ty, zcu)) { + switch (riscv_c_abi.classifyType(ty, pt)) { .memory => return .byref_mut, .byval => return .byval, .integer => return .abi_sized_int, @@ -11368,7 +11439,7 @@ const ParamTypeIterator = struct { it.types_len = 0; for (0..ty.structFieldCount(zcu)) |field_index| { const field_ty = ty.structFieldType(field_index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; it.types_buffer[it.types_len] = try it.object.lowerType(field_ty); it.types_len += 1; } @@ -11406,10 +11477,10 @@ const ParamTypeIterator = struct { } fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering { - const zcu = it.object.module; - switch (x86_64_abi.classifyWindows(ty, zcu)) { + const pt = it.object.pt; + switch (x86_64_abi.classifyWindows(ty, pt)) { .integer => { - if (isScalar(zcu, ty)) { + if (isScalar(pt.zcu, ty)) { it.zig_index += 1; it.llvm_index += 1; return .byval; @@ -11439,17 +11510,17 @@ const ParamTypeIterator = struct { } fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering { - const zcu = it.object.module; - const ip = &zcu.intern_pool; - const target = zcu.getTarget(); - const classes = x86_64_abi.classifySystemV(ty, zcu, target, .arg); + const pt = it.object.pt; + const ip = &pt.zcu.intern_pool; + const target = pt.zcu.getTarget(); + const classes = x86_64_abi.classifySystemV(ty, pt, target, .arg); if (classes[0] == .memory) { it.zig_index += 1; it.llvm_index += 1; it.byval_attr = true; return .byref; } - if (isScalar(zcu, ty)) { + if (isScalar(pt.zcu, ty)) { it.zig_index += 1; it.llvm_index += 1; return .byval; @@ -11550,7 +11621,7 @@ fn iterateParamTypes(object: *Object, fn_info: InternPool.Key.FuncType) ParamTyp fn ccAbiPromoteInt( cc: std.builtin.CallingConvention, - mod: *Module, + mod: *Zcu, ty: Type, ) ?std.builtin.Signedness { const target = mod.getTarget(); @@ -11598,13 +11669,13 @@ fn ccAbiPromoteInt( /// This is the one source of truth for whether a type is passed around as an LLVM pointer, /// or as an LLVM value. -fn isByRef(ty: Type, mod: *Module) bool { +fn isByRef(ty: Type, pt: Zcu.PerThread) bool { // For tuples and structs, if there are more than this many non-void // fields, then we make it byref, otherwise byval. 
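
`isByRef` now takes `pt`, but the decision itself is unchanged: with `max_fields_byval = 0` (declared just below), any struct or tuple with even one field that has runtime bits is handled behind a pointer, while scalars and packed aggregates stay by value. The hypothetical `wouldBeByRef` below is a simplified user-level stand-in for that check; it ignores unions, optionals, and error unions, which the real function also walks:

```zig
const std = @import("std");

/// Hypothetical, simplified stand-in for the backend's `isByRef`: because
/// `max_fields_byval` is zero, a single field with runtime bits is enough
/// to push a struct behind a pointer.
fn wouldBeByRef(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        .Struct => |info| blk: {
            inline for (info.fields) |field| {
                if (@sizeOf(field.type) != 0) break :blk true;
            }
            break :blk false;
        },
        else => false, // scalars, pointers, etc. stay by value
    };
}

test "wouldBeByRef" {
    try std.testing.expect(wouldBeByRef(struct { x: u32 }));
    try std.testing.expect(!wouldBeByRef(struct {})); // no runtime fields
    try std.testing.expect(!wouldBeByRef(u32));
}
```
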
const max_fields_byval = 0; - const ip = &mod.intern_pool; + const ip = &pt.zcu.intern_pool; - switch (ty.zigTypeTag(mod)) { + switch (ty.zigTypeTag(pt.zcu)) { .Type, .ComptimeInt, .ComptimeFloat, @@ -11627,17 +11698,17 @@ fn isByRef(ty: Type, mod: *Module) bool { .AnyFrame, => return false, - .Array, .Frame => return ty.hasRuntimeBits(mod), + .Array, .Frame => return ty.hasRuntimeBits(pt), .Struct => { const struct_type = switch (ip.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var count: usize = 0; for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue; + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; count += 1; if (count > max_fields_byval) return true; - if (isByRef(Type.fromInterned(field_ty), mod)) return true; + if (isByRef(Type.fromInterned(field_ty), pt)) return true; } return false; }, @@ -11655,27 +11726,27 @@ fn isByRef(ty: Type, mod: *Module) bool { count += 1; if (count > max_fields_byval) return true; const field_ty = Type.fromInterned(field_types[field_index]); - if (isByRef(field_ty, mod)) return true; + if (isByRef(field_ty, pt)) return true; } return false; }, - .Union => switch (ty.containerLayout(mod)) { + .Union => switch (ty.containerLayout(pt.zcu)) { .@"packed" => return false, - else => return ty.hasRuntimeBits(mod), + else => return ty.hasRuntimeBits(pt), }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const payload_ty = ty.errorUnionPayload(pt.zcu); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return false; } return true; }, .Optional => { - const payload_ty = ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const payload_ty = ty.optionalChild(pt.zcu); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return false; } - if (ty.optionalReprIsPayload(mod)) { + if (ty.optionalReprIsPayload(pt.zcu)) { return false; } return true; @@ -11683,7 +11754,7 @@ fn isByRef(ty: Type, mod: *Module) bool { } } -fn isScalar(mod: *Module, ty: Type) bool { +fn isScalar(mod: *Zcu, ty: Type) bool { return switch (ty.zigTypeTag(mod)) { .Void, .Bool, @@ -11774,7 +11845,7 @@ const lt_errors_fn_name = "__zig_lt_errors_len"; /// Without this workaround, LLVM crashes with "unknown codeview register H1" /// https://github.com/llvm/llvm-project/issues/56484 fn needDbgVarWorkaround(o: *Object) bool { - const target = o.module.getTarget(); + const target = o.pt.zcu.getTarget(); if (target.os.tag == .windows and target.cpu.arch == .aarch64) { return true; } @@ -11817,14 +11888,14 @@ fn buildAllocaInner( return wip.conv(.unneeded, alloca, .ptr, ""); } -fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) !u1 { - const err_int_ty = try mod.errorIntType(); - return @intFromBool(err_int_ty.abiAlignment(mod).compare(.gt, payload_ty.abiAlignment(mod))); +fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) !u1 { + const err_int_ty = try pt.errorIntType(); + return @intFromBool(err_int_ty.abiAlignment(pt).compare(.gt, payload_ty.abiAlignment(pt))); } -fn errUnionErrorOffset(payload_ty: Type, mod: *Module) !u1 { - const err_int_ty = try mod.errorIntType(); - return @intFromBool(err_int_ty.abiAlignment(mod).compare(.lte, payload_ty.abiAlignment(mod))); +fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) !u1 { + const err_int_ty = try pt.errorIntType(); + return 
@intFromBool(err_int_ty.abiAlignment(pt).compare(.lte, payload_ty.abiAlignment(pt))); } /// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 2fbe9097d6..95874a5d65 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -6,9 +6,7 @@ const assert = std.debug.assert; const Signedness = std.builtin.Signedness; const Zcu = @import("../Zcu.zig"); -/// Deprecated. -const Module = Zcu; -const Decl = Module.Decl; +const Decl = Zcu.Decl; const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const Air = @import("../Air.zig"); @@ -188,12 +186,13 @@ pub const Object = struct { fn genDecl( self: *Object, - zcu: *Zcu, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, air: Air, liveness: Liveness, ) !void { - const gpa = self.gpa; + const zcu = pt.zcu; + const gpa = zcu.gpa; const decl = zcu.declPtr(decl_index); const namespace = zcu.namespacePtr(decl.src_namespace); const structured_cfg = namespace.fileScope(zcu).mod.structured_cfg; @@ -201,7 +200,7 @@ pub const Object = struct { var decl_gen = DeclGen{ .gpa = gpa, .object = self, - .module = zcu, + .pt = pt, .spv = &self.spv, .decl_index = decl_index, .air = air, @@ -235,34 +234,34 @@ pub const Object = struct { pub fn updateFunc( self: *Object, - mod: *Module, + pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness, ) !void { - const decl_index = mod.funcInfo(func_index).owner_decl; + const decl_index = pt.zcu.funcInfo(func_index).owner_decl; // TODO: Separate types for generating decls and functions? - try self.genDecl(mod, decl_index, air, liveness); + try self.genDecl(pt, decl_index, air, liveness); } pub fn updateDecl( self: *Object, - mod: *Module, + pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, ) !void { - try self.genDecl(mod, decl_index, undefined, undefined); + try self.genDecl(pt, decl_index, undefined, undefined); } /// Fetch or allocate a result id for decl index. This function also marks the decl as alive. /// Note: Function does not actually generate the decl, it just allocates an index. - pub fn resolveDecl(self: *Object, mod: *Module, decl_index: InternPool.DeclIndex) !SpvModule.Decl.Index { - const decl = mod.declPtr(decl_index); + pub fn resolveDecl(self: *Object, zcu: *Zcu, decl_index: InternPool.DeclIndex) !SpvModule.Decl.Index { + const decl = zcu.declPtr(decl_index); assert(decl.has_tv); // TODO: Do we need to handle a situation where this is false? const entry = try self.decl_link.getOrPut(self.gpa, decl_index); if (!entry.found_existing) { // TODO: Extern fn? - const kind: SpvModule.Decl.Kind = if (decl.val.isFuncBody(mod)) + const kind: SpvModule.Decl.Kind = if (decl.val.isFuncBody(zcu)) .func else switch (decl.@"addrspace") { .generic => .invocation_global, @@ -285,7 +284,7 @@ const DeclGen = struct { object: *Object, /// The Zig module that we are generating decls for. - module: *Module, + pt: Zcu.PerThread, /// The SPIR-V module that instructions should be emitted into. /// This is the same as `self.object.spv`, repeated here for brevity. @@ -333,7 +332,7 @@ const DeclGen = struct { /// If `gen` returned `Error.CodegenFail`, this contains an explanatory message. /// Memory is owned by `module.gpa`. - error_msg: ?*Module.ErrorMsg = null, + error_msg: ?*Zcu.ErrorMsg = null, /// Possible errors the `genDecl` function may return. 
const Error = error{ CodegenFail, OutOfMemory }; @@ -410,15 +409,15 @@ const DeclGen = struct { /// Return the target which we are currently compiling for. pub fn getTarget(self: *DeclGen) std.Target { - return self.module.getTarget(); + return self.pt.zcu.getTarget(); } pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { @setCold(true); - const mod = self.module; - const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod); + const zcu = self.pt.zcu; + const src_loc = zcu.declPtr(self.decl_index).navSrcLoc(zcu); assert(self.error_msg == null); - self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args); + self.error_msg = try Zcu.ErrorMsg.create(zcu.gpa, src_loc, format, args); return error.CodegenFail; } @@ -439,8 +438,9 @@ const DeclGen = struct { /// Fetch the result-id for a previously generated instruction or constant. fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef { - const mod = self.module; - if (try self.air.value(inst, mod)) |val| { + const pt = self.pt; + const mod = pt.zcu; + if (try self.air.value(inst, pt)) |val| { const ty = self.typeOf(inst); if (ty.zigTypeTag(mod) == .Fn) { const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) { @@ -462,7 +462,7 @@ const DeclGen = struct { fn resolveAnonDecl(self: *DeclGen, val: InternPool.Index) !IdRef { // TODO: This cannot be a function at this point, but it should probably be handled anyway. - const mod = self.module; + const mod = self.pt.zcu; const ty = Type.fromInterned(mod.intern_pool.typeOf(val)); const decl_ptr_ty_id = try self.ptrType(ty, .Generic); @@ -642,7 +642,7 @@ const DeclGen = struct { /// Checks whether the type can be directly translated to SPIR-V vectors fn isSpvVector(self: *DeclGen, ty: Type) bool { - const mod = self.module; + const mod = self.pt.zcu; const target = self.getTarget(); if (ty.zigTypeTag(mod) != .Vector) return false; @@ -668,7 +668,7 @@ const DeclGen = struct { } fn arithmeticTypeInfo(self: *DeclGen, ty: Type) ArithmeticTypeInfo { - const mod = self.module; + const mod = self.pt.zcu; const target = self.getTarget(); var scalar_ty = ty.scalarType(mod); if (scalar_ty.zigTypeTag(mod) == .Enum) { @@ -744,7 +744,7 @@ const DeclGen = struct { /// the value to an unsigned int first for Kernels. fn constInt(self: *DeclGen, ty: Type, value: anytype, repr: Repr) !IdRef { // TODO: Cache? - const mod = self.module; + const mod = self.pt.zcu; const scalar_ty = ty.scalarType(mod); const int_info = scalar_ty.intInfo(mod); // Use backing bits so that negatives are sign extended @@ -824,7 +824,7 @@ const DeclGen = struct { /// Construct a vector at runtime. /// ty must be an vector type. fn constructVector(self: *DeclGen, ty: Type, constituents: []const IdRef) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; assert(ty.vectorLen(mod) == constituents.len); // Note: older versions of the Khronos SPRIV-LLVM translator crash on this instruction @@ -848,7 +848,7 @@ const DeclGen = struct { /// Construct a vector at runtime with all lanes set to the same value. /// ty must be an vector type. 
fn constructVectorSplat(self: *DeclGen, ty: Type, constituent: IdRef) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; const n = ty.vectorLen(mod); const constituents = try self.gpa.alloc(IdRef, n); @@ -886,12 +886,13 @@ const DeclGen = struct { return id; } - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const target = self.getTarget(); const result_ty_id = try self.resolveType(ty, repr); const ip = &mod.intern_pool; - log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod, null) }); + log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(pt), val.fmtValue(pt, null) }); if (val.isUndefDeep(mod)) { return self.spv.constUndef(result_ty_id); } @@ -940,16 +941,16 @@ const DeclGen = struct { }, .int => { if (ty.isSignedInt(mod)) { - break :cache try self.constInt(ty, val.toSignedInt(mod), repr); + break :cache try self.constInt(ty, val.toSignedInt(pt), repr); } else { - break :cache try self.constInt(ty, val.toUnsignedInt(mod), repr); + break :cache try self.constInt(ty, val.toUnsignedInt(pt), repr); } }, .float => { const lit: spec.LiteralContextDependentNumber = switch (ty.floatBits(target)) { - 16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, mod))) }, - 32 => .{ .float32 = val.toFloat(f32, mod) }, - 64 => .{ .float64 = val.toFloat(f64, mod) }, + 16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, pt))) }, + 32 => .{ .float32 = val.toFloat(f32, pt) }, + 64 => .{ .float64 = val.toFloat(f64, pt) }, 80, 128 => unreachable, // TODO else => unreachable, }; @@ -968,17 +969,17 @@ const DeclGen = struct { .error_union => |error_union| { // TODO: Error unions may be constructed with constant instructions if the payload type // allows it. For now, just generate it here regardless. - const err_int_ty = try mod.errorIntType(); + const err_int_ty = try pt.errorIntType(); const err_ty = switch (error_union.val) { .err_name => ty.errorUnionSet(mod), .payload => err_int_ty, }; const err_val = switch (error_union.val) { - .err_name => |err_name| Value.fromInterned((try mod.intern(.{ .err = .{ + .err_name => |err_name| Value.fromInterned(try pt.intern(.{ .err = .{ .ty = ty.errorUnionSet(mod).toIntern(), .name = err_name, - } }))), - .payload => try mod.intValue(err_int_ty, 0), + } })), + .payload => try pt.intValue(err_int_ty, 0), }; const payload_ty = ty.errorUnionPayload(mod); const eu_layout = self.errorUnionLayout(payload_ty); @@ -988,7 +989,7 @@ const DeclGen = struct { } const payload_val = Value.fromInterned(switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }), + .err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }), .payload => |payload| payload, }); @@ -1007,7 +1008,7 @@ const DeclGen = struct { return try self.constructStruct(ty, &types, &constituents); }, .enum_tag => { - const int_val = try val.intFromEnum(ty, mod); + const int_val = try val.intFromEnum(ty, pt); const int_ty = ty.intTagType(mod); break :cache try self.constant(int_ty, int_val, repr); }, @@ -1026,7 +1027,7 @@ const DeclGen = struct { const payload_ty = ty.optionalChild(mod); const maybe_payload_val = val.optionalValue(mod); - if (!payload_ty.hasRuntimeBits(mod)) { + if (!payload_ty.hasRuntimeBits(pt)) { break :cache try self.constBool(maybe_payload_val != null, .indirect); } else if (ty.optionalReprIsPayload(mod)) { // Optional representation is a nullable pointer or slice. 
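The integer branch of `constant` in the hunk above is a good capsule of the new split: reading a value's bits goes through the per-thread handle `pt`, while purely semantic questions about the type still go through `pt.zcu`. Below is a condensed, illustrative fragment only — `lowerIntConstant` is a made-up name, not a function from this commit, and it assumes the surrounding `DeclGen` context (`constInt`, `Repr`, `IdRef`) shown in this file:

    fn lowerIntConstant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef {
        const pt = self.pt; // Zcu.PerThread stored on the DeclGen
        const mod = pt.zcu; // shared Zcu, still used for type-level queries
        // Signedness is a property of the type, so it is asked of the Zcu;
        // extracting the constant's integer value is a pt-based operation.
        if (ty.isSignedInt(mod)) {
            return try self.constInt(ty, val.toSignedInt(pt), repr);
        } else {
            return try self.constInt(ty, val.toUnsignedInt(pt), repr);
        }
    }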
@@ -1104,13 +1105,13 @@ const DeclGen = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field - we only needed it for the alignment. continue; } // TODO: Padding? - const field_val = try val.fieldValue(mod, field_index); + const field_val = try val.fieldValue(pt, field_index); const field_id = try self.constant(field_ty, field_val, .indirect); try types.append(field_ty); @@ -1126,7 +1127,7 @@ const DeclGen = struct { const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?; const union_obj = mod.typeToUnion(ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]); - const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod)) + const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(pt)) try self.constant(field_ty, Value.fromInterned(un.val), .direct) else null; @@ -1144,10 +1145,10 @@ const DeclGen = struct { fn constantPtr(self: *DeclGen, ptr_val: Value) Error!IdRef { // TODO: Caching?? - const zcu = self.module; + const pt = self.pt; - if (ptr_val.isUndef(zcu)) { - const result_ty = ptr_val.typeOf(zcu); + if (ptr_val.isUndef(pt.zcu)) { + const result_ty = ptr_val.typeOf(pt.zcu); const result_ty_id = try self.resolveType(result_ty, .direct); return self.spv.constUndef(result_ty_id); } @@ -1155,12 +1156,13 @@ const DeclGen = struct { var arena = std.heap.ArenaAllocator.init(self.gpa); defer arena.deinit(); - const derivation = try ptr_val.pointerDerivation(arena.allocator(), zcu); + const derivation = try ptr_val.pointerDerivation(arena.allocator(), pt); return self.derivePtr(derivation); } fn derivePtr(self: *DeclGen, derivation: Value.PointerDeriveStep) Error!IdRef { - const zcu = self.module; + const pt = self.pt; + const zcu = pt.zcu; switch (derivation) { .comptime_alloc_ptr, .comptime_field_ptr => unreachable, .int => |int| { @@ -1172,12 +1174,12 @@ const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{ .id_result_type = result_ty_id, .id_result = result_ptr_id, - .integer_value = try self.constant(Type.usize, try zcu.intValue(Type.usize, int.addr), .direct), + .integer_value = try self.constant(Type.usize, try pt.intValue(Type.usize, int.addr), .direct), }); return result_ptr_id; }, .decl_ptr => |decl| { - const result_ptr_ty = try zcu.declPtr(decl).declPtrType(zcu); + const result_ptr_ty = try zcu.declPtr(decl).declPtrType(pt); return self.constantDeclRef(result_ptr_ty, decl); }, .anon_decl_ptr => |ad| { @@ -1188,18 +1190,18 @@ const DeclGen = struct { .opt_payload_ptr => @panic("TODO"), .field_ptr => |field| { const parent_ptr_id = try self.derivePtr(field.parent.*); - const parent_ptr_ty = try field.parent.ptrType(zcu); + const parent_ptr_ty = try field.parent.ptrType(pt); return self.structFieldPtr(field.result_ptr_ty, parent_ptr_ty, parent_ptr_id, field.field_idx); }, .elem_ptr => |elem| { const parent_ptr_id = try self.derivePtr(elem.parent.*); - const parent_ptr_ty = try elem.parent.ptrType(zcu); + const parent_ptr_ty = try elem.parent.ptrType(pt); const index_id = try self.constInt(Type.usize, elem.elem_idx, .direct); return self.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id); }, .offset_and_cast => |oac| { const parent_ptr_id = try self.derivePtr(oac.parent.*); - const parent_ptr_ty = try oac.parent.ptrType(zcu); + const parent_ptr_ty = try 
oac.parent.ptrType(pt); disallow: { if (oac.byte_offset != 0) break :disallow; // Allow changing the pointer type child only to restructure arrays. @@ -1218,8 +1220,8 @@ const DeclGen = struct { return result_ptr_id; } return self.fail("Cannot perform pointer cast: '{}' to '{}'", .{ - parent_ptr_ty.fmt(zcu), - oac.new_ptr_ty.fmt(zcu), + parent_ptr_ty.fmt(pt), + oac.new_ptr_ty.fmt(pt), }); }, } @@ -1232,7 +1234,8 @@ const DeclGen = struct { ) !IdRef { // TODO: Merge this function with constantDeclRef. - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_id = try self.resolveType(ty, .direct); const decl_val = anon_decl.val; @@ -1247,7 +1250,7 @@ const DeclGen = struct { } // const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { // Pointer to nothing - return undefoined return self.spv.constUndef(ty_id); } @@ -1276,7 +1279,8 @@ const DeclGen = struct { } fn constantDeclRef(self: *DeclGen, ty: Type, decl_index: InternPool.DeclIndex) !IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_id = try self.resolveType(ty, .direct); const decl = mod.declPtr(decl_index); @@ -1290,7 +1294,7 @@ const DeclGen = struct { else => {}, } - if (!decl.typeOf(mod).isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (!decl.typeOf(mod).isFnOrHasRuntimeBitsIgnoreComptime(pt)) { // Pointer to nothing - return undefined. return self.spv.constUndef(ty_id); } @@ -1331,7 +1335,7 @@ const DeclGen = struct { fn resolveTypeName(self: *DeclGen, ty: Type) ![]const u8 { var name = std.ArrayList(u8).init(self.gpa); defer name.deinit(); - try ty.print(name.writer(), self.module); + try ty.print(name.writer(), self.pt); return try name.toOwnedSlice(); } @@ -1424,14 +1428,14 @@ const DeclGen = struct { } fn zigScalarOrVectorTypeLike(self: *DeclGen, new_ty: Type, base_ty: Type) !Type { - const mod = self.module; - const new_scalar_ty = new_ty.scalarType(mod); - if (!base_ty.isVector(mod)) { + const pt = self.pt; + const new_scalar_ty = new_ty.scalarType(pt.zcu); + if (!base_ty.isVector(pt.zcu)) { return new_scalar_ty; } - return try mod.vectorType(.{ - .len = base_ty.vectorLen(mod), + return try pt.vectorType(.{ + .len = base_ty.vectorLen(pt.zcu), .child = new_scalar_ty.toIntern(), }); } @@ -1455,7 +1459,7 @@ const DeclGen = struct { /// } /// If any of the fields' size is 0, it will be omitted. fn resolveUnionType(self: *DeclGen, ty: Type) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ip = &mod.intern_pool; const union_obj = mod.typeToUnion(ty).?; @@ -1506,12 +1510,12 @@ const DeclGen = struct { } fn resolveFnReturnType(self: *DeclGen, ret_ty: Type) !IdRef { - const mod = self.module; - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const pt = self.pt; + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { // If the return type is an error set or an error union, then we make this // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. 
- if (ret_ty.isError(mod)) { + if (ret_ty.isError(pt.zcu)) { return self.resolveType(Type.anyerror, .direct); } else { return self.resolveType(Type.void, .direct); @@ -1533,9 +1537,10 @@ const DeclGen = struct { } fn resolveTypeInner(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; - log.debug("resolveType: ty = {}", .{ty.fmt(mod)}); + log.debug("resolveType: ty = {}", .{ty.fmt(pt)}); const target = self.getTarget(); const section = &self.spv.sections.types_globals_constants; @@ -1607,7 +1612,7 @@ const DeclGen = struct { return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)}); }; - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) { // The size of the array would be 0, but that is not allowed in SPIR-V. // This path can be reached when the backend is asked to generate a pointer to // an array of some zero-bit type. This should always be an indirect path. @@ -1655,7 +1660,7 @@ const DeclGen = struct { var param_index: usize = 0; for (fn_info.param_types.get(ip)) |param_ty_index| { const param_ty = Type.fromInterned(param_ty_index); - if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; param_ty_ids[param_index] = try self.resolveType(param_ty, .direct); param_index += 1; @@ -1713,7 +1718,7 @@ const DeclGen = struct { var member_index: usize = 0; for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue; + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue; member_types[member_index] = try self.resolveType(Type.fromInterned(field_ty), .indirect); member_index += 1; @@ -1742,7 +1747,7 @@ const DeclGen = struct { var it = struct_type.iterateRuntimeOrder(ip); while (it.next()) |field_index| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) { // This is a zero-bit field - we only needed it for the alignment. continue; } @@ -1761,7 +1766,7 @@ const DeclGen = struct { }, .Optional => { const payload_ty = ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // Just use a bool. // Note: Always generate the bool with indirect format, to save on some sanity // Perform the conversion to a direct bool when the field is extracted. 
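The `resolveFnReturnType` hunk above encodes a small rule worth isolating: a zero-bit return type resolves to `void`, except when it is an error (set or union), in which case it widens to `anyerror` so the function can still coerce to a pointer type returning `anyerror`. Sketched on its own below, illustrative only, with the runtime-bits query now taking `pt` as in the diff:

    if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
        // Zero-bit return type: keep error-ness visible, otherwise use void.
        if (ret_ty.isError(pt.zcu)) {
            return self.resolveType(Type.anyerror, .direct);
        } else {
            return self.resolveType(Type.void, .direct);
        }
    }
    return self.resolveType(ret_ty, .direct);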
@@ -1878,14 +1883,14 @@ const DeclGen = struct { }; fn errorUnionLayout(self: *DeclGen, payload_ty: Type) ErrorUnionLayout { - const mod = self.module; + const pt = self.pt; - const error_align = Type.anyerror.abiAlignment(mod); - const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(pt); + const payload_align = payload_ty.abiAlignment(pt); const error_first = error_align.compare(.gt, payload_align); return .{ - .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod), + .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt), .error_first = error_first, }; } @@ -1909,9 +1914,10 @@ const DeclGen = struct { }; fn unionLayout(self: *DeclGen, ty: Type) UnionLayout { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; - const layout = ty.unionGetLayout(self.module); + const layout = ty.unionGetLayout(pt); const union_obj = mod.typeToUnion(ty).?; var union_layout = UnionLayout{ @@ -1932,7 +1938,7 @@ const DeclGen = struct { const most_aligned_field = layout.most_aligned_field; const most_aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[most_aligned_field]); union_layout.payload_ty = most_aligned_field_ty; - union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(mod)); + union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(pt)); } else { union_layout.payload_size = 0; } @@ -1999,7 +2005,7 @@ const DeclGen = struct { } fn materialize(self: Temporary, dg: *DeclGen) !IdResult { - const mod = dg.module; + const mod = dg.pt.zcu; switch (self.value) { .singleton => |id| return id, .exploded_vector => |range| { @@ -2029,12 +2035,12 @@ const DeclGen = struct { /// 'Explode' a temporary into separate elements. This turns a vector /// into a bag of elements. fn explode(self: Temporary, dg: *DeclGen) !IdRange { - const mod = dg.module; + const mod = dg.pt.zcu; // If the value is a scalar, then this is a no-op. if (!self.ty.isVector(mod)) { return switch (self.value) { - .singleton => |id| IdRange{ .base = @intFromEnum(id), .len = 1 }, + .singleton => |id| .{ .base = @intFromEnum(id), .len = 1 }, .exploded_vector => |range| range, }; } @@ -2088,7 +2094,7 @@ const DeclGen = struct { /// only checks the size, but the source-of-truth is implemented /// by `isSpvVector()`. fn fromType(ty: Type, dg: *DeclGen) Vectorization { - const mod = dg.module; + const mod = dg.pt.zcu; if (!ty.isVector(mod)) { return .scalar; } else if (dg.isSpvVector(ty)) { @@ -2164,11 +2170,11 @@ const DeclGen = struct { /// Turns `ty` into the result-type of an individual vector operation. /// `ty` may be a scalar or vector, it doesn't matter. fn operationType(self: Vectorization, dg: *DeclGen, ty: Type) !Type { - const mod = dg.module; - const scalar_ty = ty.scalarType(mod); + const pt = dg.pt; + const scalar_ty = ty.scalarType(pt.zcu); return switch (self) { .scalar, .unrolled => scalar_ty, - .spv_vectorized => |n| try mod.vectorType(.{ + .spv_vectorized => |n| try pt.vectorType(.{ .len = n, .child = scalar_ty.toIntern(), }), @@ -2178,11 +2184,11 @@ const DeclGen = struct { /// Turns `ty` into the result-type of the entire operation. /// `ty` may be a scalar or vector, it doesn't matter. 
fn resultType(self: Vectorization, dg: *DeclGen, ty: Type) !Type { - const mod = dg.module; - const scalar_ty = ty.scalarType(mod); + const pt = dg.pt; + const scalar_ty = ty.scalarType(pt.zcu); return switch (self) { .scalar => scalar_ty, - .unrolled, .spv_vectorized => |n| try mod.vectorType(.{ + .unrolled, .spv_vectorized => |n| try pt.vectorType(.{ .len = n, .child = scalar_ty.toIntern(), }), @@ -2193,8 +2199,8 @@ const DeclGen = struct { /// this setup, and returns a new type that holds the relevant information on how to access /// elements of the input. fn prepare(self: Vectorization, dg: *DeclGen, tmp: Temporary) !PreparedOperand { - const mod = dg.module; - const is_vector = tmp.ty.isVector(mod); + const pt = dg.pt; + const is_vector = tmp.ty.isVector(pt.zcu); const is_spv_vector = dg.isSpvVector(tmp.ty); const value: PreparedOperand.Value = switch (tmp.value) { .singleton => |id| switch (self) { @@ -2209,7 +2215,7 @@ const DeclGen = struct { } // Broadcast scalar into vector. - const vector_ty = try mod.vectorType(.{ + const vector_ty = try pt.vectorType(.{ .len = self.components(), .child = tmp.ty.toIntern(), }); @@ -2340,7 +2346,7 @@ const DeclGen = struct { /// This function builds an OpSConvert of OpUConvert depending on the /// signedness of the types. fn buildIntConvert(self: *DeclGen, dst_ty: Type, src: Temporary) !Temporary { - const mod = self.module; + const mod = self.pt.zcu; const dst_ty_id = try self.resolveType(dst_ty.scalarType(mod), .direct); const src_ty_id = try self.resolveType(src.ty.scalarType(mod), .direct); @@ -2419,7 +2425,7 @@ const DeclGen = struct { } fn buildSelect(self: *DeclGen, condition: Temporary, lhs: Temporary, rhs: Temporary) !Temporary { - const mod = self.module; + const mod = self.pt.zcu; const v = self.vectorization(.{ condition, lhs, rhs }); const ops = v.operations(); @@ -2764,7 +2770,8 @@ const DeclGen = struct { lhs: Temporary, rhs: Temporary, ) !struct { Temporary, Temporary } { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const target = self.getTarget(); const ip = &mod.intern_pool; @@ -2814,7 +2821,7 @@ const DeclGen = struct { // where T is maybe vectorized. const types = [2]InternPool.Index{ arith_op_ty.toIntern(), arith_op_ty.toIntern() }; const values = [2]InternPool.Index{ .none, .none }; - const index = try ip.getAnonStructType(mod.gpa, .{ + const index = try ip.getAnonStructType(mod.gpa, pt.tid, .{ .types = &types, .values = &values, .names = &.{}, @@ -2888,7 +2895,7 @@ const DeclGen = struct { /// the name of an error in the text executor. 
fn generateTestEntryPoint(self: *DeclGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void { const anyerror_ty_id = try self.resolveType(Type.anyerror, .direct); - const ptr_anyerror_ty = try self.module.ptrType(.{ + const ptr_anyerror_ty = try self.pt.ptrType(.{ .child = Type.anyerror.toIntern(), .flags = .{ .address_space = .global }, }); @@ -2940,7 +2947,8 @@ const DeclGen = struct { } fn genDecl(self: *DeclGen) !void { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const decl = mod.declPtr(self.decl_index); const spv_decl_index = try self.object.resolveDecl(mod, self.decl_index); @@ -2967,7 +2975,7 @@ const DeclGen = struct { try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len); for (fn_info.param_types.get(ip)) |param_ty_index| { const param_ty = Type.fromInterned(param_ty_index); - if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; const param_type_id = try self.resolveType(param_ty, .direct); const arg_result_id = self.spv.allocId(); @@ -3004,11 +3012,11 @@ const DeclGen = struct { // Append the actual code into the functions section. try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.fullyQualifiedName(self.module); + const fqn = try decl.fullyQualifiedName(self.pt.zcu); try self.spv.debugName(result_id, fqn.toSlice(ip)); // Temporarily generate a test kernel declaration if this is a test function. - if (self.module.test_functions.contains(self.decl_index)) { + if (self.pt.zcu.test_functions.contains(self.decl_index)) { try self.generateTestEntryPoint(fqn.toSlice(ip), spv_decl_index); } }, @@ -3033,7 +3041,7 @@ const DeclGen = struct { .storage_class = final_storage_class, }); - const fqn = try decl.fullyQualifiedName(self.module); + const fqn = try decl.fullyQualifiedName(self.pt.zcu); try self.spv.debugName(result_id, fqn.toSlice(ip)); try self.spv.declareDeclDeps(spv_decl_index, &.{}); }, @@ -3078,7 +3086,7 @@ const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {}); try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.fullyQualifiedName(self.module); + const fqn = try decl.fullyQualifiedName(self.pt.zcu); try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{fqn.fmt(ip)}); try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{ @@ -3119,7 +3127,7 @@ const DeclGen = struct { /// Convert representation from indirect (in memory) to direct (in 'register') /// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct). fn convertToDirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; switch (ty.scalarType(mod).zigTypeTag(mod)) { .Bool => { const false_id = try self.constBool(false, .indirect); @@ -3145,7 +3153,7 @@ const DeclGen = struct { /// Convert representation from direct (in 'register) to direct (in memory) /// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect). 
fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; switch (ty.scalarType(mod).zigTypeTag(mod)) { .Bool => { const result = try self.intFromBool(Temporary.init(ty, operand_id)); @@ -3222,7 +3230,7 @@ const DeclGen = struct { } fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void { - const mod = self.module; + const mod = self.pt.zcu; const ip = &mod.intern_pool; if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) return; @@ -3402,7 +3410,7 @@ const DeclGen = struct { } fn airShift(self: *DeclGen, inst: Air.Inst.Index, unsigned: BinaryOp, signed: BinaryOp) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const base = try self.temporary(bin_op.lhs); @@ -3480,7 +3488,7 @@ const DeclGen = struct { /// All other values are returned unmodified (this makes strange integer /// wrapping easier to use in generic operations). fn normalize(self: *DeclGen, value: Temporary, info: ArithmeticTypeInfo) !Temporary { - const mod = self.module; + const mod = self.pt.zcu; const ty = value.ty; switch (info.class) { .integer, .bool, .float => return value, @@ -3721,7 +3729,7 @@ const DeclGen = struct { fn airMulOverflow(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const target = self.getTarget(); - const mod = self.module; + const pt = self.pt; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3758,7 +3766,7 @@ const DeclGen = struct { const result, const overflowed = switch (info.signedness) { .unsigned => blk: { if (maybe_op_ty_bits) |op_ty_bits| { - const op_ty = try mod.intType(.unsigned, op_ty_bits); + const op_ty = try pt.intType(.unsigned, op_ty_bits); const casted_lhs = try self.buildIntConvert(op_ty, lhs); const casted_rhs = try self.buildIntConvert(op_ty, rhs); @@ -3828,7 +3836,7 @@ const DeclGen = struct { ); if (maybe_op_ty_bits) |op_ty_bits| { - const op_ty = try mod.intType(.signed, op_ty_bits); + const op_ty = try pt.intType(.signed, op_ty_bits); // Assume normalized; sign bit is set. We want a sign extend. 
const casted_lhs = try self.buildIntConvert(op_ty, lhs); const casted_rhs = try self.buildIntConvert(op_ty, rhs); @@ -3900,7 +3908,7 @@ const DeclGen = struct { } fn airShlOverflow(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3958,7 +3966,7 @@ const DeclGen = struct { fn airClzCtz(self: *DeclGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef { if (self.liveness.isUnused(inst)) return null; - const mod = self.module; + const mod = self.pt.zcu; const target = self.getTarget(); const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.temporary(ty_op.operand); @@ -4007,7 +4015,7 @@ const DeclGen = struct { } fn airReduce(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce; const operand = try self.resolve(reduce.operand); const operand_ty = self.typeOf(reduce.operand); @@ -4082,7 +4090,8 @@ const DeclGen = struct { } fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolve(extra.a); @@ -4108,14 +4117,14 @@ const DeclGen = struct { const a_len = a_ty.vectorLen(mod); for (components, 0..) |*component, i| { - const elem = try mask.elemValue(mod, i); + const elem = try mask.elemValue(pt, i); if (elem.isUndef(mod)) { // This is explicitly valid for OpVectorShuffle, it indicates undefined. component.* = 0xFFFF_FFFF; continue; } - const index = elem.toSignedInt(mod); + const index = elem.toSignedInt(pt); if (index >= 0) { component.* = @intCast(index); } else { @@ -4140,13 +4149,13 @@ const DeclGen = struct { defer self.gpa.free(components); for (components, 0..) 
|*id, i| { - const elem = try mask.elemValue(mod, i); + const elem = try mask.elemValue(pt, i); if (elem.isUndef(mod)) { id.* = try self.spv.constUndef(scalar_ty_id); continue; } - const index = elem.toSignedInt(mod); + const index = elem.toSignedInt(pt); if (index >= 0) { id.* = try self.extractVectorComponent(scalar_ty, a, @intCast(index)); } else { @@ -4220,7 +4229,7 @@ const DeclGen = struct { } fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; const result_ty_id = try self.resolveType(result_ty, .direct); switch (ptr_ty.ptrSize(mod)) { @@ -4276,7 +4285,8 @@ const DeclGen = struct { lhs: Temporary, rhs: Temporary, ) !Temporary { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const scalar_ty = lhs.ty.scalarType(mod); const is_vector = lhs.ty.isVector(mod); @@ -4324,7 +4334,7 @@ const DeclGen = struct { const payload_ty = ty.optionalChild(mod); if (ty.optionalReprIsPayload(mod)) { - assert(payload_ty.hasRuntimeBitsIgnoreComptime(mod)); + assert(payload_ty.hasRuntimeBitsIgnoreComptime(pt)); assert(!payload_ty.isSlice(mod)); return try self.cmp(op, lhs.pun(payload_ty), rhs.pun(payload_ty)); @@ -4333,12 +4343,12 @@ const DeclGen = struct { const lhs_id = try lhs.materialize(self); const rhs_id = try rhs.materialize(self); - const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) + const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) try self.extractField(Type.bool, lhs_id, 1) else try self.convertToDirect(Type.bool, lhs_id); - const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) + const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) try self.extractField(Type.bool, rhs_id, 1) else try self.convertToDirect(Type.bool, rhs_id); @@ -4346,7 +4356,7 @@ const DeclGen = struct { const lhs_valid = Temporary.init(Type.bool, lhs_valid_id); const rhs_valid = Temporary.init(Type.bool, rhs_valid_id); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return try self.cmp(op, lhs_valid, rhs_valid); } @@ -4466,7 +4476,7 @@ const DeclGen = struct { src_ty: Type, src_id: IdRef, ) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; const src_ty_id = try self.resolveType(src_ty, .direct); const dst_ty_id = try self.resolveType(dst_ty, .direct); @@ -4675,7 +4685,8 @@ const DeclGen = struct { } fn airArrayToSlice(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const array_ptr_ty = self.typeOf(ty_op.operand); const array_ty = array_ptr_ty.childType(mod); @@ -4687,7 +4698,7 @@ const DeclGen = struct { const array_ptr_id = try self.resolve(ty_op.operand); const len_id = try self.constInt(Type.usize, array_ty.arrayLen(mod), .direct); - const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) + const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(pt)) // Note: The pointer is something like *opaque{}, so we need to bitcast it to the element type. 
try self.bitCast(elem_ptr_ty, array_ptr_ty, array_ptr_id) else @@ -4719,7 +4730,8 @@ const DeclGen = struct { } fn airAggregateInit(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const result_ty = self.typeOfIndex(inst); @@ -4742,8 +4754,8 @@ const DeclGen = struct { switch (ip.indexToKey(result_ty.toIntern())) { .anon_struct_type => |tuple| { for (tuple.types.get(ip), elements, 0..) |field_ty, element, i| { - if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; - assert(Type.fromInterned(field_ty).hasRuntimeBits(mod)); + if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; + assert(Type.fromInterned(field_ty).hasRuntimeBits(pt)); const id = try self.resolve(element); types[index] = Type.fromInterned(field_ty); @@ -4756,9 +4768,9 @@ const DeclGen = struct { var it = struct_type.iterateRuntimeOrder(ip); for (elements, 0..) |element, i| { const field_index = it.next().?; - if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; + if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); - assert(field_ty.hasRuntimeBitsIgnoreComptime(mod)); + assert(field_ty.hasRuntimeBitsIgnoreComptime(pt)); const id = try self.resolve(element); types[index] = field_ty; @@ -4808,13 +4820,14 @@ const DeclGen = struct { } fn sliceOrArrayLen(self: *DeclGen, operand_id: IdRef, ty: Type) !IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; switch (ty.ptrSize(mod)) { .Slice => return self.extractField(Type.usize, operand_id, 1), .One => { const array_ty = ty.childType(mod); const elem_ty = array_ty.childType(mod); - const abi_size = elem_ty.abiSize(mod); + const abi_size = elem_ty.abiSize(pt); const size = array_ty.arrayLenIncludingSentinel(mod) * abi_size; return try self.constInt(Type.usize, size, .direct); }, @@ -4823,7 +4836,7 @@ const DeclGen = struct { } fn sliceOrArrayPtr(self: *DeclGen, operand_id: IdRef, ty: Type) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; if (ty.isSlice(mod)) { const ptr_ty = ty.slicePtrFieldType(mod); return self.extractField(ptr_ty, operand_id, 0); @@ -4855,7 +4868,7 @@ const DeclGen = struct { } fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const slice_ty = self.typeOf(bin_op.lhs); @@ -4872,7 +4885,7 @@ const DeclGen = struct { } fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const slice_ty = self.typeOf(bin_op.lhs); if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; @@ -4889,7 +4902,7 @@ const DeclGen = struct { } fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef { - const mod = self.module; + const mod = self.pt.zcu; // Construct new pointer type for the resulting pointer const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. 
const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(mod))); @@ -4904,14 +4917,15 @@ const DeclGen = struct { } fn airPtrElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const src_ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = src_ptr_ty.childType(mod); const ptr_id = try self.resolve(bin_op.lhs); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) { const dst_ptr_ty = self.typeOfIndex(inst); return try self.bitCast(dst_ptr_ty, src_ptr_ty, ptr_id); } @@ -4921,7 +4935,7 @@ const DeclGen = struct { } fn airArrayElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const array_ty = self.typeOf(bin_op.lhs); const elem_ty = array_ty.childType(mod); @@ -4982,7 +4996,7 @@ const DeclGen = struct { } fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = self.typeOfIndex(inst); @@ -4993,7 +5007,7 @@ const DeclGen = struct { } fn airVectorStoreElem(self: *DeclGen, inst: Air.Inst.Index) !void { - const mod = self.module; + const mod = self.pt.zcu; const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem; const extra = self.air.extraData(Air.Bin, data.payload).data; @@ -5015,7 +5029,7 @@ const DeclGen = struct { } fn airSetUnionTag(self: *DeclGen, inst: Air.Inst.Index) !void { - const mod = self.module; + const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const un_ptr_ty = self.typeOf(bin_op.lhs); const un_ty = un_ptr_ty.childType(mod); @@ -5041,7 +5055,7 @@ const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const un_ty = self.typeOf(ty_op.operand); - const mod = self.module; + const mod = self.pt.zcu; const layout = self.unionLayout(un_ty); if (layout.tag_size == 0) return null; @@ -5064,7 +5078,8 @@ const DeclGen = struct { // Note: The result here is not cached, because it generates runtime code. 
- const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const union_ty = mod.typeToUnion(ty).?; const tag_ty = Type.fromInterned(union_ty.enum_tag_ty); @@ -5076,9 +5091,9 @@ const DeclGen = struct { const layout = self.unionLayout(ty); const tag_int = if (layout.tag_size != 0) blk: { - const tag_val = try mod.enumValueFieldIndex(tag_ty, active_field); - const tag_int_val = try tag_val.intFromEnum(tag_ty, mod); - break :blk tag_int_val.toUnsignedInt(mod); + const tag_val = try pt.enumValueFieldIndex(tag_ty, active_field); + const tag_int_val = try tag_val.intFromEnum(tag_ty, pt); + break :blk tag_int_val.toUnsignedInt(pt); } else 0; if (!layout.has_payload) { @@ -5095,7 +5110,7 @@ const DeclGen = struct { } const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]); - if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function); const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index}); const active_pl_ptr_ty_id = try self.ptrType(payload_ty, .Function); @@ -5118,7 +5133,8 @@ const DeclGen = struct { } fn airUnionInit(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ip = &mod.intern_pool; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; @@ -5126,7 +5142,7 @@ const DeclGen = struct { const union_obj = mod.typeToUnion(ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]); - const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod)) + const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(pt)) try self.resolve(extra.init) else null; @@ -5134,7 +5150,8 @@ const DeclGen = struct { } fn airStructFieldVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; @@ -5143,7 +5160,7 @@ const DeclGen = struct { const field_index = struct_field.field_index; const field_ty = object_ty.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; + if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return null; switch (object_ty.zigTypeTag(mod)) { .Struct => switch (object_ty.containerLayout(mod)) { @@ -5178,7 +5195,8 @@ const DeclGen = struct { } fn airFieldParentPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; @@ -5187,7 +5205,7 @@ const DeclGen = struct { const field_ptr = try self.resolve(extra.field_ptr); const field_ptr_int = try self.intFromPtr(field_ptr); - const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); + const field_offset = parent_ty.structFieldOffset(extra.field_index, pt); const base_ptr_int = base_ptr_int: { if (field_offset == 0) break :base_ptr_int field_ptr_int; @@ -5218,7 +5236,7 @@ const DeclGen = struct { ) !IdRef { const result_ty_id = try self.resolveType(result_ptr_ty, .direct); - const zcu = self.module; + const zcu = self.pt.zcu; const object_ty = 
object_ptr_ty.childType(zcu); switch (object_ty.zigTypeTag(zcu)) { .Pointer => { @@ -5312,7 +5330,7 @@ const DeclGen = struct { } fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ptr_ty = self.typeOfIndex(inst); assert(ptr_ty.ptrAddressSpace(mod) == .generic); const child_ty = ptr_ty.childType(mod); @@ -5486,9 +5504,10 @@ const DeclGen = struct { // of the block, then a label, and then generate the rest of the current // ir.Block in a different SPIR-V block. - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty = self.typeOfIndex(inst); - const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(mod); + const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(pt); const cf = switch (self.control_flow) { .structured => |*cf| cf, @@ -5618,13 +5637,13 @@ const DeclGen = struct { } fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void { - const mod = self.module; + const pt = self.pt; const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br; const operand_ty = self.typeOf(br.operand); switch (self.control_flow) { .structured => |*cf| { - if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { const operand_id = try self.resolve(br.operand); const block_result_var_id = cf.block_results.get(br.block_inst).?; try self.store(operand_ty, block_result_var_id, operand_id, .{}); @@ -5635,7 +5654,7 @@ const DeclGen = struct { }, .unstructured => |cf| { const block = cf.blocks.get(br.block_inst).?; - if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { + if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { const operand_id = try self.resolve(br.operand); // current_block_label should not be undefined here, lest there // is a br or br_void in the function's body. 
@@ -5762,7 +5781,7 @@ const DeclGen = struct { } fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ptr_ty = self.typeOf(ty_op.operand); const elem_ty = self.typeOfIndex(inst); @@ -5773,20 +5792,22 @@ const DeclGen = struct { } fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(self.module); + const elem_ty = ptr_ty.childType(mod); const ptr = try self.resolve(bin_op.lhs); const value = try self.resolve(bin_op.rhs); - try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(self.module) }); + try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) }); } fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { + const pt = self.pt; + const mod = pt.zcu; const operand = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ret_ty = self.typeOf(operand); - const mod = self.module; - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { const decl = mod.declPtr(self.decl_index); const fn_info = mod.typeToFunc(decl.typeOf(mod)).?; if (Type.fromInterned(fn_info.return_type).isError(mod)) { @@ -5805,12 +5826,13 @@ const DeclGen = struct { } fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(mod); - if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { const decl = mod.declPtr(self.decl_index); const fn_info = mod.typeToFunc(decl.typeOf(mod)).?; if (Type.fromInterned(fn_info.return_type).isError(mod)) { @@ -5832,7 +5854,7 @@ const DeclGen = struct { } fn airTry(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const err_union_id = try self.resolve(pl_op.operand); const extra = self.air.extraData(Air.Try, pl_op.payload); @@ -5902,7 +5924,7 @@ const DeclGen = struct { } fn airErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const err_union_ty = self.typeOf(ty_op.operand); @@ -5938,7 +5960,7 @@ const DeclGen = struct { } fn airWrapErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_union_ty = self.typeOfIndex(inst); const payload_ty = err_union_ty.errorUnionPayload(mod); @@ -5985,7 +6007,8 @@ const DeclGen = struct { } fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, is_pointer: bool, pred: enum { is_null, is_non_null }) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand_id = try self.resolve(un_op); const operand_ty = self.typeOf(un_op); @@ -6026,7 +6049,7 @@ const DeclGen = struct { const is_non_null_id = blk: { if (is_pointer) { - 
if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { const storage_class = self.spvStorageClass(operand_ty.ptrAddressSpace(mod)); const bool_ptr_ty_id = try self.ptrType(Type.bool, storage_class); const tag_ptr_id = try self.accessChain(bool_ptr_ty_id, operand_id, &.{1}); @@ -6036,7 +6059,7 @@ const DeclGen = struct { break :blk try self.load(Type.bool, operand_id, .{}); } - break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) + break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) try self.extractField(Type.bool, operand_id, 1) else // Optional representation is bool indicating whether the optional is set @@ -6061,7 +6084,7 @@ const DeclGen = struct { } fn airIsErr(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_err, is_non_err }) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand_id = try self.resolve(un_op); const err_union_ty = self.typeOf(un_op); @@ -6094,13 +6117,14 @@ const DeclGen = struct { } fn airUnwrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand); const payload_ty = self.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return null; if (optional_ty.optionalReprIsPayload(mod)) { return operand_id; @@ -6110,7 +6134,8 @@ const DeclGen = struct { } fn airUnwrapOptionalPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -6119,7 +6144,7 @@ const DeclGen = struct { const result_ty = self.typeOfIndex(inst); const result_ty_id = try self.resolveType(result_ty, .direct); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { // There is no payload, but we still need to return a valid pointer. // We can just return anything here, so just return a pointer to the operand. 
return try self.bitCast(result_ty, operand_ty, operand_id); @@ -6134,11 +6159,12 @@ const DeclGen = struct { } fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const payload_ty = self.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) { return try self.constBool(true, .indirect); } @@ -6156,7 +6182,8 @@ const DeclGen = struct { } fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void { - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const target = self.getTarget(); const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const cond_ty = self.typeOf(pl_op.operand); @@ -6240,15 +6267,15 @@ const DeclGen = struct { const label = case_labels.at(case_i); for (items) |item| { - const value = (try self.air.value(item, mod)) orelse unreachable; + const value = (try self.air.value(item, pt)) orelse unreachable; const int_val: u64 = switch (cond_ty.zigTypeTag(mod)) { - .Bool, .Int => if (cond_ty.isSignedInt(mod)) @bitCast(value.toSignedInt(mod)) else value.toUnsignedInt(mod), + .Bool, .Int => if (cond_ty.isSignedInt(mod)) @bitCast(value.toSignedInt(pt)) else value.toUnsignedInt(pt), .Enum => blk: { // TODO: figure out of cond_ty is correct (something with enum literals) - break :blk (try value.intFromEnum(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants + break :blk (try value.intFromEnum(cond_ty, pt)).toUnsignedInt(pt); // TODO: composite integer constants }, .ErrorSet => value.getErrorInt(mod), - .Pointer => value.toUnsignedInt(mod), + .Pointer => value.toUnsignedInt(pt), else => unreachable, }; const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) { @@ -6328,8 +6355,9 @@ const DeclGen = struct { } fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { + const pt = self.pt; + const mod = pt.zcu; const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; - const mod = self.module; const decl = mod.declPtr(self.decl_index); const path = decl.getFileScope(mod).sub_file_path; try self.func.body.emit(self.spv.gpa, .OpLine, .{ @@ -6340,7 +6368,7 @@ const DeclGen = struct { } fn airDbgInlineBlock(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const inst_datas = self.air.instructions.items(.data); const extra = self.air.extraData(Air.DbgInlineBlock, inst_datas[@intFromEnum(inst)].ty_pl.payload); const decl = mod.funcOwnerDeclPtr(extra.data.func); @@ -6358,7 +6386,7 @@ const DeclGen = struct { } fn airAssembly(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { - const mod = self.module; + const mod = self.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); @@ -6440,20 +6468,20 @@ const DeclGen = struct { // TODO: Translate proper error locations. 
assert(as.errors.items.len != 0); assert(self.error_msg == null); - const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod); - self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); - const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len); + const src_loc = mod.declPtr(self.decl_index).navSrcLoc(mod); + self.error_msg = try Zcu.ErrorMsg.create(mod.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); + const notes = try mod.gpa.alloc(Zcu.ErrorMsg, as.errors.items.len); // Sub-scope to prevent `return error.CodegenFail` from running the errdefers. { - errdefer self.module.gpa.free(notes); + errdefer mod.gpa.free(notes); var i: usize = 0; errdefer for (notes[0..i]) |*note| { - note.deinit(self.module.gpa); + note.deinit(mod.gpa); }; while (i < as.errors.items.len) : (i += 1) { - notes[i] = try Module.ErrorMsg.init(self.module.gpa, src_loc, "{s}", .{as.errors.items[i].msg}); + notes[i] = try Zcu.ErrorMsg.init(mod.gpa, src_loc, "{s}", .{as.errors.items[i].msg}); } } self.error_msg.?.notes = notes; @@ -6489,7 +6517,8 @@ const DeclGen = struct { fn airCall(self: *DeclGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef { _ = modifier; - const mod = self.module; + const pt = self.pt; + const mod = pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); @@ -6515,7 +6544,7 @@ const DeclGen = struct { // before starting to emit OpFunctionCall instructions. Hence the // temporary params buffer. const arg_ty = self.typeOf(arg); - if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!arg_ty.hasRuntimeBitsIgnoreComptime(pt)) continue; const arg_id = try self.resolve(arg); params[n_params] = arg_id; @@ -6533,7 +6562,7 @@ const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpUnreachable, {}); } - if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(mod)) { + if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(pt)) { return null; } @@ -6541,11 +6570,10 @@ const DeclGen = struct { } fn builtin3D(self: *DeclGen, result_ty: Type, builtin: spec.BuiltIn, dimension: u32, out_of_range_value: anytype) !IdRef { - const mod = self.module; if (dimension >= 3) { return try self.constInt(result_ty, out_of_range_value, .direct); } - const vec_ty = try mod.vectorType(.{ + const vec_ty = try self.pt.vectorType(.{ .len = 3, .child = result_ty.toIntern(), }); @@ -6591,12 +6619,12 @@ const DeclGen = struct { } fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type { - const mod = self.module; + const mod = self.pt.zcu; return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type { - const mod = self.module; + const mod = self.pt.zcu; return self.air.typeOfIndex(inst, &mod.intern_pool); } }; |
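Taken together, the hunks in this file (and in c.zig and llvm.zig above) apply one mechanical recipe: functions stop receiving `*Zcu` (the old `*Module`) and instead receive or load a `Zcu.PerThread`, reach the shared `Zcu` through `pt.zcu`, and route layout and value queries (`abiSize`, `abiAlignment`, `hasRuntimeBits*`, `toSignedInt`, `intValue`, ...) through `pt`, while purely semantic queries (`zigTypeTag`, `childType`, `getTarget`, InternPool access) keep taking the `Zcu`. A hedged before/after sketch of that recipe; `lowerHelper` is a made-up name, not a function from the commit:

    // Before (single shared Zcu handle):
    // fn lowerHelper(dg: *DeclGen, ty: Type) !void {
    //     const zcu = dg.zcu;
    //     if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
    //     _ = ty.abiAlignment(zcu);
    // }

    // After (per-thread handle threaded through):
    fn lowerHelper(dg: *DeclGen, ty: Type) !void {
        const pt = dg.pt; // Zcu.PerThread carried on the codegen state
        const zcu = pt.zcu; // the shared Zcu is still reachable when needed
        if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return; // layout query: takes pt
        if (ty.zigTypeTag(zcu) == .Void) return; // semantic query: keeps zcu
        _ = ty.abiAlignment(pt);
    }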
