Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/c.zig        237
-rw-r--r--  src/codegen/c/Type.zig    25
-rw-r--r--  src/codegen/llvm.zig    1671
-rw-r--r--  src/codegen/spirv.zig    667
4 files changed, 1308 insertions, 1292 deletions
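The hunks below follow one mechanical pattern: pure type-query helpers such as abiAlignment, abiSize, bitSize, hasRuntimeBits*, and unionGetLayout now take the `*Zcu` (`zcu`, typically hoisted once via `pt.zcu`) instead of the `Zcu.PerThread` handle (`pt`), while calls that intern new values (e.g. `pt.intValue`, `pt.intType`) keep taking `pt`. A minimal sketch of the call-site migration, with hypothetical surrounding code (function and variable names are illustrative; the imports and types are assumed compiler internals):

```zig
const Zcu = @import("../Zcu.zig"); // assumed compiler-internal import
const Type = @import("../Type.zig"); // assumed compiler-internal import

// Hypothetical helper showing the call-site pattern applied throughout this diff.
fn exampleQueries(pt: Zcu.PerThread, ty: Type) !void {
    // Hoist the Zcu once per function, as the updated hunks do.
    const zcu = pt.zcu;

    // before: ty.abiAlignment(pt)   after: ty.abiAlignment(zcu)
    const abi_align = ty.abiAlignment(zcu);
    _ = abi_align;

    // before: ty.hasRuntimeBitsIgnoreComptime(pt)
    if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return;

    // Interning operations are unchanged and still go through `pt`;
    // only the size/alignment query switches to `zcu`.
    const as_usize = try pt.intValue(Type.usize, ty.bitSize(zcu));
    _ = as_usize;
}
```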
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 397cb071b6..d188435c3e 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -334,7 +334,7 @@ pub const Function = struct {
const writer = f.object.codeHeaderWriter();
const decl_c_value = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(ty, .complete),
- .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt)),
+ .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt.zcu)),
});
const gpa = f.object.dg.gpa;
try f.allocs.put(gpa, decl_c_value.new_local, false);
@@ -372,7 +372,7 @@ pub const Function = struct {
fn allocLocal(f: *Function, inst: ?Air.Inst.Index, ty: Type) !CValue {
return f.allocAlignedLocal(inst, .{
.ctype = try f.ctypeFromType(ty, .complete),
- .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt)),
+ .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt.zcu)),
});
}
@@ -648,7 +648,7 @@ pub const DeclGen = struct {
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
const ptr_ty = Type.fromInterned(uav.orig_ty);
- if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(pt)) {
+ if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(zcu)) {
return dg.writeCValue(writer, .{ .undef = ptr_ty });
}
@@ -688,7 +688,7 @@ pub const DeclGen = struct {
// alignment. If there is already an entry, keep the greater alignment.
const explicit_alignment = ptr_type.flags.alignment;
if (explicit_alignment != .none) {
- const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(pt);
+ const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(zcu);
if (explicit_alignment.order(abi_alignment).compare(.gt)) {
const aligned_gop = try dg.aligned_uavs.getOrPut(dg.gpa, uav.val);
aligned_gop.value_ptr.* = if (aligned_gop.found_existing)
@@ -722,7 +722,7 @@ pub const DeclGen = struct {
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
const nav_ty = Type.fromInterned(ip.getNav(owner_nav).typeOf(ip));
const ptr_ty = try pt.navPtrType(owner_nav);
- if (!nav_ty.isFnOrHasRuntimeBits(pt)) {
+ if (!nav_ty.isFnOrHasRuntimeBits(zcu)) {
return dg.writeCValue(writer, .{ .undef = ptr_ty });
}
@@ -805,7 +805,7 @@ pub const DeclGen = struct {
}
},
- .elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(pt)) {
+ .elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(zcu)) {
// Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer.
const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete);
try writer.writeByte('(');
@@ -923,7 +923,7 @@ pub const DeclGen = struct {
try writer.writeAll("((");
try dg.renderCType(writer, ctype);
try writer.print("){x})", .{try dg.fmtIntLiteral(
- try pt.intValue(Type.usize, val.toUnsignedInt(pt)),
+ try pt.intValue(Type.usize, val.toUnsignedInt(zcu)),
.Other,
)});
},
@@ -970,7 +970,7 @@ pub const DeclGen = struct {
.enum_tag => |enum_tag| try dg.renderValue(writer, Value.fromInterned(enum_tag.int), location),
.float => {
const bits = ty.floatBits(target.*);
- const f128_val = val.toFloat(f128, pt);
+ const f128_val = val.toFloat(f128, zcu);
// All unsigned ints matching float types are pre-allocated.
const repr_ty = pt.intType(.unsigned, bits) catch unreachable;
@@ -984,10 +984,10 @@ pub const DeclGen = struct {
};
switch (bits) {
- 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, pt)))),
- 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, pt)))),
- 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, pt)))),
- 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, pt)))),
+ 16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, zcu)))),
+ 32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, zcu)))),
+ 64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, zcu)))),
+ 80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, zcu)))),
128 => repr_val_big.set(@as(u128, @bitCast(f128_val))),
else => unreachable,
}
@@ -998,10 +998,10 @@ pub const DeclGen = struct {
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
switch (bits) {
- 16 => try writer.print("{x}", .{val.toFloat(f16, pt)}),
- 32 => try writer.print("{x}", .{val.toFloat(f32, pt)}),
- 64 => try writer.print("{x}", .{val.toFloat(f64, pt)}),
- 80 => try writer.print("{x}", .{val.toFloat(f80, pt)}),
+ 16 => try writer.print("{x}", .{val.toFloat(f16, zcu)}),
+ 32 => try writer.print("{x}", .{val.toFloat(f32, zcu)}),
+ 64 => try writer.print("{x}", .{val.toFloat(f64, zcu)}),
+ 80 => try writer.print("{x}", .{val.toFloat(f80, zcu)}),
128 => try writer.print("{x}", .{f128_val}),
else => unreachable,
}
@@ -1041,10 +1041,10 @@ pub const DeclGen = struct {
if (std.math.isNan(f128_val)) switch (bits) {
// We only actually need to pass the significand, but it will get
// properly masked anyway, so just pass the whole value.
- 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, pt)))}),
- 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, pt)))}),
- 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, pt)))}),
- 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, pt)))}),
+ 16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, zcu)))}),
+ 32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, zcu)))}),
+ 64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, zcu)))}),
+ 80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, zcu)))}),
128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}),
else => unreachable,
};
@@ -1167,11 +1167,11 @@ pub const DeclGen = struct {
const elem_val_u8: u8 = if (elem_val.isUndef(zcu))
undefPattern(u8)
else
- @intCast(elem_val.toUnsignedInt(pt));
+ @intCast(elem_val.toUnsignedInt(zcu));
try literal.writeChar(elem_val_u8);
}
if (ai.sentinel) |s| {
- const s_u8: u8 = @intCast(s.toUnsignedInt(pt));
+ const s_u8: u8 = @intCast(s.toUnsignedInt(zcu));
if (s_u8 != 0) try literal.writeChar(s_u8);
}
try literal.end();
@@ -1203,7 +1203,7 @@ pub const DeclGen = struct {
const comptime_val = tuple.values.get(ip)[field_index];
if (comptime_val != .none) continue;
const field_ty = Type.fromInterned(tuple.types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (!empty) try writer.writeByte(',');
@@ -1238,7 +1238,7 @@ pub const DeclGen = struct {
var need_comma = false;
while (field_it.next()) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (need_comma) try writer.writeByte(',');
need_comma = true;
@@ -1265,7 +1265,7 @@ pub const DeclGen = struct {
for (0..loaded_struct.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
eff_num_fields += 1;
}
@@ -1273,7 +1273,7 @@ pub const DeclGen = struct {
try writer.writeByte('(');
try dg.renderUndefValue(writer, ty, location);
try writer.writeByte(')');
- } else if (ty.bitSize(pt) > 64) {
+ } else if (ty.bitSize(zcu) > 64) {
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
var num_or = eff_num_fields - 1;
while (num_or > 0) : (num_or -= 1) {
@@ -1286,7 +1286,7 @@ pub const DeclGen = struct {
var needs_closing_paren = false;
for (0..loaded_struct.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| try pt.intern(.{ .int = .{
@@ -1312,7 +1312,7 @@ pub const DeclGen = struct {
if (needs_closing_paren) try writer.writeByte(')');
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
- bit_offset += field_ty.bitSize(pt);
+ bit_offset += field_ty.bitSize(zcu);
needs_closing_paren = true;
eff_index += 1;
}
@@ -1322,7 +1322,7 @@ pub const DeclGen = struct {
var empty = true;
for (0..loaded_struct.field_types.len) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (!empty) try writer.writeAll(" | ");
try writer.writeByte('(');
@@ -1346,7 +1346,7 @@ pub const DeclGen = struct {
try dg.renderValue(writer, Value.fromInterned(field_val), .Other);
}
- bit_offset += field_ty.bitSize(pt);
+ bit_offset += field_ty.bitSize(zcu);
empty = false;
}
try writer.writeByte(')');
@@ -1396,7 +1396,7 @@ pub const DeclGen = struct {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
if (loaded_union.flagsUnordered(ip).layout == .@"packed") {
- if (field_ty.hasRuntimeBits(pt)) {
+ if (field_ty.hasRuntimeBits(zcu)) {
if (field_ty.isPtrAtRuntime(zcu)) {
try writer.writeByte('(');
try dg.renderCType(writer, ctype);
@@ -1427,7 +1427,7 @@ pub const DeclGen = struct {
),
.payload => {
try writer.writeByte('{');
- if (field_ty.hasRuntimeBits(pt)) {
+ if (field_ty.hasRuntimeBits(zcu)) {
try writer.print(" .{ } = ", .{fmtIdent(field_name.toSlice(ip))});
try dg.renderValue(
writer,
@@ -1439,7 +1439,7 @@ pub const DeclGen = struct {
const inner_field_ty = Type.fromInterned(
loaded_union.field_types.get(ip)[inner_field_index],
);
- if (!inner_field_ty.hasRuntimeBits(pt)) continue;
+ if (!inner_field_ty.hasRuntimeBits(zcu)) continue;
try dg.renderUndefValue(writer, inner_field_ty, initializer_type);
break;
}
@@ -1588,7 +1588,7 @@ pub const DeclGen = struct {
var need_comma = false;
while (field_it.next()) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (need_comma) try writer.writeByte(',');
need_comma = true;
@@ -1613,7 +1613,7 @@ pub const DeclGen = struct {
for (0..anon_struct_info.types.len) |field_index| {
if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (need_comma) try writer.writeByte(',');
need_comma = true;
@@ -1651,7 +1651,7 @@ pub const DeclGen = struct {
const inner_field_ty = Type.fromInterned(
loaded_union.field_types.get(ip)[inner_field_index],
);
- if (!inner_field_ty.hasRuntimeBits(pt)) continue;
+ if (!inner_field_ty.hasRuntimeBits(pt.zcu)) continue;
try dg.renderUndefValue(
writer,
inner_field_ty,
@@ -1902,7 +1902,8 @@ pub const DeclGen = struct {
};
fn intCastIsNoop(dg: *DeclGen, dest_ty: Type, src_ty: Type) bool {
const pt = dg.pt;
- const dest_bits = dest_ty.bitSize(pt);
+ const zcu = pt.zcu;
+ const dest_bits = dest_ty.bitSize(zcu);
const dest_int_info = dest_ty.intInfo(pt.zcu);
const src_is_ptr = src_ty.isPtrAtRuntime(pt.zcu);
@@ -1911,7 +1912,7 @@ pub const DeclGen = struct {
.signed => Type.isize,
} else src_ty;
- const src_bits = src_eff_ty.bitSize(pt);
+ const src_bits = src_eff_ty.bitSize(zcu);
const src_int_info = if (src_eff_ty.isAbiInt(pt.zcu)) src_eff_ty.intInfo(pt.zcu) else null;
if (dest_bits <= 64 and src_bits <= 64) {
const needs_cast = src_int_info == null or
@@ -1943,7 +1944,7 @@ pub const DeclGen = struct {
) !void {
const pt = dg.pt;
const zcu = pt.zcu;
- const dest_bits = dest_ty.bitSize(pt);
+ const dest_bits = dest_ty.bitSize(zcu);
const dest_int_info = dest_ty.intInfo(zcu);
const src_is_ptr = src_ty.isPtrAtRuntime(zcu);
@@ -1952,7 +1953,7 @@ pub const DeclGen = struct {
.signed => Type.isize,
} else src_ty;
- const src_bits = src_eff_ty.bitSize(pt);
+ const src_bits = src_eff_ty.bitSize(zcu);
const src_int_info = if (src_eff_ty.isAbiInt(zcu)) src_eff_ty.intInfo(zcu) else null;
if (dest_bits <= 64 and src_bits <= 64) {
const needs_cast = src_int_info == null or
@@ -2033,7 +2034,7 @@ pub const DeclGen = struct {
qualifiers,
CType.AlignAs.fromAlignment(.{
.@"align" = alignment,
- .abi = ty.abiAlignment(dg.pt),
+ .abi = ty.abiAlignment(dg.pt.zcu),
}),
);
}
@@ -2239,9 +2240,10 @@ pub const DeclGen = struct {
}
const pt = dg.pt;
- const int_info = if (ty.isAbiInt(pt.zcu)) ty.intInfo(pt.zcu) else std.builtin.Type.Int{
+ const zcu = pt.zcu;
+ const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{
.signedness = .unsigned,
- .bits = @as(u16, @intCast(ty.bitSize(pt))),
+ .bits = @as(u16, @intCast(ty.bitSize(zcu))),
};
if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
@@ -2891,7 +2893,7 @@ pub fn genDecl(o: *Object) !void {
const nav = ip.getNav(o.dg.pass.nav);
const nav_ty = Type.fromInterned(nav.typeOf(ip));
- if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return;
+ if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return;
switch (ip.indexToKey(nav.status.resolved.val)) {
.@"extern" => |@"extern"| {
if (!ip.isFunctionType(nav_ty.toIntern())) return o.dg.renderFwdDecl(o.dg.pass.nav, .{
@@ -3420,10 +3422,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [
}
fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const pt = f.object.dg.pt;
+ const zcu = f.object.dg.pt.zcu;
const inst_ty = f.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3453,7 +3455,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst);
const ptr_ty = f.typeOf(bin_op.lhs);
- const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(pt);
+ const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu);
const ptr = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs);
@@ -3482,10 +3484,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const pt = f.object.dg.pt;
+ const zcu = f.object.dg.pt.zcu;
const inst_ty = f.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3516,7 +3518,7 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst);
const slice_ty = f.typeOf(bin_op.lhs);
const elem_ty = slice_ty.elemType2(zcu);
- const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(pt);
+ const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(zcu);
const slice = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs);
@@ -3539,10 +3541,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const pt = f.object.dg.pt;
+ const zcu = f.object.dg.pt.zcu;
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const inst_ty = f.typeOfIndex(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3569,13 +3571,13 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
const zcu = pt.zcu;
const inst_ty = f.typeOfIndex(inst);
const elem_ty = inst_ty.childType(zcu);
- if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty };
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
const local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(elem_ty, .complete),
.alignas = CType.AlignAs.fromAlignment(.{
.@"align" = inst_ty.ptrInfo(zcu).flags.alignment,
- .abi = elem_ty.abiAlignment(pt),
+ .abi = elem_ty.abiAlignment(zcu),
}),
});
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
@@ -3588,13 +3590,13 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const zcu = pt.zcu;
const inst_ty = f.typeOfIndex(inst);
const elem_ty = inst_ty.childType(zcu);
- if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .{ .undef = inst_ty };
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
const local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(elem_ty, .complete),
.alignas = CType.AlignAs.fromAlignment(.{
.@"align" = inst_ty.ptrInfo(zcu).flags.alignment,
- .abi = elem_ty.abiAlignment(pt),
+ .abi = elem_ty.abiAlignment(zcu),
}),
});
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
@@ -3636,7 +3638,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ptr_info = ptr_scalar_ty.ptrInfo(zcu);
const src_ty = Type.fromInterned(ptr_info.child);
- if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{ty_op.operand});
return .none;
}
@@ -3646,7 +3648,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
const is_aligned = if (ptr_info.flags.alignment != .none)
- ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte)
+ ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
else
true;
const is_array = lowersToArray(src_ty, pt);
@@ -3674,7 +3676,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
- const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(pt))));
+ const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(zcu))));
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
@@ -3685,9 +3687,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("((");
try f.renderType(writer, field_ty);
try writer.writeByte(')');
- const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64;
+ const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
if (cant_cast) {
- if (field_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (field_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
@@ -3735,7 +3737,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
const ret_val = if (is_array) ret_val: {
const array_local = try f.allocAlignedLocal(inst, .{
.ctype = ret_ctype,
- .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)),
+ .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)),
});
try writer.writeAll("memcpy(");
try f.writeCValueMember(writer, array_local, .{ .identifier = "array" });
@@ -3926,7 +3928,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
const is_aligned = if (ptr_info.flags.alignment != .none)
- ptr_info.flags.alignment.order(src_ty.abiAlignment(pt)).compare(.gte)
+ ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
else
true;
const is_array = lowersToArray(Type.fromInterned(ptr_info.child), pt);
@@ -3976,7 +3978,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
- const src_bits = src_ty.bitSize(pt);
+ const src_bits = src_ty.bitSize(zcu);
const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb;
var stack align(@alignOf(ExpectedContents)) =
@@ -4006,9 +4008,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(mask_val)});
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
- const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(pt) > 64;
+ const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
if (cant_cast) {
- if (src_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (src_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_make_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeAll("(0, ");
@@ -4130,7 +4132,7 @@ fn airBinOp(
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const operand_ty = f.typeOf(bin_op.lhs);
const scalar_ty = operand_ty.scalarType(zcu);
- if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(pt) > 64) or scalar_ty.isRuntimeFloat())
+ if ((scalar_ty.isInt(zcu) and scalar_ty.bitSize(zcu) > 64) or scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, info);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -4169,7 +4171,7 @@ fn airCmpOp(
const lhs_ty = f.typeOf(data.lhs);
const scalar_ty = lhs_ty.scalarType(zcu);
- const scalar_bits = scalar_ty.bitSize(pt);
+ const scalar_bits = scalar_ty.bitSize(zcu);
if (scalar_ty.isInt(zcu) and scalar_bits > 64)
return airCmpBuiltinCall(
f,
@@ -4219,7 +4221,7 @@ fn airEquality(
const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const operand_ty = f.typeOf(bin_op.lhs);
- const operand_bits = operand_ty.bitSize(pt);
+ const operand_bits = operand_ty.bitSize(zcu);
if (operand_ty.isAbiInt(zcu) and operand_bits > 64)
return airCmpBuiltinCall(
f,
@@ -4312,7 +4314,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
const inst_ty = f.typeOfIndex(inst);
const inst_scalar_ty = inst_ty.scalarType(zcu);
const elem_ty = inst_scalar_ty.elemType2(zcu);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return f.moveCValue(inst, inst_ty, lhs);
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return f.moveCValue(inst, inst_ty, lhs);
const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
const local = try f.allocLocal(inst, inst_ty);
@@ -4351,7 +4353,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
const inst_ty = f.typeOfIndex(inst);
const inst_scalar_ty = inst_ty.scalarType(zcu);
- if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(pt) > 64) or inst_scalar_ty.isRuntimeFloat())
+ if ((inst_scalar_ty.isInt(zcu) and inst_scalar_ty.bitSize(zcu) > 64) or inst_scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, .none);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -4446,7 +4448,7 @@ fn airCall(
if (!arg_ctype.eql(try f.ctypeFromType(arg_ty, .complete))) {
const array_local = try f.allocAlignedLocal(inst, .{
.ctype = arg_ctype,
- .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(pt)),
+ .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(zcu)),
});
try writer.writeAll("memcpy(");
try f.writeCValueMember(writer, array_local, .{ .identifier = "array" });
@@ -4493,7 +4495,7 @@ fn airCall(
} else {
const local = try f.allocAlignedLocal(inst, .{
.ctype = ret_ctype,
- .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(pt)),
+ .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)),
});
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
@@ -4618,7 +4620,7 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index)
const writer = f.object.writer();
const inst_ty = f.typeOfIndex(inst);
- const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt) and !f.liveness.isUnused(inst))
+ const result = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu) and !f.liveness.isUnused(inst))
try f.allocLocal(inst, inst_ty)
else
.none;
@@ -4681,7 +4683,7 @@ fn lowerTry(
const liveness_condbr = f.liveness.getCondBr(inst);
const writer = f.object.writer();
const payload_ty = err_union_ty.errorUnionPayload(zcu);
- const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt);
+ const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
try writer.writeAll("if (");
@@ -4820,7 +4822,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal
try writer.writeAll(", sizeof(");
try f.renderType(
writer,
- if (dest_ty.abiSize(pt) <= operand_ty.abiSize(pt)) dest_ty else operand_ty,
+ if (dest_ty.abiSize(zcu) <= operand_ty.abiSize(zcu)) dest_ty else operand_ty,
);
try writer.writeAll("));\n");
@@ -5030,7 +5032,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.indent_writer.insertNewline();
try writer.writeAll("case ");
const item_value = try f.air.value(item, pt);
- if (item_value.?.getUnsignedInt(pt)) |item_int| try writer.print("{}\n", .{
+ if (item_value.?.getUnsignedInt(zcu)) |item_int| try writer.print("{}\n", .{
try f.fmtIntLiteral(try pt.intValue(lowered_condition_ty, item_int)),
}) else {
if (condition_ty.isPtrAtRuntime(zcu)) {
@@ -5112,10 +5114,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const result = result: {
const writer = f.object.writer();
const inst_ty = f.typeOfIndex(inst);
- const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(pt)) local: {
+ const inst_local = if (inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) local: {
const inst_local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(inst_ty, .complete),
- .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(pt)),
+ .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(zcu)),
});
if (f.wantSafety()) {
try f.writeCValue(writer, inst_local, .Other);
@@ -5148,7 +5150,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("register ");
const output_local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(output_ty, .complete),
- .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(pt)),
+ .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(zcu)),
});
try f.allocs.put(gpa, output_local.new_local, false);
try f.object.dg.renderTypeAndName(writer, output_ty, output_local, .{}, .none, .complete);
@@ -5183,7 +5185,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
if (is_reg) try writer.writeAll("register ");
const input_local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(input_ty, .complete),
- .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(pt)),
+ .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(zcu)),
});
try f.allocs.put(gpa, input_local.new_local, false);
try f.object.dg.renderTypeAndName(writer, input_ty, input_local, Const, .none, .complete);
@@ -5526,9 +5528,9 @@ fn fieldLocation(
.struct_type => {
const loaded_struct = ip.loadStructType(container_ty.toIntern());
return switch (loaded_struct.layout) {
- .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(pt))
+ .auto, .@"extern" => if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu))
.begin
- else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt))
+ else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
.{ .byte_offset = loaded_struct.offsets.get(ip)[field_index] }
else
.{ .field = if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
@@ -5542,10 +5544,10 @@ fn fieldLocation(
.begin,
};
},
- .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(pt))
+ .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu))
.begin
- else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(pt))
- .{ .byte_offset = container_ty.structFieldOffset(field_index, pt) }
+ else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
+ .{ .byte_offset = container_ty.structFieldOffset(field_index, zcu) }
else
.{ .field = if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
.{ .identifier = field_name.toSlice(ip) }
@@ -5556,8 +5558,8 @@ fn fieldLocation(
switch (loaded_union.flagsUnordered(ip).layout) {
.auto, .@"extern" => {
const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt))
- return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(pt))
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu))
+ return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(zcu))
.{ .field = .{ .identifier = "payload" } }
else
.begin;
@@ -5706,7 +5708,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
const inst_ty = f.typeOfIndex(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try reap(f, inst, &.{extra.struct_operand});
return .none;
}
@@ -5738,7 +5740,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
inst_ty.intInfo(zcu).signedness
else
.unsigned;
- const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(pt))));
+ const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu))));
const temp_local = try f.allocLocal(inst, field_int_ty);
try f.writeCValue(writer, temp_local, .Other);
@@ -5749,7 +5751,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
const cant_cast = int_info.bits > 64;
if (cant_cast) {
- if (field_int_ty.bitSize(pt) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
@@ -5857,7 +5859,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const payload_ty = error_union_ty.errorUnionPayload(zcu);
const local = try f.allocLocal(inst, inst_ty);
- if (!payload_ty.hasRuntimeBits(pt) and operand == .local and operand.local == local.new_local) {
+ if (!payload_ty.hasRuntimeBits(zcu) and operand == .local and operand.local == local.new_local) {
// The store will be 'x = x'; elide it.
return local;
}
@@ -5866,7 +5868,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- if (!payload_ty.hasRuntimeBits(pt))
+ if (!payload_ty.hasRuntimeBits(zcu))
try f.writeCValue(writer, operand, .Other)
else if (error_ty.errorSetIsEmpty(zcu))
try writer.print("{}", .{
@@ -5892,7 +5894,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
const error_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty;
const writer = f.object.writer();
- if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(pt)) {
+ if (!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(zcu)) {
if (!is_ptr) return .none;
const local = try f.allocLocal(inst, inst_ty);
@@ -5963,7 +5965,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst);
const payload_ty = inst_ty.errorUnionPayload(zcu);
- const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt);
+ const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
const err_ty = inst_ty.errorUnionSet(zcu);
const err = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -6012,7 +6014,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
// First, set the non-error value.
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const a = try Assignment.start(f, writer, try f.ctypeFromType(operand_ty, .complete));
try f.writeCValueDeref(writer, operand);
try a.assign(f, writer);
@@ -6064,7 +6066,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst);
const payload_ty = inst_ty.errorUnionPayload(zcu);
const payload = try f.resolveInst(ty_op.operand);
- const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(pt);
+ const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
const err_ty = inst_ty.errorUnionSet(zcu);
try reap(f, inst, &.{ty_op.operand});
@@ -6109,7 +6111,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
try a.assign(f, writer);
const err_int_ty = try pt.errorIntType();
if (!error_ty.errorSetIsEmpty(zcu))
- if (payload_ty.hasRuntimeBits(pt))
+ if (payload_ty.hasRuntimeBits(zcu))
if (is_ptr)
try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
else
@@ -6430,7 +6432,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
const repr_ty = if (ty.isRuntimeFloat())
- pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable
+ pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
else
ty;
@@ -6534,7 +6536,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_mat = try Materialize.start(f, inst, ty, operand);
try reap(f, inst, &.{ pl_op.operand, extra.operand });
- const repr_bits = @as(u16, @intCast(ty.abiSize(pt) * 8));
+ const repr_bits = @as(u16, @intCast(ty.abiSize(zcu) * 8));
const is_float = ty.isRuntimeFloat();
const is_128 = repr_bits == 128;
const repr_ty = if (is_float) pt.intType(.unsigned, repr_bits) catch unreachable else ty;
@@ -6585,7 +6587,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ty = ptr_ty.childType(zcu);
const repr_ty = if (ty.isRuntimeFloat())
- pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable
+ pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
else
ty;
@@ -6626,7 +6628,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const repr_ty = if (ty.isRuntimeFloat())
- pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(pt) * 8))) catch unreachable
+ pt.intType(.unsigned, @as(u16, @intCast(ty.abiSize(zcu) * 8))) catch unreachable
else
ty;
@@ -6666,7 +6668,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const dest_slice = try f.resolveInst(bin_op.lhs);
const value = try f.resolveInst(bin_op.rhs);
const elem_ty = f.typeOf(bin_op.rhs);
- const elem_abi_size = elem_ty.abiSize(pt);
+ const elem_abi_size = elem_ty.abiSize(zcu);
const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false;
const writer = f.object.writer();
@@ -6831,7 +6833,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const union_ty = f.typeOf(bin_op.lhs).childType(zcu);
- const layout = union_ty.unionGetLayout(pt);
+ const layout = union_ty.unionGetLayout(zcu);
if (layout.tag_size == 0) return .none;
const tag_ty = union_ty.unionTagTypeSafety(zcu).?;
@@ -6846,13 +6848,14 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const union_ty = f.typeOf(ty_op.operand);
- const layout = union_ty.unionGetLayout(pt);
+ const layout = union_ty.unionGetLayout(zcu);
if (layout.tag_size == 0) return .none;
const inst_ty = f.typeOfIndex(inst);
@@ -6960,6 +6963,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
@@ -6978,7 +6982,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderValue(writer, try pt.intValue(Type.usize, index), .Other);
try writer.writeAll("] = ");
- const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(pt);
+ const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(zcu);
const src_val = try pt.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63)));
try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
@@ -7001,7 +7005,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_ty = f.typeOf(reduce.operand);
const writer = f.object.writer();
- const use_operator = scalar_ty.bitSize(pt) <= 64;
+ const use_operator = scalar_ty.bitSize(zcu) <= 64;
const op: union(enum) {
const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
builtin: Func,
@@ -7178,7 +7182,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
var field_it = loaded_struct.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| {
const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete));
try f.writeCValueMember(writer, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
@@ -7203,7 +7207,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
for (0..elements.len) |field_index| {
if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
const field_ty = inst_ty.structFieldType(field_index, zcu);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (!empty) {
try writer.writeAll("zig_or_");
@@ -7216,7 +7220,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
for (resolved_elements, 0..) |element, field_index| {
if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
const field_ty = inst_ty.structFieldType(field_index, zcu);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (!empty) try writer.writeAll(", ");
// TODO: Skip this entire shift if val is 0?
@@ -7248,7 +7252,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
if (!empty) try writer.writeByte(')');
- bit_offset += field_ty.bitSize(pt);
+ bit_offset += field_ty.bitSize(zcu);
empty = false;
}
try writer.writeAll(";\n");
@@ -7258,7 +7262,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
.anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| {
if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete));
try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
@@ -7294,7 +7298,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
if (loaded_union.flagsUnordered(ip).layout == .@"packed") return f.moveCValue(inst, union_ty, payload);
const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: {
- const layout = union_ty.unionGetLayout(pt);
+ const layout = union_ty.unionGetLayout(zcu);
if (layout.tag_size != 0) {
const field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
@@ -7818,7 +7822,7 @@ fn formatIntLiteral(
};
undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
break :blk undef_int.toConst();
- } else data.val.toBigInt(&int_buf, pt);
+ } else data.val.toBigInt(&int_buf, zcu);
assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
const c_bits: usize = @intCast(data.ctype.byteSize(ctype_pool, data.dg.mod) * 8);
@@ -8062,9 +8066,10 @@ const Vectorize = struct {
};
fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool {
- return switch (ty.zigTypeTag(pt.zcu)) {
+ const zcu = pt.zcu;
+ return switch (ty.zigTypeTag(zcu)) {
.Array, .Vector => return true,
- else => return ty.isAbiInt(pt.zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(pt)))) == null,
+ else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null,
};
}
diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig
index 63b7e4fd52..018b0586d0 100644
--- a/src/codegen/c/Type.zig
+++ b/src/codegen/c/Type.zig
@@ -1344,6 +1344,7 @@ pub const Pool = struct {
kind: Kind,
) !CType {
const ip = &pt.zcu.intern_pool;
+ const zcu = pt.zcu;
switch (ty.toIntern()) {
.u0_type,
.i0_type,
@@ -1476,7 +1477,7 @@ pub const Pool = struct {
),
.alignas = AlignAs.fromAlignment(.{
.@"align" = ptr_info.flags.alignment,
- .abi = Type.fromInterned(ptr_info.child).abiAlignment(pt),
+ .abi = Type.fromInterned(ptr_info.child).abiAlignment(zcu),
}),
};
break :elem_ctype if (elem.alignas.abiOrder().compare(.gte))
@@ -1552,7 +1553,7 @@ pub const Pool = struct {
.{
.name = .{ .index = .array },
.ctype = array_ctype,
- .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)),
+ .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
},
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
@@ -1578,7 +1579,7 @@ pub const Pool = struct {
.{
.name = .{ .index = .array },
.ctype = vector_ctype,
- .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(pt)),
+ .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
},
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
@@ -1613,7 +1614,7 @@ pub const Pool = struct {
.name = .{ .index = .payload },
.ctype = payload_ctype,
.alignas = AlignAs.fromAbiAlignment(
- Type.fromInterned(payload_type).abiAlignment(pt),
+ Type.fromInterned(payload_type).abiAlignment(zcu),
),
},
};
@@ -1649,7 +1650,7 @@ pub const Pool = struct {
.{
.name = .{ .index = .payload },
.ctype = payload_ctype,
- .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(pt)),
+ .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(zcu)),
},
};
return pool.fromFields(allocator, .@"struct", &fields, kind);
@@ -1663,7 +1664,7 @@ pub const Pool = struct {
.tag = .@"struct",
.name = .{ .index = ip_index },
});
- if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt))
+ if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
fwd_decl
else
CType.void;
@@ -1696,7 +1697,7 @@ pub const Pool = struct {
String.fromUnnamed(@intCast(field_index));
const field_alignas = AlignAs.fromAlignment(.{
.@"align" = loaded_struct.fieldAlign(ip, field_index),
- .abi = field_type.abiAlignment(pt),
+ .abi = field_type.abiAlignment(zcu),
});
pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
.name = field_name.index,
@@ -1758,7 +1759,7 @@ pub const Pool = struct {
.name = field_name.index,
.ctype = field_ctype.index,
.flags = .{ .alignas = AlignAs.fromAbiAlignment(
- field_type.abiAlignment(pt),
+ field_type.abiAlignment(zcu),
) },
});
}
@@ -1802,7 +1803,7 @@ pub const Pool = struct {
.tag = if (has_tag) .@"struct" else .@"union",
.name = .{ .index = ip_index },
});
- if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt))
+ if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu))
fwd_decl
else
CType.void;
@@ -1836,7 +1837,7 @@ pub const Pool = struct {
);
const field_alignas = AlignAs.fromAlignment(.{
.@"align" = loaded_union.fieldAlign(ip, field_index),
- .abi = field_type.abiAlignment(pt),
+ .abi = field_type.abiAlignment(zcu),
});
pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
.name = field_name.index,
@@ -1881,7 +1882,7 @@ pub const Pool = struct {
struct_fields[struct_fields_len] = .{
.name = .{ .index = .tag },
.ctype = tag_ctype,
- .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(pt)),
+ .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(zcu)),
};
struct_fields_len += 1;
}
@@ -1929,7 +1930,7 @@ pub const Pool = struct {
},
.@"packed" => return pool.fromIntInfo(allocator, .{
.signedness = .unsigned,
- .bits = @intCast(ty.bitSize(pt)),
+ .bits = @intCast(ty.bitSize(zcu)),
}, mod, kind),
}
},
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 233cf7e3eb..5256442561 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1001,12 +1001,12 @@ pub const Object = struct {
if (o.error_name_table == .none) return;
const pt = o.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const error_name_list = ip.global_error_set.getNamesFromMainThread();
- const llvm_errors = try mod.gpa.alloc(Builder.Constant, 1 + error_name_list.len);
- defer mod.gpa.free(llvm_errors);
+ const llvm_errors = try zcu.gpa.alloc(Builder.Constant, 1 + error_name_list.len);
+ defer zcu.gpa.free(llvm_errors);
// TODO: Address space
const slice_ty = Type.slice_const_u8_sentinel_0;
@@ -1041,7 +1041,7 @@ pub const Object = struct {
table_variable_index.setMutability(.constant, &o.builder);
table_variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
table_variable_index.setAlignment(
- slice_ty.abiAlignment(pt).toLlvm(),
+ slice_ty.abiAlignment(zcu).toLlvm(),
&o.builder,
);
@@ -1428,7 +1428,7 @@ pub const Object = struct {
var llvm_arg_i: u32 = 0;
// This gets the LLVM values from the function and stores them in `ng.args`.
- const sret = firstParamSRet(fn_info, pt, target);
+ const sret = firstParamSRet(fn_info, zcu, target);
const ret_ptr: Builder.Value = if (sret) param: {
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
@@ -1469,8 +1469,8 @@ pub const Object = struct {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
const param = wip.arg(llvm_arg_i);
- if (isByRef(param_ty, pt)) {
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ if (isByRef(param_ty, zcu)) {
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
const param_llvm_ty = param.typeOfWip(&wip);
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
@@ -1486,12 +1486,12 @@ pub const Object = struct {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
const param = wip.arg(llvm_arg_i);
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
try o.addByRefParamAttrs(&attributes, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty);
llvm_arg_i += 1;
- if (isByRef(param_ty, pt)) {
+ if (isByRef(param_ty, zcu)) {
args.appendAssumeCapacity(param);
} else {
args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, ""));
@@ -1501,12 +1501,12 @@ pub const Object = struct {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
const param = wip.arg(llvm_arg_i);
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
try attributes.addParamAttr(llvm_arg_i, .noundef, &o.builder);
llvm_arg_i += 1;
- if (isByRef(param_ty, pt)) {
+ if (isByRef(param_ty, zcu)) {
args.appendAssumeCapacity(param);
} else {
args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, ""));
@@ -1519,11 +1519,11 @@ pub const Object = struct {
llvm_arg_i += 1;
const param_llvm_ty = try o.lowerType(param_ty);
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
- args.appendAssumeCapacity(if (isByRef(param_ty, pt))
+ args.appendAssumeCapacity(if (isByRef(param_ty, zcu))
arg_ptr
else
try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
@@ -1547,7 +1547,7 @@ pub const Object = struct {
const elem_align = (if (ptr_info.flags.alignment != .none)
@as(InternPool.Alignment, ptr_info.flags.alignment)
else
- Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1")).toLlvm();
+ Type.fromInterned(ptr_info.child).abiAlignment(zcu).max(.@"1")).toLlvm();
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
const ptr_param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
@@ -1564,7 +1564,7 @@ pub const Object = struct {
const field_types = it.types_buffer[0..it.types_len];
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
- const param_alignment = param_ty.abiAlignment(pt).toLlvm();
+ const param_alignment = param_ty.abiAlignment(zcu).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, param_alignment, target);
const llvm_ty = try o.builder.structType(.normal, field_types);
for (0..field_types.len) |field_i| {
@@ -1576,7 +1576,7 @@ pub const Object = struct {
_ = try wip.store(.normal, param, field_ptr, alignment);
}
- const is_by_ref = isByRef(param_ty, pt);
+ const is_by_ref = isByRef(param_ty, zcu);
args.appendAssumeCapacity(if (is_by_ref)
arg_ptr
else
@@ -1594,11 +1594,11 @@ pub const Object = struct {
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
- args.appendAssumeCapacity(if (isByRef(param_ty, pt))
+ args.appendAssumeCapacity(if (isByRef(param_ty, zcu))
arg_ptr
else
try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
@@ -1609,11 +1609,11 @@ pub const Object = struct {
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, param.typeOfWip(&wip), alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
- args.appendAssumeCapacity(if (isByRef(param_ty, pt))
+ args.appendAssumeCapacity(if (isByRef(param_ty, zcu))
arg_ptr
else
try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
@@ -1738,13 +1738,13 @@ pub const Object = struct {
fn updateExportedValue(
o: *Object,
- mod: *Zcu,
+ zcu: *Zcu,
exported_value: InternPool.Index,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
- const gpa = mod.gpa;
- const ip = &mod.intern_pool;
- const main_exp_name = try o.builder.strtabString(mod.all_exports.items[export_indices[0]].opts.name.toSlice(ip));
+ const gpa = zcu.gpa;
+ const ip = &zcu.intern_pool;
+ const main_exp_name = try o.builder.strtabString(zcu.all_exports.items[export_indices[0]].opts.name.toSlice(ip));
const global_index = i: {
const gop = try o.uav_map.getOrPut(gpa, exported_value);
if (gop.found_existing) {
@@ -1768,18 +1768,18 @@ pub const Object = struct {
try variable_index.setInitializer(init_val, &o.builder);
break :i global_index;
};
- return updateExportedGlobal(o, mod, global_index, export_indices);
+ return updateExportedGlobal(o, zcu, global_index, export_indices);
}
fn updateExportedGlobal(
o: *Object,
- mod: *Zcu,
+ zcu: *Zcu,
global_index: Builder.Global.Index,
export_indices: []const u32,
) link.File.UpdateExportsError!void {
- const comp = mod.comp;
- const ip = &mod.intern_pool;
- const first_export = mod.all_exports.items[export_indices[0]];
+ const comp = zcu.comp;
+ const ip = &zcu.intern_pool;
+ const first_export = zcu.all_exports.items[export_indices[0]];
// We will rename this global to have a name matching `first_export`.
// Successive exports become aliases.
@@ -1836,7 +1836,7 @@ pub const Object = struct {
// Until then we iterate over existing aliases and make them point
// to the correct decl, or otherwise add a new alias. Old aliases are leaked.
for (export_indices[1..]) |export_idx| {
- const exp = mod.all_exports.items[export_idx];
+ const exp = zcu.all_exports.items[export_idx];
const exp_name = try o.builder.strtabString(exp.opts.name.toSlice(ip));
if (o.builder.getGlobal(exp_name)) |global| {
switch (global.ptrConst(&o.builder).kind) {
@@ -1923,7 +1923,7 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const builder_name = try o.builder.metadataString(name);
- const debug_bits = ty.abiSize(pt) * 8; // lldb cannot handle non-byte sized types
+ const debug_bits = ty.abiSize(zcu) * 8; // lldb cannot handle non-byte sized types
const debug_int_type = switch (info.signedness) {
.signed => try o.builder.debugSignedType(builder_name, debug_bits),
.unsigned => try o.builder.debugUnsignedType(builder_name, debug_bits),
@@ -1932,7 +1932,7 @@ pub const Object = struct {
return debug_int_type;
},
.Enum => {
- if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const debug_enum_type = try o.makeEmptyNamespaceDebugType(ty);
try o.debug_type_map.put(gpa, ty, debug_enum_type);
return debug_enum_type;
@@ -1949,7 +1949,7 @@ pub const Object = struct {
for (enum_type.names.get(ip), 0..) |field_name_ip, i| {
var bigint_space: Value.BigIntSpace = undefined;
const bigint = if (enum_type.values.len != 0)
- Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, pt)
+ Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, zcu)
else
std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst();
@@ -1976,8 +1976,8 @@ pub const Object = struct {
scope,
ty.typeDeclSrcLine(zcu).? + 1, // Line
try o.lowerDebugType(int_ty),
- ty.abiSize(pt) * 8,
- (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+ ty.abiSize(zcu) * 8,
+ (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(enumerators),
);
@@ -2017,10 +2017,10 @@ pub const Object = struct {
ptr_info.flags.is_const or
ptr_info.flags.is_volatile or
ptr_info.flags.size == .Many or ptr_info.flags.size == .C or
- !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(pt))
+ !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu))
{
const bland_ptr_ty = try pt.ptrType(.{
- .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(pt))
+ .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(zcu))
.anyopaque_type
else
ptr_info.child,
@@ -2050,10 +2050,10 @@ pub const Object = struct {
defer gpa.free(name);
const line = 0;
- const ptr_size = ptr_ty.abiSize(pt);
- const ptr_align = ptr_ty.abiAlignment(pt);
- const len_size = len_ty.abiSize(pt);
- const len_align = len_ty.abiAlignment(pt);
+ const ptr_size = ptr_ty.abiSize(zcu);
+ const ptr_align = ptr_ty.abiAlignment(zcu);
+ const len_size = len_ty.abiSize(zcu);
+ const len_align = len_ty.abiAlignment(zcu);
const len_offset = len_align.forward(ptr_size);
@@ -2085,8 +2085,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
line,
.none, // Underlying type
- ty.abiSize(pt) * 8,
- (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+ ty.abiSize(zcu) * 8,
+ (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
debug_ptr_type,
debug_len_type,
@@ -2114,7 +2114,7 @@ pub const Object = struct {
0, // Line
debug_elem_ty,
target.ptrBitWidth(),
- (ty.ptrAlignment(pt).toByteUnits() orelse 0) * 8,
+ (ty.ptrAlignment(zcu).toByteUnits() orelse 0) * 8,
0, // Offset
);
@@ -2165,8 +2165,8 @@ pub const Object = struct {
.none, // Scope
0, // Line
try o.lowerDebugType(ty.childType(zcu)),
- ty.abiSize(pt) * 8,
- (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+ ty.abiSize(zcu) * 8,
+ (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
try o.builder.debugSubrange(
try o.builder.debugConstant(try o.builder.intConst(.i64, 0)),
@@ -2208,8 +2208,8 @@ pub const Object = struct {
.none, // Scope
0, // Line
debug_elem_type,
- ty.abiSize(pt) * 8,
- (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+ ty.abiSize(zcu) * 8,
+ (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
try o.builder.debugSubrange(
try o.builder.debugConstant(try o.builder.intConst(.i64, 0)),
@@ -2225,7 +2225,7 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const child_ty = ty.optionalChild(zcu);
- if (!child_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!child_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const debug_bool_type = try o.builder.debugBoolType(
try o.builder.metadataString(name),
8,
@@ -2252,10 +2252,10 @@ pub const Object = struct {
}
const non_null_ty = Type.u8;
- const payload_size = child_ty.abiSize(pt);
- const payload_align = child_ty.abiAlignment(pt);
- const non_null_size = non_null_ty.abiSize(pt);
- const non_null_align = non_null_ty.abiAlignment(pt);
+ const payload_size = child_ty.abiSize(zcu);
+ const payload_align = child_ty.abiAlignment(zcu);
+ const non_null_size = non_null_ty.abiSize(zcu);
+ const non_null_align = non_null_ty.abiAlignment(zcu);
const non_null_offset = non_null_align.forward(payload_size);
const debug_data_type = try o.builder.debugMemberType(
@@ -2286,8 +2286,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(pt) * 8,
- (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+ ty.abiSize(zcu) * 8,
+ (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
debug_data_type,
debug_some_type,
@@ -2304,7 +2304,7 @@ pub const Object = struct {
},
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload(zcu);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// TODO: Maybe remove?
const debug_error_union_type = try o.lowerDebugType(Type.anyerror);
try o.debug_type_map.put(gpa, ty, debug_error_union_type);
@@ -2314,10 +2314,10 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
- const error_size = Type.anyerror.abiSize(pt);
- const error_align = Type.anyerror.abiAlignment(pt);
- const payload_size = payload_ty.abiSize(pt);
- const payload_align = payload_ty.abiAlignment(pt);
+ const error_size = Type.anyerror.abiSize(zcu);
+ const error_align = Type.anyerror.abiAlignment(zcu);
+ const payload_size = payload_ty.abiSize(zcu);
+ const payload_align = payload_ty.abiAlignment(zcu);
var error_index: u32 = undefined;
var payload_index: u32 = undefined;
@@ -2365,8 +2365,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(pt) * 8,
- (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+ ty.abiSize(zcu) * 8,
+ (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&fields),
);
@@ -2393,8 +2393,8 @@ pub const Object = struct {
const info = Type.fromInterned(backing_int_ty).intInfo(zcu);
const builder_name = try o.builder.metadataString(name);
const debug_int_type = switch (info.signedness) {
- .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(pt) * 8),
- .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(pt) * 8),
+ .signed => try o.builder.debugSignedType(builder_name, ty.abiSize(zcu) * 8),
+ .unsigned => try o.builder.debugUnsignedType(builder_name, ty.abiSize(zcu) * 8),
};
try o.debug_type_map.put(gpa, ty, debug_int_type);
return debug_int_type;
@@ -2414,10 +2414,10 @@ pub const Object = struct {
const debug_fwd_ref = try o.builder.debugForwardReference();
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
- if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
- const field_size = Type.fromInterned(field_ty).abiSize(pt);
- const field_align = Type.fromInterned(field_ty).abiAlignment(pt);
+ const field_size = Type.fromInterned(field_ty).abiSize(zcu);
+ const field_align = Type.fromInterned(field_ty).abiAlignment(zcu);
const field_offset = field_align.forward(offset);
offset = field_offset + field_size;
@@ -2445,8 +2445,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(pt) * 8,
- (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+ ty.abiSize(zcu) * 8,
+ (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(fields.items),
);
@@ -2472,7 +2472,7 @@ pub const Object = struct {
else => {},
}
- if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const debug_struct_type = try o.makeEmptyNamespaceDebugType(ty);
try o.debug_type_map.put(gpa, ty, debug_struct_type);
return debug_struct_type;
@@ -2494,14 +2494,14 @@ pub const Object = struct {
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
- const field_size = field_ty.abiSize(pt);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ const field_size = field_ty.abiSize(zcu);
const field_align = pt.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
field_ty,
struct_type.layout,
);
- const field_offset = ty.structFieldOffset(field_index, pt);
+ const field_offset = ty.structFieldOffset(field_index, zcu);
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
@@ -2524,8 +2524,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(pt) * 8,
- (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+ ty.abiSize(zcu) * 8,
+ (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(fields.items),
);
@@ -2543,7 +2543,7 @@ pub const Object = struct {
const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.haveFieldTypes(ip) or
- !ty.hasRuntimeBitsIgnoreComptime(pt) or
+ !ty.hasRuntimeBitsIgnoreComptime(zcu) or
!union_type.haveLayout(ip))
{
const debug_union_type = try o.makeEmptyNamespaceDebugType(ty);
@@ -2551,7 +2551,7 @@ pub const Object = struct {
return debug_union_type;
}
- const layout = pt.getUnionLayout(union_type);
+ const layout = Type.getUnionLayout(union_type, zcu);
const debug_fwd_ref = try o.builder.debugForwardReference();
@@ -2565,8 +2565,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(pt) * 8,
- (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+ ty.abiSize(zcu) * 8,
+ (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(
&.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))},
),
@@ -2593,12 +2593,12 @@ pub const Object = struct {
for (0..tag_type.names.len) |field_index| {
const field_ty = union_type.field_types.get(ip)[field_index];
- if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue;
- const field_size = Type.fromInterned(field_ty).abiSize(pt);
+ const field_size = Type.fromInterned(field_ty).abiSize(zcu);
const field_align: InternPool.Alignment = switch (union_type.flagsUnordered(ip).layout) {
.@"packed" => .none,
- .auto, .@"extern" => pt.unionFieldNormalAlignment(union_type, @intCast(field_index)),
+ .auto, .@"extern" => Type.unionFieldNormalAlignment(union_type, @intCast(field_index), zcu),
};
const field_name = tag_type.names.get(ip)[field_index];
@@ -2627,8 +2627,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(pt) * 8,
- (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+ ty.abiSize(zcu) * 8,
+ (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(fields.items),
);
@@ -2686,8 +2686,8 @@ pub const Object = struct {
o.debug_compile_unit, // Scope
0, // Line
.none, // Underlying type
- ty.abiSize(pt) * 8,
- (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8,
+ ty.abiSize(zcu) * 8,
+ (ty.abiAlignment(zcu).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&full_fields),
);
@@ -2708,8 +2708,8 @@ pub const Object = struct {
try debug_param_types.ensureUnusedCapacity(3 + fn_info.param_types.len);
// Return type goes first.
- if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(pt)) {
- const sret = firstParamSRet(fn_info, pt, target);
+ if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(zcu)) {
+ const sret = firstParamSRet(fn_info, zcu, target);
const ret_ty = if (sret) Type.void else Type.fromInterned(fn_info.return_type);
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ret_ty));
@@ -2730,9 +2730,9 @@ pub const Object = struct {
for (0..fn_info.param_types.len) |i| {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[i]);
- if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
- if (isByRef(param_ty, pt)) {
+ if (isByRef(param_ty, zcu)) {
const ptr_ty = try pt.singleMutPtrType(param_ty);
debug_param_types.appendAssumeCapacity(try o.lowerDebugType(ptr_ty));
} else {
@@ -2842,7 +2842,7 @@ pub const Object = struct {
const fn_info = zcu.typeToFunc(ty).?;
const target = owner_mod.resolved_target.result;
- const sret = firstParamSRet(fn_info, pt, target);
+ const sret = firstParamSRet(fn_info, zcu, target);
const is_extern, const lib_name = switch (ip.indexToKey(val.toIntern())) {
.variable => |variable| .{ false, variable.lib_name },
@@ -2934,14 +2934,14 @@ pub const Object = struct {
.byval => {
const param_index = it.zig_index - 1;
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
- if (!isByRef(param_ty, pt)) {
+ if (!isByRef(param_ty, zcu)) {
try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
.byref => {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
- const alignment = param_ty.abiAlignment(pt);
+ const alignment = param_ty.abiAlignment(zcu);
try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment.toLlvm(), it.byval_attr, param_llvm_ty);
},
.byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder),
@@ -3042,8 +3042,8 @@ pub const Object = struct {
}
errdefer assert(o.uav_map.remove(uav));
- const mod = o.pt.zcu;
- const decl_ty = mod.intern_pool.typeOf(uav);
+ const zcu = o.pt.zcu;
+ const decl_ty = zcu.intern_pool.typeOf(uav);
const variable_index = try o.builder.addVariable(
try o.builder.strtabStringFmt("__anon_{d}", .{@intFromEnum(uav)}),
@@ -3106,9 +3106,9 @@ pub const Object = struct {
fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type {
const pt = o.pt;
- const mod = pt.zcu;
- const target = mod.getTarget();
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const target = zcu.getTarget();
+ const ip = &zcu.intern_pool;
return switch (t.toIntern()) {
.u0_type, .i0_type => unreachable,
inline .u1_type,
@@ -3230,16 +3230,16 @@ pub const Object = struct {
),
.opt_type => |child_ty| {
// Must stay in sync with `opt_payload` logic in `lowerPtr`.
- if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(pt)) return .i8;
+ if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(zcu)) return .i8;
const payload_ty = try o.lowerType(Type.fromInterned(child_ty));
- if (t.optionalReprIsPayload(mod)) return payload_ty;
+ if (t.optionalReprIsPayload(zcu)) return payload_ty;
comptime assert(optional_layout_version == 3);
var fields: [3]Builder.Type = .{ payload_ty, .i8, undefined };
var fields_len: usize = 2;
- const offset = Type.fromInterned(child_ty).abiSize(pt) + 1;
- const abi_size = t.abiSize(pt);
+ const offset = Type.fromInterned(child_ty).abiSize(zcu) + 1;
+ const abi_size = t.abiSize(zcu);
const padding_len = abi_size - offset;
if (padding_len > 0) {
fields[2] = try o.builder.arrayType(padding_len, .i8);
@@ -3252,16 +3252,16 @@ pub const Object = struct {
// Must stay in sync with `codegen.errUnionPayloadOffset`.
// See logic in `lowerPtr`.
const error_type = try o.errorIntType();
- if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(pt))
+ if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(zcu))
return error_type;
const payload_type = try o.lowerType(Type.fromInterned(error_union_type.payload_type));
const err_int_ty = try o.pt.errorIntType();
- const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(pt);
- const error_align = err_int_ty.abiAlignment(pt);
+ const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(zcu);
+ const error_align = err_int_ty.abiAlignment(zcu);
- const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(pt);
- const error_size = err_int_ty.abiSize(pt);
+ const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(zcu);
+ const error_size = err_int_ty.abiSize(zcu);
var fields: [3]Builder.Type = undefined;
var fields_len: usize = 2;
@@ -3320,7 +3320,7 @@ pub const Object = struct {
field_ty,
struct_type.layout,
);
- const field_ty_align = field_ty.abiAlignment(pt);
+ const field_ty_align = field_ty.abiAlignment(zcu);
if (field_align.compare(.lt, field_ty_align)) struct_kind = .@"packed";
big_align = big_align.max(field_align);
const prev_offset = offset;
@@ -3332,7 +3332,7 @@ pub const Object = struct {
try o.builder.arrayType(padding_len, .i8),
);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// This is a zero-bit field. If there are runtime bits after this field,
// map to the next LLVM field (which we know exists): otherwise, don't
// map the field, indicating it's at the end of the struct.
@@ -3351,7 +3351,7 @@ pub const Object = struct {
}, @intCast(llvm_field_types.items.len));
try llvm_field_types.append(o.gpa, try o.lowerType(field_ty));
- offset += field_ty.abiSize(pt);
+ offset += field_ty.abiSize(zcu);
}
{
const prev_offset = offset;
@@ -3384,7 +3384,7 @@ pub const Object = struct {
var offset: u64 = 0;
var big_align: InternPool.Alignment = .none;
- const struct_size = t.abiSize(pt);
+ const struct_size = t.abiSize(zcu);
for (
anon_struct_type.types.get(ip),
@@ -3393,7 +3393,7 @@ pub const Object = struct {
) |field_ty, field_val, field_index| {
if (field_val != .none) continue;
- const field_align = Type.fromInterned(field_ty).abiAlignment(pt);
+ const field_align = Type.fromInterned(field_ty).abiAlignment(zcu);
big_align = big_align.max(field_align);
const prev_offset = offset;
offset = field_align.forward(offset);
@@ -3403,7 +3403,7 @@ pub const Object = struct {
o.gpa,
try o.builder.arrayType(padding_len, .i8),
);
- if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) {
// This is a zero-bit field. If there are runtime bits after this field,
// map to the next LLVM field (which we know exists): otherwise, don't
// map the field, indicating it's at the end of the struct.
@@ -3421,7 +3421,7 @@ pub const Object = struct {
}, @intCast(llvm_field_types.items.len));
try llvm_field_types.append(o.gpa, try o.lowerType(Type.fromInterned(field_ty)));
- offset += Type.fromInterned(field_ty).abiSize(pt);
+ offset += Type.fromInterned(field_ty).abiSize(zcu);
}
{
const prev_offset = offset;
@@ -3438,10 +3438,10 @@ pub const Object = struct {
if (o.type_map.get(t.toIntern())) |value| return value;
const union_obj = ip.loadUnionType(t.toIntern());
- const layout = pt.getUnionLayout(union_obj);
+ const layout = Type.getUnionLayout(union_obj, zcu);
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
- const int_ty = try o.builder.intType(@intCast(t.bitSize(pt)));
+ const int_ty = try o.builder.intType(@intCast(t.bitSize(zcu)));
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
return int_ty;
}
@@ -3547,32 +3547,32 @@ pub const Object = struct {
/// There are other similar cases handled here as well.
fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!Builder.Type {
const pt = o.pt;
- const mod = pt.zcu;
- const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
+ const zcu = pt.zcu;
+ const lower_elem_ty = switch (elem_ty.zigTypeTag(zcu)) {
.Opaque => true,
- .Fn => !mod.typeToFunc(elem_ty).?.is_generic,
- .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(pt),
- else => elem_ty.hasRuntimeBitsIgnoreComptime(pt),
+ .Fn => !zcu.typeToFunc(elem_ty).?.is_generic,
+ .Array => elem_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu),
+ else => elem_ty.hasRuntimeBitsIgnoreComptime(zcu),
};
return if (lower_elem_ty) try o.lowerType(elem_ty) else .i8;
}
fn lowerTypeFn(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
const pt = o.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- const target = mod.getTarget();
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const target = zcu.getTarget();
const ret_ty = try lowerFnRetTy(o, fn_info);
var llvm_params = std.ArrayListUnmanaged(Builder.Type){};
defer llvm_params.deinit(o.gpa);
- if (firstParamSRet(fn_info, pt, target)) {
+ if (firstParamSRet(fn_info, zcu, target)) {
try llvm_params.append(o.gpa, .ptr);
}
- if (Type.fromInterned(fn_info.return_type).isError(mod) and
- mod.comp.config.any_error_tracing)
+ if (Type.fromInterned(fn_info.return_type).isError(zcu) and
+ zcu.comp.config.any_error_tracing)
{
const ptr_ty = try pt.singleMutPtrType(try o.getStackTraceType());
try llvm_params.append(o.gpa, try o.lowerType(ptr_ty));
@@ -3591,13 +3591,13 @@ pub const Object = struct {
.abi_sized_int => {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
try llvm_params.append(o.gpa, try o.builder.intType(
- @intCast(param_ty.abiSize(pt) * 8),
+ @intCast(param_ty.abiSize(zcu) * 8),
));
},
.slice => {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
try llvm_params.appendSlice(o.gpa, &.{
- try o.builder.ptrType(toLlvmAddressSpace(param_ty.ptrAddressSpace(mod), target)),
+ try o.builder.ptrType(toLlvmAddressSpace(param_ty.ptrAddressSpace(zcu), target)),
try o.lowerType(Type.usize),
});
},
@@ -3609,7 +3609,7 @@ pub const Object = struct {
},
.float_array => |count| {
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
- const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
+ const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, zcu).?);
try llvm_params.append(o.gpa, try o.builder.arrayType(count, float_ty));
},
.i32_array, .i64_array => |arr_len| {
@@ -3630,14 +3630,14 @@ pub const Object = struct {
fn lowerValueToInt(o: *Object, llvm_int_ty: Builder.Type, arg_val: InternPool.Index) Error!Builder.Constant {
const pt = o.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- const target = mod.getTarget();
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const target = zcu.getTarget();
const val = Value.fromInterned(arg_val);
const val_key = ip.indexToKey(val.toIntern());
- if (val.isUndefDeep(mod)) return o.builder.undefConst(llvm_int_ty);
+ if (val.isUndefDeep(zcu)) return o.builder.undefConst(llvm_int_ty);
const ty = Type.fromInterned(val_key.typeOf());
switch (val_key) {
@@ -3661,7 +3661,7 @@ pub const Object = struct {
var running_int = try o.builder.intConst(llvm_int_ty, 0);
var running_bits: u16 = 0;
for (struct_type.field_types.get(ip), 0..) |field_ty, field_index| {
- if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue;
const shift_rhs = try o.builder.intConst(llvm_int_ty, running_bits);
const field_val = try o.lowerValueToInt(llvm_int_ty, (try val.fieldValue(pt, field_index)).toIntern());
@@ -3669,7 +3669,7 @@ pub const Object = struct {
running_int = try o.builder.binConst(.xor, running_int, shifted);
- const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(pt));
+ const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(zcu));
running_bits += ty_bit_size;
}
return running_int;
@@ -3678,10 +3678,10 @@ pub const Object = struct {
else => unreachable,
},
.un => |un| {
- const layout = ty.unionGetLayout(pt);
+ const layout = ty.unionGetLayout(zcu);
if (layout.payload_size == 0) return o.lowerValue(un.tag);
- const union_obj = mod.typeToUnion(ty).?;
+ const union_obj = zcu.typeToUnion(ty).?;
const container_layout = union_obj.flagsUnordered(ip).layout;
assert(container_layout == .@"packed");
@@ -3694,9 +3694,9 @@ pub const Object = struct {
need_unnamed = true;
return union_val;
}
- const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
+ const field_index = zcu.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBits(pt)) return o.builder.intConst(llvm_int_ty, 0);
+ if (!field_ty.hasRuntimeBits(zcu)) return o.builder.intConst(llvm_int_ty, 0);
return o.lowerValueToInt(llvm_int_ty, un.val);
},
.simple_value => |simple_value| switch (simple_value) {
@@ -3710,7 +3710,7 @@ pub const Object = struct {
.opt => {}, // pointer like optional expected
else => unreachable,
}
- const bits = ty.bitSize(pt);
+ const bits = ty.bitSize(zcu);
const bytes: usize = @intCast(std.mem.alignForward(u64, bits, 8) / 8);
var stack = std.heap.stackFallback(32, o.gpa);
@@ -3743,14 +3743,14 @@ pub const Object = struct {
fn lowerValue(o: *Object, arg_val: InternPool.Index) Error!Builder.Constant {
const pt = o.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- const target = mod.getTarget();
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const target = zcu.getTarget();
const val = Value.fromInterned(arg_val);
const val_key = ip.indexToKey(val.toIntern());
- if (val.isUndefDeep(mod)) {
+ if (val.isUndefDeep(zcu)) {
return o.builder.undefConst(try o.lowerType(Type.fromInterned(val_key.typeOf())));
}
@@ -3800,7 +3800,7 @@ pub const Object = struct {
},
.int => {
var bigint_space: Value.BigIntSpace = undefined;
- const bigint = val.toBigInt(&bigint_space, pt);
+ const bigint = val.toBigInt(&bigint_space, zcu);
return lowerBigInt(o, ty, bigint);
},
.err => |err| {
@@ -3811,20 +3811,20 @@ pub const Object = struct {
.error_union => |error_union| {
const err_val = switch (error_union.val) {
.err_name => |err_name| try pt.intern(.{ .err = .{
- .ty = ty.errorUnionSet(mod).toIntern(),
+ .ty = ty.errorUnionSet(zcu).toIntern(),
.name = err_name,
} }),
.payload => (try pt.intValue(try pt.errorIntType(), 0)).toIntern(),
};
const err_int_ty = try pt.errorIntType();
- const payload_type = ty.errorUnionPayload(mod);
- if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) {
+ const payload_type = ty.errorUnionPayload(zcu);
+ if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
// We use the error type directly as the type.
return o.lowerValue(err_val);
}
- const payload_align = payload_type.abiAlignment(pt);
- const error_align = err_int_ty.abiAlignment(pt);
+ const payload_align = payload_type.abiAlignment(zcu);
+ const error_align = err_int_ty.abiAlignment(zcu);
const llvm_error_value = try o.lowerValue(err_val);
const llvm_payload_value = try o.lowerValue(switch (error_union.val) {
.err_name => try pt.intern(.{ .undef = payload_type.toIntern() }),
@@ -3858,16 +3858,16 @@ pub const Object = struct {
.enum_tag => |enum_tag| o.lowerValue(enum_tag.int),
.float => switch (ty.floatBits(target)) {
16 => if (backendSupportsF16(target))
- try o.builder.halfConst(val.toFloat(f16, pt))
+ try o.builder.halfConst(val.toFloat(f16, zcu))
else
- try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, pt)))),
- 32 => try o.builder.floatConst(val.toFloat(f32, pt)),
- 64 => try o.builder.doubleConst(val.toFloat(f64, pt)),
+ try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, zcu)))),
+ 32 => try o.builder.floatConst(val.toFloat(f32, zcu)),
+ 64 => try o.builder.doubleConst(val.toFloat(f64, zcu)),
80 => if (backendSupportsF80(target))
- try o.builder.x86_fp80Const(val.toFloat(f80, pt))
+ try o.builder.x86_fp80Const(val.toFloat(f80, zcu))
else
- try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, pt)))),
- 128 => try o.builder.fp128Const(val.toFloat(f128, pt)),
+ try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, zcu)))),
+ 128 => try o.builder.fp128Const(val.toFloat(f128, zcu)),
else => unreachable,
},
.ptr => try o.lowerPtr(arg_val, 0),
@@ -3877,14 +3877,14 @@ pub const Object = struct {
}),
.opt => |opt| {
comptime assert(optional_layout_version == 3);
- const payload_ty = ty.optionalChild(mod);
+ const payload_ty = ty.optionalChild(zcu);
const non_null_bit = try o.builder.intConst(.i8, @intFromBool(opt.val != .none));
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return non_null_bit;
}
const llvm_ty = try o.lowerType(ty);
- if (ty.optionalReprIsPayload(mod)) return switch (opt.val) {
+ if (ty.optionalReprIsPayload(zcu)) return switch (opt.val) {
.none => switch (llvm_ty.tag(&o.builder)) {
.integer => try o.builder.intConst(llvm_ty, 0),
.pointer => try o.builder.nullConst(llvm_ty),
@@ -3893,7 +3893,7 @@ pub const Object = struct {
},
else => |payload| try o.lowerValue(payload),
};
- assert(payload_ty.zigTypeTag(mod) != .Fn);
+ assert(payload_ty.zigTypeTag(zcu) != .Fn);
var fields: [3]Builder.Type = undefined;
var vals: [3]Builder.Constant = undefined;
@@ -4047,9 +4047,9 @@ pub const Object = struct {
0..,
) |field_ty, field_val, field_index| {
if (field_val != .none) continue;
- if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue;
- const field_align = Type.fromInterned(field_ty).abiAlignment(pt);
+ const field_align = Type.fromInterned(field_ty).abiAlignment(zcu);
big_align = big_align.max(field_align);
const prev_offset = offset;
offset = field_align.forward(offset);
@@ -4071,7 +4071,7 @@ pub const Object = struct {
need_unnamed = true;
llvm_index += 1;
- offset += Type.fromInterned(field_ty).abiSize(pt);
+ offset += Type.fromInterned(field_ty).abiSize(zcu);
}
{
const prev_offset = offset;
@@ -4098,7 +4098,7 @@ pub const Object = struct {
if (struct_type.layout == .@"packed") {
comptime assert(Type.packed_struct_layout_version == 2);
- const bits = ty.bitSize(pt);
+ const bits = ty.bitSize(zcu);
const llvm_int_ty = try o.builder.intType(@intCast(bits));
return o.lowerValueToInt(llvm_int_ty, arg_val);
@@ -4147,7 +4147,7 @@ pub const Object = struct {
llvm_index += 1;
}
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// This is a zero-bit field - we only needed it for the alignment.
continue;
}
@@ -4160,7 +4160,7 @@ pub const Object = struct {
need_unnamed = true;
llvm_index += 1;
- offset += field_ty.abiSize(pt);
+ offset += field_ty.abiSize(zcu);
}
{
const prev_offset = offset;
@@ -4184,19 +4184,19 @@ pub const Object = struct {
},
.un => |un| {
const union_ty = try o.lowerType(ty);
- const layout = ty.unionGetLayout(pt);
+ const layout = ty.unionGetLayout(zcu);
if (layout.payload_size == 0) return o.lowerValue(un.tag);
- const union_obj = mod.typeToUnion(ty).?;
+ const union_obj = zcu.typeToUnion(ty).?;
const container_layout = union_obj.flagsUnordered(ip).layout;
var need_unnamed = false;
const payload = if (un.tag != .none) p: {
- const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
+ const field_index = zcu.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (container_layout == .@"packed") {
- if (!field_ty.hasRuntimeBits(pt)) return o.builder.intConst(union_ty, 0);
- const bits = ty.bitSize(pt);
+ if (!field_ty.hasRuntimeBits(zcu)) return o.builder.intConst(union_ty, 0);
+ const bits = ty.bitSize(zcu);
const llvm_int_ty = try o.builder.intType(@intCast(bits));
return o.lowerValueToInt(llvm_int_ty, arg_val);
@@ -4208,7 +4208,7 @@ pub const Object = struct {
// must pointer cast to the expected type before accessing the union.
need_unnamed = layout.most_aligned_field != field_index;
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const padding_len = layout.payload_size;
break :p try o.builder.undefConst(try o.builder.arrayType(padding_len, .i8));
}
@@ -4217,7 +4217,7 @@ pub const Object = struct {
if (payload_ty != union_ty.structFields(&o.builder)[
@intFromBool(layout.tag_align.compare(.gte, layout.payload_align))
]) need_unnamed = true;
- const field_size = field_ty.abiSize(pt);
+ const field_size = field_ty.abiSize(zcu);
if (field_size == layout.payload_size) break :p payload;
const padding_len = layout.payload_size - field_size;
const padding_ty = try o.builder.arrayType(padding_len, .i8);
@@ -4228,7 +4228,7 @@ pub const Object = struct {
} else p: {
assert(layout.tag_size == 0);
if (container_layout == .@"packed") {
- const bits = ty.bitSize(pt);
+ const bits = ty.bitSize(zcu);
const llvm_int_ty = try o.builder.intType(@intCast(bits));
return o.lowerValueToInt(llvm_int_ty, arg_val);
@@ -4275,8 +4275,8 @@ pub const Object = struct {
ty: Type,
bigint: std.math.big.int.Const,
) Allocator.Error!Builder.Constant {
- const mod = o.pt.zcu;
- return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(mod).bits), bigint);
+ const zcu = o.pt.zcu;
+ return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(zcu).bits), bigint);
}
fn lowerPtr(
@@ -4310,7 +4310,7 @@ pub const Object = struct {
eu_ptr,
offset + @import("../codegen.zig").errUnionPayloadOffset(
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu),
- pt,
+ zcu,
),
),
.opt_payload => |opt_ptr| try o.lowerPtr(opt_ptr, offset),
@@ -4326,7 +4326,7 @@ pub const Object = struct {
};
},
.Struct, .Union => switch (agg_ty.containerLayout(zcu)) {
- .auto => agg_ty.structFieldOffset(@intCast(field.index), pt),
+ .auto => agg_ty.structFieldOffset(@intCast(field.index), zcu),
.@"extern", .@"packed" => unreachable,
},
else => unreachable,
@@ -4344,11 +4344,11 @@ pub const Object = struct {
uav: InternPool.Key.Ptr.BaseAddr.Uav,
) Error!Builder.Constant {
const pt = o.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const uav_val = uav.val;
const uav_ty = Type.fromInterned(ip.typeOf(uav_val));
- const target = mod.getTarget();
+ const target = zcu.getTarget();
switch (ip.indexToKey(uav_val)) {
.func => @panic("TODO"),
@@ -4358,15 +4358,15 @@ pub const Object = struct {
const ptr_ty = Type.fromInterned(uav.orig_ty);
- const is_fn_body = uav_ty.zigTypeTag(mod) == .Fn;
- if ((!is_fn_body and !uav_ty.hasRuntimeBits(pt)) or
- (is_fn_body and mod.typeToFunc(uav_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty);
+ const is_fn_body = uav_ty.zigTypeTag(zcu) == .Fn;
+ if ((!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) or
+ (is_fn_body and zcu.typeToFunc(uav_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty);
if (is_fn_body)
@panic("TODO");
- const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(mod), target);
- const alignment = ptr_ty.ptrAlignment(pt);
+ const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(zcu), target);
+ const alignment = ptr_ty.ptrAlignment(zcu);
const llvm_global = (try o.resolveGlobalUav(uav.val, llvm_addr_space, alignment)).ptrConst(&o.builder).global;
const llvm_val = try o.builder.convConst(
@@ -4398,7 +4398,7 @@ pub const Object = struct {
const ptr_ty = try pt.navPtrType(owner_nav_index);
const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn;
- if ((!is_fn_body and !nav_ty.hasRuntimeBits(pt)) or
+ if ((!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) or
(is_fn_body and zcu.typeToFunc(nav_ty).?.is_generic))
{
return o.lowerPtrToVoid(ptr_ty);
@@ -4418,19 +4418,19 @@ pub const Object = struct {
}
fn lowerPtrToVoid(o: *Object, ptr_ty: Type) Allocator.Error!Builder.Constant {
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
// Even though we are pointing at something which has zero bits (e.g. `void`),
// pointers are defined to have bits. So we must return something here.
// The value cannot be undefined, because we use the `nonnull` annotation
// for non-optional pointers. We also need to respect the alignment, even though
// the address will never be dereferenced.
- const int: u64 = ptr_ty.ptrInfo(mod).flags.alignment.toByteUnits() orelse
+ const int: u64 = ptr_ty.ptrInfo(zcu).flags.alignment.toByteUnits() orelse
// Note that these 0xaa values are appropriate even in release-optimized builds
// because we need a well-defined value that is not null, and LLVM does not
// have an "undef_but_not_null" attribute. As an example, if this `alloc` AIR
// instruction is followed by a `wrap_optional`, it will return this value
// verbatim, and the result should test as non-null.
- switch (mod.getTarget().ptrBitWidth()) {
+ switch (zcu.getTarget().ptrBitWidth()) {
16 => 0xaaaa,
32 => 0xaaaaaaaa,
64 => 0xaaaaaaaa_aaaaaaaa,
@@ -4447,20 +4447,20 @@ pub const Object = struct {
/// types to work around a LLVM deficiency when targeting ARM/AArch64.
fn getAtomicAbiType(o: *Object, ty: Type, is_rmw_xchg: bool) Allocator.Error!Builder.Type {
const pt = o.pt;
- const mod = pt.zcu;
- const int_ty = switch (ty.zigTypeTag(mod)) {
+ const zcu = pt.zcu;
+ const int_ty = switch (ty.zigTypeTag(zcu)) {
.Int => ty,
- .Enum => ty.intTagType(mod),
+ .Enum => ty.intTagType(zcu),
.Float => {
if (!is_rmw_xchg) return .none;
- return o.builder.intType(@intCast(ty.abiSize(pt) * 8));
+ return o.builder.intType(@intCast(ty.abiSize(zcu) * 8));
},
.Bool => return .i8,
else => return .none,
};
- const bit_count = int_ty.intInfo(mod).bits;
+ const bit_count = int_ty.intInfo(zcu).bits;
if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
- return o.builder.intType(@intCast(int_ty.abiSize(pt) * 8));
+ return o.builder.intType(@intCast(int_ty.abiSize(zcu) * 8));
} else {
return .none;
}
@@ -4475,15 +4475,15 @@ pub const Object = struct {
llvm_arg_i: u32,
) Allocator.Error!void {
const pt = o.pt;
- const mod = pt.zcu;
- if (param_ty.isPtrAtRuntime(mod)) {
- const ptr_info = param_ty.ptrInfo(mod);
+ const zcu = pt.zcu;
+ if (param_ty.isPtrAtRuntime(zcu)) {
+ const ptr_info = param_ty.ptrInfo(zcu);
if (math.cast(u5, param_index)) |i| {
if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) {
try attributes.addParamAttr(llvm_arg_i, .@"noalias", &o.builder);
}
}
- if (!param_ty.isPtrLikeOptional(mod) and !ptr_info.flags.is_allowzero) {
+ if (!param_ty.isPtrLikeOptional(zcu) and !ptr_info.flags.is_allowzero) {
try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder);
}
if (fn_info.cc == .Interrupt) {
@@ -4496,9 +4496,9 @@ pub const Object = struct {
const elem_align = if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment
else
- Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1");
+ Type.fromInterned(ptr_info.child).abiAlignment(zcu).max(.@"1");
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align.toLlvm() }, &o.builder);
- } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) {
+ } else if (ccAbiPromoteInt(fn_info.cc, zcu, param_ty)) |s| switch (s) {
.signed => try attributes.addParamAttr(llvm_arg_i, .signext, &o.builder),
.unsigned => try attributes.addParamAttr(llvm_arg_i, .zeroext, &o.builder),
};
@@ -4814,14 +4814,14 @@ pub const FuncGen = struct {
fn resolveValue(self: *FuncGen, val: Value) Error!Builder.Constant {
const o = self.ng.object;
- const pt = o.pt;
- const ty = val.typeOf(pt.zcu);
+ const zcu = o.pt.zcu;
+ const ty = val.typeOf(zcu);
const llvm_val = try o.lowerValue(val.toIntern());
- if (!isByRef(ty, pt)) return llvm_val;
+ if (!isByRef(ty, zcu)) return llvm_val;
// We have an LLVM value but we need to create a global constant and
// set the value as its initializer, and then return a pointer to the global.
- const target = pt.zcu.getTarget();
+ const target = zcu.getTarget();
const variable_index = try o.builder.addVariable(
.empty,
llvm_val.typeOf(&o.builder),
@@ -4831,7 +4831,7 @@ pub const FuncGen = struct {
variable_index.setLinkage(.private, &o.builder);
variable_index.setMutability(.constant, &o.builder);
variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
- variable_index.setAlignment(ty.abiAlignment(pt).toLlvm(), &o.builder);
+ variable_index.setAlignment(ty.abiAlignment(zcu).toLlvm(), &o.builder);
return o.builder.convConst(
variable_index.toConst(&o.builder),
try o.builder.ptrType(toLlvmAddressSpace(.generic, target)),
@@ -4852,8 +4852,8 @@ pub const FuncGen = struct {
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
const o = self.ng.object;
- const mod = o.pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = o.pt.zcu;
+ const ip = &zcu.intern_pool;
const air_tags = self.air.instructions.items(.tag);
for (body, 0..) |inst, i| {
if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue;
@@ -5200,19 +5200,19 @@ pub const FuncGen = struct {
const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const callee_ty = self.typeOf(pl_op.operand);
- const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
+ const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) {
.Fn => callee_ty,
- .Pointer => callee_ty.childType(mod),
+ .Pointer => callee_ty.childType(zcu),
else => unreachable,
};
- const fn_info = mod.typeToFunc(zig_fn_ty).?;
+ const fn_info = zcu.typeToFunc(zig_fn_ty).?;
const return_type = Type.fromInterned(fn_info.return_type);
const llvm_fn = try self.resolveInst(pl_op.operand);
- const target = mod.getTarget();
- const sret = firstParamSRet(fn_info, pt, target);
+ const target = zcu.getTarget();
+ const sret = firstParamSRet(fn_info, zcu, target);
var llvm_args = std.ArrayList(Builder.Value).init(self.gpa);
defer llvm_args.deinit();
@@ -5230,13 +5230,13 @@ pub const FuncGen = struct {
const llvm_ret_ty = try o.lowerType(return_type);
try attributes.addParamAttr(0, .{ .sret = llvm_ret_ty }, &o.builder);
- const alignment = return_type.abiAlignment(pt).toLlvm();
+ const alignment = return_type.abiAlignment(zcu).toLlvm();
const ret_ptr = try self.buildAllocaWorkaround(return_type, alignment);
try llvm_args.append(ret_ptr);
break :blk ret_ptr;
};
- const err_return_tracing = return_type.isError(mod) and mod.comp.config.any_error_tracing;
+ const err_return_tracing = return_type.isError(zcu) and zcu.comp.config.any_error_tracing;
if (err_return_tracing) {
assert(self.err_ret_trace != .none);
try llvm_args.append(self.err_ret_trace);
@@ -5250,8 +5250,8 @@ pub const FuncGen = struct {
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
const llvm_param_ty = try o.lowerType(param_ty);
- if (isByRef(param_ty, pt)) {
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ if (isByRef(param_ty, zcu)) {
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
const loaded = try self.wip.load(.normal, llvm_param_ty, llvm_arg, alignment, "");
try llvm_args.append(loaded);
} else {
@@ -5262,10 +5262,10 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- if (isByRef(param_ty, pt)) {
+ if (isByRef(param_ty, zcu)) {
try llvm_args.append(llvm_arg);
} else {
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
const param_llvm_ty = llvm_arg.typeOfWip(&self.wip);
const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment);
_ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment);
@@ -5277,10 +5277,10 @@ pub const FuncGen = struct {
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
const param_llvm_ty = try o.lowerType(param_ty);
const arg_ptr = try self.buildAllocaWorkaround(param_ty, alignment);
- if (isByRef(param_ty, pt)) {
+ if (isByRef(param_ty, zcu)) {
const loaded = try self.wip.load(.normal, param_llvm_ty, llvm_arg, alignment, "");
_ = try self.wip.store(.normal, loaded, arg_ptr, alignment);
} else {
@@ -5292,16 +5292,16 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(pt) * 8));
+ const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(zcu) * 8));
- if (isByRef(param_ty, pt)) {
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ if (isByRef(param_ty, zcu)) {
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
const loaded = try self.wip.load(.normal, int_llvm_ty, llvm_arg, alignment, "");
try llvm_args.append(loaded);
} else {
// LLVM does not allow bitcasting structs so we must allocate
// a local, store as one type, and then load as another type.
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
const int_ptr = try self.buildAllocaWorkaround(param_ty, alignment);
_ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment);
const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, "");
@@ -5320,9 +5320,9 @@ pub const FuncGen = struct {
const param_ty = self.typeOf(arg);
const llvm_types = it.types_buffer[0..it.types_len];
const llvm_arg = try self.resolveInst(arg);
- const is_by_ref = isByRef(param_ty, pt);
+ const is_by_ref = isByRef(param_ty, zcu);
const arg_ptr = if (is_by_ref) llvm_arg else ptr: {
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
break :ptr ptr;
@@ -5348,14 +5348,14 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const arg_ty = self.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
- const alignment = arg_ty.abiAlignment(pt).toLlvm();
- if (!isByRef(arg_ty, pt)) {
+ const alignment = arg_ty.abiAlignment(zcu).toLlvm();
+ if (!isByRef(arg_ty, zcu)) {
const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
llvm_arg = ptr;
}
- const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?);
+ const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, zcu).?);
const array_ty = try o.builder.arrayType(count, float_ty);
const loaded = try self.wip.load(.normal, array_ty, llvm_arg, alignment, "");
@@ -5366,8 +5366,8 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const arg_ty = self.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
- const alignment = arg_ty.abiAlignment(pt).toLlvm();
- if (!isByRef(arg_ty, pt)) {
+ const alignment = arg_ty.abiAlignment(zcu).toLlvm();
+ if (!isByRef(arg_ty, zcu)) {
const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
llvm_arg = ptr;
@@ -5389,7 +5389,7 @@ pub const FuncGen = struct {
.byval => {
const param_index = it.zig_index - 1;
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
- if (!isByRef(param_ty, pt)) {
+ if (!isByRef(param_ty, zcu)) {
try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
@@ -5397,7 +5397,7 @@ pub const FuncGen = struct {
const param_index = it.zig_index - 1;
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
const param_llvm_ty = try o.lowerType(param_ty);
- const alignment = param_ty.abiAlignment(pt).toLlvm();
+ const alignment = param_ty.abiAlignment(zcu).toLlvm();
try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
},
.byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder),
@@ -5414,7 +5414,7 @@ pub const FuncGen = struct {
.slice => {
assert(!it.byval_attr);
const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
- const ptr_info = param_ty.ptrInfo(mod);
+ const ptr_info = param_ty.ptrInfo(zcu);
const llvm_arg_i = it.llvm_index - 2;
if (math.cast(u5, it.zig_index - 1)) |i| {
@@ -5422,7 +5422,7 @@ pub const FuncGen = struct {
try attributes.addParamAttr(llvm_arg_i, .@"noalias", &o.builder);
}
}
- if (param_ty.zigTypeTag(mod) != .Optional) {
+ if (param_ty.zigTypeTag(zcu) != .Optional) {
try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder);
}
if (ptr_info.flags.is_const) {
@@ -5431,7 +5431,7 @@ pub const FuncGen = struct {
const elem_align = (if (ptr_info.flags.alignment != .none)
@as(InternPool.Alignment, ptr_info.flags.alignment)
else
- Type.fromInterned(ptr_info.child).abiAlignment(pt).max(.@"1")).toLlvm();
+ Type.fromInterned(ptr_info.child).abiAlignment(zcu).max(.@"1")).toLlvm();
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
},
};
@@ -5456,17 +5456,17 @@ pub const FuncGen = struct {
return .none;
}
- if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
return .none;
}
const llvm_ret_ty = try o.lowerType(return_type);
if (ret_ptr) |rp| {
- if (isByRef(return_type, pt)) {
+ if (isByRef(return_type, zcu)) {
return rp;
} else {
// our by-ref status disagrees with sret so we must load.
- const return_alignment = return_type.abiAlignment(pt).toLlvm();
+ const return_alignment = return_type.abiAlignment(zcu).toLlvm();
return self.wip.load(.normal, llvm_ret_ty, rp, return_alignment, "");
}
}
@@ -5477,19 +5477,19 @@ pub const FuncGen = struct {
// In this case the function return type is honoring the calling convention by having
// a different LLVM type than the usual one. We solve this here at the callsite
// by using our canonical type, then loading it if necessary.
- const alignment = return_type.abiAlignment(pt).toLlvm();
+ const alignment = return_type.abiAlignment(zcu).toLlvm();
const rp = try self.buildAlloca(abi_ret_ty, alignment);
_ = try self.wip.store(.normal, call, rp, alignment);
- return if (isByRef(return_type, pt))
+ return if (isByRef(return_type, zcu))
rp
else
try self.wip.load(.normal, llvm_ret_ty, rp, alignment, "");
}
- if (isByRef(return_type, pt)) {
+ if (isByRef(return_type, zcu)) {
// our by-ref status disagrees with sret so we must allocate, store,
// and return the allocation pointer.
- const alignment = return_type.abiAlignment(pt).toLlvm();
+ const alignment = return_type.abiAlignment(zcu).toLlvm();
const rp = try self.buildAlloca(llvm_ret_ty, alignment);
_ = try self.wip.store(.normal, call, rp, alignment);
return rp;
@@ -5540,8 +5540,8 @@ pub const FuncGen = struct {
fn airRet(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ret_ty = self.typeOf(un_op);
@@ -5549,9 +5549,9 @@ pub const FuncGen = struct {
const ptr_ty = try pt.singleMutPtrType(ret_ty);
const operand = try self.resolveInst(un_op);
- const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(mod) else false;
+ const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(zcu) else false;
if (val_is_undef and safety) undef: {
- const ptr_info = ptr_ty.ptrInfo(mod);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
const needs_bitmask = (ptr_info.packed_offset.host_size != 0);
if (needs_bitmask) {
// TODO: only some bits are to be undef, we cannot write with a simple memset.
@@ -5559,13 +5559,13 @@ pub const FuncGen = struct {
// https://github.com/ziglang/zig/issues/15337
break :undef;
}
- const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(pt));
+ const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(zcu));
_ = try self.wip.callMemSet(
self.ret_ptr,
- ptr_ty.ptrAlignment(pt).toLlvm(),
+ ptr_ty.ptrAlignment(zcu).toLlvm(),
try o.builder.intValue(.i8, 0xaa),
len,
- if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal,
+ if (ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal,
);
const owner_mod = self.ng.ownerModule();
if (owner_mod.valgrind) {
@@ -5588,9 +5588,9 @@ pub const FuncGen = struct {
_ = try self.wip.retVoid();
return .none;
}
- const fn_info = mod.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?;
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
- if (Type.fromInterned(fn_info.return_type).isError(mod)) {
+ const fn_info = zcu.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?;
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (Type.fromInterned(fn_info.return_type).isError(zcu)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so that, as function pointers, they can be
// coerced to functions that return anyerror.
@@ -5603,13 +5603,13 @@ pub const FuncGen = struct {
const abi_ret_ty = try lowerFnRetTy(o, fn_info);
const operand = try self.resolveInst(un_op);
- const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(mod) else false;
- const alignment = ret_ty.abiAlignment(pt).toLlvm();
+ const val_is_undef = if (try self.air.value(un_op, pt)) |val| val.isUndefDeep(zcu) else false;
+ const alignment = ret_ty.abiAlignment(zcu).toLlvm();
if (val_is_undef and safety) {
const llvm_ret_ty = operand.typeOfWip(&self.wip);
const rp = try self.buildAlloca(llvm_ret_ty, alignment);
- const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(pt));
+ const len = try o.builder.intValue(try o.lowerType(Type.usize), ret_ty.abiSize(zcu));
_ = try self.wip.callMemSet(
rp,
alignment,
@@ -5625,7 +5625,7 @@ pub const FuncGen = struct {
return .none;
}
- if (isByRef(ret_ty, pt)) {
+ if (isByRef(ret_ty, zcu)) {
// operand is a pointer, but self.ret_ptr is null, which means
// we need to return a value.
_ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, operand, alignment, ""));
@@ -5647,14 +5647,14 @@ pub const FuncGen = struct {
fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ptr_ty = self.typeOf(un_op);
- const ret_ty = ptr_ty.childType(mod);
- const fn_info = mod.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?;
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
- if (Type.fromInterned(fn_info.return_type).isError(mod)) {
+ const ret_ty = ptr_ty.childType(zcu);
+ const fn_info = zcu.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?;
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (Type.fromInterned(fn_info.return_type).isError(zcu)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so that, as function pointers, they can be
// coerced to functions that return anyerror.
@@ -5670,7 +5670,7 @@ pub const FuncGen = struct {
}
const ptr = try self.resolveInst(un_op);
const abi_ret_ty = try lowerFnRetTy(o, fn_info);
- const alignment = ret_ty.abiAlignment(pt).toLlvm();
+ const alignment = ret_ty.abiAlignment(zcu).toLlvm();
_ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, ptr, alignment, ""));
return .none;
}
@@ -5688,16 +5688,17 @@ pub const FuncGen = struct {
fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_list = try self.resolveInst(ty_op.operand);
const va_list_ty = ty_op.ty.toType();
const llvm_va_list_ty = try o.lowerType(va_list_ty);
- const result_alignment = va_list_ty.abiAlignment(pt).toLlvm();
+ const result_alignment = va_list_ty.abiAlignment(pt.zcu).toLlvm();
const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment);
_ = try self.wip.callIntrinsic(.normal, .none, .va_copy, &.{}, &.{ dest_list, src_list }, "");
- return if (isByRef(va_list_ty, pt))
+ return if (isByRef(va_list_ty, zcu))
dest_list
else
try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, "");
@@ -5714,14 +5715,15 @@ pub const FuncGen = struct {
fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
+ const zcu = pt.zcu;
const va_list_ty = self.typeOfIndex(inst);
const llvm_va_list_ty = try o.lowerType(va_list_ty);
- const result_alignment = va_list_ty.abiAlignment(pt).toLlvm();
+ const result_alignment = va_list_ty.abiAlignment(pt.zcu).toLlvm();
const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment);
_ = try self.wip.callIntrinsic(.normal, .none, .va_start, &.{}, &.{dest_list}, "");
- return if (isByRef(va_list_ty, pt))
+ return if (isByRef(va_list_ty, zcu))
dest_list
else
try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, "");
@@ -5779,21 +5781,21 @@ pub const FuncGen = struct {
) Allocator.Error!Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const scalar_ty = operand_ty.scalarType(mod);
- const int_ty = switch (scalar_ty.zigTypeTag(mod)) {
- .Enum => scalar_ty.intTagType(mod),
+ const zcu = pt.zcu;
+ const scalar_ty = operand_ty.scalarType(zcu);
+ const int_ty = switch (scalar_ty.zigTypeTag(zcu)) {
+ .Enum => scalar_ty.intTagType(zcu),
.Int, .Bool, .Pointer, .ErrorSet => scalar_ty,
.Optional => blk: {
- const payload_ty = operand_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt) or
- operand_ty.optionalReprIsPayload(mod))
+ const payload_ty = operand_ty.optionalChild(zcu);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu) or
+ operand_ty.optionalReprIsPayload(zcu))
{
break :blk operand_ty;
}
// We need to emit instructions to check for equality/inequality
// of optionals that are not pointers.
- const is_by_ref = isByRef(scalar_ty, pt);
+ const is_by_ref = isByRef(scalar_ty, zcu);
const opt_llvm_ty = try o.lowerType(scalar_ty);
const lhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, lhs, is_by_ref);
const rhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, rhs, is_by_ref);
@@ -5860,7 +5862,7 @@ pub const FuncGen = struct {
.Float => return self.buildFloatCmp(fast, op, operand_ty, .{ lhs, rhs }),
else => unreachable,
};
- const is_signed = int_ty.isSignedInt(mod);
+ const is_signed = int_ty.isSignedInt(zcu);
const cond: Builder.IntegerCondition = switch (op) {
.eq => .eq,
.neq => .ne,
@@ -5886,15 +5888,15 @@ pub const FuncGen = struct {
) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const inst_ty = self.typeOfIndex(inst);
- if (inst_ty.isNoReturn(mod)) {
+ if (inst_ty.isNoReturn(zcu)) {
try self.genBodyDebugScope(maybe_inline_func, body);
return .none;
}
- const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt);
+ const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu);
var breaks: BreakList = if (have_block_result) .{ .list = .{} } else .{ .len = 0 };
defer if (have_block_result) breaks.list.deinit(self.gpa);
@@ -5918,7 +5920,7 @@ pub const FuncGen = struct {
// a pointer to it. LLVM IR allows the call instruction to use function bodies instead
// of function pointers, however the phi makes it a runtime value and therefore
// the LLVM type has to be wrapped in a pointer.
- if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, pt)) {
+ if (inst_ty.zigTypeTag(zcu) == .Fn or isByRef(inst_ty, zcu)) {
break :ty .ptr;
}
break :ty raw_llvm_ty;
@@ -5936,13 +5938,13 @@ pub const FuncGen = struct {
fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const pt = o.pt;
+ const zcu = o.pt.zcu;
const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
const block = self.blocks.get(branch.block_inst).?;
// Add the values to the lists only if the break provides a value.
const operand_ty = self.typeOf(branch.operand);
- if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+ if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
const val = try self.resolveInst(branch.operand);
// For the phi node, we need the basic blocks and the values of the
@@ -5977,6 +5979,7 @@ pub const FuncGen = struct {
fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
+ const zcu = pt.zcu;
const inst = body_tail[0];
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const err_union = try self.resolveInst(pl_op.operand);
@@ -5984,19 +5987,19 @@ pub const FuncGen = struct {
const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
const err_union_ty = self.typeOf(pl_op.operand);
const payload_ty = self.typeOfIndex(inst);
- const can_elide_load = if (isByRef(payload_ty, pt)) self.canElideLoad(body_tail) else false;
+ const can_elide_load = if (isByRef(payload_ty, zcu)) self.canElideLoad(body_tail) else false;
const is_unused = self.liveness.isUnused(inst);
return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused);
}
fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try self.resolveInst(extra.data.ptr);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
- const err_union_ty = self.typeOf(extra.data.ptr).childType(mod);
+ const err_union_ty = self.typeOf(extra.data.ptr).childType(zcu);
const is_unused = self.liveness.isUnused(inst);
return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused);
}
@@ -6012,13 +6015,13 @@ pub const FuncGen = struct {
) !Builder.Value {
const o = fg.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const payload_ty = err_union_ty.errorUnionPayload(mod);
- const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt);
+ const zcu = pt.zcu;
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
+ const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu);
const err_union_llvm_ty = try o.lowerType(err_union_ty);
const error_type = try o.errorIntType();
- if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+ if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
const loaded = loaded: {
if (!payload_has_bits) {
// TODO add alignment to this load
@@ -6028,7 +6031,7 @@ pub const FuncGen = struct {
err_union;
}
const err_field_index = try errUnionErrorOffset(payload_ty, pt);
- if (operand_is_ptr or isByRef(err_union_ty, pt)) {
+ if (operand_is_ptr or isByRef(err_union_ty, zcu)) {
const err_field_ptr =
try fg.wip.gepStruct(err_union_llvm_ty, err_union, err_field_index, "");
// TODO add alignment to this load
@@ -6059,10 +6062,10 @@ pub const FuncGen = struct {
const offset = try errUnionPayloadOffset(payload_ty, pt);
if (operand_is_ptr) {
return fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
- } else if (isByRef(err_union_ty, pt)) {
+ } else if (isByRef(err_union_ty, zcu)) {
const payload_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
- const payload_alignment = payload_ty.abiAlignment(pt).toLlvm();
- if (isByRef(payload_ty, pt)) {
+ const payload_alignment = payload_ty.abiAlignment(zcu).toLlvm();
+ if (isByRef(payload_ty, zcu)) {
if (can_elide_load)
return payload_ptr;
@@ -6140,7 +6143,7 @@ pub const FuncGen = struct {
fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]);
@@ -6156,7 +6159,7 @@ pub const FuncGen = struct {
// would have been emitted already. Also the main loop in genBody can
// be while(true) instead of for(body), which will eliminate 1 branch on
// a hot path.
- if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(mod)) {
+ if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(zcu)) {
_ = try self.wip.br(loop_block);
}
return .none;
@@ -6165,15 +6168,15 @@ pub const FuncGen = struct {
fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
- const array_ty = operand_ty.childType(mod);
+ const array_ty = operand_ty.childType(zcu);
const llvm_usize = try o.lowerType(Type.usize);
- const len = try o.builder.intValue(llvm_usize, array_ty.arrayLen(mod));
+ const len = try o.builder.intValue(llvm_usize, array_ty.arrayLen(zcu));
const slice_llvm_ty = try o.lowerType(self.typeOfIndex(inst));
const operand = try self.resolveInst(ty_op.operand);
- if (!array_ty.hasRuntimeBitsIgnoreComptime(pt))
+ if (!array_ty.hasRuntimeBitsIgnoreComptime(zcu))
return self.wip.buildAggregate(slice_llvm_ty, &.{ operand, len }, "");
const ptr = try self.wip.gep(.inbounds, try o.lowerType(array_ty), operand, &.{
try o.builder.intValue(llvm_usize, 0), try o.builder.intValue(llvm_usize, 0),
@@ -6184,17 +6187,17 @@ pub const FuncGen = struct {
fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const workaround_operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
- const operand_scalar_ty = operand_ty.scalarType(mod);
- const is_signed_int = operand_scalar_ty.isSignedInt(mod);
+ const operand_scalar_ty = operand_ty.scalarType(zcu);
+ const is_signed_int = operand_scalar_ty.isSignedInt(zcu);
const operand = o: {
// Work around LLVM bug. See https://github.com/ziglang/zig/issues/17381.
- const bit_size = operand_scalar_ty.bitSize(pt);
+ const bit_size = operand_scalar_ty.bitSize(zcu);
for ([_]u8{ 8, 16, 32, 64, 128 }) |b| {
if (bit_size < b) {
break :o try self.wip.cast(
@@ -6211,9 +6214,9 @@ pub const FuncGen = struct {
};
const dest_ty = self.typeOfIndex(inst);
- const dest_scalar_ty = dest_ty.scalarType(mod);
+ const dest_scalar_ty = dest_ty.scalarType(zcu);
const dest_llvm_ty = try o.lowerType(dest_ty);
- const target = mod.getTarget();
+ const target = zcu.getTarget();
if (intrinsicsAllowed(dest_scalar_ty, target)) return self.wip.conv(
if (is_signed_int) .signed else .unsigned,
@@ -6222,7 +6225,7 @@ pub const FuncGen = struct {
"",
);
- const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(pt)));
+ const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(zcu)));
const rt_int_ty = try o.builder.intType(rt_int_bits);
var extended = try self.wip.conv(
if (is_signed_int) .signed else .unsigned,
@@ -6269,29 +6272,29 @@ pub const FuncGen = struct {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const target = mod.getTarget();
+ const zcu = pt.zcu;
+ const target = zcu.getTarget();
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
- const operand_scalar_ty = operand_ty.scalarType(mod);
+ const operand_scalar_ty = operand_ty.scalarType(zcu);
const dest_ty = self.typeOfIndex(inst);
- const dest_scalar_ty = dest_ty.scalarType(mod);
+ const dest_scalar_ty = dest_ty.scalarType(zcu);
const dest_llvm_ty = try o.lowerType(dest_ty);
if (intrinsicsAllowed(operand_scalar_ty, target)) {
// TODO set fast math flag
return self.wip.conv(
- if (dest_scalar_ty.isSignedInt(mod)) .signed else .unsigned,
+ if (dest_scalar_ty.isSignedInt(zcu)) .signed else .unsigned,
operand,
dest_llvm_ty,
"",
);
}
- const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(pt)));
+ const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(zcu)));
const ret_ty = try o.builder.intType(rt_int_bits);
const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: {
// On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
@@ -6303,7 +6306,7 @@ pub const FuncGen = struct {
const compiler_rt_operand_abbrev = compilerRtFloatAbbrev(operand_bits);
const compiler_rt_dest_abbrev = compilerRtIntAbbrev(rt_int_bits);
- const sign_prefix = if (dest_scalar_ty.isSignedInt(mod)) "" else "uns";
+ const sign_prefix = if (dest_scalar_ty.isSignedInt(zcu)) "" else "uns";
const fn_name = try o.builder.strtabStringFmt("__fix{s}{s}f{s}i", .{
sign_prefix,
@@ -6330,29 +6333,29 @@ pub const FuncGen = struct {
fn sliceOrArrayPtr(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value {
const o = fg.ng.object;
- const mod = o.pt.zcu;
- return if (ty.isSlice(mod)) fg.wip.extractValue(ptr, &.{0}, "") else ptr;
+ const zcu = o.pt.zcu;
+ return if (ty.isSlice(zcu)) fg.wip.extractValue(ptr, &.{0}, "") else ptr;
}
fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value {
const o = fg.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const llvm_usize = try o.lowerType(Type.usize);
- switch (ty.ptrSize(mod)) {
+ switch (ty.ptrSize(zcu)) {
.Slice => {
const len = try fg.wip.extractValue(ptr, &.{1}, "");
- const elem_ty = ty.childType(mod);
- const abi_size = elem_ty.abiSize(pt);
+ const elem_ty = ty.childType(zcu);
+ const abi_size = elem_ty.abiSize(zcu);
if (abi_size == 1) return len;
const abi_size_llvm_val = try o.builder.intValue(llvm_usize, abi_size);
return fg.wip.bin(.@"mul nuw", len, abi_size_llvm_val, "");
},
.One => {
- const array_ty = ty.childType(mod);
- const elem_ty = array_ty.childType(mod);
- const abi_size = elem_ty.abiSize(pt);
- return o.builder.intValue(llvm_usize, array_ty.arrayLen(mod) * abi_size);
+ const array_ty = ty.childType(zcu);
+ const elem_ty = array_ty.childType(zcu);
+ const abi_size = elem_ty.abiSize(zcu);
+ return o.builder.intValue(llvm_usize, array_ty.arrayLen(zcu) * abi_size);
},
.Many, .C => unreachable,
}
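For reference, a minimal Zig sketch (illustration only, with made-up types and lengths) of the two pointer shapes this helper measures:

    const std = @import("std");

    pub fn main() void {
        const buf = [_]u32{0} ** 12;
        const s: []const u32 = buf[0..10]; // .Slice: len * abi size of u32 = 10 * 4 = 40 bytes
        const a = &buf;                    // .One (pointer to an array): 12 * 4 = 48 bytes
        std.debug.print("{} {}\n", .{ s.len * @sizeOf(u32), @sizeOf(@TypeOf(a.*)) });
    }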
@@ -6366,11 +6369,11 @@ pub const FuncGen = struct {
fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const slice_ptr = try self.resolveInst(ty_op.operand);
const slice_ptr_ty = self.typeOf(ty_op.operand);
- const slice_llvm_ty = try o.lowerPtrElemTy(slice_ptr_ty.childType(mod));
+ const slice_llvm_ty = try o.lowerPtrElemTy(slice_ptr_ty.childType(zcu));
return self.wip.gepStruct(slice_llvm_ty, slice_ptr, index, "");
}
@@ -6378,21 +6381,21 @@ pub const FuncGen = struct {
fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const slice_ty = self.typeOf(bin_op.lhs);
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
- const elem_ty = slice_ty.childType(mod);
+ const elem_ty = slice_ty.childType(zcu);
const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty);
const base_ptr = try self.wip.extractValue(slice, &.{0}, "");
const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, "");
- if (isByRef(elem_ty, pt)) {
+ if (isByRef(elem_ty, zcu)) {
if (self.canElideLoad(body_tail))
return ptr;
- const elem_alignment = elem_ty.abiAlignment(pt).toLlvm();
+ const elem_alignment = elem_ty.abiAlignment(zcu).toLlvm();
return self.loadByRef(ptr, elem_ty, elem_alignment, .normal);
}
@@ -6401,14 +6404,14 @@ pub const FuncGen = struct {
fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const slice_ty = self.typeOf(bin_op.lhs);
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
- const llvm_elem_ty = try o.lowerPtrElemTy(slice_ty.childType(mod));
+ const llvm_elem_ty = try o.lowerPtrElemTy(slice_ty.childType(zcu));
const base_ptr = try self.wip.extractValue(slice, &.{0}, "");
return self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, "");
}
@@ -6416,7 +6419,7 @@ pub const FuncGen = struct {
fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
@@ -6424,16 +6427,16 @@ pub const FuncGen = struct {
const array_llvm_val = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const array_llvm_ty = try o.lowerType(array_ty);
- const elem_ty = array_ty.childType(mod);
- if (isByRef(array_ty, pt)) {
+ const elem_ty = array_ty.childType(zcu);
+ if (isByRef(array_ty, zcu)) {
const indices: [2]Builder.Value = .{
try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs,
};
- if (isByRef(elem_ty, pt)) {
+ if (isByRef(elem_ty, zcu)) {
const elem_ptr =
try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
if (canElideLoad(self, body_tail)) return elem_ptr;
- const elem_alignment = elem_ty.abiAlignment(pt).toLlvm();
+ const elem_alignment = elem_ty.abiAlignment(zcu).toLlvm();
return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal);
} else {
const elem_ptr =
@@ -6449,23 +6452,23 @@ pub const FuncGen = struct {
fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType(mod);
+ const elem_ty = ptr_ty.childType(zcu);
const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty);
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
// TODO: when we move fully to opaque pointers in LLVM 16 we can remove this branch
- const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(mod))
+ const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(zcu))
// If this is a single-item pointer to an array, we need another index in the GEP.
&.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs }
else
&.{rhs}, "");
- if (isByRef(elem_ty, pt)) {
+ if (isByRef(elem_ty, zcu)) {
if (self.canElideLoad(body_tail)) return ptr;
- const elem_alignment = elem_ty.abiAlignment(pt).toLlvm();
+ const elem_alignment = elem_ty.abiAlignment(zcu).toLlvm();
return self.loadByRef(ptr, elem_ty, elem_alignment, .normal);
}
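The extra GEP index mentioned above reflects the difference between these two Zig pointer shapes; a small sketch with made-up values:

    const std = @import("std");

    pub fn main() void {
        const arr = [_]u8{ 1, 2, 3, 4 };
        const single: *const [4]u8 = &arr; // single-item pointer to an array: GEP indices { 0, i }
        const many: [*]const u8 = &arr;    // many-item pointer: GEP index { i } only
        std.debug.print("{} {}\n", .{ single[2], many[2] });
    }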
@@ -6475,21 +6478,21 @@ pub const FuncGen = struct {
fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType(mod);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return self.resolveInst(bin_op.lhs);
+ const elem_ty = ptr_ty.childType(zcu);
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return self.resolveInst(bin_op.lhs);
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const elem_ptr = ty_pl.ty.toType();
- if (elem_ptr.ptrInfo(mod).flags.vector_index != .none) return base_ptr;
+ if (elem_ptr.ptrInfo(zcu).flags.vector_index != .none) return base_ptr;
const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty);
- return self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(mod))
+ return self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(zcu))
// If this is a single-item pointer to an array, we need another index in the GEP.
&.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs }
else
@@ -6518,35 +6521,35 @@ pub const FuncGen = struct {
fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const inst = body_tail[0];
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ty = self.typeOf(struct_field.struct_operand);
const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
- const field_ty = struct_ty.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
+ const field_ty = struct_ty.structFieldType(field_index, zcu);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;
- if (!isByRef(struct_ty, pt)) {
- assert(!isByRef(field_ty, pt));
- switch (struct_ty.zigTypeTag(mod)) {
- .Struct => switch (struct_ty.containerLayout(mod)) {
+ if (!isByRef(struct_ty, zcu)) {
+ assert(!isByRef(field_ty, zcu));
+ switch (struct_ty.zigTypeTag(zcu)) {
+ .Struct => switch (struct_ty.containerLayout(zcu)) {
.@"packed" => {
- const struct_type = mod.typeToStruct(struct_ty).?;
+ const struct_type = zcu.typeToStruct(struct_ty).?;
const bit_offset = pt.structPackedFieldBitOffset(struct_type, field_index);
const containing_int = struct_llvm_val;
const shift_amt =
try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset);
const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, "");
const elem_llvm_ty = try o.lowerType(field_ty);
- if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
- const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
+ if (field_ty.zigTypeTag(zcu) == .Float or field_ty.zigTypeTag(zcu) == .Vector) {
+ const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
const truncated_int =
try self.wip.cast(.trunc, shifted_value, same_size_int, "");
return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
- } else if (field_ty.isPtrAtRuntime(mod)) {
- const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
+ } else if (field_ty.isPtrAtRuntime(zcu)) {
+ const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
const truncated_int =
try self.wip.cast(.trunc, shifted_value, same_size_int, "");
return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
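A sketch of what the shift/truncate/bitcast sequence above computes, written as ordinary Zig against a hypothetical packed struct (field c sits at bit offset 8 of the u40 backing integer):

    const std = @import("std");

    const P = packed struct { a: u3, b: u5, c: f32 }; // backing integer: u40

    pub fn main() void {
        const p: P = .{ .a = 5, .b = 9, .c = 1.5 };
        const bits: u40 = @bitCast(p);
        // lshr by the field's bit offset, trunc to the field's bit width, then bitcast,
        // mirroring the packed-field lowering above.
        const c: f32 = @bitCast(@as(u32, @truncate(bits >> 8)));
        std.debug.print("{d}\n", .{c}); // 1.5
    }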
@@ -6559,16 +6562,16 @@ pub const FuncGen = struct {
},
},
.Union => {
- assert(struct_ty.containerLayout(mod) == .@"packed");
+ assert(struct_ty.containerLayout(zcu) == .@"packed");
const containing_int = struct_llvm_val;
const elem_llvm_ty = try o.lowerType(field_ty);
- if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
- const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
+ if (field_ty.zigTypeTag(zcu) == .Float or field_ty.zigTypeTag(zcu) == .Vector) {
+ const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
const truncated_int =
try self.wip.cast(.trunc, containing_int, same_size_int, "");
return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
- } else if (field_ty.isPtrAtRuntime(mod)) {
- const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
+ } else if (field_ty.isPtrAtRuntime(zcu)) {
+ const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
const truncated_int =
try self.wip.cast(.trunc, containing_int, same_size_int, "");
return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
@@ -6579,20 +6582,20 @@ pub const FuncGen = struct {
}
}
- switch (struct_ty.zigTypeTag(mod)) {
+ switch (struct_ty.zigTypeTag(zcu)) {
.Struct => {
- const layout = struct_ty.containerLayout(mod);
+ const layout = struct_ty.containerLayout(zcu);
assert(layout != .@"packed");
const struct_llvm_ty = try o.lowerType(struct_ty);
const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?;
const field_ptr =
try self.wip.gepStruct(struct_llvm_ty, struct_llvm_val, llvm_field_index, "");
- const alignment = struct_ty.structFieldAlign(field_index, pt);
+ const alignment = struct_ty.structFieldAlign(field_index, zcu);
const field_ptr_ty = try pt.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{ .alignment = alignment },
});
- if (isByRef(field_ty, pt)) {
+ if (isByRef(field_ty, zcu)) {
if (canElideLoad(self, body_tail))
return field_ptr;
@@ -6605,12 +6608,12 @@ pub const FuncGen = struct {
},
.Union => {
const union_llvm_ty = try o.lowerType(struct_ty);
- const layout = struct_ty.unionGetLayout(pt);
+ const layout = struct_ty.unionGetLayout(zcu);
const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
const field_ptr =
try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, "");
const payload_alignment = layout.payload_align.toLlvm();
- if (isByRef(field_ty, pt)) {
+ if (isByRef(field_ty, zcu)) {
if (canElideLoad(self, body_tail)) return field_ptr;
return self.loadByRef(field_ptr, field_ty, payload_alignment, .normal);
} else {
@@ -6624,14 +6627,14 @@ pub const FuncGen = struct {
fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const field_ptr = try self.resolveInst(extra.field_ptr);
- const parent_ty = ty_pl.ty.toType().childType(mod);
- const field_offset = parent_ty.structFieldOffset(extra.field_index, pt);
+ const parent_ty = ty_pl.ty.toType().childType(zcu);
+ const field_offset = parent_ty.structFieldOffset(extra.field_index, zcu);
if (field_offset == 0) return field_ptr;
const res_ty = try o.lowerType(ty_pl.ty.toType());
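This instruction recovers a parent pointer by subtracting the field's byte offset from a field pointer (returning the field pointer unchanged when the offset is 0). A minimal Zig sketch with a made-up struct, assuming the current two-argument @fieldParentPtr form:

    const std = @import("std");

    const Node = struct { prev: ?*Node, data: u32 };

    pub fn main() void {
        var n: Node = .{ .prev = null, .data = 7 };
        const parent: *Node = @fieldParentPtr("data", &n.data);
        std.debug.print("{}\n", .{parent.data});
    }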
@@ -6686,7 +6689,7 @@ pub const FuncGen = struct {
fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const operand = try self.resolveInst(pl_op.operand);
const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);
@@ -6697,7 +6700,7 @@ pub const FuncGen = struct {
self.file,
self.scope,
self.prev_dbg_line,
- try o.lowerDebugType(ptr_ty.childType(mod)),
+ try o.lowerDebugType(ptr_ty.childType(zcu)),
);
_ = try self.wip.callIntrinsic(
@@ -6741,9 +6744,9 @@ pub const FuncGen = struct {
try o.lowerDebugType(operand_ty),
);
- const pt = o.pt;
+ const zcu = o.pt.zcu;
const owner_mod = self.ng.ownerModule();
- if (isByRef(operand_ty, pt)) {
+ if (isByRef(operand_ty, zcu)) {
_ = try self.wip.callIntrinsic(
.normal,
.none,
@@ -6760,7 +6763,7 @@ pub const FuncGen = struct {
// We avoid taking this path for naked functions because there's no guarantee that such
// functions even have a valid stack pointer, making the `alloca` + `store` unsafe.
- const alignment = operand_ty.abiAlignment(pt).toLlvm();
+ const alignment = operand_ty.abiAlignment(zcu).toLlvm();
const alloca = try self.buildAlloca(operand.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, operand, alloca, alignment);
_ = try self.wip.callIntrinsic(
@@ -6832,8 +6835,8 @@ pub const FuncGen = struct {
// if so, the element type itself.
const llvm_param_attrs = try arena.alloc(Builder.Type, max_param_count);
const pt = o.pt;
- const mod = pt.zcu;
- const target = mod.getTarget();
+ const zcu = pt.zcu;
+ const target = zcu.getTarget();
var llvm_ret_i: usize = 0;
var llvm_param_i: usize = 0;
@@ -6860,8 +6863,8 @@ pub const FuncGen = struct {
if (output != .none) {
const output_inst = try self.resolveInst(output);
const output_ty = self.typeOf(output);
- assert(output_ty.zigTypeTag(mod) == .Pointer);
- const elem_llvm_ty = try o.lowerPtrElemTy(output_ty.childType(mod));
+ assert(output_ty.zigTypeTag(zcu) == .Pointer);
+ const elem_llvm_ty = try o.lowerPtrElemTy(output_ty.childType(zcu));
switch (constraint[0]) {
'=' => {},
@@ -6932,13 +6935,13 @@ pub const FuncGen = struct {
const arg_llvm_value = try self.resolveInst(input);
const arg_ty = self.typeOf(input);
- const is_by_ref = isByRef(arg_ty, pt);
+ const is_by_ref = isByRef(arg_ty, zcu);
if (is_by_ref) {
if (constraintAllowsMemory(constraint)) {
llvm_param_values[llvm_param_i] = arg_llvm_value;
llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip);
} else {
- const alignment = arg_ty.abiAlignment(pt).toLlvm();
+ const alignment = arg_ty.abiAlignment(zcu).toLlvm();
const arg_llvm_ty = try o.lowerType(arg_ty);
const load_inst =
try self.wip.load(.normal, arg_llvm_ty, arg_llvm_value, alignment, "");
@@ -6950,7 +6953,7 @@ pub const FuncGen = struct {
llvm_param_values[llvm_param_i] = arg_llvm_value;
llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip);
} else {
- const alignment = arg_ty.abiAlignment(pt).toLlvm();
+ const alignment = arg_ty.abiAlignment(zcu).toLlvm();
const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, arg_llvm_value, arg_ptr, alignment);
llvm_param_values[llvm_param_i] = arg_ptr;
@@ -6978,7 +6981,7 @@ pub const FuncGen = struct {
// In the case of indirect inputs, LLVM requires the callsite to have
// an elementtype(<ty>) attribute.
llvm_param_attrs[llvm_param_i] = if (constraint[0] == '*')
- try o.lowerPtrElemTy(if (is_by_ref) arg_ty else arg_ty.childType(mod))
+ try o.lowerPtrElemTy(if (is_by_ref) arg_ty else arg_ty.childType(zcu))
else
.none;
@@ -6997,12 +7000,12 @@ pub const FuncGen = struct {
if (constraint[0] != '+') continue;
const rw_ty = self.typeOf(output);
- const llvm_elem_ty = try o.lowerPtrElemTy(rw_ty.childType(mod));
+ const llvm_elem_ty = try o.lowerPtrElemTy(rw_ty.childType(zcu));
if (is_indirect) {
llvm_param_values[llvm_param_i] = llvm_rw_val;
llvm_param_types[llvm_param_i] = llvm_rw_val.typeOfWip(&self.wip);
} else {
- const alignment = rw_ty.abiAlignment(pt).toLlvm();
+ const alignment = rw_ty.abiAlignment(zcu).toLlvm();
const loaded = try self.wip.load(.normal, llvm_elem_ty, llvm_rw_val, alignment, "");
llvm_param_values[llvm_param_i] = loaded;
llvm_param_types[llvm_param_i] = llvm_elem_ty;
@@ -7163,7 +7166,7 @@ pub const FuncGen = struct {
const output_ptr = try self.resolveInst(output);
const output_ptr_ty = self.typeOf(output);
- const alignment = output_ptr_ty.ptrAlignment(pt).toLlvm();
+ const alignment = output_ptr_ty.ptrAlignment(zcu).toLlvm();
_ = try self.wip.store(.normal, output_value, output_ptr, alignment);
} else {
ret_val = output_value;
@@ -7182,23 +7185,23 @@ pub const FuncGen = struct {
) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.typeOf(un_op);
- const optional_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
+ const optional_ty = if (operand_is_ptr) operand_ty.childType(zcu) else operand_ty;
const optional_llvm_ty = try o.lowerType(optional_ty);
- const payload_ty = optional_ty.optionalChild(mod);
- if (optional_ty.optionalReprIsPayload(mod)) {
+ const payload_ty = optional_ty.optionalChild(zcu);
+ if (optional_ty.optionalReprIsPayload(zcu)) {
const loaded = if (operand_is_ptr)
try self.wip.load(.normal, optional_llvm_ty, operand, .default, "")
else
operand;
- if (payload_ty.isSlice(mod)) {
+ if (payload_ty.isSlice(zcu)) {
const slice_ptr = try self.wip.extractValue(loaded, &.{0}, "");
const ptr_ty = try o.builder.ptrType(toLlvmAddressSpace(
- payload_ty.ptrAddressSpace(mod),
- mod.getTarget(),
+ payload_ty.ptrAddressSpace(zcu),
+ zcu.getTarget(),
));
return self.wip.icmp(cond, slice_ptr, try o.builder.nullValue(ptr_ty), "");
}
@@ -7207,7 +7210,7 @@ pub const FuncGen = struct {
comptime assert(optional_layout_version == 3);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const loaded = if (operand_is_ptr)
try self.wip.load(.normal, optional_llvm_ty, operand, .default, "")
else
@@ -7215,7 +7218,7 @@ pub const FuncGen = struct {
return self.wip.icmp(cond, loaded, try o.builder.intValue(.i8, 0), "");
}
- const is_by_ref = operand_is_ptr or isByRef(optional_ty, pt);
+ const is_by_ref = operand_is_ptr or isByRef(optional_ty, zcu);
return self.optCmpNull(cond, optional_llvm_ty, operand, is_by_ref);
}
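The branches above lean on the optional layout rules: a zero-bit payload leaves only the i8 non-null flag, and pointer-like payloads reuse the payload representation itself (null means none). A tiny sketch:

    const std = @import("std");

    pub fn main() void {
        std.debug.print("{}\n", .{@sizeOf(?void)});                // 1: just the i8 flag
        std.debug.print("{}\n", .{@sizeOf(?*u8) == @sizeOf(*u8)}); // true: repr is the payload
    }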
@@ -7227,16 +7230,16 @@ pub const FuncGen = struct {
) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.typeOf(un_op);
- const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
- const payload_ty = err_union_ty.errorUnionPayload(mod);
+ const err_union_ty = if (operand_is_ptr) operand_ty.childType(zcu) else operand_ty;
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
const error_type = try o.errorIntType();
const zero = try o.builder.intValue(error_type, 0);
- if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+ if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
const val: Builder.Constant = switch (cond) {
.eq => .true, // 0 == 0
.ne => .false, // 0 != 0
@@ -7245,7 +7248,7 @@ pub const FuncGen = struct {
return val.toValue();
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const loaded = if (operand_is_ptr)
try self.wip.load(.normal, try o.lowerType(err_union_ty), operand, .default, "")
else
@@ -7255,7 +7258,7 @@ pub const FuncGen = struct {
const err_field_index = try errUnionErrorOffset(payload_ty, pt);
- const loaded = if (operand_is_ptr or isByRef(err_union_ty, pt)) loaded: {
+ const loaded = if (operand_is_ptr or isByRef(err_union_ty, zcu)) loaded: {
const err_union_llvm_ty = try o.lowerType(err_union_ty);
const err_field_ptr =
try self.wip.gepStruct(err_union_llvm_ty, operand, err_field_index, "");
@@ -7267,17 +7270,17 @@ pub const FuncGen = struct {
fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const optional_ty = self.typeOf(ty_op.operand).childType(mod);
- const payload_ty = optional_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const optional_ty = self.typeOf(ty_op.operand).childType(zcu);
+ const payload_ty = optional_ty.optionalChild(zcu);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// We have a pointer to a zero-bit value and we need to return
// a pointer to a zero-bit value.
return operand;
}
- if (optional_ty.optionalReprIsPayload(mod)) {
+ if (optional_ty.optionalReprIsPayload(zcu)) {
// The payload and the optional are the same value.
return operand;
}
@@ -7289,18 +7292,18 @@ pub const FuncGen = struct {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const optional_ty = self.typeOf(ty_op.operand).childType(mod);
- const payload_ty = optional_ty.optionalChild(mod);
+ const optional_ty = self.typeOf(ty_op.operand).childType(zcu);
+ const payload_ty = optional_ty.optionalChild(zcu);
const non_null_bit = try o.builder.intValue(.i8, 1);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// We have a pointer to an i8. We need to set it to 1 and then return the same pointer.
_ = try self.wip.store(.normal, non_null_bit, operand, .default);
return operand;
}
- if (optional_ty.optionalReprIsPayload(mod)) {
+ if (optional_ty.optionalReprIsPayload(zcu)) {
// The payload and the optional are the same value.
// Setting to non-null will be done when the payload is set.
return operand;
@@ -7321,21 +7324,21 @@ pub const FuncGen = struct {
fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.typeOf(ty_op.operand);
const payload_ty = self.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;
- if (optional_ty.optionalReprIsPayload(mod)) {
+ if (optional_ty.optionalReprIsPayload(zcu)) {
// Payload value is the same as the optional value.
return operand;
}
const opt_llvm_ty = try o.lowerType(optional_ty);
- const can_elide_load = if (isByRef(payload_ty, pt)) self.canElideLoad(body_tail) else false;
+ const can_elide_load = if (isByRef(payload_ty, zcu)) self.canElideLoad(body_tail) else false;
return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load);
}
@@ -7346,26 +7349,26 @@ pub const FuncGen = struct {
) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
- const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
+ const err_union_ty = if (operand_is_ptr) operand_ty.childType(zcu) else operand_ty;
const result_ty = self.typeOfIndex(inst);
- const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty;
+ const payload_ty = if (operand_is_ptr) result_ty.childType(zcu) else result_ty;
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return if (operand_is_ptr) operand else .none;
}
const offset = try errUnionPayloadOffset(payload_ty, pt);
const err_union_llvm_ty = try o.lowerType(err_union_ty);
if (operand_is_ptr) {
return self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
- } else if (isByRef(err_union_ty, pt)) {
- const payload_alignment = payload_ty.abiAlignment(pt).toLlvm();
+ } else if (isByRef(err_union_ty, zcu)) {
+ const payload_alignment = payload_ty.abiAlignment(zcu).toLlvm();
const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
- if (isByRef(payload_ty, pt)) {
+ if (isByRef(payload_ty, zcu)) {
if (self.canElideLoad(body_tail)) return payload_ptr;
return self.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal);
}
@@ -7382,13 +7385,13 @@ pub const FuncGen = struct {
) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
const error_type = try o.errorIntType();
- const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
- if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+ const err_union_ty = if (operand_is_ptr) operand_ty.childType(zcu) else operand_ty;
+ if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
if (operand_is_ptr) {
return operand;
} else {
@@ -7396,15 +7399,15 @@ pub const FuncGen = struct {
}
}
- const payload_ty = err_union_ty.errorUnionPayload(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
if (!operand_is_ptr) return operand;
return self.wip.load(.normal, error_type, operand, .default, "");
}
const offset = try errUnionErrorOffset(payload_ty, pt);
- if (operand_is_ptr or isByRef(err_union_ty, pt)) {
+ if (operand_is_ptr or isByRef(err_union_ty, zcu)) {
const err_union_llvm_ty = try o.lowerType(err_union_ty);
const err_field_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
return self.wip.load(.normal, error_type, err_field_ptr, .default, "");
@@ -7416,21 +7419,21 @@ pub const FuncGen = struct {
fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const err_union_ty = self.typeOf(ty_op.operand).childType(mod);
+ const err_union_ty = self.typeOf(ty_op.operand).childType(zcu);
- const payload_ty = err_union_ty.errorUnionPayload(mod);
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
const non_error_val = try o.builder.intValue(try o.errorIntType(), 0);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
_ = try self.wip.store(.normal, non_error_val, operand, .default);
return operand;
}
const err_union_llvm_ty = try o.lowerType(err_union_ty);
{
const err_int_ty = try pt.errorIntType();
- const error_alignment = err_int_ty.abiAlignment(pt).toLlvm();
+ const error_alignment = err_int_ty.abiAlignment(zcu).toLlvm();
const error_offset = try errUnionErrorOffset(payload_ty, pt);
// First set the non-error value.
const non_null_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, error_offset, "");
@@ -7457,7 +7460,7 @@ pub const FuncGen = struct {
fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const struct_ty = ty_pl.ty.toType();
@@ -7468,8 +7471,8 @@ pub const FuncGen = struct {
assert(self.err_ret_trace != .none);
const field_ptr =
try self.wip.gepStruct(struct_llvm_ty, self.err_ret_trace, llvm_field_index, "");
- const field_alignment = struct_ty.structFieldAlign(field_index, pt);
- const field_ty = struct_ty.structFieldType(field_index, mod);
+ const field_alignment = struct_ty.structFieldAlign(field_index, zcu);
+ const field_ty = struct_ty.structFieldType(field_index, zcu);
const field_ptr_ty = try pt.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{ .alignment = field_alignment },
@@ -7503,23 +7506,23 @@ pub const FuncGen = struct {
fn airWrapOptional(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const payload_ty = self.typeOf(ty_op.operand);
const non_null_bit = try o.builder.intValue(.i8, 1);
comptime assert(optional_layout_version == 3);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return non_null_bit;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return non_null_bit;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.typeOfIndex(inst);
- if (optional_ty.optionalReprIsPayload(mod)) return operand;
+ if (optional_ty.optionalReprIsPayload(zcu)) return operand;
const llvm_optional_ty = try o.lowerType(optional_ty);
- if (isByRef(optional_ty, pt)) {
+ if (isByRef(optional_ty, zcu)) {
const directReturn = self.isNextRet(body_tail);
const optional_ptr = if (directReturn)
self.ret_ptr
else brk: {
- const alignment = optional_ty.abiAlignment(pt).toLlvm();
+ const alignment = optional_ty.abiAlignment(zcu).toLlvm();
const optional_ptr = try self.buildAllocaWorkaround(optional_ty, alignment);
break :brk optional_ptr;
};
@@ -7537,12 +7540,13 @@ pub const FuncGen = struct {
fn airWrapErrUnionPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
+ const zcu = pt.zcu;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const err_un_ty = self.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
const payload_ty = self.typeOf(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return operand;
}
const ok_err_code = try o.builder.intValue(try o.errorIntType(), 0);
@@ -7550,19 +7554,19 @@ pub const FuncGen = struct {
const payload_offset = try errUnionPayloadOffset(payload_ty, pt);
const error_offset = try errUnionErrorOffset(payload_ty, pt);
- if (isByRef(err_un_ty, pt)) {
+ if (isByRef(err_un_ty, zcu)) {
const directReturn = self.isNextRet(body_tail);
const result_ptr = if (directReturn)
self.ret_ptr
else brk: {
- const alignment = err_un_ty.abiAlignment(pt).toLlvm();
+ const alignment = err_un_ty.abiAlignment(zcu).toLlvm();
const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment);
break :brk result_ptr;
};
const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, "");
const err_int_ty = try pt.errorIntType();
- const error_alignment = err_int_ty.abiAlignment(pt).toLlvm();
+ const error_alignment = err_int_ty.abiAlignment(zcu).toLlvm();
_ = try self.wip.store(.normal, ok_err_code, err_ptr, error_alignment);
const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, "");
const payload_ptr_ty = try pt.singleMutPtrType(payload_ty);
@@ -7578,30 +7582,30 @@ pub const FuncGen = struct {
fn airWrapErrUnionErr(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const err_un_ty = self.typeOfIndex(inst);
- const payload_ty = err_un_ty.errorUnionPayload(mod);
+ const payload_ty = err_un_ty.errorUnionPayload(zcu);
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return operand;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return operand;
const err_un_llvm_ty = try o.lowerType(err_un_ty);
const payload_offset = try errUnionPayloadOffset(payload_ty, pt);
const error_offset = try errUnionErrorOffset(payload_ty, pt);
- if (isByRef(err_un_ty, pt)) {
+ if (isByRef(err_un_ty, zcu)) {
const directReturn = self.isNextRet(body_tail);
const result_ptr = if (directReturn)
self.ret_ptr
else brk: {
- const alignment = err_un_ty.abiAlignment(pt).toLlvm();
+ const alignment = err_un_ty.abiAlignment(zcu).toLlvm();
const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment);
break :brk result_ptr;
};
const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, "");
const err_int_ty = try pt.errorIntType();
- const error_alignment = err_int_ty.abiAlignment(pt).toLlvm();
+ const error_alignment = err_int_ty.abiAlignment(zcu).toLlvm();
_ = try self.wip.store(.normal, operand, err_ptr, error_alignment);
const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, "");
const payload_ptr_ty = try pt.singleMutPtrType(payload_ty);
@@ -7639,7 +7643,7 @@ pub const FuncGen = struct {
fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
const extra = self.air.extraData(Air.Bin, data.payload).data;
@@ -7649,9 +7653,9 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(extra.rhs);
const access_kind: Builder.MemoryAccessKind =
- if (vector_ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
- const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod));
- const alignment = vector_ptr_ty.ptrAlignment(pt).toLlvm();
+ if (vector_ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal;
+ const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(zcu));
+ const alignment = vector_ptr_ty.ptrAlignment(zcu).toLlvm();
const loaded = try self.wip.load(access_kind, elem_llvm_ty, vector_ptr, alignment, "");
const new_vector = try self.wip.insertElement(loaded, operand, index, "");
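Storing through a vector element is lowered as a read-modify-write of the whole vector (load, insertelement, store back), since LLVM has no per-lane store. Roughly the kind of Zig code that reaches this path, with made-up values:

    const std = @import("std");

    pub fn main() void {
        var v: @Vector(4, u32) = .{ 1, 2, 3, 4 };
        v[2] = 99; // load v, insertelement 99 at index 2, store v back
        std.debug.print("{}\n", .{v[2]});
    }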
@@ -7661,18 +7665,18 @@ pub const FuncGen = struct {
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, .normal, inst_ty, 2, .{ lhs, rhs });
return self.wip.callIntrinsic(
.normal,
.none,
- if (scalar_ty.isSignedInt(mod)) .smin else .umin,
+ if (scalar_ty.isSignedInt(zcu)) .smin else .umin,
&.{try o.lowerType(inst_ty)},
&.{ lhs, rhs },
"",
@@ -7681,18 +7685,18 @@ pub const FuncGen = struct {
fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, .normal, inst_ty, 2, .{ lhs, rhs });
return self.wip.callIntrinsic(
.normal,
.none,
- if (scalar_ty.isSignedInt(mod)) .smax else .umax,
+ if (scalar_ty.isSignedInt(zcu)) .smax else .umax,
&.{try o.lowerType(inst_ty)},
&.{ lhs, rhs },
"",
@@ -7711,15 +7715,15 @@ pub const FuncGen = struct {
fn airAdd(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, fast, inst_ty, 2, .{ lhs, rhs });
- return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"add nsw" else .@"add nuw", lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(zcu)) .@"add nsw" else .@"add nuw", lhs, rhs, "");
}
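Plain + asserts no overflow, which is what allows the add above to carry nsw (signed) or nuw (unsigned); the wrapping and saturating operators are lowered separately (see the airAddSat hunk below). A quick contrast with made-up values:

    const std = @import("std");

    pub fn main() void {
        const a: u8 = 200;
        std.debug.print("{} {}\n", .{ a +% 100, a +| 100 }); // 44 255
    }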
fn airSafeArithmetic(
@@ -7729,15 +7733,15 @@ pub const FuncGen = struct {
unsigned_intrinsic: Builder.Intrinsic,
) !Builder.Value {
const o = fg.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try fg.resolveInst(bin_op.lhs);
const rhs = try fg.resolveInst(bin_op.rhs);
const inst_ty = fg.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
- const intrinsic = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic;
+ const intrinsic = if (scalar_ty.isSignedInt(zcu)) signed_intrinsic else unsigned_intrinsic;
const llvm_inst_ty = try o.lowerType(inst_ty);
const results =
try fg.wip.callIntrinsic(.normal, .none, intrinsic, &.{llvm_inst_ty}, &.{ lhs, rhs }, "");
@@ -7777,18 +7781,18 @@ pub const FuncGen = struct {
fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{});
return self.wip.callIntrinsic(
.normal,
.none,
- if (scalar_ty.isSignedInt(mod)) .@"sadd.sat" else .@"uadd.sat",
+ if (scalar_ty.isSignedInt(zcu)) .@"sadd.sat" else .@"uadd.sat",
&.{try o.lowerType(inst_ty)},
&.{ lhs, rhs },
"",
@@ -7797,15 +7801,15 @@ pub const FuncGen = struct {
fn airSub(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, fast, inst_ty, 2, .{ lhs, rhs });
- return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"sub nsw" else .@"sub nuw", lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(zcu)) .@"sub nsw" else .@"sub nuw", lhs, rhs, "");
}
fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
@@ -7818,18 +7822,18 @@ pub const FuncGen = struct {
fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{});
return self.wip.callIntrinsic(
.normal,
.none,
- if (scalar_ty.isSignedInt(mod)) .@"ssub.sat" else .@"usub.sat",
+ if (scalar_ty.isSignedInt(zcu)) .@"ssub.sat" else .@"usub.sat",
&.{try o.lowerType(inst_ty)},
&.{ lhs, rhs },
"",
@@ -7838,15 +7842,15 @@ pub const FuncGen = struct {
fn airMul(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, fast, inst_ty, 2, .{ lhs, rhs });
- return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"mul nsw" else .@"mul nuw", lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(zcu)) .@"mul nsw" else .@"mul nuw", lhs, rhs, "");
}
fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
@@ -7859,18 +7863,18 @@ pub const FuncGen = struct {
fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
return self.wip.callIntrinsic(
.normal,
.none,
- if (scalar_ty.isSignedInt(mod)) .@"smul.fix.sat" else .@"umul.fix.sat",
+ if (scalar_ty.isSignedInt(zcu)) .@"smul.fix.sat" else .@"umul.fix.sat",
&.{try o.lowerType(inst_ty)},
&.{ lhs, rhs, .@"0" },
"",
@@ -7888,34 +7892,34 @@ pub const FuncGen = struct {
fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isRuntimeFloat()) {
const result = try self.buildFloatOp(.div, fast, inst_ty, 2, .{ lhs, rhs });
return self.buildFloatOp(.trunc, fast, inst_ty, 1, .{result});
}
- return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .sdiv else .udiv, lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(zcu)) .sdiv else .udiv, lhs, rhs, "");
}
fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isRuntimeFloat()) {
const result = try self.buildFloatOp(.div, fast, inst_ty, 2, .{ lhs, rhs });
return self.buildFloatOp(.floor, fast, inst_ty, 1, .{result});
}
- if (scalar_ty.isSignedInt(mod)) {
+ if (scalar_ty.isSignedInt(zcu)) {
const inst_llvm_ty = try o.lowerType(inst_ty);
const bit_size_minus_one = try o.builder.splatValue(inst_llvm_ty, try o.builder.intConst(
inst_llvm_ty.scalarType(&o.builder),
@@ -7936,16 +7940,16 @@ pub const FuncGen = struct {
fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, fast, inst_ty, 2, .{ lhs, rhs });
return self.wip.bin(
- if (scalar_ty.isSignedInt(mod)) .@"sdiv exact" else .@"udiv exact",
+ if (scalar_ty.isSignedInt(zcu)) .@"sdiv exact" else .@"udiv exact",
lhs,
rhs,
"",
@@ -7954,16 +7958,16 @@ pub const FuncGen = struct {
fn airRem(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isRuntimeFloat())
return self.buildFloatOp(.fmod, fast, inst_ty, 2, .{ lhs, rhs });
- return self.wip.bin(if (scalar_ty.isSignedInt(mod))
+ return self.wip.bin(if (scalar_ty.isSignedInt(zcu))
.srem
else
.urem, lhs, rhs, "");
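airRem maps directly onto srem/urem/fmod, while airMod in the next hunk builds a true modulus whose result takes the sign of the divisor. A worked contrast, assuming the usual @rem/@mod semantics:

    const std = @import("std");

    pub fn main() void {
        std.debug.print("{} {}\n", .{ @rem(@as(i32, -7), 3), @mod(@as(i32, -7), 3) }); // -1 2
    }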
@@ -7971,13 +7975,13 @@ pub const FuncGen = struct {
fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
const inst_llvm_ty = try o.lowerType(inst_ty);
- const scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = inst_ty.scalarType(zcu);
if (scalar_ty.isRuntimeFloat()) {
const a = try self.buildFloatOp(.fmod, fast, inst_ty, 2, .{ lhs, rhs });
@@ -7987,7 +7991,7 @@ pub const FuncGen = struct {
const ltz = try self.buildFloatCmp(fast, .lt, inst_ty, .{ lhs, zero });
return self.wip.select(fast, ltz, c, a, "");
}
- if (scalar_ty.isSignedInt(mod)) {
+ if (scalar_ty.isSignedInt(zcu)) {
const bit_size_minus_one = try o.builder.splatValue(inst_llvm_ty, try o.builder.intConst(
inst_llvm_ty.scalarType(&o.builder),
inst_llvm_ty.scalarBits(&o.builder) - 1,
@@ -8007,14 +8011,14 @@ pub const FuncGen = struct {
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const ptr_ty = self.typeOf(bin_op.lhs);
- const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(mod));
- switch (ptr_ty.ptrSize(mod)) {
+ const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(zcu));
+ switch (ptr_ty.ptrSize(zcu)) {
// It's a pointer to an array, so according to LLVM we need an extra GEP index.
.One => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{
try o.builder.intValue(try o.lowerType(Type.usize), 0), offset,
@@ -8029,15 +8033,15 @@ pub const FuncGen = struct {
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const negative_offset = try self.wip.neg(offset, "");
const ptr_ty = self.typeOf(bin_op.lhs);
- const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(mod));
- switch (ptr_ty.ptrSize(mod)) {
+ const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(zcu));
+ switch (ptr_ty.ptrSize(zcu)) {
// It's a pointer to an array, so according to LLVM we need an extra GEP index.
.One => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{
try o.builder.intValue(try o.lowerType(Type.usize), 0), negative_offset,
@@ -8058,7 +8062,7 @@ pub const FuncGen = struct {
) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -8066,10 +8070,10 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(extra.rhs);
const lhs_ty = self.typeOf(extra.lhs);
- const scalar_ty = lhs_ty.scalarType(mod);
+ const scalar_ty = lhs_ty.scalarType(zcu);
const inst_ty = self.typeOfIndex(inst);
- const intrinsic = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic;
+ const intrinsic = if (scalar_ty.isSignedInt(zcu)) signed_intrinsic else unsigned_intrinsic;
const llvm_inst_ty = try o.lowerType(inst_ty);
const llvm_lhs_ty = try o.lowerType(lhs_ty);
const results =
@@ -8081,8 +8085,8 @@ pub const FuncGen = struct {
const result_index = o.llvmFieldIndex(inst_ty, 0).?;
const overflow_index = o.llvmFieldIndex(inst_ty, 1).?;
- if (isByRef(inst_ty, pt)) {
- const result_alignment = inst_ty.abiAlignment(pt).toLlvm();
+ if (isByRef(inst_ty, zcu)) {
+ const result_alignment = inst_ty.abiAlignment(zcu).toLlvm();
const alloca_inst = try self.buildAllocaWorkaround(inst_ty, result_alignment);
{
const field_ptr = try self.wip.gepStruct(llvm_inst_ty, alloca_inst, result_index, "");
@@ -8165,9 +8169,9 @@ pub const FuncGen = struct {
params: [2]Builder.Value,
) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
- const target = mod.getTarget();
- const scalar_ty = ty.scalarType(mod);
+ const zcu = o.pt.zcu;
+ const target = zcu.getTarget();
+ const scalar_ty = ty.scalarType(zcu);
const scalar_llvm_ty = try o.lowerType(scalar_ty);
if (intrinsicsAllowed(scalar_ty, target)) {
@@ -8205,8 +8209,8 @@ pub const FuncGen = struct {
.gte => .sge,
};
- if (ty.zigTypeTag(mod) == .Vector) {
- const vec_len = ty.vectorLen(mod);
+ if (ty.zigTypeTag(zcu) == .Vector) {
+ const vec_len = ty.vectorLen(zcu);
const vector_result_ty = try o.builder.vectorType(.normal, vec_len, .i32);
const init = try o.builder.poisonValue(vector_result_ty);
@@ -8271,9 +8275,9 @@ pub const FuncGen = struct {
params: [params_len]Builder.Value,
) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
- const target = mod.getTarget();
- const scalar_ty = ty.scalarType(mod);
+ const zcu = o.pt.zcu;
+ const target = zcu.getTarget();
+ const scalar_ty = ty.scalarType(zcu);
const llvm_ty = try o.lowerType(ty);
if (op != .tan and intrinsicsAllowed(scalar_ty, target)) switch (op) {
@@ -8382,9 +8386,9 @@ pub const FuncGen = struct {
([1]Builder.Type{scalar_llvm_ty} ** 3)[0..params.len],
scalar_llvm_ty,
);
- if (ty.zigTypeTag(mod) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .Vector) {
const result = try o.builder.poisonValue(llvm_ty);
- return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(mod));
+ return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(zcu));
}
return self.wip.call(
@@ -8413,7 +8417,7 @@ pub const FuncGen = struct {
fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -8421,7 +8425,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(extra.rhs);
const lhs_ty = self.typeOf(extra.lhs);
- const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const lhs_scalar_ty = lhs_ty.scalarType(zcu);
const dest_ty = self.typeOfIndex(inst);
const llvm_dest_ty = try o.lowerType(dest_ty);
@@ -8429,7 +8433,7 @@ pub const FuncGen = struct {
const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
const result = try self.wip.bin(.shl, lhs, casted_rhs, "");
- const reconstructed = try self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod))
+ const reconstructed = try self.wip.bin(if (lhs_scalar_ty.isSignedInt(zcu))
.ashr
else
.lshr, result, casted_rhs, "");
@@ -8439,8 +8443,8 @@ pub const FuncGen = struct {
const result_index = o.llvmFieldIndex(dest_ty, 0).?;
const overflow_index = o.llvmFieldIndex(dest_ty, 1).?;
- if (isByRef(dest_ty, pt)) {
- const result_alignment = dest_ty.abiAlignment(pt).toLlvm();
+ if (isByRef(dest_ty, zcu)) {
+ const result_alignment = dest_ty.abiAlignment(zcu).toLlvm();
const alloca_inst = try self.buildAllocaWorkaround(dest_ty, result_alignment);
{
const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, "");
@@ -8483,17 +8487,17 @@ pub const FuncGen = struct {
fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
- const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const lhs_scalar_ty = lhs_ty.scalarType(zcu);
const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
- return self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod))
+ return self.wip.bin(if (lhs_scalar_ty.isSignedInt(zcu))
.@"shl nsw"
else
.@"shl nuw", lhs, casted_rhs, "");
@@ -8515,15 +8519,15 @@ pub const FuncGen = struct {
fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
- const lhs_scalar_ty = lhs_ty.scalarType(mod);
- const lhs_bits = lhs_scalar_ty.bitSize(pt);
+ const lhs_scalar_ty = lhs_ty.scalarType(zcu);
+ const lhs_bits = lhs_scalar_ty.bitSize(zcu);
const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
@@ -8532,7 +8536,7 @@ pub const FuncGen = struct {
const result = try self.wip.callIntrinsic(
.normal,
.none,
- if (lhs_scalar_ty.isSignedInt(mod)) .@"sshl.sat" else .@"ushl.sat",
+ if (lhs_scalar_ty.isSignedInt(zcu)) .@"sshl.sat" else .@"ushl.sat",
&.{llvm_lhs_ty},
&.{ lhs, casted_rhs },
"",
@@ -8557,17 +8561,17 @@ pub const FuncGen = struct {
fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
- const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const lhs_scalar_ty = lhs_ty.scalarType(zcu);
const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
- const is_signed_int = lhs_scalar_ty.isSignedInt(mod);
+ const is_signed_int = lhs_scalar_ty.isSignedInt(zcu);
return self.wip.bin(if (is_exact)
if (is_signed_int) .@"ashr exact" else .@"lshr exact"
@@ -8576,13 +8580,13 @@ pub const FuncGen = struct {
fn airAbs(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType(mod);
+ const scalar_ty = operand_ty.scalarType(zcu);
- switch (scalar_ty.zigTypeTag(mod)) {
+ switch (scalar_ty.zigTypeTag(zcu)) {
.Int => return self.wip.callIntrinsic(
.normal,
.none,
@@ -8598,13 +8602,13 @@ pub const FuncGen = struct {
fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dest_ty = self.typeOfIndex(inst);
const dest_llvm_ty = try o.lowerType(dest_ty);
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
- const operand_info = operand_ty.intInfo(mod);
+ const operand_info = operand_ty.intInfo(zcu);
return self.wip.conv(switch (operand_info.signedness) {
.signed => .signed,
@@ -8622,12 +8626,12 @@ pub const FuncGen = struct {
fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
const dest_ty = self.typeOfIndex(inst);
- const target = mod.getTarget();
+ const target = zcu.getTarget();
const dest_bits = dest_ty.floatBits(target);
const src_bits = operand_ty.floatBits(target);
@@ -8656,12 +8660,12 @@ pub const FuncGen = struct {
fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
const dest_ty = self.typeOfIndex(inst);
- const target = mod.getTarget();
+ const target = zcu.getTarget();
if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) {
return self.wip.cast(.fpext, operand, try o.lowerType(dest_ty), "");
@@ -8669,18 +8673,18 @@ pub const FuncGen = struct {
const operand_llvm_ty = try o.lowerType(operand_ty);
const dest_llvm_ty = try o.lowerType(dest_ty);
- const dest_bits = dest_ty.scalarType(mod).floatBits(target);
- const src_bits = operand_ty.scalarType(mod).floatBits(target);
+ const dest_bits = dest_ty.scalarType(zcu).floatBits(target);
+ const src_bits = operand_ty.scalarType(zcu).floatBits(target);
const fn_name = try o.builder.strtabStringFmt("__extend{s}f{s}f2", .{
compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits),
});
const libc_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, dest_llvm_ty);
- if (dest_ty.isVector(mod)) return self.buildElementwiseCall(
+ if (dest_ty.isVector(zcu)) return self.buildElementwiseCall(
libc_fn,
&.{operand},
try o.builder.poisonValue(dest_llvm_ty),
- dest_ty.vectorLen(mod),
+ dest_ty.vectorLen(zcu),
);
return self.wip.call(
.normal,
@@ -8715,9 +8719,9 @@ pub const FuncGen = struct {
fn bitCast(self: *FuncGen, operand: Builder.Value, operand_ty: Type, inst_ty: Type) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const operand_is_ref = isByRef(operand_ty, pt);
- const result_is_ref = isByRef(inst_ty, pt);
+ const zcu = pt.zcu;
+ const operand_is_ref = isByRef(operand_ty, zcu);
+ const result_is_ref = isByRef(inst_ty, zcu);
const llvm_dest_ty = try o.lowerType(inst_ty);
if (operand_is_ref and result_is_ref) {
@@ -8731,18 +8735,18 @@ pub const FuncGen = struct {
return self.wip.conv(.unsigned, operand, llvm_dest_ty, "");
}
- if (operand_ty.zigTypeTag(mod) == .Int and inst_ty.isPtrAtRuntime(mod)) {
+ if (operand_ty.zigTypeTag(zcu) == .Int and inst_ty.isPtrAtRuntime(zcu)) {
return self.wip.cast(.inttoptr, operand, llvm_dest_ty, "");
}
- if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) {
- const elem_ty = operand_ty.childType(mod);
+ if (operand_ty.zigTypeTag(zcu) == .Vector and inst_ty.zigTypeTag(zcu) == .Array) {
+ const elem_ty = operand_ty.childType(zcu);
if (!result_is_ref) {
return self.ng.todo("implement bitcast vector to non-ref array", .{});
}
- const alignment = inst_ty.abiAlignment(pt).toLlvm();
+ const alignment = inst_ty.abiAlignment(zcu).toLlvm();
const array_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
- const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8;
+ const bitcast_ok = elem_ty.bitSize(zcu) == elem_ty.abiSize(zcu) * 8;
if (bitcast_ok) {
_ = try self.wip.store(.normal, operand, array_ptr, alignment);
} else {
@@ -8750,7 +8754,7 @@ pub const FuncGen = struct {
// a simple bitcast will not work, and we fall back to extractelement.
const llvm_usize = try o.lowerType(Type.usize);
const usize_zero = try o.builder.intValue(llvm_usize, 0);
- const vector_len = operand_ty.arrayLen(mod);
+ const vector_len = operand_ty.arrayLen(zcu);
var i: u64 = 0;
while (i < vector_len) : (i += 1) {
const elem_ptr = try self.wip.gep(.inbounds, llvm_dest_ty, array_ptr, &.{
@@ -8762,16 +8766,16 @@ pub const FuncGen = struct {
}
}
return array_ptr;
- } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) {
- const elem_ty = operand_ty.childType(mod);
+ } else if (operand_ty.zigTypeTag(zcu) == .Array and inst_ty.zigTypeTag(zcu) == .Vector) {
+ const elem_ty = operand_ty.childType(zcu);
const llvm_vector_ty = try o.lowerType(inst_ty);
if (!operand_is_ref) return self.ng.todo("implement bitcast non-ref array to vector", .{});
- const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8;
+ const bitcast_ok = elem_ty.bitSize(zcu) == elem_ty.abiSize(zcu) * 8;
if (bitcast_ok) {
// The array is aligned to the element's alignment, while the vector might have a completely
// different alignment. This means we need to enforce the alignment of this load.
- const alignment = elem_ty.abiAlignment(pt).toLlvm();
+ const alignment = elem_ty.abiAlignment(zcu).toLlvm();
return self.wip.load(.normal, llvm_vector_ty, operand, alignment, "");
} else {
// If the ABI size of the element type is not evenly divisible by size in bits;
@@ -8780,7 +8784,7 @@ pub const FuncGen = struct {
const elem_llvm_ty = try o.lowerType(elem_ty);
const llvm_usize = try o.lowerType(Type.usize);
const usize_zero = try o.builder.intValue(llvm_usize, 0);
- const vector_len = operand_ty.arrayLen(mod);
+ const vector_len = operand_ty.arrayLen(zcu);
var vector = try o.builder.poisonValue(llvm_vector_ty);
var i: u64 = 0;
while (i < vector_len) : (i += 1) {
@@ -8796,25 +8800,25 @@ pub const FuncGen = struct {
}
if (operand_is_ref) {
- const alignment = operand_ty.abiAlignment(pt).toLlvm();
+ const alignment = operand_ty.abiAlignment(zcu).toLlvm();
return self.wip.load(.normal, llvm_dest_ty, operand, alignment, "");
}
if (result_is_ref) {
- const alignment = operand_ty.abiAlignment(pt).max(inst_ty.abiAlignment(pt)).toLlvm();
+ const alignment = operand_ty.abiAlignment(zcu).max(inst_ty.abiAlignment(zcu)).toLlvm();
const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
_ = try self.wip.store(.normal, operand, result_ptr, alignment);
return result_ptr;
}
if (llvm_dest_ty.isStruct(&o.builder) or
- ((operand_ty.zigTypeTag(mod) == .Vector or inst_ty.zigTypeTag(mod) == .Vector) and
- operand_ty.bitSize(pt) != inst_ty.bitSize(pt)))
+ ((operand_ty.zigTypeTag(zcu) == .Vector or inst_ty.zigTypeTag(zcu) == .Vector) and
+ operand_ty.bitSize(zcu) != inst_ty.bitSize(zcu)))
{
// Both our operand and our result are values, not pointers,
// but LLVM won't let us bitcast struct values or vectors with padding bits.
// Therefore, we store operand to alloca, then load for result.
- const alignment = operand_ty.abiAlignment(pt).max(inst_ty.abiAlignment(pt)).toLlvm();
+ const alignment = operand_ty.abiAlignment(zcu).max(inst_ty.abiAlignment(zcu)).toLlvm();
const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
_ = try self.wip.store(.normal, operand, result_ptr, alignment);
return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, "");
@@ -8868,7 +8872,7 @@ pub const FuncGen = struct {
};
const mod = self.ng.ownerModule();
- if (isByRef(inst_ty, pt)) {
+ if (isByRef(inst_ty, zcu)) {
_ = try self.wip.callIntrinsic(
.normal,
.none,
@@ -8882,7 +8886,7 @@ pub const FuncGen = struct {
"",
);
} else if (mod.optimize_mode == .Debug) {
- const alignment = inst_ty.abiAlignment(pt).toLlvm();
+ const alignment = inst_ty.abiAlignment(zcu).toLlvm();
const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, arg_val, alloca, alignment);
_ = try self.wip.callIntrinsic(
@@ -8919,28 +8923,28 @@ pub const FuncGen = struct {
fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
- const pointee_type = ptr_ty.childType(mod);
- if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(pt))
+ const pointee_type = ptr_ty.childType(zcu);
+ if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu))
return (try o.lowerPtrToVoid(ptr_ty)).toValue();
//const pointee_llvm_ty = try o.lowerType(pointee_type);
- const alignment = ptr_ty.ptrAlignment(pt).toLlvm();
+ const alignment = ptr_ty.ptrAlignment(zcu).toLlvm();
return self.buildAllocaWorkaround(pointee_type, alignment);
}
fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
- const ret_ty = ptr_ty.childType(mod);
- if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt))
+ const ret_ty = ptr_ty.childType(zcu);
+ if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu))
return (try o.lowerPtrToVoid(ptr_ty)).toValue();
if (self.ret_ptr != .none) return self.ret_ptr;
//const ret_llvm_ty = try o.lowerType(ret_ty);
- const alignment = ptr_ty.ptrAlignment(pt).toLlvm();
+ const alignment = ptr_ty.ptrAlignment(zcu).toLlvm();
return self.buildAllocaWorkaround(ret_ty, alignment);
}
@@ -8962,19 +8966,19 @@ pub const FuncGen = struct {
alignment: Builder.Alignment,
) Allocator.Error!Builder.Value {
const o = self.ng.object;
- return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt), .i8), alignment);
+ return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt.zcu), .i8), alignment);
}
fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dest_ptr = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.typeOf(bin_op.lhs);
- const operand_ty = ptr_ty.childType(mod);
+ const operand_ty = ptr_ty.childType(zcu);
- const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(mod) else false;
+ const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(zcu) else false;
if (val_is_undef) {
const owner_mod = self.ng.ownerModule();
@@ -8991,7 +8995,7 @@ pub const FuncGen = struct {
return .none;
}
- const ptr_info = ptr_ty.ptrInfo(mod);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
const needs_bitmask = (ptr_info.packed_offset.host_size != 0);
if (needs_bitmask) {
// TODO: only some bits are to be undef, we cannot write with a simple memset.
@@ -9000,13 +9004,13 @@ pub const FuncGen = struct {
return .none;
}
- const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(pt));
+ const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(zcu));
_ = try self.wip.callMemSet(
dest_ptr,
- ptr_ty.ptrAlignment(pt).toLlvm(),
+ ptr_ty.ptrAlignment(zcu).toLlvm(),
if (safety) try o.builder.intValue(.i8, 0xaa) else try o.builder.undefValue(.i8),
len,
- if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal,
+ if (ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal,
);
if (safety and owner_mod.valgrind) {
try self.valgrindMarkUndef(dest_ptr, len);
@@ -9027,8 +9031,8 @@ pub const FuncGen = struct {
/// The first instruction of `body_tail` is the one whose copy we want to elide.
fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool {
const o = fg.ng.object;
- const mod = o.pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = o.pt.zcu;
+ const ip = &zcu.intern_pool;
for (body_tail[1..]) |body_inst| {
switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) {
.none => continue,
@@ -9044,15 +9048,15 @@ pub const FuncGen = struct {
fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = fg.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const inst = body_tail[0];
const ty_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ptr_ty = fg.typeOf(ty_op.operand);
- const ptr_info = ptr_ty.ptrInfo(mod);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
const ptr = try fg.resolveInst(ty_op.operand);
elide: {
- if (!isByRef(Type.fromInterned(ptr_info.child), pt)) break :elide;
+ if (!isByRef(Type.fromInterned(ptr_info.child), zcu)) break :elide;
if (!canElideLoad(fg, body_tail)) break :elide;
return ptr;
}
@@ -9105,34 +9109,34 @@ pub const FuncGen = struct {
) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const ptr = try self.resolveInst(extra.ptr);
const ptr_ty = self.typeOf(extra.ptr);
var expected_value = try self.resolveInst(extra.expected_value);
var new_value = try self.resolveInst(extra.new_value);
- const operand_ty = ptr_ty.childType(mod);
+ const operand_ty = ptr_ty.childType(zcu);
const llvm_operand_ty = try o.lowerType(operand_ty);
const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false);
if (llvm_abi_ty != .none) {
// operand needs widening and truncating
const signedness: Builder.Function.Instruction.Cast.Signedness =
- if (operand_ty.isSignedInt(mod)) .signed else .unsigned;
+ if (operand_ty.isSignedInt(zcu)) .signed else .unsigned;
expected_value = try self.wip.conv(signedness, expected_value, llvm_abi_ty, "");
new_value = try self.wip.conv(signedness, new_value, llvm_abi_ty, "");
}
const result = try self.wip.cmpxchg(
kind,
- if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal,
+ if (ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal,
ptr,
expected_value,
new_value,
self.sync_scope,
toLlvmAtomicOrdering(extra.successOrder()),
toLlvmAtomicOrdering(extra.failureOrder()),
- ptr_ty.ptrAlignment(pt).toLlvm(),
+ ptr_ty.ptrAlignment(zcu).toLlvm(),
"",
);
@@ -9142,7 +9146,7 @@ pub const FuncGen = struct {
if (llvm_abi_ty != .none) payload = try self.wip.cast(.trunc, payload, llvm_operand_ty, "");
const success_bit = try self.wip.extractValue(result, &.{1}, "");
- if (optional_ty.optionalReprIsPayload(mod)) {
+ if (optional_ty.optionalReprIsPayload(zcu)) {
const zero = try o.builder.zeroInitValue(payload.typeOfWip(&self.wip));
return self.wip.select(.normal, success_bit, zero, payload, "");
}
@@ -9156,14 +9160,14 @@ pub const FuncGen = struct {
fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
const ptr = try self.resolveInst(pl_op.operand);
const ptr_ty = self.typeOf(pl_op.operand);
- const operand_ty = ptr_ty.childType(mod);
+ const operand_ty = ptr_ty.childType(zcu);
const operand = try self.resolveInst(extra.operand);
- const is_signed_int = operand_ty.isSignedInt(mod);
+ const is_signed_int = operand_ty.isSignedInt(zcu);
const is_float = operand_ty.isRuntimeFloat();
const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float);
const ordering = toLlvmAtomicOrdering(extra.ordering());
@@ -9171,8 +9175,8 @@ pub const FuncGen = struct {
const llvm_operand_ty = try o.lowerType(operand_ty);
const access_kind: Builder.MemoryAccessKind =
- if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
- const ptr_alignment = ptr_ty.ptrAlignment(pt).toLlvm();
+ if (ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal;
+ const ptr_alignment = ptr_ty.ptrAlignment(zcu).toLlvm();
if (llvm_abi_ty != .none) {
// operand needs widening and truncating or bitcasting.
@@ -9220,19 +9224,19 @@ pub const FuncGen = struct {
fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
const ptr = try self.resolveInst(atomic_load.ptr);
const ptr_ty = self.typeOf(atomic_load.ptr);
- const info = ptr_ty.ptrInfo(mod);
+ const info = ptr_ty.ptrInfo(zcu);
const elem_ty = Type.fromInterned(info.child);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;
const ordering = toLlvmAtomicOrdering(atomic_load.order);
const llvm_abi_ty = try o.getAtomicAbiType(elem_ty, false);
const ptr_alignment = (if (info.flags.alignment != .none)
@as(InternPool.Alignment, info.flags.alignment)
else
- Type.fromInterned(info.child).abiAlignment(pt)).toLlvm();
+ Type.fromInterned(info.child).abiAlignment(zcu)).toLlvm();
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;
const elem_llvm_ty = try o.lowerType(elem_ty);
@@ -9268,11 +9272,11 @@ pub const FuncGen = struct {
) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
- const operand_ty = ptr_ty.childType(mod);
- if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return .none;
+ const operand_ty = ptr_ty.childType(zcu);
+ if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .none;
const ptr = try self.resolveInst(bin_op.lhs);
var element = try self.resolveInst(bin_op.rhs);
const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false);
@@ -9280,7 +9284,7 @@ pub const FuncGen = struct {
if (llvm_abi_ty != .none) {
// operand needs widening
element = try self.wip.conv(
- if (operand_ty.isSignedInt(mod)) .signed else .unsigned,
+ if (operand_ty.isSignedInt(zcu)) .signed else .unsigned,
element,
llvm_abi_ty,
"",
@@ -9293,26 +9297,26 @@ pub const FuncGen = struct {
fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dest_slice = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = self.typeOf(bin_op.rhs);
- const dest_ptr_align = ptr_ty.ptrAlignment(pt).toLlvm();
+ const dest_ptr_align = ptr_ty.ptrAlignment(zcu).toLlvm();
const dest_ptr = try self.sliceOrArrayPtr(dest_slice, ptr_ty);
const access_kind: Builder.MemoryAccessKind =
- if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
+ if (ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal;
// Any WebAssembly runtime will trap when the destination pointer is out-of-bounds, regardless
// of the length. This means we need to emit a check where we skip the memset when the length
// is 0 as we allow for undefined pointers in 0-sized slices.
// This logic can be removed once https://github.com/ziglang/zig/issues/16360 is done.
const intrinsic_len0_traps = o.target.isWasm() and
- ptr_ty.isSlice(mod) and
+ ptr_ty.isSlice(zcu) and
std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory);
if (try self.air.value(bin_op.rhs, pt)) |elem_val| {
- if (elem_val.isUndefDeep(mod)) {
+ if (elem_val.isUndefDeep(zcu)) {
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM. However, safety makes the difference between using
// 0xaa or actual undefined for the fill byte.
@@ -9350,7 +9354,7 @@ pub const FuncGen = struct {
}
const value = try self.resolveInst(bin_op.rhs);
- const elem_abi_size = elem_ty.abiSize(pt);
+ const elem_abi_size = elem_ty.abiSize(zcu);
if (elem_abi_size == 1) {
// In this case we can take advantage of LLVM's intrinsic.
@@ -9387,9 +9391,9 @@ pub const FuncGen = struct {
const end_block = try self.wip.block(1, "InlineMemsetEnd");
const llvm_usize_ty = try o.lowerType(Type.usize);
- const len = switch (ptr_ty.ptrSize(mod)) {
+ const len = switch (ptr_ty.ptrSize(zcu)) {
.Slice => try self.wip.extractValue(dest_slice, &.{1}, ""),
- .One => try o.builder.intValue(llvm_usize_ty, ptr_ty.childType(mod).arrayLen(mod)),
+ .One => try o.builder.intValue(llvm_usize_ty, ptr_ty.childType(zcu).arrayLen(zcu)),
.Many, .C => unreachable,
};
const elem_llvm_ty = try o.lowerType(elem_ty);
@@ -9402,9 +9406,9 @@ pub const FuncGen = struct {
_ = try self.wip.brCond(end, body_block, end_block);
self.wip.cursor = .{ .block = body_block };
- const elem_abi_align = elem_ty.abiAlignment(pt);
+ const elem_abi_align = elem_ty.abiAlignment(zcu);
const it_ptr_align = InternPool.Alignment.fromLlvm(dest_ptr_align).min(elem_abi_align).toLlvm();
- if (isByRef(elem_ty, pt)) {
+ if (isByRef(elem_ty, zcu)) {
_ = try self.wip.callMemCpy(
it_ptr.toValue(),
it_ptr_align,
@@ -9447,7 +9451,7 @@ pub const FuncGen = struct {
fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dest_slice = try self.resolveInst(bin_op.lhs);
const dest_ptr_ty = self.typeOf(bin_op.lhs);
@@ -9456,8 +9460,8 @@ pub const FuncGen = struct {
const src_ptr = try self.sliceOrArrayPtr(src_slice, src_ptr_ty);
const len = try self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty);
const dest_ptr = try self.sliceOrArrayPtr(dest_slice, dest_ptr_ty);
- const access_kind: Builder.MemoryAccessKind = if (src_ptr_ty.isVolatilePtr(mod) or
- dest_ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
+ const access_kind: Builder.MemoryAccessKind = if (src_ptr_ty.isVolatilePtr(zcu) or
+ dest_ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal;
// When bulk-memory is enabled, this will be lowered to WebAssembly's memory.copy instruction.
// This instruction will trap on an invalid address, regardless of the length.
@@ -9466,7 +9470,7 @@ pub const FuncGen = struct {
// This logic can be removed once https://github.com/ziglang/zig/issues/16360 is done.
if (o.target.isWasm() and
std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory) and
- dest_ptr_ty.isSlice(mod))
+ dest_ptr_ty.isSlice(zcu))
{
const usize_zero = try o.builder.intValue(try o.lowerType(Type.usize), 0);
const cond = try self.cmp(.normal, .neq, Type.usize, len, usize_zero);
@@ -9476,9 +9480,9 @@ pub const FuncGen = struct {
self.wip.cursor = .{ .block = memcpy_block };
_ = try self.wip.callMemCpy(
dest_ptr,
- dest_ptr_ty.ptrAlignment(pt).toLlvm(),
+ dest_ptr_ty.ptrAlignment(zcu).toLlvm(),
src_ptr,
- src_ptr_ty.ptrAlignment(pt).toLlvm(),
+ src_ptr_ty.ptrAlignment(zcu).toLlvm(),
len,
access_kind,
);
@@ -9489,9 +9493,9 @@ pub const FuncGen = struct {
_ = try self.wip.callMemCpy(
dest_ptr,
- dest_ptr_ty.ptrAlignment(pt).toLlvm(),
+ dest_ptr_ty.ptrAlignment(zcu).toLlvm(),
src_ptr,
- src_ptr_ty.ptrAlignment(pt).toLlvm(),
+ src_ptr_ty.ptrAlignment(zcu).toLlvm(),
len,
access_kind,
);
@@ -9501,10 +9505,10 @@ pub const FuncGen = struct {
fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const un_ty = self.typeOf(bin_op.lhs).childType(mod);
- const layout = un_ty.unionGetLayout(pt);
+ const un_ty = self.typeOf(bin_op.lhs).childType(zcu);
+ const layout = un_ty.unionGetLayout(zcu);
if (layout.tag_size == 0) return .none;
const union_ptr = try self.resolveInst(bin_op.lhs);
const new_tag = try self.resolveInst(bin_op.rhs);
@@ -9523,12 +9527,13 @@ pub const FuncGen = struct {
fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const un_ty = self.typeOf(ty_op.operand);
- const layout = un_ty.unionGetLayout(pt);
+ const layout = un_ty.unionGetLayout(zcu);
if (layout.tag_size == 0) return .none;
const union_handle = try self.resolveInst(ty_op.operand);
- if (isByRef(un_ty, pt)) {
+ if (isByRef(un_ty, zcu)) {
const llvm_un_ty = try o.lowerType(un_ty);
if (layout.payload_size == 0)
return self.wip.load(.normal, llvm_un_ty, union_handle, .default, "");
@@ -9597,10 +9602,10 @@ pub const FuncGen = struct {
fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
+ const zcu = o.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
- var bits = operand_ty.intInfo(mod).bits;
+ var bits = operand_ty.intInfo(zcu).bits;
assert(bits % 8 == 0);
const inst_ty = self.typeOfIndex(inst);
@@ -9611,8 +9616,8 @@ pub const FuncGen = struct {
// If not an even byte-multiple, we need zero-extend + shift-left 1 byte
// The truncated result at the end will be the correct bswap
const scalar_ty = try o.builder.intType(@intCast(bits + 8));
- if (operand_ty.zigTypeTag(mod) == .Vector) {
- const vec_len = operand_ty.vectorLen(mod);
+ if (operand_ty.zigTypeTag(zcu) == .Vector) {
+ const vec_len = operand_ty.vectorLen(zcu);
llvm_operand_ty = try o.builder.vectorType(.normal, vec_len, scalar_ty);
} else llvm_operand_ty = scalar_ty;
@@ -9631,13 +9636,13 @@ pub const FuncGen = struct {
fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = o.pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const error_set_ty = ty_op.ty.toType();
- const names = error_set_ty.errorSetNames(mod);
+ const names = error_set_ty.errorSetNames(zcu);
const valid_block = try self.wip.block(@intCast(names.len), "Valid");
const invalid_block = try self.wip.block(1, "Invalid");
const end_block = try self.wip.block(2, "End");
@@ -9790,14 +9795,14 @@ pub const FuncGen = struct {
fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolveInst(extra.a);
const b = try self.resolveInst(extra.b);
const mask = Value.fromInterned(extra.mask);
const mask_len = extra.mask_len;
- const a_len = self.typeOf(extra.a).vectorLen(mod);
+ const a_len = self.typeOf(extra.a).vectorLen(zcu);
// LLVM uses integers larger than the length of the first array to
// index into the second array. This was deemed unnecessarily fragile
@@ -9809,10 +9814,10 @@ pub const FuncGen = struct {
for (values, 0..) |*val, i| {
const elem = try mask.elemValue(pt, i);
- if (elem.isUndef(mod)) {
+ if (elem.isUndef(zcu)) {
val.* = try o.builder.undefConst(.i32);
} else {
- const int = elem.toSignedInt(pt);
+ const int = elem.toSignedInt(zcu);
const unsigned: u32 = @intCast(if (int >= 0) int else ~int + a_len);
val.* = try o.builder.intConst(.i32, unsigned);
}
@@ -9899,8 +9904,8 @@ pub const FuncGen = struct {
fn airReduce(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value {
const o = self.ng.object;
- const mod = o.pt.zcu;
- const target = mod.getTarget();
+ const zcu = o.pt.zcu;
+ const target = zcu.getTarget();
const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
const operand = try self.resolveInst(reduce.operand);
@@ -9916,13 +9921,13 @@ pub const FuncGen = struct {
.Xor => .@"vector.reduce.xor",
else => unreachable,
}, &.{llvm_operand_ty}, &.{operand}, ""),
- .Min, .Max => switch (scalar_ty.zigTypeTag(mod)) {
+ .Min, .Max => switch (scalar_ty.zigTypeTag(zcu)) {
.Int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) {
- .Min => if (scalar_ty.isSignedInt(mod))
+ .Min => if (scalar_ty.isSignedInt(zcu))
.@"vector.reduce.smin"
else
.@"vector.reduce.umin",
- .Max => if (scalar_ty.isSignedInt(mod))
+ .Max => if (scalar_ty.isSignedInt(zcu))
.@"vector.reduce.smax"
else
.@"vector.reduce.umax",
@@ -9936,7 +9941,7 @@ pub const FuncGen = struct {
}, &.{llvm_operand_ty}, &.{operand}, ""),
else => unreachable,
},
- .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) {
+ .Add, .Mul => switch (scalar_ty.zigTypeTag(zcu)) {
.Int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) {
.Add => .@"vector.reduce.add",
.Mul => .@"vector.reduce.mul",
@@ -10004,21 +10009,21 @@ pub const FuncGen = struct {
))),
else => unreachable,
};
- return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_val);
+ return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(zcu), init_val);
}
fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const result_ty = self.typeOfIndex(inst);
- const len: usize = @intCast(result_ty.arrayLen(mod));
+ const len: usize = @intCast(result_ty.arrayLen(zcu));
const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
const llvm_result_ty = try o.lowerType(result_ty);
- switch (result_ty.zigTypeTag(mod)) {
+ switch (result_ty.zigTypeTag(zcu)) {
.Vector => {
var vector = try o.builder.poisonValue(llvm_result_ty);
for (elements, 0..) |elem, i| {
@@ -10029,21 +10034,21 @@ pub const FuncGen = struct {
return vector;
},
.Struct => {
- if (mod.typeToPackedStruct(result_ty)) |struct_type| {
+ if (zcu.typeToPackedStruct(result_ty)) |struct_type| {
const backing_int_ty = struct_type.backingIntTypeUnordered(ip);
assert(backing_int_ty != .none);
- const big_bits = Type.fromInterned(backing_int_ty).bitSize(pt);
+ const big_bits = Type.fromInterned(backing_int_ty).bitSize(zcu);
const int_ty = try o.builder.intType(@intCast(big_bits));
comptime assert(Type.packed_struct_layout_version == 2);
var running_int = try o.builder.intValue(int_ty, 0);
var running_bits: u16 = 0;
for (elements, struct_type.field_types.get(ip)) |elem, field_ty| {
- if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(zcu)) continue;
const non_int_val = try self.resolveInst(elem);
- const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(pt));
+ const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(zcu));
const small_int_ty = try o.builder.intType(ty_bit_size);
- const small_int_val = if (Type.fromInterned(field_ty).isPtrAtRuntime(mod))
+ const small_int_val = if (Type.fromInterned(field_ty).isPtrAtRuntime(zcu))
try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "")
else
try self.wip.cast(.bitcast, non_int_val, small_int_ty, "");
@@ -10057,12 +10062,12 @@ pub const FuncGen = struct {
return running_int;
}
- assert(result_ty.containerLayout(mod) != .@"packed");
+ assert(result_ty.containerLayout(zcu) != .@"packed");
- if (isByRef(result_ty, pt)) {
+ if (isByRef(result_ty, zcu)) {
// TODO in debug builds init to undef so that the padding will be 0xaa
// even if we fully populate the fields.
- const alignment = result_ty.abiAlignment(pt).toLlvm();
+ const alignment = result_ty.abiAlignment(zcu).toLlvm();
const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment);
for (elements, 0..) |elem, i| {
@@ -10075,7 +10080,7 @@ pub const FuncGen = struct {
const field_ptr_ty = try pt.ptrType(.{
.child = self.typeOf(elem).toIntern(),
.flags = .{
- .alignment = result_ty.structFieldAlign(i, pt),
+ .alignment = result_ty.structFieldAlign(i, zcu),
},
});
try self.store(field_ptr, field_ptr_ty, llvm_elem, .none);
@@ -10095,14 +10100,14 @@ pub const FuncGen = struct {
}
},
.Array => {
- assert(isByRef(result_ty, pt));
+ assert(isByRef(result_ty, zcu));
const llvm_usize = try o.lowerType(Type.usize);
const usize_zero = try o.builder.intValue(llvm_usize, 0);
- const alignment = result_ty.abiAlignment(pt).toLlvm();
+ const alignment = result_ty.abiAlignment(zcu).toLlvm();
const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment);
- const array_info = result_ty.arrayInfo(mod);
+ const array_info = result_ty.arrayInfo(zcu);
const elem_ptr_ty = try pt.ptrType(.{
.child = array_info.elem_type.toIntern(),
});
@@ -10131,22 +10136,22 @@ pub const FuncGen = struct {
fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
const union_ty = self.typeOfIndex(inst);
const union_llvm_ty = try o.lowerType(union_ty);
- const layout = union_ty.unionGetLayout(pt);
- const union_obj = mod.typeToUnion(union_ty).?;
+ const layout = union_ty.unionGetLayout(zcu);
+ const union_obj = zcu.typeToUnion(union_ty).?;
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
- const big_bits = union_ty.bitSize(pt);
+ const big_bits = union_ty.bitSize(zcu);
const int_llvm_ty = try o.builder.intType(@intCast(big_bits));
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const non_int_val = try self.resolveInst(extra.init);
- const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(pt)));
- const small_int_val = if (field_ty.isPtrAtRuntime(mod))
+ const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
+ const small_int_val = if (field_ty.isPtrAtRuntime(zcu))
try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "")
else
try self.wip.cast(.bitcast, non_int_val, small_int_ty, "");
@@ -10154,9 +10159,9 @@ pub const FuncGen = struct {
}
const tag_int_val = blk: {
- const tag_ty = union_ty.unionTagTypeHypothetical(mod);
+ const tag_ty = union_ty.unionTagTypeHypothetical(zcu);
const union_field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
- const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
+ const enum_field_index = tag_ty.enumFieldIndex(union_field_name, zcu).?;
const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index);
break :blk try tag_val.intFromEnum(tag_ty, pt);
};
@@ -10164,12 +10169,12 @@ pub const FuncGen = struct {
if (layout.tag_size == 0) {
return .none;
}
- assert(!isByRef(union_ty, pt));
+ assert(!isByRef(union_ty, zcu));
var big_int_space: Value.BigIntSpace = undefined;
- const tag_big_int = tag_int_val.toBigInt(&big_int_space, pt);
+ const tag_big_int = tag_int_val.toBigInt(&big_int_space, zcu);
return try o.builder.bigIntValue(union_llvm_ty, tag_big_int);
}
- assert(isByRef(union_ty, pt));
+ assert(isByRef(union_ty, zcu));
// The llvm type of the alloca will be the named LLVM union type, and will not
// necessarily match the format that we need, depending on which tag is active.
// We must construct the correct unnamed struct type here, in order to then set
@@ -10179,14 +10184,14 @@ pub const FuncGen = struct {
const llvm_payload = try self.resolveInst(extra.init);
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const field_llvm_ty = try o.lowerType(field_ty);
- const field_size = field_ty.abiSize(pt);
- const field_align = pt.unionFieldNormalAlignment(union_obj, extra.field_index);
+ const field_size = field_ty.abiSize(zcu);
+ const field_align = Type.unionFieldNormalAlignment(union_obj, extra.field_index, zcu);
const llvm_usize = try o.lowerType(Type.usize);
const usize_zero = try o.builder.intValue(llvm_usize, 0);
const llvm_union_ty = t: {
const payload_ty = p: {
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const padding_len = layout.payload_size;
break :p try o.builder.arrayType(padding_len, .i8);
}
@@ -10242,9 +10247,9 @@ pub const FuncGen = struct {
const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, &indices, "");
const tag_ty = try o.lowerType(Type.fromInterned(union_obj.enum_tag_ty));
var big_int_space: Value.BigIntSpace = undefined;
- const tag_big_int = tag_int_val.toBigInt(&big_int_space, pt);
+ const tag_big_int = tag_int_val.toBigInt(&big_int_space, zcu);
const llvm_tag = try o.builder.bigIntValue(tag_ty, tag_big_int);
- const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(pt).toLlvm();
+ const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(zcu).toLlvm();
_ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment);
}
@@ -10270,8 +10275,8 @@ pub const FuncGen = struct {
// by the target.
// To work around this, don't emit llvm.prefetch in this case.
// See https://bugs.llvm.org/show_bug.cgi?id=21037
- const mod = o.pt.zcu;
- const target = mod.getTarget();
+ const zcu = o.pt.zcu;
+ const target = zcu.getTarget();
switch (prefetch.cache) {
.instruction => switch (target.cpu.arch) {
.x86_64,
@@ -10397,7 +10402,7 @@ pub const FuncGen = struct {
variable_index.setMutability(.constant, &o.builder);
variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
variable_index.setAlignment(
- Type.slice_const_u8_sentinel_0.abiAlignment(pt).toLlvm(),
+ Type.slice_const_u8_sentinel_0.abiAlignment(pt.zcu).toLlvm(),
&o.builder,
);
@@ -10436,15 +10441,15 @@ pub const FuncGen = struct {
) !Builder.Value {
const o = fg.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const payload_ty = opt_ty.optionalChild(mod);
+ const zcu = pt.zcu;
+ const payload_ty = opt_ty.optionalChild(zcu);
- if (isByRef(opt_ty, pt)) {
+ if (isByRef(opt_ty, zcu)) {
// We have a pointer and we need to return a pointer to the first field.
const payload_ptr = try fg.wip.gepStruct(opt_llvm_ty, opt_handle, 0, "");
- const payload_alignment = payload_ty.abiAlignment(pt).toLlvm();
- if (isByRef(payload_ty, pt)) {
+ const payload_alignment = payload_ty.abiAlignment(zcu).toLlvm();
+ if (isByRef(payload_ty, zcu)) {
if (can_elide_load)
return payload_ptr;
@@ -10453,7 +10458,7 @@ pub const FuncGen = struct {
return fg.loadTruncate(.normal, payload_ty, payload_ptr, payload_alignment);
}
- assert(!isByRef(payload_ty, pt));
+ assert(!isByRef(payload_ty, zcu));
return fg.wip.extractValue(opt_handle, &.{0}, "");
}
@@ -10465,11 +10470,12 @@ pub const FuncGen = struct {
) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
+ const zcu = pt.zcu;
const optional_llvm_ty = try o.lowerType(optional_ty);
const non_null_field = try self.wip.cast(.zext, non_null_bit, .i8, "");
- if (isByRef(optional_ty, pt)) {
- const payload_alignment = optional_ty.abiAlignment(pt).toLlvm();
+ if (isByRef(optional_ty, zcu)) {
+ const payload_alignment = optional_ty.abiAlignment(pt.zcu).toLlvm();
const alloca_inst = try self.buildAllocaWorkaround(optional_ty, payload_alignment);
{
@@ -10497,15 +10503,15 @@ pub const FuncGen = struct {
) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const struct_ty = struct_ptr_ty.childType(mod);
- switch (struct_ty.zigTypeTag(mod)) {
- .Struct => switch (struct_ty.containerLayout(mod)) {
+ const zcu = pt.zcu;
+ const struct_ty = struct_ptr_ty.childType(zcu);
+ switch (struct_ty.zigTypeTag(zcu)) {
+ .Struct => switch (struct_ty.containerLayout(zcu)) {
.@"packed" => {
const result_ty = self.typeOfIndex(inst);
- const result_ty_info = result_ty.ptrInfo(mod);
- const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
- const struct_type = mod.typeToStruct(struct_ty).?;
+ const result_ty_info = result_ty.ptrInfo(zcu);
+ const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(zcu);
+ const struct_type = zcu.typeToStruct(struct_ty).?;
if (result_ty_info.packed_offset.host_size != 0) {
// From LLVM's perspective, a pointer to a packed struct and a pointer
@@ -10535,15 +10541,15 @@ pub const FuncGen = struct {
// the struct.
const llvm_index = try o.builder.intValue(
try o.lowerType(Type.usize),
- @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(pt)),
+ @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(zcu)),
);
return self.wip.gep(.inbounds, struct_llvm_ty, struct_ptr, &.{llvm_index}, "");
}
},
},
.Union => {
- const layout = struct_ty.unionGetLayout(pt);
- if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .@"packed") return struct_ptr;
+ const layout = struct_ty.unionGetLayout(zcu);
+ if (layout.payload_size == 0 or struct_ty.containerLayout(zcu) == .@"packed") return struct_ptr;
const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
const union_llvm_ty = try o.lowerType(struct_ty);
return self.wip.gepStruct(union_llvm_ty, struct_ptr, payload_index, "");
@@ -10566,9 +10572,9 @@ pub const FuncGen = struct {
const o = fg.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const payload_llvm_ty = try o.lowerType(payload_ty);
- const abi_size = payload_ty.abiSize(pt);
+ const abi_size = payload_ty.abiSize(zcu);
// llvm bug workarounds:
const workaround_explicit_mask = o.target.cpu.arch == .powerpc and abi_size >= 4;
@@ -10580,7 +10586,7 @@ pub const FuncGen = struct {
return try fg.wip.load(access_kind, payload_llvm_ty, payload_ptr, payload_alignment, "");
}
- const load_llvm_ty = if (payload_ty.isAbiInt(mod))
+ const load_llvm_ty = if (payload_ty.isAbiInt(zcu))
try o.builder.intType(@intCast(abi_size * 8))
else
payload_llvm_ty;
@@ -10588,7 +10594,7 @@ pub const FuncGen = struct {
const shifted = if (payload_llvm_ty != load_llvm_ty and o.target.cpu.arch.endian() == .big)
try fg.wip.bin(.lshr, loaded, try o.builder.intValue(
load_llvm_ty,
- (payload_ty.abiSize(pt) - (std.math.divCeil(u64, payload_ty.bitSize(pt), 8) catch unreachable)) * 8,
+ (payload_ty.abiSize(zcu) - (std.math.divCeil(u64, payload_ty.bitSize(zcu), 8) catch unreachable)) * 8,
), "")
else
loaded;
@@ -10614,9 +10620,10 @@ pub const FuncGen = struct {
const o = fg.ng.object;
const pt = o.pt;
//const pointee_llvm_ty = try o.lowerType(pointee_type);
- const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(pt)).toLlvm();
+ const result_align = InternPool.Alignment.fromLlvm(ptr_alignment)
+ .max(pointee_type.abiAlignment(pt.zcu)).toLlvm();
const result_ptr = try fg.buildAllocaWorkaround(pointee_type, result_align);
- const size_bytes = pointee_type.abiSize(pt);
+ const size_bytes = pointee_type.abiSize(pt.zcu);
_ = try fg.wip.callMemCpy(
result_ptr,
result_align,
@@ -10634,15 +10641,15 @@ pub const FuncGen = struct {
fn load(self: *FuncGen, ptr: Builder.Value, ptr_ty: Type) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const info = ptr_ty.ptrInfo(mod);
+ const zcu = pt.zcu;
+ const info = ptr_ty.ptrInfo(zcu);
const elem_ty = Type.fromInterned(info.child);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;
const ptr_alignment = (if (info.flags.alignment != .none)
@as(InternPool.Alignment, info.flags.alignment)
else
- elem_ty.abiAlignment(pt)).toLlvm();
+ elem_ty.abiAlignment(zcu)).toLlvm();
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;
@@ -10658,7 +10665,7 @@ pub const FuncGen = struct {
}
if (info.packed_offset.host_size == 0) {
- if (isByRef(elem_ty, pt)) {
+ if (isByRef(elem_ty, zcu)) {
return self.loadByRef(ptr, elem_ty, ptr_alignment, access_kind);
}
return self.loadTruncate(access_kind, elem_ty, ptr, ptr_alignment);
@@ -10668,13 +10675,13 @@ pub const FuncGen = struct {
const containing_int =
try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, "");
- const elem_bits = ptr_ty.childType(mod).bitSize(pt);
+ const elem_bits = ptr_ty.childType(zcu).bitSize(zcu);
const shift_amt = try o.builder.intValue(containing_int_ty, info.packed_offset.bit_offset);
const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, "");
const elem_llvm_ty = try o.lowerType(elem_ty);
- if (isByRef(elem_ty, pt)) {
- const result_align = elem_ty.abiAlignment(pt).toLlvm();
+ if (isByRef(elem_ty, zcu)) {
+ const result_align = elem_ty.abiAlignment(zcu).toLlvm();
const result_ptr = try self.buildAllocaWorkaround(elem_ty, result_align);
const same_size_int = try o.builder.intType(@intCast(elem_bits));
@@ -10683,13 +10690,13 @@ pub const FuncGen = struct {
return result_ptr;
}
- if (elem_ty.zigTypeTag(mod) == .Float or elem_ty.zigTypeTag(mod) == .Vector) {
+ if (elem_ty.zigTypeTag(zcu) == .Float or elem_ty.zigTypeTag(zcu) == .Vector) {
const same_size_int = try o.builder.intType(@intCast(elem_bits));
const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, "");
return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
}
- if (elem_ty.isPtrAtRuntime(mod)) {
+ if (elem_ty.isPtrAtRuntime(zcu)) {
const same_size_int = try o.builder.intType(@intCast(elem_bits));
const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, "");
return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
@@ -10707,13 +10714,13 @@ pub const FuncGen = struct {
) !void {
const o = self.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const info = ptr_ty.ptrInfo(mod);
+ const zcu = pt.zcu;
+ const info = ptr_ty.ptrInfo(zcu);
const elem_ty = Type.fromInterned(info.child);
- if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
return;
}
- const ptr_alignment = ptr_ty.ptrAlignment(pt).toLlvm();
+ const ptr_alignment = ptr_ty.ptrAlignment(zcu).toLlvm();
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;
@@ -10737,12 +10744,12 @@ pub const FuncGen = struct {
assert(ordering == .none);
const containing_int =
try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, "");
- const elem_bits = ptr_ty.childType(mod).bitSize(pt);
+ const elem_bits = ptr_ty.childType(zcu).bitSize(zcu);
const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset);
// Convert to equally-sized integer type in order to perform the bit
// operations on the value to store
const value_bits_type = try o.builder.intType(@intCast(elem_bits));
- const value_bits = if (elem_ty.isPtrAtRuntime(mod))
+ const value_bits = if (elem_ty.isPtrAtRuntime(zcu))
try self.wip.cast(.ptrtoint, elem, value_bits_type, "")
else
try self.wip.cast(.bitcast, elem, value_bits_type, "");
@@ -10772,7 +10779,7 @@ pub const FuncGen = struct {
_ = try self.wip.store(access_kind, ored_value, ptr, ptr_alignment);
return;
}
- if (!isByRef(elem_ty, pt)) {
+ if (!isByRef(elem_ty, zcu)) {
_ = try self.wip.storeAtomic(
access_kind,
elem,
@@ -10788,8 +10795,8 @@ pub const FuncGen = struct {
ptr,
ptr_alignment,
elem,
- elem_ty.abiAlignment(pt).toLlvm(),
- try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(pt)),
+ elem_ty.abiAlignment(zcu).toLlvm(),
+ try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(zcu)),
access_kind,
);
}
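For the packed store path above (the branch that builds ored_value), a rough plain-Zig equivalent of the read-modify-write, again with illustrative names and an assumed u8 field inside a u32 host integer:
fn storePackedU8(host: u32, value: u8, bit_offset: u5) u32 {
    const mask = @as(u32, 0xFF) << bit_offset;        // bits occupied by the field
    const cleared = host & ~mask;                     // clear the old field bits
    return cleared | (@as(u32, value) << bit_offset); // OR in the shifted new value
}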
@@ -10816,12 +10823,12 @@ pub const FuncGen = struct {
) Allocator.Error!Builder.Value {
const o = fg.ng.object;
const pt = o.pt;
- const mod = pt.zcu;
- const target = mod.getTarget();
+ const zcu = pt.zcu;
+ const target = zcu.getTarget();
if (!target_util.hasValgrindSupport(target)) return default_value;
const llvm_usize = try o.lowerType(Type.usize);
- const usize_alignment = Type.usize.abiAlignment(pt).toLlvm();
+ const usize_alignment = Type.usize.abiAlignment(zcu).toLlvm();
const array_llvm_ty = try o.builder.arrayType(6, llvm_usize);
const array_ptr = if (fg.valgrind_client_request_array == .none) a: {
@@ -10882,14 +10889,14 @@ pub const FuncGen = struct {
fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type {
const o = fg.ng.object;
- const mod = o.pt.zcu;
- return fg.air.typeOf(inst, &mod.intern_pool);
+ const zcu = o.pt.zcu;
+ return fg.air.typeOf(inst, &zcu.intern_pool);
}
fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type {
const o = fg.ng.object;
- const mod = o.pt.zcu;
- return fg.air.typeOfIndex(inst, &mod.intern_pool);
+ const zcu = o.pt.zcu;
+ return fg.air.typeOfIndex(inst, &zcu.intern_pool);
}
};
@@ -11059,12 +11066,12 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
};
}
-fn returnTypeByRef(pt: Zcu.PerThread, target: std.Target, ty: Type) bool {
- if (isByRef(ty, pt)) {
+fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool {
+ if (isByRef(ty, zcu)) {
return true;
} else if (target.cpu.arch.isX86() and
!std.Target.x86.featureSetHas(target.cpu.features, .evex512) and
- ty.totalVectorBits(pt) >= 512)
+ ty.totalVectorBits(zcu) >= 512)
{
// As of LLVM 18, passing a vector byval with fastcc that is 512 bits or more returns
// "512-bit vector arguments require 'evex512' for AVX512"
@@ -11074,38 +11081,38 @@ fn returnTypeByRef(pt: Zcu.PerThread, target: std.Target, ty: Type) bool {
}
}
-fn firstParamSRet(fn_info: InternPool.Key.FuncType, pt: Zcu.PerThread, target: std.Target) bool {
+fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Target) bool {
const return_type = Type.fromInterned(fn_info.return_type);
- if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) return false;
+ if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false;
return switch (fn_info.cc) {
- .Unspecified, .Inline => returnTypeByRef(pt, target, return_type),
+ .Unspecified, .Inline => returnTypeByRef(zcu, target, return_type),
.C => switch (target.cpu.arch) {
.mips, .mipsel => false,
- .x86 => isByRef(return_type, pt),
+ .x86 => isByRef(return_type, zcu),
.x86_64 => switch (target.os.tag) {
- .windows => x86_64_abi.classifyWindows(return_type, pt) == .memory,
- else => firstParamSRetSystemV(return_type, pt, target),
+ .windows => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
+ else => firstParamSRetSystemV(return_type, zcu, target),
},
- .wasm32 => wasm_c_abi.classifyType(return_type, pt)[0] == .indirect,
- .aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, pt) == .memory,
- .arm, .armeb => switch (arm_c_abi.classifyType(return_type, pt, .ret)) {
+ .wasm32 => wasm_c_abi.classifyType(return_type, zcu)[0] == .indirect,
+ .aarch64, .aarch64_be => aarch64_c_abi.classifyType(return_type, zcu) == .memory,
+ .arm, .armeb => switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i64_array => true,
.i32_array => |size| size != 1,
.byval => false,
},
- .riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, pt) == .memory,
+ .riscv32, .riscv64 => riscv_c_abi.classifyType(return_type, zcu) == .memory,
else => false, // TODO investigate C ABI for other architectures
},
- .SysV => firstParamSRetSystemV(return_type, pt, target),
- .Win64 => x86_64_abi.classifyWindows(return_type, pt) == .memory,
- .Stdcall => !isScalar(pt.zcu, return_type),
+ .SysV => firstParamSRetSystemV(return_type, zcu, target),
+ .Win64 => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
+ .Stdcall => !isScalar(zcu, return_type),
else => false,
};
}
-fn firstParamSRetSystemV(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
- const class = x86_64_abi.classifySystemV(ty, pt, target, .ret);
+fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool {
+ const class = x86_64_abi.classifySystemV(ty, zcu, target, .ret);
if (class[0] == .memory) return true;
if (class[0] == .x87 and class[2] != .none) return true;
return false;
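As a hedged illustration of what an sret lowering means at the call site (the types and functions below are hypothetical, not taken from this patch): when firstParamSRet returns true, a by-value return is compiled roughly as if the caller allocated the result and passed its address as a hidden first argument.
const Big = extern struct { words: [8]u64 };

fn makeBigInto(out: *Big) void {
    out.* = .{ .words = [_]u64{0} ** 8 }; // the callee writes through the sret pointer
}

fn caller() Big {
    var result: Big = undefined;
    makeBigInto(&result); // the "hidden" first parameter, written out explicitly
    return result;
}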
@@ -11116,62 +11123,62 @@ fn firstParamSRetSystemV(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
/// be effectively bitcasted to the actual return type.
fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
const pt = o.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const return_type = Type.fromInterned(fn_info.return_type);
- if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
// If the return type is an error set or an error union, then we make this
// anyerror return type instead, so that it can be coerced into a function
// pointer type which has anyerror as the return type.
- return if (return_type.isError(mod)) try o.errorIntType() else .void;
+ return if (return_type.isError(zcu)) try o.errorIntType() else .void;
}
- const target = mod.getTarget();
+ const target = zcu.getTarget();
switch (fn_info.cc) {
.Unspecified,
.Inline,
- => return if (returnTypeByRef(pt, target, return_type)) .void else o.lowerType(return_type),
+ => return if (returnTypeByRef(zcu, target, return_type)) .void else o.lowerType(return_type),
.C => {
switch (target.cpu.arch) {
.mips, .mipsel => return o.lowerType(return_type),
- .x86 => return if (isByRef(return_type, pt)) .void else o.lowerType(return_type),
+ .x86 => return if (isByRef(return_type, zcu)) .void else o.lowerType(return_type),
.x86_64 => switch (target.os.tag) {
.windows => return lowerWin64FnRetTy(o, fn_info),
else => return lowerSystemVFnRetTy(o, fn_info),
},
.wasm32 => {
- if (isScalar(mod, return_type)) {
+ if (isScalar(zcu, return_type)) {
return o.lowerType(return_type);
}
- const classes = wasm_c_abi.classifyType(return_type, pt);
+ const classes = wasm_c_abi.classifyType(return_type, zcu);
if (classes[0] == .indirect or classes[0] == .none) {
return .void;
}
assert(classes[0] == .direct and classes[1] == .none);
- const scalar_type = wasm_c_abi.scalarType(return_type, pt);
- return o.builder.intType(@intCast(scalar_type.abiSize(pt) * 8));
+ const scalar_type = wasm_c_abi.scalarType(return_type, zcu);
+ return o.builder.intType(@intCast(scalar_type.abiSize(zcu) * 8));
},
.aarch64, .aarch64_be => {
- switch (aarch64_c_abi.classifyType(return_type, pt)) {
+ switch (aarch64_c_abi.classifyType(return_type, zcu)) {
.memory => return .void,
.float_array => return o.lowerType(return_type),
.byval => return o.lowerType(return_type),
- .integer => return o.builder.intType(@intCast(return_type.bitSize(pt))),
+ .integer => return o.builder.intType(@intCast(return_type.bitSize(zcu))),
.double_integer => return o.builder.arrayType(2, .i64),
}
},
.arm, .armeb => {
- switch (arm_c_abi.classifyType(return_type, pt, .ret)) {
+ switch (arm_c_abi.classifyType(return_type, zcu, .ret)) {
.memory, .i64_array => return .void,
.i32_array => |len| return if (len == 1) .i32 else .void,
.byval => return o.lowerType(return_type),
}
},
.riscv32, .riscv64 => {
- switch (riscv_c_abi.classifyType(return_type, pt)) {
+ switch (riscv_c_abi.classifyType(return_type, zcu)) {
.memory => return .void,
.integer => {
- return o.builder.intType(@intCast(return_type.bitSize(pt)));
+ return o.builder.intType(@intCast(return_type.bitSize(zcu)));
},
.double_integer => {
return o.builder.structType(.normal, &.{ .i64, .i64 });
@@ -11180,9 +11187,9 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
.fields => {
var types_len: usize = 0;
var types: [8]Builder.Type = undefined;
- for (0..return_type.structFieldCount(mod)) |field_index| {
- const field_ty = return_type.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ for (0..return_type.structFieldCount(zcu)) |field_index| {
+ const field_ty = return_type.structFieldType(field_index, zcu);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
types[types_len] = try o.lowerType(field_ty);
types_len += 1;
}
@@ -11196,20 +11203,20 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
},
.Win64 => return lowerWin64FnRetTy(o, fn_info),
.SysV => return lowerSystemVFnRetTy(o, fn_info),
- .Stdcall => return if (isScalar(mod, return_type)) o.lowerType(return_type) else .void,
+ .Stdcall => return if (isScalar(zcu, return_type)) o.lowerType(return_type) else .void,
else => return o.lowerType(return_type),
}
}
fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
- const pt = o.pt;
+ const zcu = o.pt.zcu;
const return_type = Type.fromInterned(fn_info.return_type);
- switch (x86_64_abi.classifyWindows(return_type, pt)) {
+ switch (x86_64_abi.classifyWindows(return_type, zcu)) {
.integer => {
- if (isScalar(pt.zcu, return_type)) {
+ if (isScalar(zcu, return_type)) {
return o.lowerType(return_type);
} else {
- return o.builder.intType(@intCast(return_type.abiSize(pt) * 8));
+ return o.builder.intType(@intCast(return_type.abiSize(zcu) * 8));
}
},
.win_i128 => return o.builder.vectorType(.normal, 2, .i64),
@@ -11221,14 +11228,14 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Err
fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
const pt = o.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const return_type = Type.fromInterned(fn_info.return_type);
- if (isScalar(mod, return_type)) {
+ if (isScalar(zcu, return_type)) {
return o.lowerType(return_type);
}
- const target = mod.getTarget();
- const classes = x86_64_abi.classifySystemV(return_type, pt, target, .ret);
+ const target = zcu.getTarget();
+ const classes = x86_64_abi.classifySystemV(return_type, zcu, target, .ret);
if (classes[0] == .memory) return .void;
var types_index: u32 = 0;
var types_buffer: [8]Builder.Type = undefined;
@@ -11345,7 +11352,7 @@ const ParamTypeIterator = struct {
const zcu = pt.zcu;
const target = zcu.getTarget();
- if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
it.zig_index += 1;
return .no_bits;
}
@@ -11358,11 +11365,11 @@ const ParamTypeIterator = struct {
{
it.llvm_index += 1;
return .slice;
- } else if (isByRef(ty, pt)) {
+ } else if (isByRef(ty, zcu)) {
return .byref;
} else if (target.cpu.arch.isX86() and
!std.Target.x86.featureSetHas(target.cpu.features, .evex512) and
- ty.totalVectorBits(pt) >= 512)
+ ty.totalVectorBits(zcu) >= 512)
{
// As of LLVM 18, passing a vector byval with fastcc that is 512 bits or more returns
// "512-bit vector arguments require 'evex512' for AVX512"
@@ -11390,7 +11397,7 @@ const ParamTypeIterator = struct {
if (isScalar(zcu, ty)) {
return .byval;
}
- const classes = wasm_c_abi.classifyType(ty, pt);
+ const classes = wasm_c_abi.classifyType(ty, zcu);
if (classes[0] == .indirect) {
return .byref;
}
@@ -11399,7 +11406,7 @@ const ParamTypeIterator = struct {
.aarch64, .aarch64_be => {
it.zig_index += 1;
it.llvm_index += 1;
- switch (aarch64_c_abi.classifyType(ty, pt)) {
+ switch (aarch64_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
@@ -11414,7 +11421,7 @@ const ParamTypeIterator = struct {
.arm, .armeb => {
it.zig_index += 1;
it.llvm_index += 1;
- switch (arm_c_abi.classifyType(ty, pt, .arg)) {
+ switch (arm_c_abi.classifyType(ty, zcu, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
@@ -11429,7 +11436,7 @@ const ParamTypeIterator = struct {
it.llvm_index += 1;
if (ty.toIntern() == .f16_type and
!std.Target.riscv.featureSetHas(target.cpu.features, .d)) return .as_u16;
- switch (riscv_c_abi.classifyType(ty, pt)) {
+ switch (riscv_c_abi.classifyType(ty, zcu)) {
.memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
@@ -11438,7 +11445,7 @@ const ParamTypeIterator = struct {
it.types_len = 0;
for (0..ty.structFieldCount(zcu)) |field_index| {
const field_ty = ty.structFieldType(field_index, zcu);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
it.types_len += 1;
}
@@ -11476,10 +11483,10 @@ const ParamTypeIterator = struct {
}
fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering {
- const pt = it.object.pt;
- switch (x86_64_abi.classifyWindows(ty, pt)) {
+ const zcu = it.object.pt.zcu;
+ switch (x86_64_abi.classifyWindows(ty, zcu)) {
.integer => {
- if (isScalar(pt.zcu, ty)) {
+ if (isScalar(zcu, ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
@@ -11509,17 +11516,17 @@ const ParamTypeIterator = struct {
}
fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
- const pt = it.object.pt;
- const ip = &pt.zcu.intern_pool;
- const target = pt.zcu.getTarget();
- const classes = x86_64_abi.classifySystemV(ty, pt, target, .arg);
+ const zcu = it.object.pt.zcu;
+ const ip = &zcu.intern_pool;
+ const target = zcu.getTarget();
+ const classes = x86_64_abi.classifySystemV(ty, zcu, target, .arg);
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
it.byval_attr = true;
return .byref;
}
- if (isScalar(pt.zcu, ty)) {
+ if (isScalar(zcu, ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
@@ -11620,17 +11627,17 @@ fn iterateParamTypes(object: *Object, fn_info: InternPool.Key.FuncType) ParamTyp
fn ccAbiPromoteInt(
cc: std.builtin.CallingConvention,
- mod: *Zcu,
+ zcu: *Zcu,
ty: Type,
) ?std.builtin.Signedness {
- const target = mod.getTarget();
+ const target = zcu.getTarget();
switch (cc) {
.Unspecified, .Inline, .Async => return null,
else => {},
}
- const int_info = switch (ty.zigTypeTag(mod)) {
- .Bool => Type.u1.intInfo(mod),
- .Int, .Enum, .ErrorSet => ty.intInfo(mod),
+ const int_info = switch (ty.zigTypeTag(zcu)) {
+ .Bool => Type.u1.intInfo(zcu),
+ .Int, .Enum, .ErrorSet => ty.intInfo(zcu),
else => return null,
};
return switch (target.os.tag) {
@@ -11668,13 +11675,13 @@ fn ccAbiPromoteInt(
/// This is the one source of truth for whether a type is passed around as an LLVM pointer,
/// or as an LLVM value.
-fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
+fn isByRef(ty: Type, zcu: *Zcu) bool {
// For tuples and structs, if there are more than this many non-void
// fields, then we make it byref, otherwise byval.
const max_fields_byval = 0;
- const ip = &pt.zcu.intern_pool;
+ const ip = &zcu.intern_pool;
- switch (ty.zigTypeTag(pt.zcu)) {
+ switch (ty.zigTypeTag(zcu)) {
.Type,
.ComptimeInt,
.ComptimeFloat,
@@ -11697,17 +11704,17 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
.AnyFrame,
=> return false,
- .Array, .Frame => return ty.hasRuntimeBits(pt),
+ .Array, .Frame => return ty.hasRuntimeBits(zcu),
.Struct => {
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
var count: usize = 0;
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
- if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
count += 1;
if (count > max_fields_byval) return true;
- if (isByRef(Type.fromInterned(field_ty), pt)) return true;
+ if (isByRef(Type.fromInterned(field_ty), zcu)) return true;
}
return false;
},
@@ -11725,27 +11732,27 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
count += 1;
if (count > max_fields_byval) return true;
const field_ty = Type.fromInterned(field_types[field_index]);
- if (isByRef(field_ty, pt)) return true;
+ if (isByRef(field_ty, zcu)) return true;
}
return false;
},
- .Union => switch (ty.containerLayout(pt.zcu)) {
+ .Union => switch (ty.containerLayout(zcu)) {
.@"packed" => return false,
- else => return ty.hasRuntimeBits(pt),
+ else => return ty.hasRuntimeBits(zcu),
},
.ErrorUnion => {
- const payload_ty = ty.errorUnionPayload(pt.zcu);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const payload_ty = ty.errorUnionPayload(zcu);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return false;
}
return true;
},
.Optional => {
- const payload_ty = ty.optionalChild(pt.zcu);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const payload_ty = ty.optionalChild(zcu);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return false;
}
- if (ty.optionalReprIsPayload(pt.zcu)) {
+ if (ty.optionalReprIsPayload(zcu)) {
return false;
}
return true;
@@ -11753,8 +11760,8 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
}
}
-fn isScalar(mod: *Zcu, ty: Type) bool {
- return switch (ty.zigTypeTag(mod)) {
+fn isScalar(zcu: *Zcu, ty: Type) bool {
+ return switch (ty.zigTypeTag(zcu)) {
.Void,
.Bool,
.NoReturn,
@@ -11768,8 +11775,8 @@ fn isScalar(mod: *Zcu, ty: Type) bool {
.Vector,
=> true,
- .Struct => ty.containerLayout(mod) == .@"packed",
- .Union => ty.containerLayout(mod) == .@"packed",
+ .Struct => ty.containerLayout(zcu) == .@"packed",
+ .Union => ty.containerLayout(zcu) == .@"packed",
else => false,
};
}
@@ -11892,13 +11899,15 @@ fn buildAllocaInner(
}
fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) !u1 {
+ const zcu = pt.zcu;
const err_int_ty = try pt.errorIntType();
- return @intFromBool(err_int_ty.abiAlignment(pt).compare(.gt, payload_ty.abiAlignment(pt)));
+ return @intFromBool(err_int_ty.abiAlignment(zcu).compare(.gt, payload_ty.abiAlignment(zcu)));
}
fn errUnionErrorOffset(payload_ty: Type, pt: Zcu.PerThread) !u1 {
+ const zcu = pt.zcu;
const err_int_ty = try pt.errorIntType();
- return @intFromBool(err_int_ty.abiAlignment(pt).compare(.lte, payload_ty.abiAlignment(pt)));
+ return @intFromBool(err_int_ty.abiAlignment(zcu).compare(.lte, payload_ty.abiAlignment(zcu)));
}
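A worked example of the two offset helpers above, assuming the usual 16-bit error integer: with an f64 payload the payload's ABI alignment (8) exceeds the error's (2), so the payload takes struct index 0 and the error index 1; a u8 payload flips the order.
test "error union field order, illustrative values" {
    const err_align: u32 = 2;     // assumed alignment of the error integer
    const payload_align: u32 = 8; // e.g. an f64 payload
    const payload_index: u1 = @intFromBool(err_align > payload_align);
    const error_index: u1 = @intFromBool(err_align <= payload_align);
    try @import("std").testing.expectEqual(@as(u1, 0), payload_index);
    try @import("std").testing.expectEqual(@as(u1, 1), error_index);
}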
/// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index a89dd8f10b..44b48efc43 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -436,16 +436,16 @@ const NavGen = struct {
/// Fetch the result-id for a previously generated instruction or constant.
fn resolve(self: *NavGen, inst: Air.Inst.Ref) !IdRef {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
if (try self.air.value(inst, pt)) |val| {
const ty = self.typeOf(inst);
- if (ty.zigTypeTag(mod) == .Fn) {
- const fn_nav = switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ if (ty.zigTypeTag(zcu) == .Fn) {
+ const fn_nav = switch (zcu.intern_pool.indexToKey(val.ip_index)) {
.@"extern" => |@"extern"| @"extern".owner_nav,
.func => |func| func.owner_nav,
else => unreachable,
};
- const spv_decl_index = try self.object.resolveNav(mod, fn_nav);
+ const spv_decl_index = try self.object.resolveNav(zcu, fn_nav);
try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {});
return self.spv.declPtr(spv_decl_index).result_id;
}
@@ -459,8 +459,8 @@ const NavGen = struct {
fn resolveUav(self: *NavGen, val: InternPool.Index) !IdRef {
// TODO: This cannot be a function at this point, but it should probably be handled anyway.
- const mod = self.pt.zcu;
- const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
+ const zcu = self.pt.zcu;
+ const ty = Type.fromInterned(zcu.intern_pool.typeOf(val));
const decl_ptr_ty_id = try self.ptrType(ty, .Generic);
const spv_decl_index = blk: {
@@ -639,15 +639,15 @@ const NavGen = struct {
/// Checks whether the type can be directly translated to SPIR-V vectors
fn isSpvVector(self: *NavGen, ty: Type) bool {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const target = self.getTarget();
- if (ty.zigTypeTag(mod) != .Vector) return false;
+ if (ty.zigTypeTag(zcu) != .Vector) return false;
// TODO: This check must be expanded for types that can be represented
// as integers (enums / packed structs?) and types that are represented
// by multiple SPIR-V values.
- const scalar_ty = ty.scalarType(mod);
- switch (scalar_ty.zigTypeTag(mod)) {
+ const scalar_ty = ty.scalarType(zcu);
+ switch (scalar_ty.zigTypeTag(zcu)) {
.Bool,
.Int,
.Float,
@@ -655,24 +655,24 @@ const NavGen = struct {
else => return false,
}
- const elem_ty = ty.childType(mod);
+ const elem_ty = ty.childType(zcu);
- const len = ty.vectorLen(mod);
- const is_scalar = elem_ty.isNumeric(mod) or elem_ty.toIntern() == .bool_type;
+ const len = ty.vectorLen(zcu);
+ const is_scalar = elem_ty.isNumeric(zcu) or elem_ty.toIntern() == .bool_type;
const spirv_len = len > 1 and len <= 4;
const opencl_len = if (target.os.tag == .opencl) (len == 8 or len == 16) else false;
return is_scalar and (spirv_len or opencl_len);
}
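A hedged restatement of the length rule above as a standalone predicate; the name vectorLenIsNativeSpv is illustrative, and the real check also consults the scalar type.
fn vectorLenIsNativeSpv(len: u32, is_opencl: bool) bool {
    const spirv_len = len > 1 and len <= 4;                               // core SPIR-V vector widths
    const opencl_len = if (is_opencl) (len == 8 or len == 16) else false; // extra OpenCL widths
    return spirv_len or opencl_len;
}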
fn arithmeticTypeInfo(self: *NavGen, ty: Type) ArithmeticTypeInfo {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const target = self.getTarget();
- var scalar_ty = ty.scalarType(mod);
- if (scalar_ty.zigTypeTag(mod) == .Enum) {
- scalar_ty = scalar_ty.intTagType(mod);
+ var scalar_ty = ty.scalarType(zcu);
+ if (scalar_ty.zigTypeTag(zcu) == .Enum) {
+ scalar_ty = scalar_ty.intTagType(zcu);
}
- const vector_len = if (ty.isVector(mod)) ty.vectorLen(mod) else null;
- return switch (scalar_ty.zigTypeTag(mod)) {
+ const vector_len = if (ty.isVector(zcu)) ty.vectorLen(zcu) else null;
+ return switch (scalar_ty.zigTypeTag(zcu)) {
.Bool => ArithmeticTypeInfo{
.bits = 1, // Doesn't matter for this class.
.backing_bits = self.backingIntBits(1).?,
@@ -688,7 +688,7 @@ const NavGen = struct {
.class = .float,
},
.Int => blk: {
- const int_info = scalar_ty.intInfo(mod);
+ const int_info = scalar_ty.intInfo(zcu);
// TODO: Maybe it's useful to also return this value.
const maybe_backing_bits = self.backingIntBits(int_info.bits);
break :blk ArithmeticTypeInfo{
@@ -741,9 +741,9 @@ const NavGen = struct {
/// the value to an unsigned int first for Kernels.
fn constInt(self: *NavGen, ty: Type, value: anytype, repr: Repr) !IdRef {
// TODO: Cache?
- const mod = self.pt.zcu;
- const scalar_ty = ty.scalarType(mod);
- const int_info = scalar_ty.intInfo(mod);
+ const zcu = self.pt.zcu;
+ const scalar_ty = ty.scalarType(zcu);
+ const int_info = scalar_ty.intInfo(zcu);
// Use backing bits so that negatives are sign extended
const backing_bits = self.backingIntBits(int_info.bits).?; // Assertion failure means big int
@@ -783,11 +783,11 @@ const NavGen = struct {
else => unreachable, // TODO: Large integer constants
}
- if (!ty.isVector(mod)) {
+ if (!ty.isVector(zcu)) {
return result_id;
}
- const n = ty.vectorLen(mod);
+ const n = ty.vectorLen(zcu);
const ids = try self.gpa.alloc(IdRef, n);
defer self.gpa.free(ids);
@memset(ids, result_id);
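To make the "backing bits" comment above concrete (a sketch, not compiler code; the 32-bit backing width is just an example): widening a negative narrow value to the backing width sign-extends it, so an i3 holding -1 becomes an all-ones backing word rather than only its three low bits.
test "narrow negative value widens with sign extension" {
    const v: i3 = -1;
    const backing: i32 = v; // coercion to the backing width sign-extends
    try @import("std").testing.expectEqual(@as(u32, 0xFFFF_FFFF), @as(u32, @bitCast(backing)));
}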
@@ -821,8 +821,8 @@ const NavGen = struct {
/// Construct a vector at runtime.
/// ty must be a vector type.
fn constructVector(self: *NavGen, ty: Type, constituents: []const IdRef) !IdRef {
- const mod = self.pt.zcu;
- assert(ty.vectorLen(mod) == constituents.len);
+ const zcu = self.pt.zcu;
+ assert(ty.vectorLen(zcu) == constituents.len);
// Note: older versions of the Khronos SPIRV-LLVM translator crash on this instruction
// because it cannot construct structs whose operands are not constant.
@@ -845,8 +845,8 @@ const NavGen = struct {
/// Construct a vector at runtime with all lanes set to the same value.
/// ty must be a vector type.
fn constructVectorSplat(self: *NavGen, ty: Type, constituent: IdRef) !IdRef {
- const mod = self.pt.zcu;
- const n = ty.vectorLen(mod);
+ const zcu = self.pt.zcu;
+ const n = ty.vectorLen(zcu);
const constituents = try self.gpa.alloc(IdRef, n);
defer self.gpa.free(constituents);
@@ -884,13 +884,13 @@ const NavGen = struct {
}
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const target = self.getTarget();
const result_ty_id = try self.resolveType(ty, repr);
- const ip = &mod.intern_pool;
+ const ip = &zcu.intern_pool;
log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(pt), val.fmtValue(pt) });
- if (val.isUndefDeep(mod)) {
+ if (val.isUndefDeep(zcu)) {
return self.spv.constUndef(result_ty_id);
}
@@ -937,17 +937,17 @@ const NavGen = struct {
.false, .true => break :cache try self.constBool(val.toBool(), repr),
},
.int => {
- if (ty.isSignedInt(mod)) {
- break :cache try self.constInt(ty, val.toSignedInt(pt), repr);
+ if (ty.isSignedInt(zcu)) {
+ break :cache try self.constInt(ty, val.toSignedInt(zcu), repr);
} else {
- break :cache try self.constInt(ty, val.toUnsignedInt(pt), repr);
+ break :cache try self.constInt(ty, val.toUnsignedInt(zcu), repr);
}
},
.float => {
const lit: spec.LiteralContextDependentNumber = switch (ty.floatBits(target)) {
- 16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, pt))) },
- 32 => .{ .float32 = val.toFloat(f32, pt) },
- 64 => .{ .float64 = val.toFloat(f64, pt) },
+ 16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, zcu))) },
+ 32 => .{ .float32 = val.toFloat(f32, zcu) },
+ 64 => .{ .float64 = val.toFloat(f64, zcu) },
80, 128 => unreachable, // TODO
else => unreachable,
};
@@ -968,17 +968,17 @@ const NavGen = struct {
// allows it. For now, just generate it here regardless.
const err_int_ty = try pt.errorIntType();
const err_ty = switch (error_union.val) {
- .err_name => ty.errorUnionSet(mod),
+ .err_name => ty.errorUnionSet(zcu),
.payload => err_int_ty,
};
const err_val = switch (error_union.val) {
.err_name => |err_name| Value.fromInterned(try pt.intern(.{ .err = .{
- .ty = ty.errorUnionSet(mod).toIntern(),
+ .ty = ty.errorUnionSet(zcu).toIntern(),
.name = err_name,
} })),
.payload => try pt.intValue(err_int_ty, 0),
};
- const payload_ty = ty.errorUnionPayload(mod);
+ const payload_ty = ty.errorUnionPayload(zcu);
const eu_layout = self.errorUnionLayout(payload_ty);
if (!eu_layout.payload_has_bits) {
// We use the error type directly as the type.
@@ -1006,12 +1006,12 @@ const NavGen = struct {
},
.enum_tag => {
const int_val = try val.intFromEnum(ty, pt);
- const int_ty = ty.intTagType(mod);
+ const int_ty = ty.intTagType(zcu);
break :cache try self.constant(int_ty, int_val, repr);
},
.ptr => return self.constantPtr(val),
.slice => |slice| {
- const ptr_ty = ty.slicePtrFieldType(mod);
+ const ptr_ty = ty.slicePtrFieldType(zcu);
const ptr_id = try self.constantPtr(Value.fromInterned(slice.ptr));
const len_id = try self.constant(Type.usize, Value.fromInterned(slice.len), .indirect);
return self.constructStruct(
@@ -1021,12 +1021,12 @@ const NavGen = struct {
);
},
.opt => {
- const payload_ty = ty.optionalChild(mod);
- const maybe_payload_val = val.optionalValue(mod);
+ const payload_ty = ty.optionalChild(zcu);
+ const maybe_payload_val = val.optionalValue(zcu);
- if (!payload_ty.hasRuntimeBits(pt)) {
+ if (!payload_ty.hasRuntimeBits(zcu)) {
break :cache try self.constBool(maybe_payload_val != null, .indirect);
- } else if (ty.optionalReprIsPayload(mod)) {
+ } else if (ty.optionalReprIsPayload(zcu)) {
// Optional representation is a nullable pointer or slice.
if (maybe_payload_val) |payload_val| {
return try self.constant(payload_ty, payload_val, .indirect);
@@ -1054,7 +1054,7 @@ const NavGen = struct {
inline .array_type, .vector_type => |array_type, tag| {
const elem_ty = Type.fromInterned(array_type.child);
- const constituents = try self.gpa.alloc(IdRef, @intCast(ty.arrayLenIncludingSentinel(mod)));
+ const constituents = try self.gpa.alloc(IdRef, @intCast(ty.arrayLenIncludingSentinel(zcu)));
defer self.gpa.free(constituents);
const child_repr: Repr = switch (tag) {
@@ -1088,7 +1088,7 @@ const NavGen = struct {
}
},
.struct_type => {
- const struct_type = mod.typeToStruct(ty).?;
+ const struct_type = zcu.typeToStruct(ty).?;
if (struct_type.layout == .@"packed") {
return self.todo("packed struct constants", .{});
}
@@ -1102,7 +1102,7 @@ const NavGen = struct {
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// This is a zero-bit field - we only needed it for the alignment.
continue;
}
@@ -1121,10 +1121,10 @@ const NavGen = struct {
else => unreachable,
},
.un => |un| {
- const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
- const union_obj = mod.typeToUnion(ty).?;
+ const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
+ const union_obj = zcu.typeToUnion(ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]);
- const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(pt))
+ const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(zcu))
try self.constant(field_ty, Value.fromInterned(un.val), .direct)
else
null;
@@ -1232,8 +1232,8 @@ const NavGen = struct {
// TODO: Merge this function with constantDeclRef.
const pt = self.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_id = try self.resolveType(ty, .direct);
const uav_ty = Type.fromInterned(ip.typeOf(uav.val));
@@ -1243,14 +1243,14 @@ const NavGen = struct {
else => {},
}
- // const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
- if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+ // const is_fn_body = decl_ty.zigTypeTag(zcu) == .Fn;
+ if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
// Pointer to nothing - return undefined
return self.spv.constUndef(ty_id);
}
// Uav refs are always generic.
- assert(ty.ptrAddressSpace(mod) == .generic);
+ assert(ty.ptrAddressSpace(zcu) == .generic);
const decl_ptr_ty_id = try self.ptrType(uav_ty, .Generic);
const ptr_id = try self.resolveUav(uav.val);
@@ -1270,12 +1270,12 @@ const NavGen = struct {
fn constantNavRef(self: *NavGen, ty: Type, nav_index: InternPool.Nav.Index) !IdRef {
const pt = self.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_id = try self.resolveType(ty, .direct);
const nav = ip.getNav(nav_index);
- const nav_val = mod.navValue(nav_index);
- const nav_ty = nav_val.typeOf(mod);
+ const nav_val = zcu.navValue(nav_index);
+ const nav_ty = nav_val.typeOf(zcu);
switch (ip.indexToKey(nav_val.toIntern())) {
.func => {
@@ -1287,12 +1287,12 @@ const NavGen = struct {
else => {},
}
- if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+ if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
// Pointer to nothing - return undefined.
return self.spv.constUndef(ty_id);
}
- const spv_decl_index = try self.object.resolveNav(mod, nav_index);
+ const spv_decl_index = try self.object.resolveNav(zcu, nav_index);
const spv_decl = self.spv.declPtr(spv_decl_index);
const decl_id = switch (spv_decl.kind) {
@@ -1452,9 +1452,9 @@ const NavGen = struct {
/// }
/// If any field's size is 0, it will be omitted.
fn resolveUnionType(self: *NavGen, ty: Type) !IdRef {
- const mod = self.pt.zcu;
- const ip = &mod.intern_pool;
- const union_obj = mod.typeToUnion(ty).?;
+ const zcu = self.pt.zcu;
+ const ip = &zcu.intern_pool;
+ const union_obj = zcu.typeToUnion(ty).?;
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
return self.todo("packed union types", .{});
@@ -1503,12 +1503,12 @@ const NavGen = struct {
}
fn resolveFnReturnType(self: *NavGen, ret_ty: Type) !IdRef {
- const pt = self.pt;
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const zcu = self.pt.zcu;
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// If the return type is an error set or an error union, then we make this
// anyerror return type instead, so that it can be coerced into a function
// pointer type which has anyerror as the return type.
- if (ret_ty.isError(pt.zcu)) {
+ if (ret_ty.isError(zcu)) {
return self.resolveType(Type.anyerror, .direct);
} else {
return self.resolveType(Type.void, .direct);
@@ -1531,14 +1531,14 @@ const NavGen = struct {
fn resolveTypeInner(self: *NavGen, ty: Type, repr: Repr) Error!IdRef {
const pt = self.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
log.debug("resolveType: ty = {}", .{ty.fmt(pt)});
const target = self.getTarget();
const section = &self.spv.sections.types_globals_constants;
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(zcu)) {
.NoReturn => {
assert(repr == .direct);
return try self.spv.voidType();
@@ -1562,7 +1562,7 @@ const NavGen = struct {
.indirect => return try self.resolveType(Type.u1, .indirect),
},
.Int => {
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
if (int_info.bits == 0) {
// Sometimes the backend will be asked to generate a pointer to i0. OpTypeInt
// with 0 bits is invalid, so return an opaque type in this case.
@@ -1577,7 +1577,7 @@ const NavGen = struct {
return try self.intType(int_info.signedness, int_info.bits);
},
.Enum => {
- const tag_ty = ty.intTagType(mod);
+ const tag_ty = ty.intTagType(zcu);
return try self.resolveType(tag_ty, repr);
},
.Float => {
@@ -1599,13 +1599,13 @@ const NavGen = struct {
return try self.spv.floatType(bits);
},
.Array => {
- const elem_ty = ty.childType(mod);
+ const elem_ty = ty.childType(zcu);
const elem_ty_id = try self.resolveType(elem_ty, .indirect);
- const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse {
- return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)});
+ const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(zcu)) orelse {
+ return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(zcu)});
};
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// The size of the array would be 0, but that is not allowed in SPIR-V.
// This path can be reached when the backend is asked to generate a pointer to
// an array of some zero-bit type. This should always be an indirect path.
@@ -1635,7 +1635,7 @@ const NavGen = struct {
},
.Fn => switch (repr) {
.direct => {
- const fn_info = mod.typeToFunc(ty).?;
+ const fn_info = zcu.typeToFunc(ty).?;
comptime assert(zig_call_abi_ver == 3);
switch (fn_info.cc) {
@@ -1653,7 +1653,7 @@ const NavGen = struct {
var param_index: usize = 0;
for (fn_info.param_types.get(ip)) |param_ty_index| {
const param_ty = Type.fromInterned(param_ty_index);
- if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
param_ty_ids[param_index] = try self.resolveType(param_ty, .direct);
param_index += 1;
@@ -1677,7 +1677,7 @@ const NavGen = struct {
},
},
.Pointer => {
- const ptr_info = ty.ptrInfo(mod);
+ const ptr_info = ty.ptrInfo(zcu);
const storage_class = self.spvStorageClass(ptr_info.flags.address_space);
const ptr_ty_id = try self.ptrType(Type.fromInterned(ptr_info.child), storage_class);
@@ -1693,9 +1693,9 @@ const NavGen = struct {
);
},
.Vector => {
- const elem_ty = ty.childType(mod);
+ const elem_ty = ty.childType(zcu);
const elem_ty_id = try self.resolveType(elem_ty, repr);
- const len = ty.vectorLen(mod);
+ const len = ty.vectorLen(zcu);
if (self.isSpvVector(ty)) {
return try self.spv.vectorType(len, elem_ty_id);
@@ -1711,7 +1711,7 @@ const NavGen = struct {
var member_index: usize = 0;
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
- if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
member_types[member_index] = try self.resolveType(Type.fromInterned(field_ty), .indirect);
member_index += 1;
@@ -1740,13 +1740,13 @@ const NavGen = struct {
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// This is a zero-bit field - we only needed it for the alignment.
continue;
}
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
- try ip.getOrPutStringFmt(mod.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
+ try ip.getOrPutStringFmt(zcu.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
try member_types.append(try self.resolveType(field_ty, .indirect));
try member_names.append(field_name.toSlice(ip));
}
@@ -1758,8 +1758,8 @@ const NavGen = struct {
return result_id;
},
.Optional => {
- const payload_ty = ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const payload_ty = ty.optionalChild(zcu);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// Just use a bool.
// Note: Always generate the bool with indirect format, to save on some sanity
// Perform the conversion to a direct bool when the field is extracted.
@@ -1767,7 +1767,7 @@ const NavGen = struct {
}
const payload_ty_id = try self.resolveType(payload_ty, .indirect);
- if (ty.optionalReprIsPayload(mod)) {
+ if (ty.optionalReprIsPayload(zcu)) {
// Optional is actually a pointer or a slice.
return payload_ty_id;
}
@@ -1782,7 +1782,7 @@ const NavGen = struct {
.Union => return try self.resolveUnionType(ty),
.ErrorSet => return try self.resolveType(Type.u16, repr),
.ErrorUnion => {
- const payload_ty = ty.errorUnionPayload(mod);
+ const payload_ty = ty.errorUnionPayload(zcu);
const error_ty_id = try self.resolveType(Type.anyerror, .indirect);
const eu_layout = self.errorUnionLayout(payload_ty);
@@ -1877,13 +1877,14 @@ const NavGen = struct {
fn errorUnionLayout(self: *NavGen, payload_ty: Type) ErrorUnionLayout {
const pt = self.pt;
+ const zcu = pt.zcu;
- const error_align = Type.anyerror.abiAlignment(pt);
- const payload_align = payload_ty.abiAlignment(pt);
+ const error_align = Type.anyerror.abiAlignment(zcu);
+ const payload_align = payload_ty.abiAlignment(zcu);
const error_first = error_align.compare(.gt, payload_align);
return .{
- .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(pt),
+ .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu),
.error_first = error_first,
};
}
@@ -1908,10 +1909,10 @@ const NavGen = struct {
fn unionLayout(self: *NavGen, ty: Type) UnionLayout {
const pt = self.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- const layout = ty.unionGetLayout(pt);
- const union_obj = mod.typeToUnion(ty).?;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const layout = ty.unionGetLayout(zcu);
+ const union_obj = zcu.typeToUnion(ty).?;
var union_layout = UnionLayout{
.has_payload = layout.payload_size != 0,
@@ -1931,7 +1932,7 @@ const NavGen = struct {
const most_aligned_field = layout.most_aligned_field;
const most_aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[most_aligned_field]);
union_layout.payload_ty = most_aligned_field_ty;
- union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(pt));
+ union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(zcu));
} else {
union_layout.payload_size = 0;
}
@@ -1998,12 +1999,12 @@ const NavGen = struct {
}
fn materialize(self: Temporary, ng: *NavGen) !IdResult {
- const mod = ng.pt.zcu;
+ const zcu = ng.pt.zcu;
switch (self.value) {
.singleton => |id| return id,
.exploded_vector => |range| {
- assert(self.ty.isVector(mod));
- assert(self.ty.vectorLen(mod) == range.len);
+ assert(self.ty.isVector(zcu));
+ assert(self.ty.vectorLen(zcu) == range.len);
const consituents = try ng.gpa.alloc(IdRef, range.len);
defer ng.gpa.free(consituents);
for (consituents, 0..range.len) |*id, i| {
@@ -2028,18 +2029,18 @@ const NavGen = struct {
/// 'Explode' a temporary into separate elements. This turns a vector
/// into a bag of elements.
fn explode(self: Temporary, ng: *NavGen) !IdRange {
- const mod = ng.pt.zcu;
+ const zcu = ng.pt.zcu;
// If the value is a scalar, then this is a no-op.
- if (!self.ty.isVector(mod)) {
+ if (!self.ty.isVector(zcu)) {
return switch (self.value) {
.singleton => |id| .{ .base = @intFromEnum(id), .len = 1 },
.exploded_vector => |range| range,
};
}
- const ty_id = try ng.resolveType(self.ty.scalarType(mod), .direct);
- const n = self.ty.vectorLen(mod);
+ const ty_id = try ng.resolveType(self.ty.scalarType(zcu), .direct);
+ const n = self.ty.vectorLen(zcu);
const results = ng.spv.allocIds(n);
const id = switch (self.value) {
@@ -2087,13 +2088,13 @@ const NavGen = struct {
/// only checks the size, but the source-of-truth is implemented
/// by `isSpvVector()`.
fn fromType(ty: Type, ng: *NavGen) Vectorization {
- const mod = ng.pt.zcu;
- if (!ty.isVector(mod)) {
+ const zcu = ng.pt.zcu;
+ if (!ty.isVector(zcu)) {
return .scalar;
} else if (ng.isSpvVector(ty)) {
- return .{ .spv_vectorized = ty.vectorLen(mod) };
+ return .{ .spv_vectorized = ty.vectorLen(zcu) };
} else {
- return .{ .unrolled = ty.vectorLen(mod) };
+ return .{ .unrolled = ty.vectorLen(zcu) };
}
}
@@ -2339,10 +2340,10 @@ const NavGen = struct {
/// This function builds an OpSConvert or OpUConvert depending on the
/// signedness of the types.
fn buildIntConvert(self: *NavGen, dst_ty: Type, src: Temporary) !Temporary {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
- const dst_ty_id = try self.resolveType(dst_ty.scalarType(mod), .direct);
- const src_ty_id = try self.resolveType(src.ty.scalarType(mod), .direct);
+ const dst_ty_id = try self.resolveType(dst_ty.scalarType(zcu), .direct);
+ const src_ty_id = try self.resolveType(src.ty.scalarType(zcu), .direct);
const v = self.vectorization(.{ dst_ty, src });
const result_ty = try v.resultType(self, dst_ty);
@@ -2363,7 +2364,7 @@ const NavGen = struct {
const op_result_ty = try v.operationType(self, dst_ty);
const op_result_ty_id = try self.resolveType(op_result_ty, .direct);
- const opcode: Opcode = if (dst_ty.isSignedInt(mod)) .OpSConvert else .OpUConvert;
+ const opcode: Opcode = if (dst_ty.isSignedInt(zcu)) .OpSConvert else .OpUConvert;
const op_src = try v.prepare(self, src);
@@ -2418,7 +2419,7 @@ const NavGen = struct {
}
fn buildSelect(self: *NavGen, condition: Temporary, lhs: Temporary, rhs: Temporary) !Temporary {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const v = self.vectorization(.{ condition, lhs, rhs });
const ops = v.operations();
@@ -2428,7 +2429,7 @@ const NavGen = struct {
const op_result_ty_id = try self.resolveType(op_result_ty, .direct);
const result_ty = try v.resultType(self, lhs.ty);
- assert(condition.ty.scalarType(mod).zigTypeTag(mod) == .Bool);
+ assert(condition.ty.scalarType(zcu).zigTypeTag(zcu) == .Bool);
const cond = try v.prepare(self, condition);
const object_1 = try v.prepare(self, lhs);
@@ -2764,9 +2765,9 @@ const NavGen = struct {
rhs: Temporary,
) !struct { Temporary, Temporary } {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const target = self.getTarget();
- const ip = &mod.intern_pool;
+ const ip = &zcu.intern_pool;
const v = lhs.vectorization(self).unify(rhs.vectorization(self));
const ops = v.operations();
@@ -2814,7 +2815,7 @@ const NavGen = struct {
// where T is maybe vectorized.
const types = [2]InternPool.Index{ arith_op_ty.toIntern(), arith_op_ty.toIntern() };
const values = [2]InternPool.Index{ .none, .none };
- const index = try ip.getAnonStructType(mod.gpa, pt.tid, .{
+ const index = try ip.getAnonStructType(zcu.gpa, pt.tid, .{
.types = &types,
.values = &values,
.names = &.{},
@@ -2941,17 +2942,17 @@ const NavGen = struct {
fn genNav(self: *NavGen) !void {
const pt = self.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- const spv_decl_index = try self.object.resolveNav(mod, self.owner_nav);
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const spv_decl_index = try self.object.resolveNav(zcu, self.owner_nav);
const result_id = self.spv.declPtr(spv_decl_index).result_id;
const nav = ip.getNav(self.owner_nav);
- const val = mod.navValue(self.owner_nav);
- const ty = val.typeOf(mod);
+ const val = zcu.navValue(self.owner_nav);
+ const ty = val.typeOf(zcu);
switch (self.spv.declPtr(spv_decl_index).kind) {
.func => {
- const fn_info = mod.typeToFunc(ty).?;
+ const fn_info = zcu.typeToFunc(ty).?;
const return_ty_id = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
const prototype_ty_id = try self.resolveType(ty, .direct);
@@ -2969,7 +2970,7 @@ const NavGen = struct {
try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
for (fn_info.param_types.get(ip)) |param_ty_index| {
const param_ty = Type.fromInterned(param_ty_index);
- if (!param_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
const param_type_id = try self.resolveType(param_ty, .direct);
const arg_result_id = self.spv.allocId();
@@ -3116,8 +3117,8 @@ const NavGen = struct {
/// Convert representation from indirect (in memory) to direct (in 'register')
/// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct).
fn convertToDirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef {
- const mod = self.pt.zcu;
- switch (ty.scalarType(mod).zigTypeTag(mod)) {
+ const zcu = self.pt.zcu;
+ switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
.Bool => {
const false_id = try self.constBool(false, .indirect);
// The operation below requires inputs in direct representation, but the operand
@@ -3142,8 +3143,8 @@ const NavGen = struct {
/// Convert representation from direct (in 'register') to indirect (in memory)
/// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect).
fn convertToIndirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef {
- const mod = self.pt.zcu;
- switch (ty.scalarType(mod).zigTypeTag(mod)) {
+ const zcu = self.pt.zcu;
+ switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
.Bool => {
const result = try self.intFromBool(Temporary.init(ty, operand_id));
return try result.materialize(self);
@@ -3219,8 +3220,8 @@ const NavGen = struct {
}
fn genInst(self: *NavGen, inst: Air.Inst.Index) !void {
- const mod = self.pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = self.pt.zcu;
+ const ip = &zcu.intern_pool;
if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
return;
@@ -3399,7 +3400,7 @@ const NavGen = struct {
}
fn airShift(self: *NavGen, inst: Air.Inst.Index, unsigned: BinaryOp, signed: BinaryOp) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const base = try self.temporary(bin_op.lhs);
@@ -3420,7 +3421,7 @@ const NavGen = struct {
// Note: The sign may differ here between the shift and the base type, in case
// of an arithmetic right shift. SPIR-V still expects the same type,
// so in that case we have to convert the shift amount to signed.
- const casted_shift = try self.buildIntConvert(base.ty.scalarType(mod), shift);
+ const casted_shift = try self.buildIntConvert(base.ty.scalarType(zcu), shift);
const shifted = switch (info.signedness) {
.unsigned => try self.buildBinary(unsigned, base, casted_shift),
@@ -3477,7 +3478,7 @@ const NavGen = struct {
/// All other values are returned unmodified (this makes strange integer
/// wrapping easier to use in generic operations).
fn normalize(self: *NavGen, value: Temporary, info: ArithmeticTypeInfo) !Temporary {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const ty = value.ty;
switch (info.class) {
.integer, .bool, .float => return value,
@@ -3485,13 +3486,13 @@ const NavGen = struct {
.strange_integer => switch (info.signedness) {
.unsigned => {
const mask_value = if (info.bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @as(u6, @intCast(info.bits))) - 1;
- const mask_id = try self.constInt(ty.scalarType(mod), mask_value, .direct);
- return try self.buildBinary(.bit_and, value, Temporary.init(ty.scalarType(mod), mask_id));
+ const mask_id = try self.constInt(ty.scalarType(zcu), mask_value, .direct);
+ return try self.buildBinary(.bit_and, value, Temporary.init(ty.scalarType(zcu), mask_id));
},
.signed => {
// Shift left and right so that we can copy the sign bit that way.
- const shift_amt_id = try self.constInt(ty.scalarType(mod), info.backing_bits - info.bits, .direct);
- const shift_amt = Temporary.init(ty.scalarType(mod), shift_amt_id);
+ const shift_amt_id = try self.constInt(ty.scalarType(zcu), info.backing_bits - info.bits, .direct);
+ const shift_amt = Temporary.init(ty.scalarType(zcu), shift_amt_id);
const left = try self.buildBinary(.sll, value, shift_amt);
return try self.buildBinary(.sra, left, shift_amt);
},
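A plain-Zig picture of what normalize produces for a "strange" 3-bit integer kept in a 32-bit backing word (sketch only; the backend uses the shift pair above because SPIR-V has no 3-bit integer type): the signed case is equivalent to truncating to i3 and widening again, the unsigned case to masking the low bits.
fn normalizeSignedI3(backing: i32) i32 {
    const narrow: i3 = @truncate(backing); // keep only the low 3 bits
    return narrow;                         // widening back to i32 re-extends the sign bit
}

fn normalizeUnsignedU3(backing: u32) u32 {
    return backing & ((@as(u32, 1) << 3) - 1); // mask off everything above bit 2
}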
@@ -3897,7 +3898,7 @@ const NavGen = struct {
}
fn airShlOverflow(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -3916,7 +3917,7 @@ const NavGen = struct {
// Sometimes Zig doesn't make both of the arguments the same type here. SPIR-V expects that,
// so just manually upcast it if required.
- const casted_shift = try self.buildIntConvert(base.ty.scalarType(mod), shift);
+ const casted_shift = try self.buildIntConvert(base.ty.scalarType(zcu), shift);
const left = try self.buildBinary(.sll, base, casted_shift);
const result = try self.normalize(left, info);
@@ -3955,12 +3956,12 @@ const NavGen = struct {
fn airClzCtz(self: *NavGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const target = self.getTarget();
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.temporary(ty_op.operand);
- const scalar_result_ty = self.typeOfIndex(inst).scalarType(mod);
+ const scalar_result_ty = self.typeOfIndex(inst).scalarType(zcu);
const info = self.arithmeticTypeInfo(operand.ty);
switch (info.class) {
@@ -4004,16 +4005,16 @@ const NavGen = struct {
}
fn airReduce(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
const operand = try self.resolve(reduce.operand);
const operand_ty = self.typeOf(reduce.operand);
- const scalar_ty = operand_ty.scalarType(mod);
+ const scalar_ty = operand_ty.scalarType(zcu);
const scalar_ty_id = try self.resolveType(scalar_ty, .direct);
const info = self.arithmeticTypeInfo(operand_ty);
- const len = operand_ty.vectorLen(mod);
+ const len = operand_ty.vectorLen(zcu);
const first = try self.extractVectorComponent(scalar_ty, operand, 0);
@@ -4080,7 +4081,7 @@ const NavGen = struct {
fn airShuffle(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolve(extra.a);
@@ -4092,7 +4093,7 @@ const NavGen = struct {
const a_ty = self.typeOf(extra.a);
const b_ty = self.typeOf(extra.b);
- const scalar_ty = result_ty.scalarType(mod);
+ const scalar_ty = result_ty.scalarType(zcu);
const scalar_ty_id = try self.resolveType(scalar_ty, .direct);
// If all of the types are SPIR-V vectors, we can use OpVectorShuffle.
@@ -4100,20 +4101,20 @@ const NavGen = struct {
// The SPIR-V shuffle instruction is similar to the Air instruction, except that the elements are
// numbered consecutively instead of using negatives.
- const components = try self.gpa.alloc(Word, result_ty.vectorLen(mod));
+ const components = try self.gpa.alloc(Word, result_ty.vectorLen(zcu));
defer self.gpa.free(components);
- const a_len = a_ty.vectorLen(mod);
+ const a_len = a_ty.vectorLen(zcu);
for (components, 0..) |*component, i| {
const elem = try mask.elemValue(pt, i);
- if (elem.isUndef(mod)) {
+ if (elem.isUndef(zcu)) {
// This is explicitly valid for OpVectorShuffle; it indicates undefined.
component.* = 0xFFFF_FFFF;
continue;
}
- const index = elem.toSignedInt(pt);
+ const index = elem.toSignedInt(zcu);
if (index >= 0) {
component.* = @intCast(index);
} else {
@@ -4134,17 +4135,17 @@ const NavGen = struct {
// Fall back to manually extracting and inserting components.
- const components = try self.gpa.alloc(IdRef, result_ty.vectorLen(mod));
+ const components = try self.gpa.alloc(IdRef, result_ty.vectorLen(zcu));
defer self.gpa.free(components);
for (components, 0..) |*id, i| {
const elem = try mask.elemValue(pt, i);
- if (elem.isUndef(mod)) {
+ if (elem.isUndef(zcu)) {
id.* = try self.spv.constUndef(scalar_ty_id);
continue;
}
- const index = elem.toSignedInt(pt);
+ const index = elem.toSignedInt(zcu);
if (index >= 0) {
id.* = try self.extractVectorComponent(scalar_ty, a, @intCast(index));
} else {
@@ -4218,10 +4219,10 @@ const NavGen = struct {
}
fn ptrAdd(self: *NavGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const result_ty_id = try self.resolveType(result_ty, .direct);
- switch (ptr_ty.ptrSize(mod)) {
+ switch (ptr_ty.ptrSize(zcu)) {
.One => {
// Pointer to array
// TODO: Is this correct?
@@ -4275,15 +4276,15 @@ const NavGen = struct {
rhs: Temporary,
) !Temporary {
const pt = self.pt;
- const mod = pt.zcu;
- const scalar_ty = lhs.ty.scalarType(mod);
- const is_vector = lhs.ty.isVector(mod);
+ const zcu = pt.zcu;
+ const scalar_ty = lhs.ty.scalarType(zcu);
+ const is_vector = lhs.ty.isVector(zcu);
- switch (scalar_ty.zigTypeTag(mod)) {
+ switch (scalar_ty.zigTypeTag(zcu)) {
.Int, .Bool, .Float => {},
.Enum => {
assert(!is_vector);
- const ty = lhs.ty.intTagType(mod);
+ const ty = lhs.ty.intTagType(zcu);
return try self.cmp(op, lhs.pun(ty), rhs.pun(ty));
},
.ErrorSet => {
@@ -4321,10 +4322,10 @@ const NavGen = struct {
const ty = lhs.ty;
- const payload_ty = ty.optionalChild(mod);
- if (ty.optionalReprIsPayload(mod)) {
- assert(payload_ty.hasRuntimeBitsIgnoreComptime(pt));
- assert(!payload_ty.isSlice(mod));
+ const payload_ty = ty.optionalChild(zcu);
+ if (ty.optionalReprIsPayload(zcu)) {
+ assert(payload_ty.hasRuntimeBitsIgnoreComptime(zcu));
+ assert(!payload_ty.isSlice(zcu));
return try self.cmp(op, lhs.pun(payload_ty), rhs.pun(payload_ty));
}
@@ -4332,12 +4333,12 @@ const NavGen = struct {
const lhs_id = try lhs.materialize(self);
const rhs_id = try rhs.materialize(self);
- const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(pt))
+ const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu))
try self.extractField(Type.bool, lhs_id, 1)
else
try self.convertToDirect(Type.bool, lhs_id);
- const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(pt))
+ const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu))
try self.extractField(Type.bool, rhs_id, 1)
else
try self.convertToDirect(Type.bool, rhs_id);
@@ -4345,7 +4346,7 @@ const NavGen = struct {
const lhs_valid = Temporary.init(Type.bool, lhs_valid_id);
const rhs_valid = Temporary.init(Type.bool, rhs_valid_id);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return try self.cmp(op, lhs_valid, rhs_valid);
}
@@ -4465,7 +4466,7 @@ const NavGen = struct {
src_ty: Type,
src_id: IdRef,
) !IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const src_ty_id = try self.resolveType(src_ty, .direct);
const dst_ty_id = try self.resolveType(dst_ty, .direct);
@@ -4477,7 +4478,7 @@ const NavGen = struct {
// TODO: Some more cases are missing here
// See fn bitCast in llvm.zig
- if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) {
+ if (src_ty.zigTypeTag(zcu) == .Int and dst_ty.isPtrAtRuntime(zcu)) {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
.id_result_type = dst_ty_id,
@@ -4490,7 +4491,7 @@ const NavGen = struct {
// We can only use OpBitcast for specific conversions: between numerical types, and
// between pointers. If the resolved spir-v types fall into this category then emit OpBitcast,
// otherwise use a temporary and perform a pointer cast.
- const can_bitcast = (src_ty.isNumeric(mod) and dst_ty.isNumeric(mod)) or (src_ty.isPtrAtRuntime(mod) and dst_ty.isPtrAtRuntime(mod));
+ const can_bitcast = (src_ty.isNumeric(zcu) and dst_ty.isNumeric(zcu)) or (src_ty.isPtrAtRuntime(zcu) and dst_ty.isPtrAtRuntime(zcu));
if (can_bitcast) {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
@@ -4519,7 +4520,7 @@ const NavGen = struct {
// the result here.
// TODO: This detail could cause stuff like @as(*const i1, @ptrCast(&@as(u1, 1))) to break,
// should we change the representation of strange integers?
- if (dst_ty.zigTypeTag(mod) == .Int) {
+ if (dst_ty.zigTypeTag(zcu) == .Int) {
const info = self.arithmeticTypeInfo(dst_ty);
const result = try self.normalize(Temporary.init(dst_ty, result_id), info);
return try result.materialize(self);
@@ -4675,19 +4676,19 @@ const NavGen = struct {
fn airArrayToSlice(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const array_ptr_ty = self.typeOf(ty_op.operand);
- const array_ty = array_ptr_ty.childType(mod);
+ const array_ty = array_ptr_ty.childType(zcu);
const slice_ty = self.typeOfIndex(inst);
- const elem_ptr_ty = slice_ty.slicePtrFieldType(mod);
+ const elem_ptr_ty = slice_ty.slicePtrFieldType(zcu);
const elem_ptr_ty_id = try self.resolveType(elem_ptr_ty, .direct);
const array_ptr_id = try self.resolve(ty_op.operand);
- const len_id = try self.constInt(Type.usize, array_ty.arrayLen(mod), .direct);
+ const len_id = try self.constInt(Type.usize, array_ty.arrayLen(zcu), .direct);
- const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(pt))
+ const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(zcu))
// Note: The pointer is something like *opaque{}, so we need to bitcast it to the element type.
try self.bitCast(elem_ptr_ty, array_ptr_ty, array_ptr_id)
else
@@ -4720,16 +4721,16 @@ const NavGen = struct {
fn airAggregateInit(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const result_ty = self.typeOfIndex(inst);
- const len: usize = @intCast(result_ty.arrayLen(mod));
+ const len: usize = @intCast(result_ty.arrayLen(zcu));
const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
- switch (result_ty.zigTypeTag(mod)) {
+ switch (result_ty.zigTypeTag(zcu)) {
.Struct => {
- if (mod.typeToPackedStruct(result_ty)) |struct_type| {
+ if (zcu.typeToPackedStruct(result_ty)) |struct_type| {
_ = struct_type;
unreachable; // TODO
}
@@ -4744,7 +4745,7 @@ const NavGen = struct {
.anon_struct_type => |tuple| {
for (tuple.types.get(ip), elements, 0..) |field_ty, element, i| {
if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue;
- assert(Type.fromInterned(field_ty).hasRuntimeBits(pt));
+ assert(Type.fromInterned(field_ty).hasRuntimeBits(zcu));
const id = try self.resolve(element);
types[index] = Type.fromInterned(field_ty);
@@ -4759,7 +4760,7 @@ const NavGen = struct {
const field_index = it.next().?;
if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue;
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- assert(field_ty.hasRuntimeBitsIgnoreComptime(pt));
+ assert(field_ty.hasRuntimeBitsIgnoreComptime(zcu));
const id = try self.resolve(element);
types[index] = field_ty;
@@ -4777,7 +4778,7 @@ const NavGen = struct {
);
},
.Vector => {
- const n_elems = result_ty.vectorLen(mod);
+ const n_elems = result_ty.vectorLen(zcu);
const elem_ids = try self.gpa.alloc(IdRef, n_elems);
defer self.gpa.free(elem_ids);
@@ -4788,8 +4789,8 @@ const NavGen = struct {
return try self.constructVector(result_ty, elem_ids);
},
.Array => {
- const array_info = result_ty.arrayInfo(mod);
- const n_elems: usize = @intCast(result_ty.arrayLenIncludingSentinel(mod));
+ const array_info = result_ty.arrayInfo(zcu);
+ const n_elems: usize = @intCast(result_ty.arrayLenIncludingSentinel(zcu));
const elem_ids = try self.gpa.alloc(IdRef, n_elems);
defer self.gpa.free(elem_ids);
@@ -4810,14 +4811,14 @@ const NavGen = struct {
fn sliceOrArrayLen(self: *NavGen, operand_id: IdRef, ty: Type) !IdRef {
const pt = self.pt;
- const mod = pt.zcu;
- switch (ty.ptrSize(mod)) {
+ const zcu = pt.zcu;
+ switch (ty.ptrSize(zcu)) {
.Slice => return self.extractField(Type.usize, operand_id, 1),
.One => {
- const array_ty = ty.childType(mod);
- const elem_ty = array_ty.childType(mod);
- const abi_size = elem_ty.abiSize(pt);
- const size = array_ty.arrayLenIncludingSentinel(mod) * abi_size;
+ const array_ty = ty.childType(zcu);
+ const elem_ty = array_ty.childType(zcu);
+ const abi_size = elem_ty.abiSize(zcu);
+ const size = array_ty.arrayLenIncludingSentinel(zcu) * abi_size;
return try self.constInt(Type.usize, size, .direct);
},
.Many, .C => unreachable,
@@ -4825,9 +4826,9 @@ const NavGen = struct {
}
fn sliceOrArrayPtr(self: *NavGen, operand_id: IdRef, ty: Type) !IdRef {
- const mod = self.pt.zcu;
- if (ty.isSlice(mod)) {
- const ptr_ty = ty.slicePtrFieldType(mod);
+ const zcu = self.pt.zcu;
+ if (ty.isSlice(zcu)) {
+ const ptr_ty = ty.slicePtrFieldType(zcu);
return self.extractField(ptr_ty, operand_id, 0);
}
return operand_id;
@@ -4857,11 +4858,11 @@ const NavGen = struct {
}
fn airSliceElemPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const slice_ty = self.typeOf(bin_op.lhs);
- if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null;
+ if (!slice_ty.isVolatilePtr(zcu) and self.liveness.isUnused(inst)) return null;
const slice_id = try self.resolve(bin_op.lhs);
const index_id = try self.resolve(bin_op.rhs);
@@ -4874,28 +4875,28 @@ const NavGen = struct {
}
fn airSliceElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const slice_ty = self.typeOf(bin_op.lhs);
- if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null;
+ if (!slice_ty.isVolatilePtr(zcu) and self.liveness.isUnused(inst)) return null;
const slice_id = try self.resolve(bin_op.lhs);
const index_id = try self.resolve(bin_op.rhs);
- const ptr_ty = slice_ty.slicePtrFieldType(mod);
+ const ptr_ty = slice_ty.slicePtrFieldType(zcu);
const ptr_ty_id = try self.resolveType(ptr_ty, .direct);
const slice_ptr = try self.extractField(ptr_ty, slice_id, 0);
const elem_ptr = try self.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{});
- return try self.load(slice_ty.childType(mod), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(mod) });
+ return try self.load(slice_ty.childType(zcu), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(zcu) });
}
fn ptrElemPtr(self: *NavGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
// Construct new pointer type for the resulting pointer
- const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
- const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
- if (ptr_ty.isSinglePointer(mod)) {
+ const elem_ty = ptr_ty.elemType2(zcu); // use elemType2() so that we get T for *[N]T.
+ const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(zcu)));
+ if (ptr_ty.isSinglePointer(zcu)) {
// Pointer-to-array. In this case, the resulting pointer is not of the same type
// as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain.
return try self.accessChainId(elem_ptr_ty_id, ptr_id, &.{index_id});
@@ -4907,14 +4908,14 @@ const NavGen = struct {
fn airPtrElemPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const src_ptr_ty = self.typeOf(bin_op.lhs);
- const elem_ty = src_ptr_ty.childType(mod);
+ const elem_ty = src_ptr_ty.childType(zcu);
const ptr_id = try self.resolve(bin_op.lhs);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const dst_ptr_ty = self.typeOfIndex(inst);
return try self.bitCast(dst_ptr_ty, src_ptr_ty, ptr_id);
}
@@ -4924,10 +4925,10 @@ const NavGen = struct {
}
fn airArrayElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const array_ty = self.typeOf(bin_op.lhs);
- const elem_ty = array_ty.childType(mod);
+ const elem_ty = array_ty.childType(zcu);
const array_id = try self.resolve(bin_op.lhs);
const index_id = try self.resolve(bin_op.rhs);
@@ -4946,7 +4947,7 @@ const NavGen = struct {
// For now, just generate a temporary and use that.
// TODO: This backend probably also should use isByRef from llvm...
- const is_vector = array_ty.isVector(mod);
+ const is_vector = array_ty.isVector(zcu);
const elem_repr: Repr = if (is_vector) .direct else .indirect;
const ptr_array_ty_id = try self.ptrType2(array_ty, .Function, .direct);
@@ -4985,26 +4986,26 @@ const NavGen = struct {
}
fn airPtrElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = self.typeOfIndex(inst);
const ptr_id = try self.resolve(bin_op.lhs);
const index_id = try self.resolve(bin_op.rhs);
const elem_ptr_id = try self.ptrElemPtr(ptr_ty, ptr_id, index_id);
- return try self.load(elem_ty, elem_ptr_id, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
+ return try self.load(elem_ty, elem_ptr_id, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
}
fn airVectorStoreElem(self: *NavGen, inst: Air.Inst.Index) !void {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
const extra = self.air.extraData(Air.Bin, data.payload).data;
const vector_ptr_ty = self.typeOf(data.vector_ptr);
- const vector_ty = vector_ptr_ty.childType(mod);
- const scalar_ty = vector_ty.scalarType(mod);
+ const vector_ty = vector_ptr_ty.childType(zcu);
+ const scalar_ty = vector_ty.scalarType(zcu);
- const storage_class = self.spvStorageClass(vector_ptr_ty.ptrAddressSpace(mod));
+ const storage_class = self.spvStorageClass(vector_ptr_ty.ptrAddressSpace(zcu));
const scalar_ptr_ty_id = try self.ptrType(scalar_ty, storage_class);
const vector_ptr = try self.resolve(data.vector_ptr);
@@ -5013,30 +5014,30 @@ const NavGen = struct {
const elem_ptr_id = try self.accessChainId(scalar_ptr_ty_id, vector_ptr, &.{index});
try self.store(scalar_ty, elem_ptr_id, operand, .{
- .is_volatile = vector_ptr_ty.isVolatilePtr(mod),
+ .is_volatile = vector_ptr_ty.isVolatilePtr(zcu),
});
}
fn airSetUnionTag(self: *NavGen, inst: Air.Inst.Index) !void {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const un_ptr_ty = self.typeOf(bin_op.lhs);
- const un_ty = un_ptr_ty.childType(mod);
+ const un_ty = un_ptr_ty.childType(zcu);
const layout = self.unionLayout(un_ty);
if (layout.tag_size == 0) return;
- const tag_ty = un_ty.unionTagTypeSafety(mod).?;
- const tag_ptr_ty_id = try self.ptrType(tag_ty, self.spvStorageClass(un_ptr_ty.ptrAddressSpace(mod)));
+ const tag_ty = un_ty.unionTagTypeSafety(zcu).?;
+ const tag_ptr_ty_id = try self.ptrType(tag_ty, self.spvStorageClass(un_ptr_ty.ptrAddressSpace(zcu)));
const union_ptr_id = try self.resolve(bin_op.lhs);
const new_tag_id = try self.resolve(bin_op.rhs);
if (!layout.has_payload) {
- try self.store(tag_ty, union_ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) });
+ try self.store(tag_ty, union_ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(zcu) });
} else {
const ptr_id = try self.accessChain(tag_ptr_ty_id, union_ptr_id, &.{layout.tag_index});
- try self.store(tag_ty, ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) });
+ try self.store(tag_ty, ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(zcu) });
}
}
@@ -5044,14 +5045,14 @@ const NavGen = struct {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const un_ty = self.typeOf(ty_op.operand);
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const layout = self.unionLayout(un_ty);
if (layout.tag_size == 0) return null;
const union_handle = try self.resolve(ty_op.operand);
if (!layout.has_payload) return union_handle;
- const tag_ty = un_ty.unionTagTypeSafety(mod).?;
+ const tag_ty = un_ty.unionTagTypeSafety(zcu).?;
return try self.extractField(tag_ty, union_handle, layout.tag_index);
}
@@ -5068,9 +5069,9 @@ const NavGen = struct {
// Note: The result here is not cached, because it generates runtime code.
const pt = self.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- const union_ty = mod.typeToUnion(ty).?;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const union_ty = zcu.typeToUnion(ty).?;
const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);
if (union_ty.flagsUnordered(ip).layout == .@"packed") {
@@ -5082,7 +5083,7 @@ const NavGen = struct {
const tag_int = if (layout.tag_size != 0) blk: {
const tag_val = try pt.enumValueFieldIndex(tag_ty, active_field);
const tag_int_val = try tag_val.intFromEnum(tag_ty, pt);
- break :blk tag_int_val.toUnsignedInt(pt);
+ break :blk tag_int_val.toUnsignedInt(zcu);
} else 0;
if (!layout.has_payload) {
@@ -5099,7 +5100,7 @@ const NavGen = struct {
}
const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
- if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function);
const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});
const active_pl_ptr_ty_id = try self.ptrType(payload_ty, .Function);
@@ -5123,15 +5124,15 @@ const NavGen = struct {
fn airUnionInit(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
const ty = self.typeOfIndex(inst);
- const union_obj = mod.typeToUnion(ty).?;
+ const union_obj = zcu.typeToUnion(ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
- const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(pt))
+ const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(zcu))
try self.resolve(extra.init)
else
null;
@@ -5140,23 +5141,23 @@ const NavGen = struct {
fn airStructFieldVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const object_ty = self.typeOf(struct_field.struct_operand);
const object_id = try self.resolve(struct_field.struct_operand);
const field_index = struct_field.field_index;
- const field_ty = object_ty.structFieldType(field_index, mod);
+ const field_ty = object_ty.structFieldType(field_index, zcu);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return null;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null;
- switch (object_ty.zigTypeTag(mod)) {
- .Struct => switch (object_ty.containerLayout(mod)) {
+ switch (object_ty.zigTypeTag(zcu)) {
+ .Struct => switch (object_ty.containerLayout(zcu)) {
.@"packed" => unreachable, // TODO
else => return try self.extractField(field_ty, object_id, field_index),
},
- .Union => switch (object_ty.containerLayout(mod)) {
+ .Union => switch (object_ty.containerLayout(zcu)) {
.@"packed" => unreachable, // TODO
else => {
// Store, ptr-elem-ptr, pointer-cast, load
@@ -5185,16 +5186,16 @@ const NavGen = struct {
fn airFieldParentPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
- const parent_ty = ty_pl.ty.toType().childType(mod);
+ const parent_ty = ty_pl.ty.toType().childType(zcu);
const result_ty_id = try self.resolveType(ty_pl.ty.toType(), .indirect);
const field_ptr = try self.resolve(extra.field_ptr);
const field_ptr_int = try self.intFromPtr(field_ptr);
- const field_offset = parent_ty.structFieldOffset(extra.field_index, pt);
+ const field_offset = parent_ty.structFieldOffset(extra.field_index, zcu);
const base_ptr_int = base_ptr_int: {
if (field_offset == 0) break :base_ptr_int field_ptr_int;
@@ -5319,10 +5320,10 @@ const NavGen = struct {
}
fn airAlloc(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
- assert(ptr_ty.ptrAddressSpace(mod) == .generic);
- const child_ty = ptr_ty.childType(mod);
+ assert(ptr_ty.ptrAddressSpace(zcu) == .generic);
+ const child_ty = ptr_ty.childType(zcu);
return try self.alloc(child_ty, .{});
}
@@ -5494,9 +5495,9 @@ const NavGen = struct {
// ir.Block in a different SPIR-V block.
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty = self.typeOfIndex(inst);
- const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(pt);
+ const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu);
const cf = switch (self.control_flow) {
.structured => |*cf| cf,
@@ -5570,7 +5571,7 @@ const NavGen = struct {
const sblock = cf.block_stack.getLast();
- if (ty.isNoReturn(mod)) {
+ if (ty.isNoReturn(zcu)) {
// If this block is noreturn, this instruction is the last of a block,
// and we must simply jump to the block's merge unconditionally.
try self.structuredBreak(next_block);
@@ -5626,13 +5627,13 @@ const NavGen = struct {
}
fn airBr(self: *NavGen, inst: Air.Inst.Index) !void {
- const pt = self.pt;
+ const zcu = self.pt.zcu;
const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
const operand_ty = self.typeOf(br.operand);
switch (self.control_flow) {
.structured => |*cf| {
- if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+ if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
const operand_id = try self.resolve(br.operand);
const block_result_var_id = cf.block_results.get(br.block_inst).?;
try self.store(operand_ty, block_result_var_id, operand_id, .{});
@@ -5643,7 +5644,7 @@ const NavGen = struct {
},
.unstructured => |cf| {
const block = cf.blocks.get(br.block_inst).?;
- if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+ if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
const operand_id = try self.resolve(br.operand);
// current_block_label should not be undefined here, lest there
// is a br or br_void in the function's body.
@@ -5770,35 +5771,35 @@ const NavGen = struct {
}
fn airLoad(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ptr_ty = self.typeOf(ty_op.operand);
const elem_ty = self.typeOfIndex(inst);
const operand = try self.resolve(ty_op.operand);
- if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null;
+ if (!ptr_ty.isVolatilePtr(zcu) and self.liveness.isUnused(inst)) return null;
- return try self.load(elem_ty, operand, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
+ return try self.load(elem_ty, operand, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
}
fn airStore(self: *NavGen, inst: Air.Inst.Index) !void {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType(mod);
+ const elem_ty = ptr_ty.childType(zcu);
const ptr = try self.resolve(bin_op.lhs);
const value = try self.resolve(bin_op.rhs);
- try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
+ try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
}
fn airRet(self: *NavGen, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const operand = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ret_ty = self.typeOf(operand);
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
- const fn_info = mod.typeToFunc(mod.navValue(self.owner_nav).typeOf(mod)).?;
- if (Type.fromInterned(fn_info.return_type).isError(mod)) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ const fn_info = zcu.typeToFunc(zcu.navValue(self.owner_nav).typeOf(zcu)).?;
+ if (Type.fromInterned(fn_info.return_type).isError(zcu)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
@@ -5815,14 +5816,14 @@ const NavGen = struct {
fn airRetLoad(self: *NavGen, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ptr_ty = self.typeOf(un_op);
- const ret_ty = ptr_ty.childType(mod);
+ const ret_ty = ptr_ty.childType(zcu);
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
- const fn_info = mod.typeToFunc(mod.navValue(self.owner_nav).typeOf(mod)).?;
- if (Type.fromInterned(fn_info.return_type).isError(mod)) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ const fn_info = zcu.typeToFunc(zcu.navValue(self.owner_nav).typeOf(zcu)).?;
+ if (Type.fromInterned(fn_info.return_type).isError(zcu)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
@@ -5834,14 +5835,14 @@ const NavGen = struct {
}
const ptr = try self.resolve(un_op);
- const value = try self.load(ret_ty, ptr, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
+ const value = try self.load(ret_ty, ptr, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{
.value = value,
});
}
fn airTry(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const err_union_id = try self.resolve(pl_op.operand);
const extra = self.air.extraData(Air.Try, pl_op.payload);
@@ -5854,7 +5855,7 @@ const NavGen = struct {
const eu_layout = self.errorUnionLayout(payload_ty);
- if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+ if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
const err_id = if (eu_layout.payload_has_bits)
try self.extractField(Type.anyerror, err_union_id, eu_layout.errorFieldIndex())
else
@@ -5911,18 +5912,18 @@ const NavGen = struct {
}
fn airErrUnionErr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const err_union_ty = self.typeOf(ty_op.operand);
const err_ty_id = try self.resolveType(Type.anyerror, .direct);
- if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+ if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
// No error possible, so just return undefined.
return try self.spv.constUndef(err_ty_id);
}
- const payload_ty = err_union_ty.errorUnionPayload(mod);
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
const eu_layout = self.errorUnionLayout(payload_ty);
if (!eu_layout.payload_has_bits) {
@@ -5947,10 +5948,10 @@ const NavGen = struct {
}
fn airWrapErrUnionErr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const err_union_ty = self.typeOfIndex(inst);
- const payload_ty = err_union_ty.errorUnionPayload(mod);
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
const operand_id = try self.resolve(ty_op.operand);
const eu_layout = self.errorUnionLayout(payload_ty);
@@ -5995,28 +5996,28 @@ const NavGen = struct {
fn airIsNull(self: *NavGen, inst: Air.Inst.Index, is_pointer: bool, pred: enum { is_null, is_non_null }) !?IdRef {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand_id = try self.resolve(un_op);
const operand_ty = self.typeOf(un_op);
- const optional_ty = if (is_pointer) operand_ty.childType(mod) else operand_ty;
- const payload_ty = optional_ty.optionalChild(mod);
+ const optional_ty = if (is_pointer) operand_ty.childType(zcu) else operand_ty;
+ const payload_ty = optional_ty.optionalChild(zcu);
const bool_ty_id = try self.resolveType(Type.bool, .direct);
- if (optional_ty.optionalReprIsPayload(mod)) {
+ if (optional_ty.optionalReprIsPayload(zcu)) {
// Pointer payload represents nullability: pointer or slice.
const loaded_id = if (is_pointer)
try self.load(optional_ty, operand_id, .{})
else
operand_id;
- const ptr_ty = if (payload_ty.isSlice(mod))
- payload_ty.slicePtrFieldType(mod)
+ const ptr_ty = if (payload_ty.isSlice(zcu))
+ payload_ty.slicePtrFieldType(zcu)
else
payload_ty;
- const ptr_id = if (payload_ty.isSlice(mod))
+ const ptr_id = if (payload_ty.isSlice(zcu))
try self.extractField(ptr_ty, loaded_id, 0)
else
loaded_id;
@@ -6036,8 +6037,8 @@ const NavGen = struct {
const is_non_null_id = blk: {
if (is_pointer) {
- if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
- const storage_class = self.spvStorageClass(operand_ty.ptrAddressSpace(mod));
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ const storage_class = self.spvStorageClass(operand_ty.ptrAddressSpace(zcu));
const bool_ptr_ty_id = try self.ptrType(Type.bool, storage_class);
const tag_ptr_id = try self.accessChain(bool_ptr_ty_id, operand_id, &.{1});
break :blk try self.load(Type.bool, tag_ptr_id, .{});
@@ -6046,7 +6047,7 @@ const NavGen = struct {
break :blk try self.load(Type.bool, operand_id, .{});
}
- break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(pt))
+ break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu))
try self.extractField(Type.bool, operand_id, 1)
else
// Optional representation is bool indicating whether the optional is set
@@ -6071,16 +6072,16 @@ const NavGen = struct {
}
fn airIsErr(self: *NavGen, inst: Air.Inst.Index, pred: enum { is_err, is_non_err }) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand_id = try self.resolve(un_op);
const err_union_ty = self.typeOf(un_op);
- if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+ if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
return try self.constBool(pred == .is_non_err, .direct);
}
- const payload_ty = err_union_ty.errorUnionPayload(mod);
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
const eu_layout = self.errorUnionLayout(payload_ty);
const bool_ty_id = try self.resolveType(Type.bool, .direct);
@@ -6105,15 +6106,15 @@ const NavGen = struct {
fn airUnwrapOptional(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const optional_ty = self.typeOf(ty_op.operand);
const payload_ty = self.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) return null;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null;
- if (optional_ty.optionalReprIsPayload(mod)) {
+ if (optional_ty.optionalReprIsPayload(zcu)) {
return operand_id;
}
@@ -6122,22 +6123,22 @@ const NavGen = struct {
fn airUnwrapOptionalPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
- const optional_ty = operand_ty.childType(mod);
- const payload_ty = optional_ty.optionalChild(mod);
+ const optional_ty = operand_ty.childType(zcu);
+ const payload_ty = optional_ty.optionalChild(zcu);
const result_ty = self.typeOfIndex(inst);
const result_ty_id = try self.resolveType(result_ty, .direct);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// There is no payload, but we still need to return a valid pointer.
// We can just return anything here, so just return a pointer to the operand.
return try self.bitCast(result_ty, operand_ty, operand_id);
}
- if (optional_ty.optionalReprIsPayload(mod)) {
+ if (optional_ty.optionalReprIsPayload(zcu)) {
// They are the same value.
return try self.bitCast(result_ty, operand_ty, operand_id);
}
@@ -6147,18 +6148,18 @@ const NavGen = struct {
fn airWrapOptional(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const payload_ty = self.typeOf(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return try self.constBool(true, .indirect);
}
const operand_id = try self.resolve(ty_op.operand);
const optional_ty = self.typeOfIndex(inst);
- if (optional_ty.optionalReprIsPayload(mod)) {
+ if (optional_ty.optionalReprIsPayload(zcu)) {
return operand_id;
}
@@ -6170,7 +6171,7 @@ const NavGen = struct {
fn airSwitchBr(self: *NavGen, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const target = self.getTarget();
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond_ty = self.typeOf(pl_op.operand);
@@ -6178,18 +6179,18 @@ const NavGen = struct {
var cond_indirect = try self.convertToIndirect(cond_ty, cond);
const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
- const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) {
+ const cond_words: u32 = switch (cond_ty.zigTypeTag(zcu)) {
.Bool, .ErrorSet => 1,
.Int => blk: {
- const bits = cond_ty.intInfo(mod).bits;
+ const bits = cond_ty.intInfo(zcu).bits;
const backing_bits = self.backingIntBits(bits) orelse {
return self.todo("implement composite int switch", .{});
};
break :blk if (backing_bits <= 32) 1 else 2;
},
.Enum => blk: {
- const int_ty = cond_ty.intTagType(mod);
- const int_info = int_ty.intInfo(mod);
+ const int_ty = cond_ty.intTagType(zcu);
+ const int_info = int_ty.intInfo(zcu);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
return self.todo("implement composite int switch", .{});
};
@@ -6200,7 +6201,7 @@ const NavGen = struct {
break :blk target.ptrBitWidth() / 32;
},
// TODO: Figure out which types apply here, and work around them as we can only do integers.
- else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}),
+ else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(zcu))}),
};
const num_cases = switch_br.data.cases_len;
@@ -6255,14 +6256,14 @@ const NavGen = struct {
for (items) |item| {
const value = (try self.air.value(item, pt)) orelse unreachable;
- const int_val: u64 = switch (cond_ty.zigTypeTag(mod)) {
- .Bool, .Int => if (cond_ty.isSignedInt(mod)) @bitCast(value.toSignedInt(pt)) else value.toUnsignedInt(pt),
+ const int_val: u64 = switch (cond_ty.zigTypeTag(zcu)) {
+ .Bool, .Int => if (cond_ty.isSignedInt(zcu)) @bitCast(value.toSignedInt(zcu)) else value.toUnsignedInt(zcu),
.Enum => blk: {
// TODO: figure out if cond_ty is correct (something with enum literals)
- break :blk (try value.intFromEnum(cond_ty, pt)).toUnsignedInt(pt); // TODO: composite integer constants
+ break :blk (try value.intFromEnum(cond_ty, pt)).toUnsignedInt(zcu); // TODO: composite integer constants
},
- .ErrorSet => value.getErrorInt(mod),
- .Pointer => value.toUnsignedInt(pt),
+ .ErrorSet => value.getErrorInt(zcu),
+ .Pointer => value.toUnsignedInt(zcu),
else => unreachable,
};
const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) {
@@ -6343,9 +6344,9 @@ const NavGen = struct {
fn airDbgStmt(self: *NavGen, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
- const path = mod.navFileScope(self.owner_nav).sub_file_path;
+ const path = zcu.navFileScope(self.owner_nav).sub_file_path;
try self.func.body.emit(self.spv.gpa, .OpLine, .{
.file = try self.spv.resolveString(path),
.line = self.base_line + dbg_stmt.line + 1,
@@ -6354,12 +6355,12 @@ const NavGen = struct {
}
fn airDbgInlineBlock(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const inst_datas = self.air.instructions.items(.data);
const extra = self.air.extraData(Air.DbgInlineBlock, inst_datas[@intFromEnum(inst)].ty_pl.payload);
const old_base_line = self.base_line;
defer self.base_line = old_base_line;
- self.base_line = mod.navSrcLine(mod.funcInfo(extra.data.func).owner_nav);
+ self.base_line = zcu.navSrcLine(zcu.funcInfo(extra.data.func).owner_nav);
return self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
}
@@ -6371,7 +6372,7 @@ const NavGen = struct {
}
fn airAssembly(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const mod = self.pt.zcu;
+ const zcu = self.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
@@ -6453,20 +6454,20 @@ const NavGen = struct {
// TODO: Translate proper error locations.
assert(as.errors.items.len != 0);
assert(self.error_msg == null);
- const src_loc = mod.navSrcLoc(self.owner_nav);
- self.error_msg = try Zcu.ErrorMsg.create(mod.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
- const notes = try mod.gpa.alloc(Zcu.ErrorMsg, as.errors.items.len);
+ const src_loc = zcu.navSrcLoc(self.owner_nav);
+ self.error_msg = try Zcu.ErrorMsg.create(zcu.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
+ const notes = try zcu.gpa.alloc(Zcu.ErrorMsg, as.errors.items.len);
// Sub-scope to prevent `return error.CodegenFail` from running the errdefers.
{
- errdefer mod.gpa.free(notes);
+ errdefer zcu.gpa.free(notes);
var i: usize = 0;
errdefer for (notes[0..i]) |*note| {
- note.deinit(mod.gpa);
+ note.deinit(zcu.gpa);
};
while (i < as.errors.items.len) : (i += 1) {
- notes[i] = try Zcu.ErrorMsg.init(mod.gpa, src_loc, "{s}", .{as.errors.items[i].msg});
+ notes[i] = try Zcu.ErrorMsg.init(zcu.gpa, src_loc, "{s}", .{as.errors.items[i].msg});
}
}
self.error_msg.?.notes = notes;
@@ -6503,17 +6504,17 @@ const NavGen = struct {
_ = modifier;
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
const callee_ty = self.typeOf(pl_op.operand);
- const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
+ const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) {
.Fn => callee_ty,
.Pointer => return self.fail("cannot call function pointers", .{}),
else => unreachable,
};
- const fn_info = mod.typeToFunc(zig_fn_ty).?;
+ const fn_info = zcu.typeToFunc(zig_fn_ty).?;
const return_type = fn_info.return_type;
const result_type_id = try self.resolveFnReturnType(Type.fromInterned(return_type));
@@ -6529,7 +6530,7 @@ const NavGen = struct {
// before starting to emit OpFunctionCall instructions. Hence the
// temporary params buffer.
const arg_ty = self.typeOf(arg);
- if (!arg_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!arg_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
const arg_id = try self.resolve(arg);
params[n_params] = arg_id;
@@ -6547,7 +6548,7 @@ const NavGen = struct {
try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
}
- if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(pt)) {
+ if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(zcu)) {
return null;
}
@@ -6604,12 +6605,12 @@ const NavGen = struct {
}
fn typeOf(self: *NavGen, inst: Air.Inst.Ref) Type {
- const mod = self.pt.zcu;
- return self.air.typeOf(inst, &mod.intern_pool);
+ const zcu = self.pt.zcu;
+ return self.air.typeOf(inst, &zcu.intern_pool);
}
fn typeOfIndex(self: *NavGen, inst: Air.Inst.Index) Type {
- const mod = self.pt.zcu;
- return self.air.typeOfIndex(inst, &mod.intern_pool);
+ const zcu = self.pt.zcu;
+ return self.air.typeOfIndex(inst, &zcu.intern_pool);
}
};