path: root/src/arch/x86_64/CodeGen.zig
author    David Rubin <daviru007@icloud.com>  2024-08-11 03:14:12 -0700
committer David Rubin <daviru007@icloud.com>  2024-08-25 15:16:42 -0700
commit    b4bb64ce78bf2dee9437f366a362ef4d8c77b204 (patch)
tree      218658e59522e59a432b6e9adea9f1993c0fb51d /src/arch/x86_64/CodeGen.zig
parent    849c31a6cc3d1e554f97c2ccf7aaa886070cfadd (diff)
sema: rework type resolution to use Zcu when possible
Diffstat (limited to 'src/arch/x86_64/CodeGen.zig')
-rw-r--r--  src/arch/x86_64/CodeGen.zig  1448
1 file changed, 726 insertions(+), 722 deletions(-)
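
The hunks below apply one mechanical pattern: read-only type queries such as abiSize, abiAlignment, intInfo, and zigTypeTag now take the shared *Zcu instead of the per-thread handle Zcu.PerThread, while operations that may intern new types or values (pt.intType, pt.intValue, ty.fmt(pt)) keep taking pt. A minimal self-contained Zig sketch of that split, using toy stand-in types rather than the real compiler API:

const std = @import("std");

// Toy model of the refactor; every name here is an illustrative
// stand-in, not the actual Zig compiler API.
const Zcu = struct {
    usize_abi_size: u64 = 8,

    // A pure query: it only reads shared state, so *Zcu suffices.
    fn abiSize(zcu: *const Zcu) u64 {
        return zcu.usize_abi_size;
    }
};

const PerThread = struct {
    zcu: *Zcu,
    thread_id: u8,

    // Interning a new value may touch thread-local state, so it
    // keeps the full per-thread handle.
    fn internValue(pt: PerThread, v: u64) u64 {
        _ = pt.thread_id; // would pick a per-thread intern shard
        return v;
    }
};

pub fn main() void {
    var zcu: Zcu = .{};
    const pt: PerThread = .{ .zcu = &zcu, .thread_id = 0 };

    // Call sites that hold pt reach queries through pt.zcu, which is
    // exactly the `const zcu = pt.zcu;` pattern repeated in the diff.
    const size = pt.zcu.abiSize();
    std.debug.print("abiSize = {}, interned = {}\n", .{ size, pt.internValue(size) });
}
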
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 316389a7c2..94814b70f6 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -732,14 +732,14 @@ const FrameAlloc = struct {
.ref_count = 0,
};
}
- fn initType(ty: Type, pt: Zcu.PerThread) FrameAlloc {
+ fn initType(ty: Type, zcu: *Zcu) FrameAlloc {
return init(.{
- .size = ty.abiSize(pt),
- .alignment = ty.abiAlignment(pt),
+ .size = ty.abiSize(zcu),
+ .alignment = ty.abiAlignment(zcu),
});
}
- fn initSpill(ty: Type, pt: Zcu.PerThread) FrameAlloc {
- const abi_size = ty.abiSize(pt);
+ fn initSpill(ty: Type, zcu: *Zcu) FrameAlloc {
+ const abi_size = ty.abiSize(zcu);
const spill_size = if (abi_size < 8)
math.ceilPowerOfTwoAssert(u64, abi_size)
else
@@ -747,7 +747,7 @@ const FrameAlloc = struct {
return init(.{
.size = spill_size,
.pad = @intCast(spill_size - abi_size),
- .alignment = ty.abiAlignment(pt).maxStrict(
+ .alignment = ty.abiAlignment(zcu).maxStrict(
Alignment.fromNonzeroByteUnits(@min(spill_size, 8)),
),
});
@@ -756,7 +756,7 @@ const FrameAlloc = struct {
const StackAllocation = struct {
inst: ?Air.Inst.Index,
- /// TODO do we need size? should be determined by inst.ty.abiSize(pt)
+ /// TODO do we need size? should be determined by inst.ty.abiSize(zcu)
size: u32,
};
@@ -859,11 +859,11 @@ pub fn generate(
function.args = call_info.args;
function.ret_mcv = call_info.return_value;
function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), FrameAlloc.init(.{
- .size = Type.usize.abiSize(pt),
- .alignment = Type.usize.abiAlignment(pt).min(call_info.stack_align),
+ .size = Type.usize.abiSize(zcu),
+ .alignment = Type.usize.abiAlignment(zcu).min(call_info.stack_align),
}));
function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), FrameAlloc.init(.{
- .size = Type.usize.abiSize(pt),
+ .size = Type.usize.abiSize(zcu),
.alignment = Alignment.min(
call_info.stack_align,
Alignment.fromNonzeroByteUnits(function.target.stackAlignment()),
@@ -1872,8 +1872,8 @@ fn asmMemoryRegisterImmediate(
fn gen(self: *Self) InnerError!void {
const pt = self.pt;
- const mod = pt.zcu;
- const fn_info = mod.typeToFunc(self.fn_type).?;
+ const zcu = pt.zcu;
+ const fn_info = zcu.typeToFunc(self.fn_type).?;
const cc = abi.resolveCallingConvention(fn_info.cc, self.target.*);
if (cc != .Naked) {
try self.asmRegister(.{ ._, .push }, .rbp);
@@ -1890,7 +1890,7 @@ fn gen(self: *Self) InnerError!void {
// The address where to store the return value for the caller is in a
// register which the callee is free to clobber. Therefore, we purposely
// spill it to stack immediately.
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(Type.usize, pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(Type.usize, zcu));
try self.genSetMem(
.{ .frame = frame_index },
0,
@@ -2099,8 +2099,8 @@ fn checkInvariantsAfterAirInst(self: *Self, inst: Air.Inst.Index, old_air_bookke
fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
const pt = self.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const air_tags = self.air.instructions.items(.tag);
self.arg_index = 0;
@@ -2370,9 +2370,9 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
const pt = self.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- switch (Type.fromInterned(lazy_sym.ty).zigTypeTag(mod)) {
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ switch (Type.fromInterned(lazy_sym.ty).zigTypeTag(zcu)) {
.Enum => {
const enum_ty = Type.fromInterned(lazy_sym.ty);
wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)});
@@ -2385,7 +2385,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
const ret_reg = param_regs[0];
const enum_mcv = MCValue{ .register = param_regs[1] };
- const exitlude_jump_relocs = try self.gpa.alloc(Mir.Inst.Index, enum_ty.enumFieldCount(mod));
+ const exitlude_jump_relocs = try self.gpa.alloc(Mir.Inst.Index, enum_ty.enumFieldCount(zcu));
defer self.gpa.free(exitlude_jump_relocs);
const data_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
@@ -2394,7 +2394,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
try self.genLazySymbolRef(.lea, data_reg, .{ .kind = .const_data, .ty = enum_ty.toIntern() });
var data_off: i32 = 0;
- const tag_names = enum_ty.enumFields(mod);
+ const tag_names = enum_ty.enumFields(zcu);
for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, tag_index| {
const tag_name_len = tag_names.get(ip)[tag_index].length(ip);
const tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index));
@@ -2630,14 +2630,14 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
- const val_ty = ptr_ty.childType(mod);
+ const val_ty = ptr_ty.childType(zcu);
return self.allocFrameIndex(FrameAlloc.init(.{
- .size = math.cast(u32, val_ty.abiSize(pt)) orelse {
+ .size = math.cast(u32, val_ty.abiSize(zcu)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(pt)});
},
- .alignment = ptr_ty.ptrAlignment(pt).max(.@"1"),
+ .alignment = ptr_ty.ptrAlignment(zcu).max(.@"1"),
}));
}
@@ -2651,20 +2651,20 @@ fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue {
fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
- const abi_size = math.cast(u32, ty.abiSize(pt)) orelse {
+ const zcu = pt.zcu;
+ const abi_size = math.cast(u32, ty.abiSize(zcu)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(pt)});
};
if (reg_ok) need_mem: {
- if (abi_size <= @as(u32, switch (ty.zigTypeTag(mod)) {
+ if (abi_size <= @as(u32, switch (ty.zigTypeTag(zcu)) {
.Float => switch (ty.floatBits(self.target.*)) {
16, 32, 64, 128 => 16,
80 => break :need_mem,
else => unreachable,
},
- .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
- .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+ .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16,
80 => break :need_mem,
else => unreachable,
@@ -2679,21 +2679,21 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b
}
}
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ty, pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ty, zcu));
return .{ .load_frame = .{ .index = frame_index } };
}
fn regClassForType(self: *Self, ty: Type) RegisterManager.RegisterBitSet {
const pt = self.pt;
- const mod = pt.zcu;
- return switch (ty.zigTypeTag(mod)) {
+ const zcu = pt.zcu;
+ return switch (ty.zigTypeTag(zcu)) {
.Float => switch (ty.floatBits(self.target.*)) {
80 => abi.RegisterClass.x87,
else => abi.RegisterClass.sse,
},
- .Vector => switch (ty.childType(mod).toIntern()) {
+ .Vector => switch (ty.childType(zcu).toIntern()) {
.bool_type, .u1_type => abi.RegisterClass.gp,
- else => if (ty.isAbiInt(mod) and ty.intInfo(mod).bits == 1)
+ else => if (ty.isAbiInt(zcu) and ty.intInfo(zcu).bits == 1)
abi.RegisterClass.gp
else
abi.RegisterClass.sse,
@@ -3001,13 +3001,13 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
- const dst_scalar_ty = dst_ty.scalarType(mod);
+ const dst_scalar_ty = dst_ty.scalarType(zcu);
const dst_bits = dst_scalar_ty.floatBits(self.target.*);
const src_ty = self.typeOf(ty_op.operand);
- const src_scalar_ty = src_ty.scalarType(mod);
+ const src_scalar_ty = src_ty.scalarType(zcu);
const src_bits = src_scalar_ty.floatBits(self.target.*);
const result = result: {
@@ -3032,7 +3032,7 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
},
else => unreachable,
}) {
- if (dst_ty.isVector(mod)) break :result null;
+ if (dst_ty.isVector(zcu)) break :result null;
var callee_buf: ["__extend?f?f2".len]u8 = undefined;
break :result try self.genCall(.{ .lib = .{
.return_type = self.floatCompilerRtAbiType(dst_scalar_ty, src_scalar_ty).toIntern(),
@@ -3044,18 +3044,18 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
} }, &.{src_scalar_ty}, &.{.{ .air_ref = ty_op.operand }});
}
- const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
const src_mcv = try self.resolveInst(ty_op.operand);
const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
src_mcv
else
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
const dst_reg = dst_mcv.getReg().?;
- const dst_alias = registerAlias(dst_reg, @intCast(@max(dst_ty.abiSize(pt), 16)));
+ const dst_alias = registerAlias(dst_reg, @intCast(@max(dst_ty.abiSize(zcu), 16)));
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const vec_len = if (dst_ty.isVector(mod)) dst_ty.vectorLen(mod) else 1;
+ const vec_len = if (dst_ty.isVector(zcu)) dst_ty.vectorLen(zcu) else 1;
if (src_bits == 16) {
assert(self.hasFeature(.f16c));
const mat_src_reg = if (src_mcv.isRegister())
@@ -3137,30 +3137,30 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_ty = self.typeOf(ty_op.operand);
const dst_ty = self.typeOfIndex(inst);
const result = @as(?MCValue, result: {
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
- const src_int_info = src_ty.intInfo(mod);
- const dst_int_info = dst_ty.intInfo(mod);
+ const src_int_info = src_ty.intInfo(zcu);
+ const dst_int_info = dst_ty.intInfo(zcu);
const extend = switch (src_int_info.signedness) {
.signed => dst_int_info,
.unsigned => src_int_info,
}.signedness;
const src_mcv = try self.resolveInst(ty_op.operand);
- if (dst_ty.isVector(mod)) {
- const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
+ if (dst_ty.isVector(zcu)) {
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
const max_abi_size = @max(dst_abi_size, src_abi_size);
if (max_abi_size > @as(u32, if (self.hasFeature(.avx2)) 32 else 16)) break :result null;
const has_avx = self.hasFeature(.avx);
- const dst_elem_abi_size = dst_ty.childType(mod).abiSize(pt);
- const src_elem_abi_size = src_ty.childType(mod).abiSize(pt);
+ const dst_elem_abi_size = dst_ty.childType(zcu).abiSize(zcu);
+ const src_elem_abi_size = src_ty.childType(zcu).abiSize(zcu);
switch (math.order(dst_elem_abi_size, src_elem_abi_size)) {
.lt => {
const mir_tag: Mir.Inst.FixedTag = switch (dst_elem_abi_size) {
@@ -3396,13 +3396,13 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
const src_ty = self.typeOf(ty_op.operand);
- const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
const result = result: {
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -3414,7 +3414,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
src_mcv
else if (dst_abi_size <= 8)
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv)
- else if (dst_abi_size <= 16 and !dst_ty.isVector(mod)) dst: {
+ else if (dst_abi_size <= 16 and !dst_ty.isVector(zcu)) dst: {
const dst_regs =
try self.register_manager.allocRegs(2, .{ inst, inst }, abi.RegisterClass.gp);
const dst_mcv: MCValue = .{ .register_pair = dst_regs };
@@ -3429,16 +3429,16 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
break :dst dst_mcv;
};
- if (dst_ty.zigTypeTag(mod) == .Vector) {
- assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen(mod) == src_ty.vectorLen(mod));
- const dst_elem_ty = dst_ty.childType(mod);
- const dst_elem_abi_size: u32 = @intCast(dst_elem_ty.abiSize(pt));
- const src_elem_ty = src_ty.childType(mod);
- const src_elem_abi_size: u32 = @intCast(src_elem_ty.abiSize(pt));
+ if (dst_ty.zigTypeTag(zcu) == .Vector) {
+ assert(src_ty.zigTypeTag(zcu) == .Vector and dst_ty.vectorLen(zcu) == src_ty.vectorLen(zcu));
+ const dst_elem_ty = dst_ty.childType(zcu);
+ const dst_elem_abi_size: u32 = @intCast(dst_elem_ty.abiSize(zcu));
+ const src_elem_ty = src_ty.childType(zcu);
+ const src_elem_abi_size: u32 = @intCast(src_elem_ty.abiSize(zcu));
const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_elem_abi_size) {
1 => switch (src_elem_abi_size) {
- 2 => switch (dst_ty.vectorLen(mod)) {
+ 2 => switch (dst_ty.vectorLen(zcu)) {
1...8 => if (self.hasFeature(.avx)) .{ .vp_b, .ackusw } else .{ .p_b, .ackusw },
9...16 => if (self.hasFeature(.avx2)) .{ .vp_b, .ackusw } else null,
else => null,
@@ -3446,7 +3446,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
else => null,
},
2 => switch (src_elem_abi_size) {
- 4 => switch (dst_ty.vectorLen(mod)) {
+ 4 => switch (dst_ty.vectorLen(zcu)) {
1...4 => if (self.hasFeature(.avx))
.{ .vp_w, .ackusd }
else if (self.hasFeature(.sse4_1))
@@ -3461,8 +3461,8 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
else => null,
}) orelse return self.fail("TODO implement airTrunc for {}", .{dst_ty.fmt(pt)});
- const dst_info = dst_elem_ty.intInfo(mod);
- const src_info = src_elem_ty.intInfo(mod);
+ const dst_info = dst_elem_ty.intInfo(zcu);
+ const src_info = src_elem_ty.intInfo(zcu);
const mask_val = try pt.intValue(src_elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(64 - dst_info.bits));
@@ -3470,7 +3470,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
.len = @intCast(@divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)),
.child = src_elem_ty.ip_index,
});
- const splat_abi_size: u32 = @intCast(splat_ty.abiSize(pt));
+ const splat_abi_size: u32 = @intCast(splat_ty.abiSize(zcu));
const splat_val = try pt.intern(.{ .aggregate = .{
.ty = splat_ty.ip_index,
@@ -3528,7 +3528,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
try self.truncateRegister(dst_ty, dst_mcv.register.to64());
}
} else if (dst_abi_size <= 16) {
- const dst_info = dst_ty.intInfo(mod);
+ const dst_info = dst_ty.intInfo(zcu);
const high_ty = try pt.intType(dst_info.signedness, dst_info.bits - 64);
if (self.regExtraBits(high_ty) > 0) {
try self.truncateRegister(high_ty, dst_mcv.register_pair[1].to64());
@@ -3554,12 +3554,12 @@ fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
- const pt = self.pt;
+ const zcu = self.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const slice_ty = self.typeOfIndex(inst);
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu));
const ptr_ty = self.typeOf(bin_op.lhs);
try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, .{ .air_ref = bin_op.lhs }, .{});
@@ -3567,7 +3567,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
const len_ty = self.typeOf(bin_op.rhs);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(ptr_ty.abiSize(pt)),
+ @intCast(ptr_ty.abiSize(zcu)),
len_ty,
.{ .air_ref = bin_op.rhs },
.{},
@@ -3585,14 +3585,14 @@ fn airUnOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dst_mcv = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
const dst_ty = self.typeOfIndex(inst);
- if (dst_ty.isAbiInt(mod)) {
- const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
- const bit_size: u32 = @intCast(dst_ty.bitSize(pt));
+ if (dst_ty.isAbiInt(zcu)) {
+ const abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
+ const bit_size: u32 = @intCast(dst_ty.bitSize(zcu));
if (abi_size * 8 > bit_size) {
const dst_lock = switch (dst_mcv) {
.register => |dst_reg| self.register_manager.lockRegAssumeUnused(dst_reg),
@@ -3607,7 +3607,7 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
- const hi_ty = try pt.intType(.unsigned, @intCast((dst_ty.bitSize(pt) - 1) % 64 + 1));
+ const hi_ty = try pt.intType(.unsigned, @intCast((dst_ty.bitSize(zcu) - 1) % 64 + 1));
const hi_mcv = dst_mcv.address().offset(@intCast(bit_size / 64 * 8)).deref();
try self.genSetReg(tmp_reg, hi_ty, hi_mcv, .{});
try self.truncateRegister(dst_ty, tmp_reg);
@@ -3627,17 +3627,17 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const air_tag = self.air.instructions.items(.tag);
const air_data = self.air.instructions.items(.data);
const dst_ty = self.typeOf(dst_air);
- const dst_info = dst_ty.intInfo(mod);
+ const dst_info = dst_ty.intInfo(zcu);
if (dst_air.toIndex()) |inst| {
switch (air_tag[@intFromEnum(inst)]) {
.intcast => {
const src_ty = self.typeOf(air_data[@intFromEnum(inst)].ty_op.operand);
- const src_info = src_ty.intInfo(mod);
+ const src_info = src_ty.intInfo(zcu);
return @min(switch (src_info.signedness) {
.signed => switch (dst_info.signedness) {
.signed => src_info.bits,
@@ -3653,7 +3653,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
}
} else if (dst_air.toInterned()) |ip_index| {
var space: Value.BigIntSpace = undefined;
- const src_int = Value.fromInterned(ip_index).toBigInt(&space, pt);
+ const src_int = Value.fromInterned(ip_index).toBigInt(&space, zcu);
return @as(u16, @intCast(src_int.bitCountTwosComp())) +
@intFromBool(src_int.positive and dst_info.signedness == .signed);
}
@@ -3662,18 +3662,18 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result = result: {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const dst_ty = self.typeOfIndex(inst);
- switch (dst_ty.zigTypeTag(mod)) {
+ switch (dst_ty.zigTypeTag(zcu)) {
.Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs),
else => {},
}
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
- const dst_info = dst_ty.intInfo(mod);
+ const dst_info = dst_ty.intInfo(zcu);
const src_ty = try pt.intType(dst_info.signedness, switch (tag) {
else => unreachable,
.mul, .mul_wrap => @max(
@@ -3683,20 +3683,20 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
),
.div_trunc, .div_floor, .div_exact, .rem, .mod => dst_info.bits,
});
- const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
if (dst_abi_size == 16 and src_abi_size == 16) switch (tag) {
else => unreachable,
.mul, .mul_wrap => {},
.div_trunc, .div_floor, .div_exact, .rem, .mod => {
- const signed = dst_ty.isSignedInt(mod);
+ const signed = dst_ty.isSignedInt(zcu);
var callee_buf: ["__udiv?i3".len]u8 = undefined;
const signed_div_floor_state: struct {
frame_index: FrameIndex,
state: State,
reloc: Mir.Inst.Index,
} = if (signed and tag == .div_floor) state: {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(Type.usize, pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(Type.usize, zcu));
try self.asmMemoryImmediate(
.{ ._, .mov },
.{ .base = .{ .frame = frame_index }, .mod = .{ .rm = .{ .size = .qword } } },
@@ -3771,7 +3771,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
.rem, .mod => "mod",
else => unreachable,
},
- intCompilerRtAbiName(@intCast(dst_ty.bitSize(pt))),
+ intCompilerRtAbiName(@intCast(dst_ty.bitSize(zcu))),
}) catch unreachable,
} },
&.{ src_ty, src_ty },
@@ -3800,7 +3800,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
.return_type = dst_ty.toIntern(),
.param_types = &.{ src_ty.toIntern(), src_ty.toIntern() },
.callee = std.fmt.bufPrint(&callee_buf, "__div{c}i3", .{
- intCompilerRtAbiName(@intCast(dst_ty.bitSize(pt))),
+ intCompilerRtAbiName(@intCast(dst_ty.bitSize(zcu))),
}) catch unreachable,
} },
&.{ src_ty, src_ty },
@@ -3892,10 +3892,10 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ty = self.typeOf(bin_op.lhs);
- if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(pt) > 8) return self.fail(
+ if (ty.zigTypeTag(zcu) == .Vector or ty.abiSize(zcu) > 8) return self.fail(
"TODO implement airAddSat for {}",
.{ty.fmt(pt)},
);
@@ -3923,7 +3923,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
const reg_bits = self.regBitSize(ty);
const reg_extra_bits = self.regExtraBits(ty);
- const cc: Condition = if (ty.isSignedInt(mod)) cc: {
+ const cc: Condition = if (ty.isSignedInt(zcu)) cc: {
if (reg_extra_bits > 0) {
try self.genShiftBinOpMir(
.{ ._l, .sa },
@@ -3962,7 +3962,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
break :cc .o;
} else cc: {
try self.genSetReg(limit_reg, ty, .{
- .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - ty.bitSize(pt)),
+ .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - ty.bitSize(zcu)),
}, .{});
try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv);
@@ -3973,14 +3973,14 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
break :cc .c;
};
- const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(zcu))), 2);
try self.asmCmovccRegisterRegister(
cc,
registerAlias(dst_reg, cmov_abi_size),
registerAlias(limit_reg, cmov_abi_size),
);
- if (reg_extra_bits > 0 and ty.isSignedInt(mod)) try self.genShiftBinOpMir(
+ if (reg_extra_bits > 0 and ty.isSignedInt(zcu)) try self.genShiftBinOpMir(
.{ ._r, .sa },
ty,
dst_mcv,
@@ -3993,10 +3993,10 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ty = self.typeOf(bin_op.lhs);
- if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(pt) > 8) return self.fail(
+ if (ty.zigTypeTag(zcu) == .Vector or ty.abiSize(zcu) > 8) return self.fail(
"TODO implement airSubSat for {}",
.{ty.fmt(pt)},
);
@@ -4024,7 +4024,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
const reg_bits = self.regBitSize(ty);
const reg_extra_bits = self.regExtraBits(ty);
- const cc: Condition = if (ty.isSignedInt(mod)) cc: {
+ const cc: Condition = if (ty.isSignedInt(zcu)) cc: {
if (reg_extra_bits > 0) {
try self.genShiftBinOpMir(
.{ ._l, .sa },
@@ -4067,14 +4067,14 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
break :cc .c;
};
- const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(zcu))), 2);
try self.asmCmovccRegisterRegister(
cc,
registerAlias(dst_reg, cmov_abi_size),
registerAlias(limit_reg, cmov_abi_size),
);
- if (reg_extra_bits > 0 and ty.isSignedInt(mod)) try self.genShiftBinOpMir(
+ if (reg_extra_bits > 0 and ty.isSignedInt(zcu)) try self.genShiftBinOpMir(
.{ ._r, .sa },
ty,
dst_mcv,
@@ -4087,7 +4087,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ty = self.typeOf(bin_op.lhs);
@@ -4170,7 +4170,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
break :result dst_mcv;
}
- if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(pt) > 8) return self.fail(
+ if (ty.zigTypeTag(zcu) == .Vector or ty.abiSize(zcu) > 8) return self.fail(
"TODO implement airMulSat for {}",
.{ty.fmt(pt)},
);
@@ -4199,7 +4199,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
defer self.register_manager.unlockReg(limit_lock);
const reg_bits = self.regBitSize(ty);
- const cc: Condition = if (ty.isSignedInt(mod)) cc: {
+ const cc: Condition = if (ty.isSignedInt(zcu)) cc: {
try self.genSetReg(limit_reg, ty, lhs_mcv, .{});
try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv);
try self.genShiftBinOpMir(
@@ -4221,7 +4221,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
};
const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
- const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(zcu))), 2);
try self.asmCmovccRegisterRegister(
cc,
registerAlias(dst_mcv.register, cmov_abi_size),
@@ -4234,13 +4234,13 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = result: {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const ty = self.typeOf(bin_op.lhs);
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(zcu)) {
.Vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}),
.Int => {
try self.spillEflagsIfOccupied();
@@ -4253,7 +4253,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.sub_with_overflow => .sub,
else => unreachable,
}, bin_op.lhs, bin_op.rhs);
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
const cc: Condition = switch (int_info.signedness) {
.unsigned => .c,
.signed => .o,
@@ -4270,17 +4270,17 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
+ try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, zcu));
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(1, pt)),
+ @intCast(tuple_ty.structFieldOffset(1, zcu)),
Type.u1,
.{ .eflags = cc },
.{},
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(0, pt)),
+ @intCast(tuple_ty.structFieldOffset(0, zcu)),
ty,
partial_mcv,
.{},
@@ -4289,7 +4289,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
+ try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, zcu));
try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
break :result .{ .load_frame = .{ .index = frame_index } };
},
@@ -4301,13 +4301,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = result: {
const lhs_ty = self.typeOf(bin_op.lhs);
const rhs_ty = self.typeOf(bin_op.rhs);
- switch (lhs_ty.zigTypeTag(mod)) {
+ switch (lhs_ty.zigTypeTag(zcu)) {
.Vector => return self.fail("TODO implement shl with overflow for Vector type", .{}),
.Int => {
try self.spillEflagsIfOccupied();
@@ -4318,7 +4318,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const int_info = lhs_ty.intInfo(mod);
+ const int_info = lhs_ty.intInfo(zcu);
const partial_mcv = try self.genShiftBinOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty);
const partial_lock = switch (partial_mcv) {
@@ -4348,18 +4348,18 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
+ try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, zcu));
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(1, pt)),
- tuple_ty.structFieldType(1, mod),
+ @intCast(tuple_ty.structFieldOffset(1, zcu)),
+ tuple_ty.structFieldType(1, zcu),
.{ .eflags = cc },
.{},
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(0, pt)),
- tuple_ty.structFieldType(0, mod),
+ @intCast(tuple_ty.structFieldOffset(0, zcu)),
+ tuple_ty.structFieldType(0, zcu),
partial_mcv,
.{},
);
@@ -4367,7 +4367,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
+ try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, zcu));
try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
break :result .{ .load_frame = .{ .index = frame_index } };
},
@@ -4385,15 +4385,15 @@ fn genSetFrameTruncatedOverflowCompare(
overflow_cc: ?Condition,
) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const src_lock = switch (src_mcv) {
.register => |reg| self.register_manager.lockReg(reg),
else => null,
};
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
- const ty = tuple_ty.structFieldType(0, mod);
- const int_info = ty.intInfo(mod);
+ const ty = tuple_ty.structFieldType(0, zcu);
+ const int_info = ty.intInfo(zcu);
const hi_bits = (int_info.bits - 1) % 64 + 1;
const hi_ty = try pt.intType(int_info.signedness, hi_bits);
@@ -4432,7 +4432,7 @@ fn genSetFrameTruncatedOverflowCompare(
);
}
- const payload_off: i32 = @intCast(tuple_ty.structFieldOffset(0, pt));
+ const payload_off: i32 = @intCast(tuple_ty.structFieldOffset(0, zcu));
if (hi_limb_off > 0) try self.genSetMem(
.{ .frame = frame_index },
payload_off,
@@ -4449,8 +4449,8 @@ fn genSetFrameTruncatedOverflowCompare(
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(1, pt)),
- tuple_ty.structFieldType(1, mod),
+ @intCast(tuple_ty.structFieldOffset(1, zcu)),
+ tuple_ty.structFieldType(1, zcu),
if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne },
.{},
);
@@ -4458,18 +4458,18 @@ fn genSetFrameTruncatedOverflowCompare(
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const tuple_ty = self.typeOfIndex(inst);
const dst_ty = self.typeOf(bin_op.lhs);
- const result: MCValue = switch (dst_ty.zigTypeTag(mod)) {
+ const result: MCValue = switch (dst_ty.zigTypeTag(zcu)) {
.Vector => return self.fail("TODO implement airMulWithOverflow for {}", .{dst_ty.fmt(pt)}),
.Int => result: {
- const dst_info = dst_ty.intInfo(mod);
+ const dst_info = dst_ty.intInfo(zcu);
if (dst_info.bits > 128 and dst_info.signedness == .unsigned) {
const slow_inc = self.hasFeature(.slow_incdec);
- const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ const abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
const limb_len = math.divCeil(u32, abi_size, 8) catch unreachable;
try self.spillRegisters(&.{ .rax, .rcx, .rdx });
@@ -4480,7 +4480,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.genInlineMemset(
dst_mcv.address(),
.{ .immediate = 0 },
- .{ .immediate = tuple_ty.abiSize(pt) },
+ .{ .immediate = tuple_ty.abiSize(zcu) },
.{},
);
const lhs_mcv = try self.resolveInst(bin_op.lhs);
@@ -4520,7 +4520,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.index = temp_regs[3].to64(),
.scale = .@"8",
.disp = dst_mcv.load_frame.off +
- @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))),
} },
}, .rdx);
try self.asmSetccRegister(.c, .cl);
@@ -4544,7 +4544,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.index = temp_regs[3].to64(),
.scale = .@"8",
.disp = dst_mcv.load_frame.off +
- @as(i32, @intCast(tuple_ty.structFieldOffset(0, pt))),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(0, zcu))),
} },
}, .rax);
try self.asmSetccRegister(.c, .ch);
@@ -4593,7 +4593,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.mod = .{ .rm = .{
.size = .byte,
.disp = dst_mcv.load_frame.off +
- @as(i32, @intCast(tuple_ty.structFieldOffset(1, pt))),
+ @as(i32, @intCast(tuple_ty.structFieldOffset(1, zcu))),
} },
}, Immediate.u(1));
self.performReloc(no_overflow);
@@ -4636,8 +4636,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const dst_mcv = try self.allocRegOrMem(inst, false);
try self.genSetMem(
.{ .frame = dst_mcv.load_frame.index },
- @intCast(tuple_ty.structFieldOffset(0, pt)),
- tuple_ty.structFieldType(0, mod),
+ @intCast(tuple_ty.structFieldOffset(0, zcu)),
+ tuple_ty.structFieldType(0, zcu),
result,
.{},
);
@@ -4648,8 +4648,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
);
try self.genSetMem(
.{ .frame = dst_mcv.load_frame.index },
- @intCast(tuple_ty.structFieldOffset(1, pt)),
- tuple_ty.structFieldType(1, mod),
+ @intCast(tuple_ty.structFieldOffset(1, zcu)),
+ tuple_ty.structFieldType(1, zcu),
.{ .eflags = .ne },
.{},
);
@@ -4760,15 +4760,15 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const dst_mcv = try self.allocRegOrMem(inst, false);
try self.genSetMem(
.{ .frame = dst_mcv.load_frame.index },
- @intCast(tuple_ty.structFieldOffset(0, pt)),
- tuple_ty.structFieldType(0, mod),
+ @intCast(tuple_ty.structFieldOffset(0, zcu)),
+ tuple_ty.structFieldType(0, zcu),
.{ .register_pair = .{ .rax, .rdx } },
.{},
);
try self.genSetMem(
.{ .frame = dst_mcv.load_frame.index },
- @intCast(tuple_ty.structFieldOffset(1, pt)),
- tuple_ty.structFieldType(1, mod),
+ @intCast(tuple_ty.structFieldOffset(1, zcu)),
+ tuple_ty.structFieldType(1, zcu),
.{ .register = tmp_regs[1] },
.{},
);
@@ -4800,7 +4800,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
self.eflags_inst = inst;
break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } };
} else {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, zcu));
try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
break :result .{ .load_frame = .{ .index = frame_index } };
},
@@ -4811,19 +4811,19 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
src_ty.fmt(pt), dst_ty.fmt(pt),
});
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(tuple_ty, zcu));
if (dst_info.bits >= lhs_active_bits + rhs_active_bits) {
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(0, pt)),
- tuple_ty.structFieldType(0, mod),
+ @intCast(tuple_ty.structFieldOffset(0, zcu)),
+ tuple_ty.structFieldType(0, zcu),
partial_mcv,
.{},
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(tuple_ty.structFieldOffset(1, pt)),
- tuple_ty.structFieldType(1, mod),
+ @intCast(tuple_ty.structFieldOffset(1, zcu)),
+ tuple_ty.structFieldType(1, zcu),
.{ .immediate = 0 }, // cc being set is impossible
.{},
);
@@ -4847,7 +4847,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
/// Quotient is saved in .rax and remainder in .rdx.
fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void {
const pt = self.pt;
- const abi_size: u32 = @intCast(ty.abiSize(pt));
+ const abi_size: u32 = @intCast(ty.abiSize(pt.zcu));
const bit_size: u32 = @intCast(self.regBitSize(ty));
if (abi_size > 8) {
return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{});
@@ -4897,9 +4897,9 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue
/// Clobbers .rax and .rdx registers.
fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
- const abi_size: u32 = @intCast(ty.abiSize(pt));
- const int_info = ty.intInfo(mod);
+ const zcu = pt.zcu;
+ const abi_size: u32 = @intCast(ty.abiSize(zcu));
+ const int_info = ty.intInfo(zcu);
const dividend = switch (lhs) {
.register => |reg| reg,
else => try self.copyToTmpRegister(ty, lhs),
@@ -4950,7 +4950,7 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa
fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const air_tags = self.air.instructions.items(.tag);
@@ -4958,7 +4958,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
const lhs_ty = self.typeOf(bin_op.lhs);
const rhs_ty = self.typeOf(bin_op.rhs);
const result: MCValue = result: {
- switch (lhs_ty.zigTypeTag(mod)) {
+ switch (lhs_ty.zigTypeTag(zcu)) {
.Int => {
try self.spillRegisters(&.{.rcx});
try self.register_manager.getKnownReg(.rcx, null);
@@ -4977,7 +4977,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
- const lhs_bits: u31 = @intCast(lhs_ty.bitSize(pt));
+ const lhs_bits: u31 = @intCast(lhs_ty.bitSize(zcu));
const tmp_ty = if (lhs_bits > 64) Type.usize else lhs_ty;
const off = frame_addr.off + (lhs_bits - 1) / 64 * 8;
try self.genSetReg(
@@ -5001,14 +5001,14 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
}
break :result dst_mcv;
},
- .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
- .Int => if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.childType(mod).intInfo(mod).bits) {
+ .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .Int => if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.childType(zcu).intInfo(zcu).bits) {
else => null,
- 16 => switch (lhs_ty.vectorLen(mod)) {
+ 16 => switch (lhs_ty.vectorLen(zcu)) {
else => null,
1...8 => switch (tag) {
else => unreachable,
- .shr, .shr_exact => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_w, .sra }
else
@@ -5025,18 +5025,18 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
},
9...16 => switch (tag) {
else => unreachable,
- .shr, .shr_exact => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_w, .sra } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .srl } else null,
},
.shl, .shl_exact => if (self.hasFeature(.avx2)) .{ .vp_w, .sll } else null,
},
},
- 32 => switch (lhs_ty.vectorLen(mod)) {
+ 32 => switch (lhs_ty.vectorLen(zcu)) {
else => null,
1...4 => switch (tag) {
else => unreachable,
- .shr, .shr_exact => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_d, .sra }
else
@@ -5053,18 +5053,18 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
},
5...8 => switch (tag) {
else => unreachable,
- .shr, .shr_exact => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_d, .sra } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .srl } else null,
},
.shl, .shl_exact => if (self.hasFeature(.avx2)) .{ .vp_d, .sll } else null,
},
},
- 64 => switch (lhs_ty.vectorLen(mod)) {
+ 64 => switch (lhs_ty.vectorLen(zcu)) {
else => null,
1...2 => switch (tag) {
else => unreachable,
- .shr, .shr_exact => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_q, .sra }
else
@@ -5081,7 +5081,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
},
3...4 => switch (tag) {
else => unreachable,
- .shr, .shr_exact => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .shr, .shr_exact => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_q, .sra } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_q, .srl } else null,
},
@@ -5089,10 +5089,10 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
},
},
})) |mir_tag| if (try self.air.value(bin_op.rhs, pt)) |rhs_val| {
- switch (mod.intern_pool.indexToKey(rhs_val.toIntern())) {
+ switch (zcu.intern_pool.indexToKey(rhs_val.toIntern())) {
.aggregate => |rhs_aggregate| switch (rhs_aggregate.storage) {
.repeated_elem => |rhs_elem| {
- const abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
+ const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
const lhs_mcv = try self.resolveInst(bin_op.lhs);
const dst_reg, const lhs_reg = if (lhs_mcv.isRegister() and
@@ -5112,7 +5112,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
self.register_manager.unlockReg(lock);
const shift_imm =
- Immediate.u(@intCast(Value.fromInterned(rhs_elem).toUnsignedInt(pt)));
+ Immediate.u(@intCast(Value.fromInterned(rhs_elem).toUnsignedInt(zcu)));
if (self.hasFeature(.avx)) try self.asmRegisterRegisterImmediate(
mir_tag,
registerAlias(dst_reg, abi_size),
@@ -5134,7 +5134,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
}
} else if (bin_op.rhs.toIndex()) |rhs_inst| switch (air_tags[@intFromEnum(rhs_inst)]) {
.splat => {
- const abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
+ const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
const lhs_mcv = try self.resolveInst(bin_op.lhs);
const dst_reg, const lhs_reg = if (lhs_mcv.isRegister() and
@@ -5161,7 +5161,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
const mask_mcv = try self.genTypedValue(Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = &([1]InternPool.Index{
- (try rhs_ty.childType(mod).maxIntScalar(pt, Type.u8)).toIntern(),
+ (try rhs_ty.childType(zcu).maxIntScalar(pt, Type.u8)).toIntern(),
} ++ [1]InternPool.Index{
(try pt.intValue(Type.u8, 0)).toIntern(),
} ** 15) },
@@ -5224,11 +5224,11 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
}
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
- const pt = self.pt;
+ const zcu = self.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = result: {
const pl_ty = self.typeOfIndex(inst);
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
const opt_mcv = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) {
@@ -5271,15 +5271,15 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = result: {
const dst_ty = self.typeOfIndex(inst);
const src_ty = self.typeOf(ty_op.operand);
- const opt_ty = src_ty.childType(mod);
+ const opt_ty = src_ty.childType(zcu);
const src_mcv = try self.resolveInst(ty_op.operand);
- if (opt_ty.optionalReprIsPayload(mod)) {
+ if (opt_ty.optionalReprIsPayload(zcu)) {
break :result if (self.liveness.isUnused(inst))
.unreach
else if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
@@ -5296,8 +5296,8 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
else
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
- const pl_ty = dst_ty.childType(mod);
- const pl_abi_size: i32 = @intCast(pl_ty.abiSize(pt));
+ const pl_ty = dst_ty.childType(zcu);
+ const pl_abi_size: i32 = @intCast(pl_ty.abiSize(zcu));
try self.genSetMem(
.{ .reg = dst_mcv.getReg().? },
pl_abi_size,
@@ -5312,23 +5312,23 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const err_union_ty = self.typeOf(ty_op.operand);
- const err_ty = err_union_ty.errorUnionSet(mod);
- const payload_ty = err_union_ty.errorUnionPayload(mod);
+ const err_ty = err_union_ty.errorUnionSet(zcu);
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
const operand = try self.resolveInst(ty_op.operand);
const result: MCValue = result: {
- if (err_ty.errorSetIsEmpty(mod)) {
+ if (err_ty.errorSetIsEmpty(zcu)) {
break :result MCValue{ .immediate = 0 };
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
break :result operand;
}
- const err_off = errUnionErrorOffset(payload_ty, pt);
+ const err_off = errUnionErrorOffset(payload_ty, zcu);
switch (operand) {
.register => |reg| {
// TODO reuse operand
@@ -5366,7 +5366,7 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
// *(E!T) -> E
fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_ty = self.typeOf(ty_op.operand);
@@ -5383,11 +5383,11 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
defer self.register_manager.unlockReg(dst_lock);
- const eu_ty = src_ty.childType(mod);
- const pl_ty = eu_ty.errorUnionPayload(mod);
- const err_ty = eu_ty.errorUnionSet(mod);
- const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
- const err_abi_size: u32 = @intCast(err_ty.abiSize(pt));
+ const eu_ty = src_ty.childType(zcu);
+ const pl_ty = eu_ty.errorUnionPayload(zcu);
+ const err_ty = eu_ty.errorUnionSet(zcu);
+ const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
+ const err_abi_size: u32 = @intCast(err_ty.abiSize(zcu));
try self.asmRegisterMemory(
.{ ._, .mov },
registerAlias(dst_reg, err_abi_size),
@@ -5414,7 +5414,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = result: {
const src_ty = self.typeOf(ty_op.operand);
@@ -5426,11 +5426,11 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
defer self.register_manager.unlockReg(src_lock);
- const eu_ty = src_ty.childType(mod);
- const pl_ty = eu_ty.errorUnionPayload(mod);
- const err_ty = eu_ty.errorUnionSet(mod);
- const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
- const err_abi_size: u32 = @intCast(err_ty.abiSize(pt));
+ const eu_ty = src_ty.childType(zcu);
+ const pl_ty = eu_ty.errorUnionPayload(zcu);
+ const err_ty = eu_ty.errorUnionSet(zcu);
+ const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
+ const err_abi_size: u32 = @intCast(err_ty.abiSize(zcu));
try self.asmMemoryImmediate(
.{ ._, .mov },
.{
@@ -5453,8 +5453,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
try self.asmRegisterMemory(
.{ ._, .lea },
registerAlias(dst_reg, dst_abi_size),
@@ -5475,13 +5475,13 @@ fn genUnwrapErrUnionPayloadMir(
err_union: MCValue,
) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
- const payload_ty = err_union_ty.errorUnionPayload(mod);
+ const zcu = pt.zcu;
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
const result: MCValue = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
- const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, pt));
+ const payload_off: u31 = @intCast(errUnionPayloadOffset(payload_ty, zcu));
switch (err_union) {
.load_frame => |frame_addr| break :result .{ .load_frame = .{
.index = frame_addr.index,
@@ -5525,12 +5525,12 @@ fn genUnwrapErrUnionPayloadPtrMir(
ptr_mcv: MCValue,
) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
- const err_union_ty = ptr_ty.childType(mod);
- const payload_ty = err_union_ty.errorUnionPayload(mod);
+ const zcu = pt.zcu;
+ const err_union_ty = ptr_ty.childType(zcu);
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
const result: MCValue = result: {
- const payload_off = errUnionPayloadOffset(payload_ty, pt);
+ const payload_off = errUnionPayloadOffset(payload_ty, zcu);
const result_mcv: MCValue = if (maybe_inst) |inst|
try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr_mcv)
else
@@ -5560,15 +5560,15 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = result: {
const pl_ty = self.typeOf(ty_op.operand);
- if (!pl_ty.hasRuntimeBits(pt)) break :result .{ .immediate = 1 };
+ if (!pl_ty.hasRuntimeBits(zcu)) break :result .{ .immediate = 1 };
const opt_ty = self.typeOfIndex(inst);
const pl_mcv = try self.resolveInst(ty_op.operand);
- const same_repr = opt_ty.optionalReprIsPayload(mod);
+ const same_repr = opt_ty.optionalReprIsPayload(zcu);
if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv;
const pl_lock: ?RegisterLock = switch (pl_mcv) {
@@ -5581,7 +5581,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
try self.genCopy(pl_ty, opt_mcv, pl_mcv, .{});
if (!same_repr) {
- const pl_abi_size: i32 = @intCast(pl_ty.abiSize(pt));
+ const pl_abi_size: i32 = @intCast(pl_ty.abiSize(zcu));
switch (opt_mcv) {
else => unreachable,
@@ -5615,20 +5615,20 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
/// T to E!T
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const eu_ty = ty_op.ty.toType();
- const pl_ty = eu_ty.errorUnionPayload(mod);
- const err_ty = eu_ty.errorUnionSet(mod);
+ const pl_ty = eu_ty.errorUnionPayload(zcu);
+ const err_ty = eu_ty.errorUnionSet(zcu);
const operand = try self.resolveInst(ty_op.operand);
const result: MCValue = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .{ .immediate = 0 };
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .{ .immediate = 0 };
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt));
- const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
- const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu));
+ const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
+ const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand, .{});
try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }, .{});
break :result .{ .load_frame = .{ .index = frame_index } };
@@ -5639,19 +5639,19 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
/// E to E!T
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const eu_ty = ty_op.ty.toType();
- const pl_ty = eu_ty.errorUnionPayload(mod);
- const err_ty = eu_ty.errorUnionSet(mod);
+ const pl_ty = eu_ty.errorUnionPayload(zcu);
+ const err_ty = eu_ty.errorUnionSet(zcu);
const result: MCValue = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result try self.resolveInst(ty_op.operand);
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result try self.resolveInst(ty_op.operand);
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, pt));
- const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
- const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(eu_ty, zcu));
+ const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
+ const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef, .{});
const operand = try self.resolveInst(ty_op.operand);
try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand, .{});
@@ -5719,7 +5719,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt.zcu));
try self.asmRegisterMemory(
.{ ._, .lea },
registerAlias(dst_reg, dst_abi_size),
@@ -5767,7 +5767,7 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi
fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const slice_ty = self.typeOf(lhs);
const slice_mcv = try self.resolveInst(lhs);
const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) {
@@ -5776,9 +5776,9 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
};
defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock);
- const elem_ty = slice_ty.childType(mod);
- const elem_size = elem_ty.abiSize(pt);
- const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
+ const elem_ty = slice_ty.childType(zcu);
+ const elem_size = elem_ty.abiSize(zcu);
+ const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu);
const index_ty = self.typeOf(rhs);
const index_mcv = try self.resolveInst(rhs);
@@ -5804,15 +5804,15 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = result: {
const elem_ty = self.typeOfIndex(inst);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
const slice_ty = self.typeOf(bin_op.lhs);
- const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
+ const slice_ptr_field_type = slice_ty.slicePtrFieldType(zcu);
const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs);
const dst_mcv = try self.allocRegOrMem(inst, false);
try self.load(dst_mcv, slice_ptr_field_type, elem_ptr);
@@ -5830,12 +5830,12 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const result: MCValue = result: {
const array_ty = self.typeOf(bin_op.lhs);
- const elem_ty = array_ty.childType(mod);
+ const elem_ty = array_ty.childType(zcu);
const array_mcv = try self.resolveInst(bin_op.lhs);
const array_lock: ?RegisterLock = switch (array_mcv) {
@@ -5853,7 +5853,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
try self.spillEflagsIfOccupied();
- if (array_ty.isVector(mod) and elem_ty.bitSize(pt) == 1) {
+ if (array_ty.isVector(zcu) and elem_ty.bitSize(zcu) == 1) {
const index_reg = switch (index_mcv) {
.register => |reg| reg,
else => try self.copyToTmpRegister(index_ty, index_mcv),
@@ -5866,7 +5866,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
index_reg.to64(),
),
.sse => {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, zcu));
try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv, .{});
try self.asmMemoryRegister(
.{ ._, .bt },
@@ -5904,14 +5904,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
break :result .{ .register = dst_reg };
}
- const elem_abi_size = elem_ty.abiSize(pt);
+ const elem_abi_size = elem_ty.abiSize(zcu);
const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
defer self.register_manager.unlockReg(addr_lock);
switch (array_mcv) {
.register => {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, zcu));
try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array_mcv, .{});
try self.asmRegisterMemory(
.{ ._, .lea },
@@ -5960,7 +5960,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
@@ -5968,10 +5968,10 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
// additional `mov` is needed at the end to get the actual value
const result = result: {
- const elem_ty = ptr_ty.elemType2(mod);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+ const elem_ty = ptr_ty.elemType2(zcu);
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
- const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt));
+ const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
const index_ty = self.typeOf(bin_op.rhs);
const index_mcv = try self.resolveInst(bin_op.rhs);
const index_lock = switch (index_mcv) {
@@ -6011,7 +6011,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -6026,15 +6026,15 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (base_ptr_lock) |lock| self.register_manager.unlockReg(lock);
- if (elem_ptr_ty.ptrInfo(mod).flags.vector_index != .none) {
+ if (elem_ptr_ty.ptrInfo(zcu).flags.vector_index != .none) {
break :result if (self.reuseOperand(inst, extra.lhs, 0, base_ptr_mcv))
base_ptr_mcv
else
try self.copyToRegisterWithInstTracking(inst, elem_ptr_ty, base_ptr_mcv);
}
- const elem_ty = base_ptr_ty.elemType2(mod);
- const elem_abi_size = elem_ty.abiSize(pt);
+ const elem_ty = base_ptr_ty.elemType2(zcu);
+ const elem_abi_size = elem_ty.abiSize(zcu);
const index_ty = self.typeOf(extra.rhs);
const index_mcv = try self.resolveInst(extra.rhs);
const index_lock: ?RegisterLock = switch (index_mcv) {
@@ -6057,12 +6057,12 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_union_ty = self.typeOf(bin_op.lhs);
- const union_ty = ptr_union_ty.childType(mod);
+ const union_ty = ptr_union_ty.childType(zcu);
const tag_ty = self.typeOf(bin_op.rhs);
- const layout = union_ty.unionGetLayout(pt);
+ const layout = union_ty.unionGetLayout(zcu);
if (layout.tag_size == 0) {
return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -6101,12 +6101,12 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
}
fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
- const pt = self.pt;
+ const zcu = self.pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const tag_ty = self.typeOfIndex(inst);
const union_ty = self.typeOf(ty_op.operand);
- const layout = union_ty.unionGetLayout(pt);
+ const layout = union_ty.unionGetLayout(zcu);
if (layout.tag_size == 0) {
return self.finishAir(inst, .none, .{ ty_op.operand, .none, .none });
@@ -6120,7 +6120,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
- const tag_abi_size = tag_ty.abiSize(pt);
+ const tag_abi_size = tag_ty.abiSize(zcu);
const dst_mcv: MCValue = blk: {
switch (operand) {
.load_frame => |frame_addr| {
@@ -6159,14 +6159,14 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
fn airClz(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = result: {
try self.spillEflagsIfOccupied();
const dst_ty = self.typeOfIndex(inst);
const src_ty = self.typeOf(ty_op.operand);
- if (src_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement airClz for {}", .{
+ if (src_ty.zigTypeTag(zcu) == .Vector) return self.fail("TODO implement airClz for {}", .{
src_ty.fmt(pt),
});
@@ -6186,8 +6186,8 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
defer self.register_manager.unlockReg(dst_lock);
- const abi_size: u31 = @intCast(src_ty.abiSize(pt));
- const src_bits: u31 = @intCast(src_ty.bitSize(pt));
+ const abi_size: u31 = @intCast(src_ty.abiSize(zcu));
+ const src_bits: u31 = @intCast(src_ty.bitSize(zcu));
const has_lzcnt = self.hasFeature(.lzcnt);
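// Counts wider than 64 bits (or 128 with lzcnt) fall back to a limb-by-limb loop.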
if (src_bits > @as(u32, if (has_lzcnt) 128 else 64)) {
const limbs_len = math.divCeil(u32, abi_size, 8) catch unreachable;
@@ -6297,7 +6297,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
}
assert(src_bits <= 64);
- const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(pt))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(zcu))), 2);
if (math.isPowerOfTwo(src_bits)) {
const imm_reg = try self.copyToTmpRegister(dst_ty, .{
.immediate = src_bits ^ (src_bits - 1),
@@ -6356,14 +6356,14 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = result: {
try self.spillEflagsIfOccupied();
const dst_ty = self.typeOfIndex(inst);
const src_ty = self.typeOf(ty_op.operand);
- if (src_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement airCtz for {}", .{
+ if (src_ty.zigTypeTag(zcu) == .Vector) return self.fail("TODO implement airCtz for {}", .{
src_ty.fmt(pt),
});
@@ -6383,8 +6383,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const abi_size: u31 = @intCast(src_ty.abiSize(pt));
- const src_bits: u31 = @intCast(src_ty.bitSize(pt));
+ const abi_size: u31 = @intCast(src_ty.abiSize(zcu));
+ const src_bits: u31 = @intCast(src_ty.bitSize(zcu));
const has_bmi = self.hasFeature(.bmi);
if (src_bits > @as(u32, if (has_bmi) 128 else 64)) {
const limbs_len = math.divCeil(u32, abi_size, 8) catch unreachable;
@@ -6505,7 +6505,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
try self.genBinOpMir(.{ ._, .bsf }, wide_ty, dst_mcv, .{ .register = wide_reg });
} else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv);
- const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(pt))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(zcu))), 2);
try self.asmCmovccRegisterRegister(
.z,
registerAlias(dst_reg, cmov_abi_size),
@@ -6518,14 +6518,14 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
fn airPopCount(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = result: {
try self.spillEflagsIfOccupied();
const src_ty = self.typeOf(ty_op.operand);
- const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
- if (src_ty.zigTypeTag(mod) == .Vector or src_abi_size > 16)
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
+ if (src_ty.zigTypeTag(zcu) == .Vector or src_abi_size > 16)
return self.fail("TODO implement airPopCount for {}", .{src_ty.fmt(pt)});
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -6562,7 +6562,7 @@ fn airPopCount(self: *Self, inst: Air.Inst.Index) !void {
mat_src_mcv
else
.{ .register = mat_src_mcv.register_pair[0] }, false);
- const src_info = src_ty.intInfo(mod);
+ const src_info = src_ty.intInfo(zcu);
const hi_ty = try pt.intType(src_info.signedness, (src_info.bits - 1) % 64 + 1);
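// The topmost limb is `(bits - 1) % 64 + 1` bits wide, so an exact multiple of 64 maps to a full 64-bit limb rather than 0.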
try self.genPopCount(tmp_regs[1], hi_ty, if (mat_src_mcv.isMemory())
mat_src_mcv.address().offset(8).deref()
@@ -6583,7 +6583,7 @@ fn genPopCount(
) !void {
const pt = self.pt;
- const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(pt.zcu));
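// `popcnt` has no byte-sized form, so 1-byte operands are widened to 32 bits.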
if (self.hasFeature(.popcnt)) return self.genBinOpMir(
.{ ._, .popcnt },
if (src_abi_size > 1) src_ty else Type.u32,
@@ -6674,11 +6674,11 @@ fn genByteSwap(
mem_ok: bool,
) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const has_movbe = self.hasFeature(.movbe);
- if (src_ty.zigTypeTag(mod) == .Vector) return self.fail(
+ if (src_ty.zigTypeTag(zcu) == .Vector) return self.fail(
"TODO implement genByteSwap for {}",
.{src_ty.fmt(pt)},
);
@@ -6689,7 +6689,7 @@ fn genByteSwap(
};
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
- const abi_size: u32 = @intCast(src_ty.abiSize(pt));
+ const abi_size: u32 = @intCast(src_ty.abiSize(zcu));
switch (abi_size) {
0 => unreachable,
1 => return if ((mem_ok or src_mcv.isRegister()) and
@@ -6838,35 +6838,35 @@ fn genByteSwap(
fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_ty = self.typeOf(ty_op.operand);
- const src_bits: u32 = @intCast(src_ty.bitSize(pt));
+ const src_bits: u32 = @intCast(src_ty.bitSize(zcu));
const src_mcv = try self.resolveInst(ty_op.operand);
const dst_mcv = try self.genByteSwap(inst, src_ty, src_mcv, true);
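// The swapped value lands in the high bits of the ABI-sized integer; shift it back down, arithmetically for signed types so the sign is preserved.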
try self.genShiftBinOpMir(
- .{ ._r, switch (if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned) {
+ .{ ._r, switch (if (src_ty.isAbiInt(zcu)) src_ty.intInfo(zcu).signedness else .unsigned) {
.signed => .sa,
.unsigned => .sh,
} },
src_ty,
dst_mcv,
if (src_bits > 256) Type.u16 else Type.u8,
- .{ .immediate = src_ty.abiSize(pt) * 8 - src_bits },
+ .{ .immediate = src_ty.abiSize(zcu) * 8 - src_bits },
);
return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const src_ty = self.typeOf(ty_op.operand);
- const abi_size: u32 = @intCast(src_ty.abiSize(pt));
- const bit_size: u32 = @intCast(src_ty.bitSize(pt));
+ const abi_size: u32 = @intCast(src_ty.abiSize(zcu));
+ const bit_size: u32 = @intCast(src_ty.bitSize(zcu));
const src_mcv = try self.resolveInst(ty_op.operand);
const dst_mcv = try self.genByteSwap(inst, src_ty, src_mcv, false);
@@ -6973,7 +6973,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
const extra_bits = abi_size * 8 - bit_size;
const signedness: std.builtin.Signedness =
- if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
+ if (src_ty.isAbiInt(zcu)) src_ty.intInfo(zcu).signedness else .unsigned;
if (extra_bits > 0) try self.genShiftBinOpMir(switch (signedness) {
.signed => .{ ._r, .sa },
.unsigned => .{ ._r, .sh },
@@ -6984,13 +6984,13 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const result = result: {
- const scalar_bits = ty.scalarType(mod).floatBits(self.target.*);
+ const scalar_bits = ty.scalarType(zcu).floatBits(self.target.*);
if (scalar_bits == 80) {
- if (ty.zigTypeTag(mod) != .Float) return self.fail("TODO implement floatSign for {}", .{
+ if (ty.zigTypeTag(zcu) != .Float) return self.fail("TODO implement floatSign for {}", .{
ty.fmt(pt),
});
@@ -7011,7 +7011,7 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
break :result dst_mcv;
}
- const abi_size: u32 = switch (ty.abiSize(pt)) {
+ const abi_size: u32 = switch (ty.abiSize(zcu)) {
1...16 => 16,
17...32 => 32,
else => return self.fail("TODO implement floatSign for {}", .{
@@ -7161,23 +7161,23 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: RoundMode) !void {
fn getRoundTag(self: *Self, ty: Type) ?Mir.Inst.FixedTag {
const pt = self.pt;
- const mod = pt.zcu;
- return if (self.hasFeature(.sse4_1)) switch (ty.zigTypeTag(mod)) {
+ const zcu = pt.zcu;
+ return if (self.hasFeature(.sse4_1)) switch (ty.zigTypeTag(zcu)) {
.Float => switch (ty.floatBits(self.target.*)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
- .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
- 32 => switch (ty.vectorLen(mod)) {
+ .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ 32 => switch (ty.vectorLen(zcu)) {
1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else .{ ._ps, .round },
5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else null,
else => null,
},
- 64 => switch (ty.vectorLen(mod)) {
+ 64 => switch (ty.vectorLen(zcu)) {
1 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
2 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else .{ ._pd, .round },
3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else null,
@@ -7194,10 +7194,10 @@ fn getRoundTag(self: *Self, ty: Type) ?Mir.Inst.FixedTag {
fn genRoundLibcall(self: *Self, ty: Type, src_mcv: MCValue, mode: RoundMode) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
if (self.getRoundTag(ty)) |_| return .none;
- if (ty.zigTypeTag(mod) != .Float)
+ if (ty.zigTypeTag(zcu) != .Float)
return self.fail("TODO implement genRound for {}", .{ty.fmt(pt)});
var callee_buf: ["__trunc?".len]u8 = undefined;
@@ -7223,7 +7223,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: Ro
const result = try self.genRoundLibcall(ty, src_mcv, mode);
return self.genSetReg(dst_reg, ty, result, .{});
};
- const abi_size: u32 = @intCast(ty.abiSize(pt));
+ const abi_size: u32 = @intCast(ty.abiSize(pt.zcu));
const dst_alias = registerAlias(dst_reg, abi_size);
switch (mir_tag[0]) {
.v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate(
@@ -7261,14 +7261,14 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: Ro
fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ty = self.typeOf(ty_op.operand);
const result: MCValue = result: {
- const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) {
+ const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(zcu)) {
else => null,
- .Int => switch (ty.abiSize(pt)) {
+ .Int => switch (ty.abiSize(zcu)) {
0 => unreachable,
1...8 => {
try self.spillEflagsIfOccupied();
@@ -7277,7 +7277,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
try self.genUnOpMir(.{ ._, .neg }, ty, dst_mcv);
- const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(pt))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(zcu))), 2);
switch (src_mcv) {
.register => |val_reg| try self.asmCmovccRegisterRegister(
.l,
@@ -7336,7 +7336,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
break :result dst_mcv;
},
else => {
- const abi_size: u31 = @intCast(ty.abiSize(pt));
+ const abi_size: u31 = @intCast(ty.abiSize(zcu));
const limb_len = math.divCeil(u31, abi_size, 8) catch unreachable;
const tmp_regs =
@@ -7397,11 +7397,11 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
},
},
.Float => return self.floatSign(inst, ty_op.operand, ty),
- .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+ .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
else => null,
- .Int => switch (ty.childType(mod).intInfo(mod).bits) {
+ .Int => switch (ty.childType(zcu).intInfo(zcu).bits) {
else => null,
- 8 => switch (ty.vectorLen(mod)) {
+ 8 => switch (ty.vectorLen(zcu)) {
else => null,
1...16 => if (self.hasFeature(.avx))
.{ .vp_b, .abs }
@@ -7411,7 +7411,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
null,
17...32 => if (self.hasFeature(.avx2)) .{ .vp_b, .abs } else null,
},
- 16 => switch (ty.vectorLen(mod)) {
+ 16 => switch (ty.vectorLen(zcu)) {
else => null,
1...8 => if (self.hasFeature(.avx))
.{ .vp_w, .abs }
@@ -7421,7 +7421,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
null,
9...16 => if (self.hasFeature(.avx2)) .{ .vp_w, .abs } else null,
},
- 32 => switch (ty.vectorLen(mod)) {
+ 32 => switch (ty.vectorLen(zcu)) {
else => null,
1...4 => if (self.hasFeature(.avx))
.{ .vp_d, .abs }
@@ -7436,7 +7436,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
},
}) orelse return self.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
- const abi_size: u32 = @intCast(ty.abiSize(pt));
+ const abi_size: u32 = @intCast(ty.abiSize(zcu));
const src_mcv = try self.resolveInst(ty_op.operand);
const dst_reg = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
src_mcv.getReg().?
@@ -7462,13 +7462,13 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const ty = self.typeOf(un_op);
- const abi_size: u32 = @intCast(ty.abiSize(pt));
+ const abi_size: u32 = @intCast(ty.abiSize(zcu));
const result: MCValue = result: {
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(zcu)) {
.Float => {
const float_bits = ty.floatBits(self.target.*);
if (switch (float_bits) {
@@ -7500,7 +7500,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) {
+ const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(zcu)) {
.Float => switch (ty.floatBits(self.target.*)) {
16 => {
assert(self.hasFeature(.f16c));
@@ -7522,9 +7522,9 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
64 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt },
else => unreachable,
},
- .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
- .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
- 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(mod)) {
+ .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(zcu)) {
1 => {
try self.asmRegisterRegister(
.{ .v_ps, .cvtph2 },
@@ -7575,13 +7575,13 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
},
else => null,
} else null,
- 32 => switch (ty.vectorLen(mod)) {
+ 32 => switch (ty.vectorLen(zcu)) {
1 => if (self.hasFeature(.avx)) .{ .v_ss, .sqrt } else .{ ._ss, .sqrt },
2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else .{ ._ps, .sqrt },
5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else null,
else => null,
},
- 64 => switch (ty.vectorLen(mod)) {
+ 64 => switch (ty.vectorLen(zcu)) {
1 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt },
2 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else .{ ._pd, .sqrt },
3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else null,
@@ -7708,14 +7708,14 @@ fn reuseOperandAdvanced(
fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
- const ptr_info = ptr_ty.ptrInfo(mod);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
const val_ty = Type.fromInterned(ptr_info.child);
- if (!val_ty.hasRuntimeBitsIgnoreComptime(pt)) return;
- const val_abi_size: u32 = @intCast(val_ty.abiSize(pt));
+ if (!val_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
+ const val_abi_size: u32 = @intCast(val_ty.abiSize(zcu));
- const val_bit_size: u32 = @intCast(val_ty.bitSize(pt));
+ const val_bit_size: u32 = @intCast(val_ty.bitSize(zcu));
const ptr_bit_off = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => unreachable,
@@ -7821,9 +7821,9 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
const pt = self.pt;
- const mod = pt.zcu;
- const dst_ty = ptr_ty.childType(mod);
- if (!dst_ty.hasRuntimeBitsIgnoreComptime(pt)) return;
+ const zcu = pt.zcu;
+ const dst_ty = ptr_ty.childType(zcu);
+ if (!dst_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
switch (ptr_mcv) {
.none,
.unreach,
@@ -7864,18 +7864,18 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const elem_ty = self.typeOfIndex(inst);
const result: MCValue = result: {
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
const ptr_ty = self.typeOf(ty_op.operand);
- const elem_size = elem_ty.abiSize(pt);
+ const elem_size = elem_ty.abiSize(zcu);
const elem_rc = self.regClassForType(elem_ty);
const ptr_rc = self.regClassForType(ptr_ty);
@@ -7888,14 +7888,14 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
else
try self.allocRegOrMem(inst, true);
- const ptr_info = ptr_ty.ptrInfo(mod);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
if (ptr_info.flags.vector_index != .none or ptr_info.packed_offset.host_size > 0) {
try self.packedLoad(dst_mcv, ptr_ty, ptr_mcv);
} else {
try self.load(dst_mcv, ptr_ty, ptr_mcv);
}
- if (elem_ty.isAbiInt(mod) and elem_size * 8 > elem_ty.bitSize(pt)) {
+ if (elem_ty.isAbiInt(zcu) and elem_size * 8 > elem_ty.bitSize(zcu)) {
const high_mcv: MCValue = switch (dst_mcv) {
.register => |dst_reg| .{ .register = dst_reg },
.register_pair => |dst_regs| .{ .register = dst_regs[1] },
@@ -7923,16 +7923,16 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
const pt = self.pt;
- const mod = pt.zcu;
- const ptr_info = ptr_ty.ptrInfo(mod);
+ const zcu = pt.zcu;
+ const ptr_info = ptr_ty.ptrInfo(zcu);
const src_ty = Type.fromInterned(ptr_info.child);
- if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) return;
+ if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
const limb_abi_size: u16 = @min(ptr_info.packed_offset.host_size, 8);
const limb_abi_bits = limb_abi_size * 8;
const limb_ty = try pt.intType(.unsigned, limb_abi_bits);
- const src_bit_size = src_ty.bitSize(pt);
+ const src_bit_size = src_ty.bitSize(zcu);
const ptr_bit_off = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => unreachable,
@@ -8029,9 +8029,9 @@ fn store(
opts: CopyOptions,
) InnerError!void {
const pt = self.pt;
- const mod = pt.zcu;
- const src_ty = ptr_ty.childType(mod);
- if (!src_ty.hasRuntimeBitsIgnoreComptime(pt)) return;
+ const zcu = pt.zcu;
+ const src_ty = ptr_ty.childType(zcu);
+ if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
switch (ptr_mcv) {
.none,
.unreach,
@@ -8072,7 +8072,7 @@ fn store(
fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
result: {
@@ -8086,7 +8086,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
const ptr_mcv = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.typeOf(bin_op.lhs);
- const ptr_info = ptr_ty.ptrInfo(mod);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
if (ptr_info.flags.vector_index != .none or ptr_info.packed_offset.host_size > 0) {
try self.packedStore(ptr_ty, ptr_mcv, src_mcv);
} else {
@@ -8111,16 +8111,16 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ptr_field_ty = self.typeOfIndex(inst);
const ptr_container_ty = self.typeOf(operand);
- const container_ty = ptr_container_ty.childType(mod);
+ const container_ty = ptr_container_ty.childType(zcu);
- const field_off: i32 = switch (container_ty.containerLayout(mod)) {
- .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, pt)),
- .@"packed" => @divExact(@as(i32, ptr_container_ty.ptrInfo(mod).packed_offset.bit_offset) +
- (if (mod.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, index) else 0) -
- ptr_field_ty.ptrInfo(mod).packed_offset.bit_offset, 8),
+ const field_off: i32 = switch (container_ty.containerLayout(zcu)) {
+ .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, zcu)),
+ .@"packed" => @divExact(@as(i32, ptr_container_ty.ptrInfo(zcu).packed_offset.bit_offset) +
+ (if (zcu.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, index) else 0) -
+ ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
};
const src_mcv = try self.resolveInst(operand);
@@ -8134,7 +8134,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
const result: MCValue = result: {
@@ -8143,15 +8143,15 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const container_ty = self.typeOf(operand);
const container_rc = self.regClassForType(container_ty);
- const field_ty = container_ty.structFieldType(index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) break :result .none;
+ const field_ty = container_ty.structFieldType(index, zcu);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
const field_rc = self.regClassForType(field_ty);
const field_is_gp = field_rc.supersetOf(abi.RegisterClass.gp);
const src_mcv = try self.resolveInst(operand);
- const field_off: u32 = switch (container_ty.containerLayout(mod)) {
- .auto, .@"extern" => @intCast(container_ty.structFieldOffset(extra.field_index, pt) * 8),
- .@"packed" => if (mod.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0,
+ const field_off: u32 = switch (container_ty.containerLayout(zcu)) {
+ .auto, .@"extern" => @intCast(container_ty.structFieldOffset(extra.field_index, zcu) * 8),
+ .@"packed" => if (zcu.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0,
};
switch (src_mcv) {
@@ -8182,7 +8182,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
);
}
if (abi.RegisterClass.gp.isSet(RegisterManager.indexOfRegIntoTracked(dst_reg).?) and
- container_ty.abiSize(pt) * 8 > field_ty.bitSize(pt))
+ container_ty.abiSize(zcu) * 8 > field_ty.bitSize(zcu))
try self.truncateRegister(field_ty, dst_reg);
break :result if (field_off == 0 or field_rc.supersetOf(abi.RegisterClass.gp))
@@ -8194,7 +8194,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const src_regs_lock = self.register_manager.lockRegsAssumeUnused(2, src_regs);
defer for (src_regs_lock) |lock| self.register_manager.unlockReg(lock);
- const field_bit_size: u32 = @intCast(field_ty.bitSize(pt));
+ const field_bit_size: u32 = @intCast(field_ty.bitSize(zcu));
const src_reg = if (field_off + field_bit_size <= 64)
src_regs[0]
else if (field_off >= 64)
@@ -8293,15 +8293,15 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
}
},
.load_frame => |frame_addr| {
- const field_abi_size: u32 = @intCast(field_ty.abiSize(pt));
+ const field_abi_size: u32 = @intCast(field_ty.abiSize(zcu));
if (field_off % 8 == 0) {
const field_byte_off = @divExact(field_off, 8);
const off_mcv = src_mcv.address().offset(@intCast(field_byte_off)).deref();
- const field_bit_size = field_ty.bitSize(pt);
+ const field_bit_size = field_ty.bitSize(zcu);
if (field_abi_size <= 8) {
const int_ty = try pt.intType(
- if (field_ty.isAbiInt(mod)) field_ty.intInfo(mod).signedness else .unsigned,
+ if (field_ty.isAbiInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned,
@intCast(field_bit_size),
);
@@ -8321,7 +8321,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv);
}
- const container_abi_size: u32 = @intCast(container_ty.abiSize(pt));
+ const container_abi_size: u32 = @intCast(container_ty.abiSize(zcu));
const dst_mcv = if (field_byte_off + field_abi_size <= container_abi_size and
self.reuseOperand(inst, operand, 0, src_mcv))
off_mcv
@@ -8423,17 +8423,17 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const inst_ty = self.typeOfIndex(inst);
- const parent_ty = inst_ty.childType(mod);
- const field_off: i32 = switch (parent_ty.containerLayout(mod)) {
- .auto, .@"extern" => @intCast(parent_ty.structFieldOffset(extra.field_index, pt)),
- .@"packed" => @divExact(@as(i32, inst_ty.ptrInfo(mod).packed_offset.bit_offset) +
- (if (mod.typeToStruct(parent_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0) -
- self.typeOf(extra.field_ptr).ptrInfo(mod).packed_offset.bit_offset, 8),
+ const parent_ty = inst_ty.childType(zcu);
+ const field_off: i32 = switch (parent_ty.containerLayout(zcu)) {
+ .auto, .@"extern" => @intCast(parent_ty.structFieldOffset(extra.field_index, zcu)),
+ .@"packed" => @divExact(@as(i32, inst_ty.ptrInfo(zcu).packed_offset.bit_offset) +
+ (if (zcu.typeToStruct(parent_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, extra.field_index) else 0) -
+ self.typeOf(extra.field_ptr).ptrInfo(zcu).packed_offset.bit_offset, 8),
};
const src_mcv = try self.resolveInst(extra.field_ptr);
@@ -8448,9 +8448,9 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const src_ty = self.typeOf(src_air);
- if (src_ty.zigTypeTag(mod) == .Vector)
+ if (src_ty.zigTypeTag(zcu) == .Vector)
return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(pt)});
var src_mcv = try self.resolveInst(src_air);
@@ -8486,14 +8486,14 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
};
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const abi_size: u16 = @intCast(src_ty.abiSize(pt));
+ const abi_size: u16 = @intCast(src_ty.abiSize(zcu));
switch (tag) {
.not => {
const limb_abi_size: u16 = @min(abi_size, 8);
const int_info = if (src_ty.ip_index == .bool_type)
std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 }
else
- src_ty.intInfo(mod);
+ src_ty.intInfo(zcu);
var byte_off: i32 = 0;
while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) {
const limb_bits: u16 = @intCast(@min(switch (int_info.signedness) {
@@ -8514,7 +8514,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
},
.neg => {
try self.genUnOpMir(.{ ._, .neg }, src_ty, dst_mcv);
- const bit_size = src_ty.intInfo(mod).bits;
+ const bit_size = src_ty.intInfo(zcu).bits;
if (abi_size * 8 > bit_size) {
if (dst_mcv.isRegister()) {
try self.truncateRegister(src_ty, dst_mcv.getReg().?);
@@ -8537,7 +8537,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void {
const pt = self.pt;
- const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ const abi_size: u32 = @intCast(dst_ty.abiSize(pt.zcu));
if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(pt) });
switch (dst_mcv) {
.none,
@@ -8586,8 +8586,9 @@ fn genShiftBinOpMir(
rhs_mcv: MCValue,
) !void {
const pt = self.pt;
- const abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
- const shift_abi_size: u32 = @intCast(rhs_ty.abiSize(pt));
+ const zcu = pt.zcu;
+ const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
+ const shift_abi_size: u32 = @intCast(rhs_ty.abiSize(zcu));
try self.spillEflagsIfOccupied();
if (abi_size > 16) {
@@ -9243,8 +9244,8 @@ fn genShiftBinOp(
rhs_ty: Type,
) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
- if (lhs_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement genShiftBinOp for {}", .{
+ const zcu = pt.zcu;
+ if (lhs_ty.zigTypeTag(zcu) == .Vector) return self.fail("TODO implement genShiftBinOp for {}", .{
lhs_ty.fmt(pt),
});
@@ -9274,7 +9275,7 @@ fn genShiftBinOp(
break :dst dst_mcv;
};
- const signedness = lhs_ty.intInfo(mod).signedness;
+ const signedness = lhs_ty.intInfo(zcu).signedness;
try self.genShiftBinOpMir(switch (air_tag) {
.shl, .shl_exact => switch (signedness) {
.signed => .{ ._l, .sa },
@@ -9302,13 +9303,13 @@ fn genMulDivBinOp(
rhs_mcv: MCValue,
) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
- if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) return self.fail(
+ const zcu = pt.zcu;
+ if (dst_ty.zigTypeTag(zcu) == .Vector or dst_ty.zigTypeTag(zcu) == .Float) return self.fail(
"TODO implement genMulDivBinOp for {s} from {} to {}",
.{ @tagName(tag), src_ty.fmt(pt), dst_ty.fmt(pt) },
);
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
- const src_abi_size: u32 = @intCast(src_ty.abiSize(pt));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
assert(self.register_manager.isRegFree(.rax));
assert(self.register_manager.isRegFree(.rcx));
@@ -9384,7 +9385,7 @@ fn genMulDivBinOp(
.mul, .mul_wrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2,
.div_trunc, .div_floor, .div_exact, .rem, .mod => dst_abi_size != src_abi_size,
} or src_abi_size > 8) {
- const src_info = src_ty.intInfo(mod);
+ const src_info = src_ty.intInfo(zcu);
switch (tag) {
.mul, .mul_wrap => {
const slow_inc = self.hasFeature(.slow_incdec);
@@ -9555,7 +9556,7 @@ fn genMulDivBinOp(
const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);
- const signedness = ty.intInfo(mod).signedness;
+ const signedness = ty.intInfo(zcu).signedness;
switch (tag) {
.mul,
.mul_wrap,
@@ -9714,10 +9715,10 @@ fn genBinOp(
rhs_air: Air.Inst.Ref,
) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const lhs_ty = self.typeOf(lhs_air);
const rhs_ty = self.typeOf(rhs_air);
- const abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
+ const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
if (lhs_ty.isRuntimeFloat()) libcall: {
const float_bits = lhs_ty.floatBits(self.target.*);
@@ -9889,23 +9890,23 @@ fn genBinOp(
};
}
- const sse_op = switch (lhs_ty.zigTypeTag(mod)) {
+ const sse_op = switch (lhs_ty.zigTypeTag(zcu)) {
else => false,
.Float => true,
- .Vector => switch (lhs_ty.childType(mod).toIntern()) {
+ .Vector => switch (lhs_ty.childType(zcu).toIntern()) {
.bool_type, .u1_type => false,
else => true,
},
};
- if (sse_op and ((lhs_ty.scalarType(mod).isRuntimeFloat() and
- lhs_ty.scalarType(mod).floatBits(self.target.*) == 80) or
- lhs_ty.abiSize(pt) > @as(u6, if (self.hasFeature(.avx)) 32 else 16)))
+ if (sse_op and ((lhs_ty.scalarType(zcu).isRuntimeFloat() and
+ lhs_ty.scalarType(zcu).floatBits(self.target.*) == 80) or
+ lhs_ty.abiSize(zcu) > @as(u6, if (self.hasFeature(.avx)) 32 else 16)))
return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(pt) });
const maybe_mask_reg = switch (air_tag) {
else => null,
.rem, .mod => unreachable,
- .max, .min => if (lhs_ty.scalarType(mod).isRuntimeFloat()) registerAlias(
+ .max, .min => if (lhs_ty.scalarType(zcu).isRuntimeFloat()) registerAlias(
if (!self.hasFeature(.avx) and self.hasFeature(.sse4_1)) mask: {
try self.register_manager.getKnownReg(.xmm0, null);
break :mask .xmm0;
@@ -9917,8 +9918,8 @@ fn genBinOp(
if (maybe_mask_reg) |mask_reg| self.register_manager.lockRegAssumeUnused(mask_reg) else null;
defer if (mask_lock) |lock| self.register_manager.unlockReg(lock);
- const ordered_air: [2]Air.Inst.Ref = if (lhs_ty.isVector(mod) and
- switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+ const ordered_air: [2]Air.Inst.Ref = if (lhs_ty.isVector(zcu) and
+ switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
.Bool => false,
.Int => switch (air_tag) {
.cmp_lt, .cmp_gte => true,
@@ -9931,7 +9932,7 @@ fn genBinOp(
else => unreachable,
}) .{ rhs_air, lhs_air } else .{ lhs_air, rhs_air };
- if (lhs_ty.isAbiInt(mod)) for (ordered_air) |op_air| {
+ if (lhs_ty.isAbiInt(zcu)) for (ordered_air) |op_air| {
switch (try self.resolveInst(op_air)) {
.register => |op_reg| switch (op_reg.class()) {
.sse => try self.register_manager.getReg(op_reg, null),
@@ -10056,7 +10057,7 @@ fn genBinOp(
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
- const elem_size = lhs_ty.elemType2(mod).abiSize(pt);
+ const elem_size = lhs_ty.elemType2(zcu).abiSize(zcu);
try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size });
try self.genBinOpMir(
switch (air_tag) {
@@ -10112,7 +10113,7 @@ fn genBinOp(
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
- const signed = lhs_ty.isSignedInt(mod);
+ const signed = lhs_ty.isSignedInt(zcu);
const cc: Condition = switch (air_tag) {
.min => if (signed) .nl else .nb,
.max => if (signed) .nge else .nae,
@@ -10188,7 +10189,7 @@ fn genBinOp(
try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, dst_mcv, mat_src_mcv);
- const int_info = lhs_ty.intInfo(mod);
+ const int_info = lhs_ty.intInfo(zcu);
const cc: Condition = switch (int_info.signedness) {
.unsigned => switch (air_tag) {
.min => .a,
@@ -10202,7 +10203,7 @@ fn genBinOp(
},
};
- const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(pt))), 2);
+ const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(zcu))), 2);
const tmp_reg = switch (dst_mcv) {
.register => |reg| reg,
else => try self.copyToTmpRegister(lhs_ty, dst_mcv),
@@ -10271,7 +10272,7 @@ fn genBinOp(
},
.cmp_eq, .cmp_neq => {
- assert(lhs_ty.isVector(mod) and lhs_ty.childType(mod).toIntern() == .bool_type);
+ assert(lhs_ty.isVector(zcu) and lhs_ty.childType(zcu).toIntern() == .bool_type);
try self.genBinOpMir(.{ ._, .xor }, lhs_ty, dst_mcv, src_mcv);
switch (air_tag) {
.cmp_eq => try self.genUnOpMir(.{ ._, .not }, lhs_ty, dst_mcv),
@@ -10288,7 +10289,7 @@ fn genBinOp(
}
const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size);
- const mir_tag = @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
+ const mir_tag = @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
else => unreachable,
.Float => switch (lhs_ty.floatBits(self.target.*)) {
16 => {
@@ -10383,10 +10384,10 @@ fn genBinOp(
80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+ .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
else => null,
- .Int => switch (lhs_ty.childType(mod).intInfo(mod).bits) {
- 8 => switch (lhs_ty.vectorLen(mod)) {
+ .Int => switch (lhs_ty.childType(zcu).intInfo(zcu).bits) {
+ 8 => switch (lhs_ty.vectorLen(zcu)) {
1...16 => switch (air_tag) {
.add,
.add_wrap,
@@ -10400,7 +10401,7 @@ fn genBinOp(
.{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
- .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_b, .mins }
else if (self.hasFeature(.sse4_1))
@@ -10414,7 +10415,7 @@ fn genBinOp(
else
null,
},
- .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_b, .maxs }
else if (self.hasFeature(.sse4_1))
@@ -10432,7 +10433,7 @@ fn genBinOp(
.cmp_lte,
.cmp_gte,
.cmp_gt,
- => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_b, .cmpgt }
else
@@ -10454,11 +10455,11 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
- .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null,
.unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null,
},
- .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null,
},
@@ -10466,7 +10467,7 @@ fn genBinOp(
.cmp_lte,
.cmp_gte,
.cmp_gt,
- => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx)) .{ .vp_b, .cmpgt } else null,
.unsigned => null,
},
@@ -10477,7 +10478,7 @@ fn genBinOp(
},
else => null,
},
- 16 => switch (lhs_ty.vectorLen(mod)) {
+ 16 => switch (lhs_ty.vectorLen(zcu)) {
1...8 => switch (air_tag) {
.add,
.add_wrap,
@@ -10494,7 +10495,7 @@ fn genBinOp(
.{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
- .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_w, .mins }
else
@@ -10504,7 +10505,7 @@ fn genBinOp(
else
.{ .p_w, .minu },
},
- .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_w, .maxs }
else
@@ -10518,7 +10519,7 @@ fn genBinOp(
.cmp_lte,
.cmp_gte,
.cmp_gt,
- => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_w, .cmpgt }
else
@@ -10543,11 +10544,11 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
- .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null,
.unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null,
},
- .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null,
},
@@ -10555,7 +10556,7 @@ fn genBinOp(
.cmp_lte,
.cmp_gte,
.cmp_gt,
- => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx)) .{ .vp_w, .cmpgt } else null,
.unsigned => null,
},
@@ -10566,7 +10567,7 @@ fn genBinOp(
},
else => null,
},
- 32 => switch (lhs_ty.vectorLen(mod)) {
+ 32 => switch (lhs_ty.vectorLen(zcu)) {
1...4 => switch (air_tag) {
.add,
.add_wrap,
@@ -10588,7 +10589,7 @@ fn genBinOp(
.{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
- .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_d, .mins }
else if (self.hasFeature(.sse4_1))
@@ -10602,7 +10603,7 @@ fn genBinOp(
else
null,
},
- .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_d, .maxs }
else if (self.hasFeature(.sse4_1))
@@ -10620,7 +10621,7 @@ fn genBinOp(
.cmp_lte,
.cmp_gte,
.cmp_gt,
- => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_d, .cmpgt }
else
@@ -10645,11 +10646,11 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
- .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .min => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null,
.unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null,
},
- .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .max => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null,
},
@@ -10657,7 +10658,7 @@ fn genBinOp(
.cmp_lte,
.cmp_gte,
.cmp_gt,
- => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx)) .{ .vp_d, .cmpgt } else null,
.unsigned => null,
},
@@ -10668,7 +10669,7 @@ fn genBinOp(
},
else => null,
},
- 64 => switch (lhs_ty.vectorLen(mod)) {
+ 64 => switch (lhs_ty.vectorLen(zcu)) {
1...2 => switch (air_tag) {
.add,
.add_wrap,
@@ -10686,7 +10687,7 @@ fn genBinOp(
.cmp_lte,
.cmp_gte,
.cmp_gt,
- => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_q, .cmpgt }
else if (self.hasFeature(.sse4_2))
@@ -10722,7 +10723,7 @@ fn genBinOp(
.cmp_lte,
.cmp_gt,
.cmp_gte,
- => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ => switch (lhs_ty.childType(zcu).intInfo(zcu).signedness) {
.signed => if (self.hasFeature(.avx)) .{ .vp_d, .cmpgt } else null,
.unsigned => null,
},
@@ -10732,10 +10733,10 @@ fn genBinOp(
},
else => null,
},
- .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+ .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
16 => tag: {
assert(self.hasFeature(.f16c));
- switch (lhs_ty.vectorLen(mod)) {
+ switch (lhs_ty.vectorLen(zcu)) {
1 => {
const tmp_reg = (try self.register_manager.allocReg(
null,
@@ -10923,7 +10924,7 @@ fn genBinOp(
else => break :tag null,
}
},
- 32 => switch (lhs_ty.vectorLen(mod)) {
+ 32 => switch (lhs_ty.vectorLen(zcu)) {
1 => switch (air_tag) {
.add => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add },
.sub => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub },
@@ -10976,7 +10977,7 @@ fn genBinOp(
} else null,
else => null,
},
- 64 => switch (lhs_ty.vectorLen(mod)) {
+ 64 => switch (lhs_ty.vectorLen(zcu)) {
1 => switch (air_tag) {
.add => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add },
.sub => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub },
@@ -11052,7 +11053,7 @@ fn genBinOp(
mir_tag,
dst_reg,
lhs_reg,
- try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(mod)) {
+ try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
else => Memory.Size.fromSize(abi_size),
.Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
}),
@@ -11070,7 +11071,7 @@ fn genBinOp(
if (src_mcv.isMemory()) try self.asmRegisterMemory(
mir_tag,
dst_reg,
- try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(mod)) {
+ try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
else => Memory.Size.fromSize(abi_size),
.Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
}),
@@ -11098,7 +11099,7 @@ fn genBinOp(
mir_tag,
dst_reg,
lhs_reg,
- try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(mod)) {
+ try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
else => Memory.Size.fromSize(abi_size),
.Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
}),
@@ -11118,7 +11119,7 @@ fn genBinOp(
if (src_mcv.isMemory()) try self.asmRegisterMemoryImmediate(
mir_tag,
dst_reg,
- try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(mod)) {
+ try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
else => Memory.Size.fromSize(abi_size),
.Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
}),
@@ -11151,21 +11152,21 @@ fn genBinOp(
const rhs_copy_reg = registerAlias(src_mcv.getReg().?, abi_size);
try self.asmRegisterRegisterRegisterImmediate(
- @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
+ @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
.Float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .cmp },
64 => .{ .v_sd, .cmp },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
- .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
- 32 => switch (lhs_ty.vectorLen(mod)) {
+ .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ 32 => switch (lhs_ty.vectorLen(zcu)) {
1 => .{ .v_ss, .cmp },
2...8 => .{ .v_ps, .cmp },
else => null,
},
- 64 => switch (lhs_ty.vectorLen(mod)) {
+ 64 => switch (lhs_ty.vectorLen(zcu)) {
1 => .{ .v_sd, .cmp },
2...4 => .{ .v_pd, .cmp },
else => null,
@@ -11185,20 +11186,20 @@ fn genBinOp(
Immediate.u(3), // unord
);
try self.asmRegisterRegisterRegisterRegister(
- @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
+ @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
.Float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ .v_ps, .blendv },
64 => .{ .v_pd, .blendv },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
- .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
- 32 => switch (lhs_ty.vectorLen(mod)) {
+ .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ 32 => switch (lhs_ty.vectorLen(zcu)) {
1...8 => .{ .v_ps, .blendv },
else => null,
},
- 64 => switch (lhs_ty.vectorLen(mod)) {
+ 64 => switch (lhs_ty.vectorLen(zcu)) {
1...4 => .{ .v_pd, .blendv },
else => null,
},
@@ -11219,21 +11220,21 @@ fn genBinOp(
} else {
const has_blend = self.hasFeature(.sse4_1);
try self.asmRegisterRegisterImmediate(
- @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
+ @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
.Float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ ._ss, .cmp },
64 => .{ ._sd, .cmp },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
- .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
- 32 => switch (lhs_ty.vectorLen(mod)) {
+ .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ 32 => switch (lhs_ty.vectorLen(zcu)) {
1 => .{ ._ss, .cmp },
2...4 => .{ ._ps, .cmp },
else => null,
},
- 64 => switch (lhs_ty.vectorLen(mod)) {
+ 64 => switch (lhs_ty.vectorLen(zcu)) {
1 => .{ ._sd, .cmp },
2 => .{ ._pd, .cmp },
else => null,
@@ -11252,20 +11253,20 @@ fn genBinOp(
Immediate.u(if (has_blend) 3 else 7), // unord, ord
);
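// Compare-predicate immediates: 3 selects unordered (NaN) lanes, 7 selects ordered lanes.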
if (has_blend) try self.asmRegisterRegisterRegister(
- @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
+ @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
.Float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ ._ps, .blendv },
64 => .{ ._pd, .blendv },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
- .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
- 32 => switch (lhs_ty.vectorLen(mod)) {
+ .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ 32 => switch (lhs_ty.vectorLen(zcu)) {
1...4 => .{ ._ps, .blendv },
else => null,
},
- 64 => switch (lhs_ty.vectorLen(mod)) {
+ 64 => switch (lhs_ty.vectorLen(zcu)) {
1...2 => .{ ._pd, .blendv },
else => null,
},
@@ -11282,20 +11283,20 @@ fn genBinOp(
lhs_copy_reg.?,
mask_reg,
) else {
- const mir_fixes = @as(?Mir.Inst.Fixes, switch (lhs_ty.zigTypeTag(mod)) {
+ const mir_fixes = @as(?Mir.Inst.Fixes, switch (lhs_ty.zigTypeTag(zcu)) {
.Float => switch (lhs_ty.floatBits(self.target.*)) {
32 => ._ps,
64 => ._pd,
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
- .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
- 32 => switch (lhs_ty.vectorLen(mod)) {
+ .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ 32 => switch (lhs_ty.vectorLen(zcu)) {
1...4 => ._ps,
else => null,
},
- 64 => switch (lhs_ty.vectorLen(mod)) {
+ 64 => switch (lhs_ty.vectorLen(zcu)) {
1...2 => ._pd,
else => null,
},
@@ -11314,7 +11315,7 @@ fn genBinOp(
}
},
.cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq => {
- switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+ switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
.Int => switch (air_tag) {
.cmp_lt,
.cmp_eq,
@@ -11395,8 +11396,8 @@ fn genBinOpMir(
src_mcv: MCValue,
) !void {
const pt = self.pt;
- const mod = pt.zcu;
- const abi_size: u32 = @intCast(ty.abiSize(pt));
+ const zcu = pt.zcu;
+ const abi_size: u32 = @intCast(ty.abiSize(zcu));
try self.spillEflagsIfOccupied();
switch (dst_mcv) {
.none,
@@ -11643,7 +11644,7 @@ fn genBinOpMir(
defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock);
const ty_signedness =
- if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned;
+ if (ty.isAbiInt(zcu)) ty.intInfo(zcu).signedness else .unsigned;
const limb_ty = if (abi_size <= 8) ty else switch (ty_signedness) {
.signed => Type.usize,
.unsigned => Type.isize,
@@ -11820,7 +11821,7 @@ fn genBinOpMir(
/// Does not support byte-size operands.
fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void {
const pt = self.pt;
- const abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ const abi_size: u32 = @intCast(dst_ty.abiSize(pt.zcu));
try self.spillEflagsIfOccupied();
switch (dst_mcv) {
.none,
@@ -12009,7 +12010,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
try self.genInlineMemset(
dst_mcv.address().offset(@intFromBool(regs_frame_addr.regs > 0)),
.{ .immediate = 0 },
- .{ .immediate = arg_ty.abiSize(pt) - @intFromBool(regs_frame_addr.regs > 0) },
+ .{ .immediate = arg_ty.abiSize(zcu) - @intFromBool(regs_frame_addr.regs > 0) },
.{},
);
@@ -12296,7 +12297,7 @@ fn genCall(self: *Self, info: union(enum) {
try reg_locks.appendSlice(&self.register_manager.lockRegs(2, regs));
},
.indirect => |reg_off| {
- frame_index.* = try self.allocFrameIndex(FrameAlloc.initType(arg_ty, pt));
+ frame_index.* = try self.allocFrameIndex(FrameAlloc.initType(arg_ty, zcu));
try self.genSetMem(.{ .frame = frame_index.* }, 0, arg_ty, src_arg, .{});
try self.register_manager.getReg(reg_off.reg, null);
try reg_locks.append(self.register_manager.lockReg(reg_off.reg));
@@ -12368,7 +12369,7 @@ fn genCall(self: *Self, info: union(enum) {
.none, .unreach => {},
.indirect => |reg_off| {
const ret_ty = Type.fromInterned(fn_info.return_type);
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ret_ty, pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ret_ty, zcu));
try self.genSetReg(reg_off.reg, Type.usize, .{
.lea_frame = .{ .index = frame_index, .off = -reg_off.off },
}, .{});
@@ -12383,14 +12384,14 @@ fn genCall(self: *Self, info: union(enum) {
.none, .load_frame => {},
.register => |dst_reg| switch (fn_info.cc) {
else => try self.genSetReg(
- registerAlias(dst_reg, @intCast(arg_ty.abiSize(pt))),
+ registerAlias(dst_reg, @intCast(arg_ty.abiSize(zcu))),
arg_ty,
src_arg,
.{},
),
.C, .SysV, .Win64 => {
const promoted_ty = self.promoteInt(arg_ty);
- const promoted_abi_size: u32 = @intCast(promoted_ty.abiSize(pt));
+ const promoted_abi_size: u32 = @intCast(promoted_ty.abiSize(zcu));
const dst_alias = registerAlias(dst_reg, promoted_abi_size);
try self.genSetReg(dst_alias, promoted_ty, src_arg, .{});
if (promoted_ty.toIntern() != arg_ty.toIntern())
@@ -12514,10 +12515,10 @@ fn genCall(self: *Self, info: union(enum) {
fn airRet(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const ret_ty = self.fn_type.fnReturnType(mod);
+ const ret_ty = self.fn_type.fnReturnType(zcu);
switch (self.ret_mcv.short) {
.none => {},
.register,
@@ -12570,7 +12571,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
var ty = self.typeOf(bin_op.lhs);
var null_compare: ?Mir.Inst.Index = null;
@@ -12602,7 +12603,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
};
defer for (rhs_locks) |rhs_lock| if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(zcu)) {
.Float => {
const float_bits = ty.floatBits(self.target.*);
if (switch (float_bits) {
@@ -12638,11 +12639,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
};
}
},
- .Optional => if (!ty.optionalReprIsPayload(mod)) {
+ .Optional => if (!ty.optionalReprIsPayload(zcu)) {
const opt_ty = ty;
- const opt_abi_size: u31 = @intCast(opt_ty.abiSize(pt));
- ty = opt_ty.optionalChild(mod);
- const payload_abi_size: u31 = @intCast(ty.abiSize(pt));
+ const opt_abi_size: u31 = @intCast(opt_ty.abiSize(zcu));
+ ty = opt_ty.optionalChild(zcu);
+ const payload_abi_size: u31 = @intCast(ty.abiSize(zcu));
const temp_lhs_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const temp_lhs_lock = self.register_manager.lockRegAssumeUnused(temp_lhs_reg);
@@ -12699,9 +12700,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
else => {},
}
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(zcu)) {
else => {
- const abi_size: u16 = @intCast(ty.abiSize(pt));
+ const abi_size: u16 = @intCast(ty.abiSize(zcu));
const may_flip: enum {
may_flip,
must_flip,
@@ -12734,7 +12735,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
break :result Condition.fromCompareOperator(
- if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned,
+ if (ty.isAbiInt(zcu)) ty.intInfo(zcu).signedness else .unsigned,
result_op: {
const flipped_op = if (flipped) op.reverse() else op;
if (abi_size > 8) switch (flipped_op) {
@@ -13029,6 +13030,7 @@ fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
+ const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
@@ -13040,7 +13042,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
try self.spillEflagsIfOccupied();
const op_ty = self.typeOf(un_op);
- const op_abi_size: u32 = @intCast(op_ty.abiSize(pt));
+ const op_abi_size: u32 = @intCast(op_ty.abiSize(zcu));
const op_mcv = try self.resolveInst(un_op);
const dst_reg = switch (op_mcv) {
.register => |reg| reg,
@@ -13164,7 +13166,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !Mir.Inst.Index {
const pt = self.pt;
- const abi_size = ty.abiSize(pt);
+ const abi_size = ty.abiSize(pt.zcu);
switch (mcv) {
.eflags => |cc| {
// Here we map the opposites since the jump is to the false branch.
@@ -13237,7 +13239,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
switch (opt_mcv) {
.register_overflow => |ro| return .{ .eflags = ro.eflags.negate() },
else => {},
@@ -13245,12 +13247,12 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
try self.spillEflagsIfOccupied();
- const pl_ty = opt_ty.optionalChild(mod);
+ const pl_ty = opt_ty.optionalChild(zcu);
- const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
- .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
+ const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(zcu))
+ .{ .off = 0, .ty = if (pl_ty.isSlice(zcu)) pl_ty.slicePtrFieldType(zcu) else pl_ty }
else
- .{ .off = @intCast(pl_ty.abiSize(pt)), .ty = Type.bool };
+ .{ .off = @intCast(pl_ty.abiSize(zcu)), .ty = Type.bool };
self.eflags_inst = inst;
switch (opt_mcv) {
@@ -13279,14 +13281,14 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
.register => |opt_reg| {
if (some_info.off == 0) {
- const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
+ const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
const alias_reg = registerAlias(opt_reg, some_abi_size);
assert(some_abi_size * 8 == alias_reg.bitSize());
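// With the payload-carrying representation (off == 0), null is all-zero
// bits, so `test r, r` sets ZF exactly when the optional is null.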
try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg);
return .{ .eflags = .z };
}
assert(some_info.ty.ip_index == .bool_type);
- const opt_abi_size: u32 = @intCast(opt_ty.abiSize(pt));
+ const opt_abi_size: u32 = @intCast(opt_ty.abiSize(zcu));
try self.asmRegisterImmediate(
.{ ._, .bt },
registerAlias(opt_reg, opt_abi_size),
@@ -13306,7 +13308,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
defer self.register_manager.unlockReg(addr_reg_lock);
try self.genSetReg(addr_reg, Type.usize, opt_mcv.address(), .{});
- const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
+ const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
try self.asmMemoryImmediate(
.{ ._, .cmp },
.{
@@ -13322,7 +13324,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
},
.indirect, .load_frame => {
- const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
+ const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
try self.asmMemoryImmediate(
.{ ._, .cmp },
switch (opt_mcv) {
@@ -13351,16 +13353,16 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
- const opt_ty = ptr_ty.childType(mod);
- const pl_ty = opt_ty.optionalChild(mod);
+ const zcu = pt.zcu;
+ const opt_ty = ptr_ty.childType(zcu);
+ const pl_ty = opt_ty.optionalChild(zcu);
try self.spillEflagsIfOccupied();
- const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
- .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
+ const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(zcu))
+ .{ .off = 0, .ty = if (pl_ty.isSlice(zcu)) pl_ty.slicePtrFieldType(zcu) else pl_ty }
else
- .{ .off = @intCast(pl_ty.abiSize(pt)), .ty = Type.bool };
+ .{ .off = @intCast(pl_ty.abiSize(zcu)), .ty = Type.bool };
const ptr_reg = switch (ptr_mcv) {
.register => |reg| reg,
@@ -13369,7 +13371,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
const ptr_lock = self.register_manager.lockReg(ptr_reg);
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
+ const some_abi_size: u32 = @intCast(some_info.ty.abiSize(zcu));
try self.asmMemoryImmediate(
.{ ._, .cmp },
.{
@@ -13388,13 +13390,13 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
- const err_ty = eu_ty.errorUnionSet(mod);
- if (err_ty.errorSetIsEmpty(mod)) return MCValue{ .immediate = 0 }; // always false
+ const zcu = pt.zcu;
+ const err_ty = eu_ty.errorUnionSet(zcu);
+ if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false
try self.spillEflagsIfOccupied();
- const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), pt));
+ const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu));
switch (eu_mcv) {
.register => |reg| {
const eu_lock = self.register_manager.lockReg(reg);
@@ -13437,10 +13439,10 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, eu_ty: Type, eu_mcv: MCValue)
fn isErrPtr(self: *Self, maybe_inst: ?Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
const pt = self.pt;
- const mod = pt.zcu;
- const eu_ty = ptr_ty.childType(mod);
- const err_ty = eu_ty.errorUnionSet(mod);
- if (err_ty.errorSetIsEmpty(mod)) return MCValue{ .immediate = 0 }; // always false
+ const zcu = pt.zcu;
+ const eu_ty = ptr_ty.childType(zcu);
+ const err_ty = eu_ty.errorUnionSet(zcu);
+ if (err_ty.errorSetIsEmpty(zcu)) return MCValue{ .immediate = 0 }; // always false
try self.spillEflagsIfOccupied();
@@ -13451,7 +13453,7 @@ fn isErrPtr(self: *Self, maybe_inst: ?Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCV
const ptr_lock = self.register_manager.lockReg(ptr_reg);
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), pt));
+ const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(zcu), zcu));
try self.asmMemoryImmediate(
.{ ._, .cmp },
.{
@@ -13724,12 +13726,12 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) void {
}
fn airBr(self: *Self, inst: Air.Inst.Index) !void {
- const pt = self.pt;
+ const zcu = self.pt.zcu;
const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
const block_ty = self.typeOfIndex(br.block_inst);
const block_unused =
- !block_ty.hasRuntimeBitsIgnoreComptime(pt) or self.liveness.isUnused(br.block_inst);
+ !block_ty.hasRuntimeBitsIgnoreComptime(zcu) or self.liveness.isUnused(br.block_inst);
const block_tracking = self.inst_tracking.getPtr(br.block_inst).?;
const block_data = self.blocks.getPtr(br.block_inst).?;
const first_br = block_data.relocs.items.len == 0;
@@ -13786,7 +13788,7 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const clobbers_len: u31 = @truncate(extra.data.flags);
@@ -13825,7 +13827,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
};
const ty = switch (output) {
.none => self.typeOfIndex(inst),
- else => self.typeOf(output).childType(mod),
+ else => self.typeOf(output).childType(zcu),
};
const is_read = switch (constraint[0]) {
'=' => false,
@@ -13850,7 +13852,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
'x' => abi.RegisterClass.sse,
else => unreachable,
}) orelse return self.fail("ran out of registers lowering inline asm", .{}),
- @intCast(ty.abiSize(pt)),
+ @intCast(ty.abiSize(zcu)),
)
else if (mem.eql(u8, rest, "m"))
if (output != .none) null else return self.fail(
@@ -13920,7 +13922,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
break :arg input_mcv;
const reg = try self.register_manager.allocReg(null, rc);
try self.genSetReg(reg, ty, input_mcv, .{});
- break :arg .{ .register = registerAlias(reg, @intCast(ty.abiSize(pt))) };
+ break :arg .{ .register = registerAlias(reg, @intCast(ty.abiSize(zcu))) };
} else if (mem.eql(u8, constraint, "i") or mem.eql(u8, constraint, "n"))
switch (input_mcv) {
.immediate => |imm| .{ .immediate = imm },
@@ -14497,18 +14499,18 @@ const MoveStrategy = union(enum) {
};
fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !MoveStrategy {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
switch (class) {
.general_purpose, .segment => return .{ .move = .{ ._, .mov } },
.x87 => return .x87_load_store,
.mmx => {},
- .sse => switch (ty.zigTypeTag(mod)) {
+ .sse => switch (ty.zigTypeTag(zcu)) {
else => {
- const classes = mem.sliceTo(&abi.classifySystemV(ty, pt, self.target.*, .other), .none);
+ const classes = mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .other), .none);
assert(std.mem.indexOfNone(abi.Class, classes, &.{
.integer, .sse, .sseup, .memory, .float, .float_combine,
}) == null);
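// The assert guards that no eightbyte of the type classifies as an
// x87-style class by the time it reaches the sse register branch.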
- const abi_size = ty.abiSize(pt);
+ const abi_size = ty.abiSize(zcu);
if (abi_size < 4 or
std.mem.indexOfScalar(abi.Class, classes, .integer) != null) switch (abi_size) {
1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{
@@ -14579,16 +14581,16 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
else => {},
},
- .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
- .Bool => switch (ty.vectorLen(mod)) {
+ .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .Bool => switch (ty.vectorLen(zcu)) {
33...64 => return .{ .move = if (self.hasFeature(.avx))
.{ .v_q, .mov }
else
.{ ._q, .mov } },
else => {},
},
- .Int => switch (ty.childType(mod).intInfo(mod).bits) {
- 1...8 => switch (ty.vectorLen(mod)) {
+ .Int => switch (ty.childType(zcu).intInfo(zcu).bits) {
+ 1...8 => switch (ty.vectorLen(zcu)) {
1...16 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14599,7 +14601,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
.{ .v_, .movdqu } },
else => {},
},
- 9...16 => switch (ty.vectorLen(mod)) {
+ 9...16 => switch (ty.vectorLen(zcu)) {
1...8 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14610,7 +14612,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
.{ .v_, .movdqu } },
else => {},
},
- 17...32 => switch (ty.vectorLen(mod)) {
+ 17...32 => switch (ty.vectorLen(zcu)) {
1...4 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14621,7 +14623,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
.{ .v_, .movdqu } },
else => {},
},
- 33...64 => switch (ty.vectorLen(mod)) {
+ 33...64 => switch (ty.vectorLen(zcu)) {
1...2 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14632,7 +14634,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
.{ .v_, .movdqu } },
else => {},
},
- 65...128 => switch (ty.vectorLen(mod)) {
+ 65...128 => switch (ty.vectorLen(zcu)) {
1 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14643,7 +14645,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
.{ .v_, .movdqu } },
else => {},
},
- 129...256 => switch (ty.vectorLen(mod)) {
+ 129...256 => switch (ty.vectorLen(zcu)) {
1 => if (self.hasFeature(.avx))
return .{ .move = if (aligned)
.{ .v_, .movdqa }
@@ -14653,8 +14655,8 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
},
else => {},
},
- .Pointer, .Optional => if (ty.childType(mod).isPtrAtRuntime(mod))
- switch (ty.vectorLen(mod)) {
+ .Pointer, .Optional => if (ty.childType(zcu).isPtrAtRuntime(zcu))
+ switch (ty.vectorLen(zcu)) {
1...2 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14667,8 +14669,8 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
}
else
unreachable,
- .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
- 16 => switch (ty.vectorLen(mod)) {
+ .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ 16 => switch (ty.vectorLen(zcu)) {
1...8 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14679,7 +14681,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
.{ .v_, .movdqu } },
else => {},
},
- 32 => switch (ty.vectorLen(mod)) {
+ 32 => switch (ty.vectorLen(zcu)) {
1...4 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu }
else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } },
@@ -14690,7 +14692,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
.{ .v_ps, .movu } },
else => {},
},
- 64 => switch (ty.vectorLen(mod)) {
+ 64 => switch (ty.vectorLen(zcu)) {
1...2 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_pd, .mova } else .{ .v_pd, .movu }
else if (aligned) .{ ._pd, .mova } else .{ ._pd, .movu } },
@@ -14701,7 +14703,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
.{ .v_pd, .movu } },
else => {},
},
- 128 => switch (ty.vectorLen(mod)) {
+ 128 => switch (ty.vectorLen(zcu)) {
1 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -14804,7 +14806,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: Copy
} },
else => unreachable,
}, opts);
- part_disp += @intCast(dst_ty.abiSize(pt));
+ part_disp += @intCast(dst_ty.abiSize(pt.zcu));
}
},
.indirect => |reg_off| try self.genSetMem(
@@ -14846,9 +14848,9 @@ fn genSetReg(
opts: CopyOptions,
) InnerError!void {
const pt = self.pt;
- const mod = pt.zcu;
- const abi_size: u32 = @intCast(ty.abiSize(pt));
- if (ty.bitSize(pt) > dst_reg.bitSize())
+ const zcu = pt.zcu;
+ const abi_size: u32 = @intCast(ty.abiSize(zcu));
+ if (ty.bitSize(zcu) > dst_reg.bitSize())
return self.fail("genSetReg called with a value larger than dst_reg", .{});
switch (src_mcv) {
.none,
@@ -14965,13 +14967,13 @@ fn genSetReg(
),
.x87, .mmx, .ip => unreachable,
.sse => try self.asmRegisterRegister(
- @as(?Mir.Inst.FixedTag, switch (ty.scalarType(mod).zigTypeTag(mod)) {
+ @as(?Mir.Inst.FixedTag, switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
else => switch (abi_size) {
1...16 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else .{ ._, .movdqa },
17...32 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else null,
else => null,
},
- .Float => switch (ty.scalarType(mod).floatBits(self.target.*)) {
+ .Float => switch (ty.scalarType(zcu).floatBits(self.target.*)) {
16, 128 => switch (abi_size) {
2...16 => if (self.hasFeature(.avx))
.{ .v_, .movdqa }
@@ -15035,7 +15037,7 @@ fn genSetReg(
return (try self.moveStrategy(
ty,
dst_reg.class(),
- ty.abiAlignment(pt).check(@as(u32, @bitCast(small_addr))),
+ ty.abiAlignment(zcu).check(@as(u32, @bitCast(small_addr))),
)).read(self, registerAlias(dst_reg, abi_size), .{
.base = .{ .reg = .ds },
.mod = .{ .rm = .{
@@ -15136,8 +15138,8 @@ fn genSetMem(
opts: CopyOptions,
) InnerError!void {
const pt = self.pt;
- const mod = pt.zcu;
- const abi_size: u32 = @intCast(ty.abiSize(pt));
+ const zcu = pt.zcu;
+ const abi_size: u32 = @intCast(ty.abiSize(zcu));
const dst_ptr_mcv: MCValue = switch (base) {
.none => .{ .immediate = @bitCast(@as(i64, disp)) },
.reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
@@ -15159,8 +15161,8 @@ fn genSetMem(
),
.immediate => |imm| switch (abi_size) {
1, 2, 4 => {
- const immediate = switch (if (ty.isAbiInt(mod))
- ty.intInfo(mod).signedness
+ const immediate = switch (if (ty.isAbiInt(zcu))
+ ty.intInfo(zcu).signedness
else
.unsigned) {
.signed => Immediate.s(@truncate(@as(i64, @bitCast(imm)))),
@@ -15193,7 +15195,7 @@ fn genSetMem(
.size = .dword,
.disp = disp + offset,
} } },
- if (ty.isSignedInt(mod)) Immediate.s(
+ if (ty.isSignedInt(zcu)) Immediate.s(
@truncate(@as(i64, @bitCast(imm)) >> (math.cast(u6, offset * 8) orelse 63)),
) else Immediate.u(
@as(u32, @truncate(if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0)),
@@ -15263,33 +15265,33 @@ fn genSetMem(
var part_disp: i32 = disp;
for (try self.splitType(ty), src_regs) |src_ty, src_reg| {
try self.genSetMem(base, part_disp, src_ty, .{ .register = src_reg }, opts);
- part_disp += @intCast(src_ty.abiSize(pt));
+ part_disp += @intCast(src_ty.abiSize(zcu));
}
},
- .register_overflow => |ro| switch (ty.zigTypeTag(mod)) {
+ .register_overflow => |ro| switch (ty.zigTypeTag(zcu)) {
.Struct => {
try self.genSetMem(
base,
- disp + @as(i32, @intCast(ty.structFieldOffset(0, pt))),
- ty.structFieldType(0, mod),
+ disp + @as(i32, @intCast(ty.structFieldOffset(0, zcu))),
+ ty.structFieldType(0, zcu),
.{ .register = ro.reg },
opts,
);
try self.genSetMem(
base,
- disp + @as(i32, @intCast(ty.structFieldOffset(1, pt))),
- ty.structFieldType(1, mod),
+ disp + @as(i32, @intCast(ty.structFieldOffset(1, zcu))),
+ ty.structFieldType(1, zcu),
.{ .eflags = ro.eflags },
opts,
);
},
.Optional => {
- assert(!ty.optionalReprIsPayload(mod));
- const child_ty = ty.optionalChild(mod);
+ assert(!ty.optionalReprIsPayload(zcu));
+ const child_ty = ty.optionalChild(zcu);
try self.genSetMem(base, disp, child_ty, .{ .register = ro.reg }, opts);
try self.genSetMem(
base,
- disp + @as(i32, @intCast(child_ty.abiSize(pt))),
+ disp + @as(i32, @intCast(child_ty.abiSize(zcu))),
Type.bool,
.{ .eflags = ro.eflags },
opts,
@@ -15521,14 +15523,14 @@ fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
const src_ty = self.typeOf(ty_op.operand);
const result = result: {
const src_mcv = try self.resolveInst(ty_op.operand);
- if (dst_ty.isPtrAtRuntime(mod) and src_ty.isPtrAtRuntime(mod)) switch (src_mcv) {
+ if (dst_ty.isPtrAtRuntime(zcu) and src_ty.isPtrAtRuntime(zcu)) switch (src_mcv) {
.lea_frame => break :result src_mcv,
else => if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv,
};
@@ -15539,10 +15541,10 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_mcv = if (dst_rc.supersetOf(src_rc) and dst_ty.abiSize(pt) <= src_ty.abiSize(pt) and
+ const dst_mcv = if (dst_rc.supersetOf(src_rc) and dst_ty.abiSize(zcu) <= src_ty.abiSize(zcu) and
self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: {
const dst_mcv = try self.allocRegOrMem(inst, true);
- try self.genCopy(switch (math.order(dst_ty.abiSize(pt), src_ty.abiSize(pt))) {
+ try self.genCopy(switch (math.order(dst_ty.abiSize(zcu), src_ty.abiSize(zcu))) {
.lt => dst_ty,
.eq => if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty,
.gt => src_ty,
@@ -15552,12 +15554,12 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
if (dst_ty.isRuntimeFloat()) break :result dst_mcv;
- if (dst_ty.isAbiInt(mod) and src_ty.isAbiInt(mod) and
- dst_ty.intInfo(mod).signedness == src_ty.intInfo(mod).signedness) break :result dst_mcv;
+ if (dst_ty.isAbiInt(zcu) and src_ty.isAbiInt(zcu) and
+ dst_ty.intInfo(zcu).signedness == src_ty.intInfo(zcu).signedness) break :result dst_mcv;
- const abi_size = dst_ty.abiSize(pt);
- const bit_size = dst_ty.bitSize(pt);
- if (abi_size * 8 <= bit_size or dst_ty.isVector(mod)) break :result dst_mcv;
+ const abi_size = dst_ty.abiSize(zcu);
+ const bit_size = dst_ty.bitSize(zcu);
+ if (abi_size * 8 <= bit_size or dst_ty.isVector(zcu)) break :result dst_mcv;
const dst_limbs_len = math.divCeil(i32, @intCast(bit_size), 64) catch unreachable;
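// e.g. for a u72 destination: abi_size * 8 = 128 exceeds bit_size = 72, so
// we reach here with dst_limbs_len = ceil(72 / 64) = 2, and only the
// partially-used high limb needs the fixup below.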
const high_mcv: MCValue = switch (dst_mcv) {
@@ -15586,20 +15588,20 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const slice_ty = self.typeOfIndex(inst);
const ptr_ty = self.typeOf(ty_op.operand);
const ptr = try self.resolveInst(ty_op.operand);
- const array_ty = ptr_ty.childType(mod);
- const array_len = array_ty.arrayLen(mod);
+ const array_ty = ptr_ty.childType(zcu);
+ const array_len = array_ty.arrayLen(zcu);
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(slice_ty, zcu));
try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr, .{});
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(ptr_ty.abiSize(pt)),
+ @intCast(ptr_ty.abiSize(zcu)),
Type.usize,
.{ .immediate = array_len },
.{},
@@ -15611,16 +15613,16 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
const dst_bits = dst_ty.floatBits(self.target.*);
const src_ty = self.typeOf(ty_op.operand);
- const src_bits: u32 = @intCast(src_ty.bitSize(pt));
+ const src_bits: u32 = @intCast(src_ty.bitSize(zcu));
const src_signedness =
- if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
+ if (src_ty.isAbiInt(zcu)) src_ty.intInfo(zcu).signedness else .unsigned;
const src_size = math.divCeil(u32, @max(switch (src_signedness) {
.signed => src_bits,
.unsigned => src_bits + 1,
@@ -15666,7 +15668,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
defer self.register_manager.unlockReg(dst_lock);
- const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(mod)) {
+ const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(zcu)) {
.Float => switch (dst_ty.floatBits(self.target.*)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 },
64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 },
@@ -15691,13 +15693,13 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
- const dst_bits: u32 = @intCast(dst_ty.bitSize(pt));
+ const dst_bits: u32 = @intCast(dst_ty.bitSize(zcu));
const dst_signedness =
- if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned;
+ if (dst_ty.isAbiInt(zcu)) dst_ty.intInfo(zcu).signedness else .unsigned;
const dst_size = math.divCeil(u32, @max(switch (dst_signedness) {
.signed => dst_bits,
.unsigned => dst_bits + 1,
@@ -15768,7 +15770,7 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
const ptr_ty = self.typeOf(extra.ptr);
const val_ty = self.typeOf(extra.expected_value);
- const val_abi_size: u32 = @intCast(val_ty.abiSize(pt));
+ const val_abi_size: u32 = @intCast(val_ty.abiSize(pt.zcu));
try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx });
const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx });
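// The narrow cmpxchg uses rax for the expected value; the wide
// cmpxchg8b/cmpxchg16b forms compare against rdx:rax and store rcx:rbx on
// success, hence all four registers are spilled and pinned up front.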
@@ -15859,7 +15861,7 @@ fn atomicOp(
order: std.builtin.AtomicOrder,
) InnerError!MCValue {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ptr_lock = switch (ptr_mcv) {
.register => |reg| self.register_manager.lockReg(reg),
else => null,
@@ -15872,7 +15874,7 @@ fn atomicOp(
};
defer if (val_lock) |lock| self.register_manager.unlockReg(lock);
- const val_abi_size: u32 = @intCast(val_ty.abiSize(pt));
+ const val_abi_size: u32 = @intCast(val_ty.abiSize(zcu));
const mem_size = Memory.Size.fromSize(val_abi_size);
const ptr_mem: Memory = switch (ptr_mcv) {
.immediate, .register, .register_offset, .lea_frame => try ptr_mcv.deref().mem(self, mem_size),
@@ -16031,8 +16033,8 @@ fn atomicOp(
.Or => try self.genBinOpMir(.{ ._, .@"or" }, val_ty, tmp_mcv, val_mcv),
.Xor => try self.genBinOpMir(.{ ._, .xor }, val_ty, tmp_mcv, val_mcv),
.Min, .Max => {
- const cc: Condition = switch (if (val_ty.isAbiInt(mod))
- val_ty.intInfo(mod).signedness
+ const cc: Condition = switch (if (val_ty.isAbiInt(zcu))
+ val_ty.intInfo(zcu).signedness
else
.unsigned) {
.unsigned => switch (op) {
@@ -16156,8 +16158,8 @@ fn atomicOp(
try self.asmRegisterMemory(.{ ._, .xor }, .rcx, val_hi_mem);
},
.Min, .Max => {
- const cc: Condition = switch (if (val_ty.isAbiInt(mod))
- val_ty.intInfo(mod).signedness
+ const cc: Condition = switch (if (val_ty.isAbiInt(zcu))
+ val_ty.intInfo(zcu).signedness
else
.unsigned) {
.unsigned => switch (op) {
@@ -16264,7 +16266,7 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr
fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
result: {
@@ -16290,19 +16292,19 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
};
defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock);
- const elem_abi_size: u31 = @intCast(elem_ty.abiSize(pt));
+ const elem_abi_size: u31 = @intCast(elem_ty.abiSize(zcu));
if (elem_abi_size == 1) {
- const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
+ const ptr: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) {
// TODO: this only handles slices stored in the stack
.Slice => dst_ptr,
.One => dst_ptr,
.C, .Many => unreachable,
};
- const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
+ const len: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) {
// TODO: this only handles slices stored in the stack
.Slice => dst_ptr.address().offset(8).deref(),
- .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) },
+ .One => .{ .immediate = dst_ptr_ty.childType(zcu).arrayLen(zcu) },
.C, .Many => unreachable,
};
const len_lock: ?RegisterLock = switch (len) {
@@ -16318,9 +16320,9 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
// Store the first element, and then rely on memcpy copying forwards.
// Length zero requires a runtime check - so we handle arrays specially
// here to elide it.
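// Concretely: store the element once at dst[0], then copy
// (len - 1) * elem_size bytes from dst to dst + elem_size; a forwards copy
// re-reads bytes it has just written, replicating the element.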
- switch (dst_ptr_ty.ptrSize(mod)) {
+ switch (dst_ptr_ty.ptrSize(zcu)) {
.Slice => {
- const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(mod);
+ const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(zcu);
// TODO: this only handles slices stored in the stack
const ptr = dst_ptr;
@@ -16365,7 +16367,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
.One => {
const elem_ptr_ty = try pt.singleMutPtrType(elem_ty);
- const len = dst_ptr_ty.childType(mod).arrayLen(mod);
+ const len = dst_ptr_ty.childType(zcu).arrayLen(zcu);
assert(len != 0); // prevented by Sema
try self.store(elem_ptr_ty, dst_ptr, src_val, .{ .safety = safety });
@@ -16393,7 +16395,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
@@ -16415,7 +16417,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
+ const len: MCValue = switch (dst_ptr_ty.ptrSize(zcu)) {
.Slice => len: {
const len_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const len_lock = self.register_manager.lockRegAssumeUnused(len_reg);
@@ -16425,13 +16427,13 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
.{ .i_, .mul },
len_reg,
try dst_ptr.address().offset(8).deref().mem(self, .qword),
- Immediate.s(@intCast(dst_ptr_ty.childType(mod).abiSize(pt))),
+ Immediate.s(@intCast(dst_ptr_ty.childType(zcu).abiSize(zcu))),
);
break :len .{ .register = len_reg };
},
.One => len: {
- const array_ty = dst_ptr_ty.childType(mod);
- break :len .{ .immediate = array_ty.arrayLen(mod) * array_ty.childType(mod).abiSize(pt) };
+ const array_ty = dst_ptr_ty.childType(zcu);
+ break :len .{ .immediate = array_ty.arrayLen(zcu) * array_ty.childType(zcu).abiSize(zcu) };
},
.C, .Many => unreachable,
};
@@ -16449,6 +16451,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
+ const zcu = pt.zcu;
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const inst_ty = self.typeOfIndex(inst);
const enum_ty = self.typeOf(un_op);
@@ -16457,8 +16460,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
// We need a properly aligned and sized call frame to be able to call this function.
{
const needed_call_frame = FrameAlloc.init(.{
- .size = inst_ty.abiSize(pt),
- .alignment = inst_ty.abiAlignment(pt),
+ .size = inst_ty.abiSize(zcu),
+ .alignment = inst_ty.abiAlignment(zcu),
});
const frame_allocs_slice = self.frame_allocs.slice();
const stack_frame_size =
@@ -16590,15 +16593,15 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const vector_ty = self.typeOfIndex(inst);
- const vector_len = vector_ty.vectorLen(mod);
+ const vector_len = vector_ty.vectorLen(zcu);
const dst_rc = self.regClassForType(vector_ty);
const scalar_ty = self.typeOf(ty_op.operand);
const result: MCValue = result: {
- switch (scalar_ty.zigTypeTag(mod)) {
+ switch (scalar_ty.zigTypeTag(zcu)) {
else => {},
.Bool => {
const regs =
@@ -16641,7 +16644,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
break :result .{ .register = regs[0] };
},
.Int => if (self.hasFeature(.avx2)) avx2: {
- const mir_tag = @as(?Mir.Inst.FixedTag, switch (scalar_ty.intInfo(mod).bits) {
+ const mir_tag = @as(?Mir.Inst.FixedTag, switch (scalar_ty.intInfo(zcu).bits) {
else => null,
1...8 => switch (vector_len) {
else => null,
@@ -16672,15 +16675,15 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
const src_mcv = try self.resolveInst(ty_op.operand);
if (src_mcv.isMemory()) try self.asmRegisterMemory(
mir_tag,
- registerAlias(dst_reg, @intCast(vector_ty.abiSize(pt))),
+ registerAlias(dst_reg, @intCast(vector_ty.abiSize(zcu))),
try src_mcv.mem(self, self.memSize(scalar_ty)),
) else {
if (mir_tag[0] == .v_i128) break :avx2;
try self.genSetReg(dst_reg, scalar_ty, src_mcv, .{});
try self.asmRegisterRegister(
mir_tag,
- registerAlias(dst_reg, @intCast(vector_ty.abiSize(pt))),
- registerAlias(dst_reg, @intCast(scalar_ty.abiSize(pt))),
+ registerAlias(dst_reg, @intCast(vector_ty.abiSize(zcu))),
+ registerAlias(dst_reg, @intCast(scalar_ty.abiSize(zcu))),
);
}
break :result .{ .register = dst_reg };
@@ -16692,8 +16695,8 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
try self.genSetReg(dst_reg, scalar_ty, .{ .air_ref = ty_op.operand }, .{});
if (vector_len == 1) break :result .{ .register = dst_reg };
- const dst_alias = registerAlias(dst_reg, @intCast(vector_ty.abiSize(pt)));
- const scalar_bits = scalar_ty.intInfo(mod).bits;
+ const dst_alias = registerAlias(dst_reg, @intCast(vector_ty.abiSize(zcu)));
+ const scalar_bits = scalar_ty.intInfo(zcu).bits;
if (switch (scalar_bits) {
1...8 => true,
9...128 => false,
@@ -16929,14 +16932,14 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const ty = self.typeOfIndex(inst);
- const vec_len = ty.vectorLen(mod);
- const elem_ty = ty.childType(mod);
- const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt));
- const abi_size: u32 = @intCast(ty.abiSize(pt));
+ const vec_len = ty.vectorLen(zcu);
+ const elem_ty = ty.childType(zcu);
+ const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
+ const abi_size: u32 = @intCast(ty.abiSize(zcu));
const pred_ty = self.typeOf(pl_op.operand);
const result = result: {
@@ -17160,7 +17163,7 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.childType(mod).zigTypeTag(mod)) {
+ const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.childType(zcu).zigTypeTag(zcu)) {
else => null,
.Int => switch (abi_size) {
0 => unreachable,
@@ -17176,7 +17179,7 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
null,
else => null,
},
- .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+ .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
else => unreachable,
16, 80, 128 => null,
32 => switch (vec_len) {
@@ -17230,7 +17233,7 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
try self.copyToTmpRegister(ty, lhs_mcv), abi_size),
mask_alias,
) else {
- const mir_fixes = @as(?Mir.Inst.Fixes, switch (elem_ty.zigTypeTag(mod)) {
+ const mir_fixes = @as(?Mir.Inst.Fixes, switch (elem_ty.zigTypeTag(zcu)) {
else => null,
.Int => .p_,
.Float => switch (elem_ty.floatBits(self.target.*)) {
@@ -17262,18 +17265,18 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const dst_ty = self.typeOfIndex(inst);
- const elem_ty = dst_ty.childType(mod);
- const elem_abi_size: u16 = @intCast(elem_ty.abiSize(pt));
- const dst_abi_size: u32 = @intCast(dst_ty.abiSize(pt));
+ const elem_ty = dst_ty.childType(zcu);
+ const elem_abi_size: u16 = @intCast(elem_ty.abiSize(zcu));
+ const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
const lhs_ty = self.typeOf(extra.a);
- const lhs_abi_size: u32 = @intCast(lhs_ty.abiSize(pt));
+ const lhs_abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
const rhs_ty = self.typeOf(extra.b);
- const rhs_abi_size: u32 = @intCast(rhs_ty.abiSize(pt));
+ const rhs_abi_size: u32 = @intCast(rhs_ty.abiSize(zcu));
const max_abi_size = @max(dst_abi_size, lhs_abi_size, rhs_abi_size);
const ExpectedContents = [32]?i32;
@@ -17286,10 +17289,10 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
for (mask_elems, 0..) |*mask_elem, elem_index| {
const mask_elem_val =
Value.fromInterned(extra.mask).elemValue(pt, elem_index) catch unreachable;
- mask_elem.* = if (mask_elem_val.isUndef(mod))
+ mask_elem.* = if (mask_elem_val.isUndef(zcu))
null
else
- @intCast(mask_elem_val.toSignedInt(pt));
+ @intCast(mask_elem_val.toSignedInt(zcu));
}
const has_avx = self.hasFeature(.avx);
@@ -18028,7 +18031,7 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
);
if (has_avx) try self.asmRegisterRegisterRegister(
- .{ switch (elem_ty.zigTypeTag(mod)) {
+ .{ switch (elem_ty.zigTypeTag(zcu)) {
else => break :result null,
.Int => .vp_,
.Float => switch (elem_ty.floatBits(self.target.*)) {
@@ -18042,7 +18045,7 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
lhs_temp_alias,
rhs_temp_alias,
) else try self.asmRegisterRegister(
- .{ switch (elem_ty.zigTypeTag(mod)) {
+ .{ switch (elem_ty.zigTypeTag(zcu)) {
else => break :result null,
.Int => .p_,
.Float => switch (elem_ty.floatBits(self.target.*)) {
@@ -18068,19 +18071,19 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
const result: MCValue = result: {
const operand_ty = self.typeOf(reduce.operand);
- if (operand_ty.isVector(mod) and operand_ty.childType(mod).toIntern() == .bool_type) {
+ if (operand_ty.isVector(zcu) and operand_ty.childType(zcu).toIntern() == .bool_type) {
try self.spillEflagsIfOccupied();
const operand_mcv = try self.resolveInst(reduce.operand);
- const mask_len = (math.cast(u6, operand_ty.vectorLen(mod)) orelse
+ const mask_len = (math.cast(u6, operand_ty.vectorLen(zcu)) orelse
return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(pt)}));
const mask = (@as(u64, 1) << mask_len) - 1;
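// e.g. a @Vector(4, bool) operand occupies the low 4 bits, so mask is
// 0b1111 and the reduction below only needs to inspect those bits.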
- const abi_size: u32 = @intCast(operand_ty.abiSize(pt));
+ const abi_size: u32 = @intCast(operand_ty.abiSize(zcu));
switch (reduce.operation) {
.Or => {
if (operand_mcv.isMemory()) try self.asmMemoryImmediate(
@@ -18126,36 +18129,36 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const result_ty = self.typeOfIndex(inst);
- const len: usize = @intCast(result_ty.arrayLen(mod));
+ const len: usize = @intCast(result_ty.arrayLen(zcu));
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = result: {
- switch (result_ty.zigTypeTag(mod)) {
+ switch (result_ty.zigTypeTag(zcu)) {
.Struct => {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt));
- if (result_ty.containerLayout(mod) == .@"packed") {
- const struct_obj = mod.typeToStruct(result_ty).?;
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu));
+ if (result_ty.containerLayout(zcu) == .@"packed") {
+ const struct_obj = zcu.typeToStruct(result_ty).?;
try self.genInlineMemset(
.{ .lea_frame = .{ .index = frame_index } },
.{ .immediate = 0 },
- .{ .immediate = result_ty.abiSize(pt) },
+ .{ .immediate = result_ty.abiSize(zcu) },
.{},
);
for (elements, 0..) |elem, elem_i_usize| {
const elem_i: u32 = @intCast(elem_i_usize);
if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;
- const elem_ty = result_ty.structFieldType(elem_i, mod);
- const elem_bit_size: u32 = @intCast(elem_ty.bitSize(pt));
+ const elem_ty = result_ty.structFieldType(elem_i, zcu);
+ const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu));
if (elem_bit_size > 64) {
return self.fail(
"TODO airAggregateInit implement packed structs with large fields",
.{},
);
}
- const elem_abi_size: u32 = @intCast(elem_ty.abiSize(pt));
+ const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
const elem_abi_bits = elem_abi_size * 8;
const elem_off = pt.structPackedFieldBitOffset(struct_obj, elem_i);
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
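// Hypothetical example: a u24 field (abi size 4, so 32 abi bits) at packed
// bit offset 40 lands at byte offset 40 / 32 * 4 = 4.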
@@ -18229,8 +18232,8 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
} else for (elements, 0..) |elem, elem_i| {
if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;
- const elem_ty = result_ty.structFieldType(elem_i, mod);
- const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, pt));
+ const elem_ty = result_ty.structFieldType(elem_i, zcu);
+ const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu));
const elem_mcv = try self.resolveInst(elem);
const mat_elem_mcv = switch (elem_mcv) {
.load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
@@ -18241,9 +18244,9 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
break :result .{ .load_frame = .{ .index = frame_index } };
},
.Array, .Vector => {
- const elem_ty = result_ty.childType(mod);
- if (result_ty.isVector(mod) and elem_ty.toIntern() == .bool_type) {
- const result_size: u32 = @intCast(result_ty.abiSize(pt));
+ const elem_ty = result_ty.childType(zcu);
+ if (result_ty.isVector(zcu) and elem_ty.toIntern() == .bool_type) {
+ const result_size: u32 = @intCast(result_ty.abiSize(zcu));
const dst_reg = try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
try self.asmRegisterRegister(
.{ ._, .xor },
@@ -18274,8 +18277,8 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
}
break :result .{ .register = dst_reg };
} else {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, pt));
- const elem_size: u32 = @intCast(elem_ty.abiSize(pt));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu));
+ const elem_size: u32 = @intCast(elem_ty.abiSize(zcu));
for (elements, 0..) |elem, elem_i| {
const elem_mcv = try self.resolveInst(elem);
@@ -18292,7 +18295,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
.{},
);
}
- if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem(
+ if (result_ty.sentinel(zcu)) |sentinel| try self.genSetMem(
.{ .frame = frame_index },
@intCast(elem_size * elements.len),
elem_ty,
@@ -18318,18 +18321,18 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
const result: MCValue = result: {
const union_ty = self.typeOfIndex(inst);
- const layout = union_ty.unionGetLayout(pt);
+ const layout = union_ty.unionGetLayout(zcu);
const src_ty = self.typeOf(extra.init);
const src_mcv = try self.resolveInst(extra.init);
if (layout.tag_size == 0) {
- if (layout.abi_size <= src_ty.abiSize(pt) and
+ if (layout.abi_size <= src_ty.abiSize(zcu) and
self.reuseOperand(inst, extra.init, 0, src_mcv)) break :result src_mcv;
const dst_mcv = try self.allocRegOrMem(inst, true);
@@ -18339,13 +18342,13 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const dst_mcv = try self.allocRegOrMem(inst, false);
- const union_obj = mod.typeToUnion(union_ty).?;
+ const union_obj = zcu.typeToUnion(union_ty).?;
const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const tag_ty = Type.fromInterned(union_obj.enum_tag_ty);
- const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
+ const field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
const tag_int_val = try tag_val.intFromEnum(tag_ty, pt);
- const tag_int = tag_int_val.toUnsignedInt(pt);
+ const tag_int = tag_int_val.toUnsignedInt(zcu);
const tag_off: i32 = @intCast(layout.tagOffset());
try self.genCopy(
tag_ty,
@@ -18369,19 +18372,19 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void {
fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const ty = self.typeOfIndex(inst);
const ops = [3]Air.Inst.Ref{ extra.lhs, extra.rhs, pl_op.operand };
const result = result: {
- if (switch (ty.scalarType(mod).floatBits(self.target.*)) {
+ if (switch (ty.scalarType(zcu).floatBits(self.target.*)) {
16, 80, 128 => true,
32, 64 => !self.hasFeature(.fma),
else => unreachable,
}) {
- if (ty.zigTypeTag(mod) != .Float) return self.fail("TODO implement airMulAdd for {}", .{
+ if (ty.zigTypeTag(zcu) != .Float) return self.fail("TODO implement airMulAdd for {}", .{
ty.fmt(pt),
});
@@ -18430,21 +18433,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
const mir_tag = @as(?Mir.Inst.FixedTag, if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or
mem.eql(u2, &order, &.{ 3, 1, 2 }))
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(zcu)) {
.Float => switch (ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .fmadd132 },
64 => .{ .v_sd, .fmadd132 },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
- .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
- 32 => switch (ty.vectorLen(mod)) {
+ .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ 32 => switch (ty.vectorLen(zcu)) {
1 => .{ .v_ss, .fmadd132 },
2...8 => .{ .v_ps, .fmadd132 },
else => null,
},
- 64 => switch (ty.vectorLen(mod)) {
+ 64 => switch (ty.vectorLen(zcu)) {
1 => .{ .v_sd, .fmadd132 },
2...4 => .{ .v_pd, .fmadd132 },
else => null,
@@ -18457,21 +18460,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
else => unreachable,
}
else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 }))
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(zcu)) {
.Float => switch (ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .fmadd213 },
64 => .{ .v_sd, .fmadd213 },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
- .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
- 32 => switch (ty.vectorLen(mod)) {
+ .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ 32 => switch (ty.vectorLen(zcu)) {
1 => .{ .v_ss, .fmadd213 },
2...8 => .{ .v_ps, .fmadd213 },
else => null,
},
- 64 => switch (ty.vectorLen(mod)) {
+ 64 => switch (ty.vectorLen(zcu)) {
1 => .{ .v_sd, .fmadd213 },
2...4 => .{ .v_pd, .fmadd213 },
else => null,
@@ -18484,21 +18487,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
else => unreachable,
}
else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 }))
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(zcu)) {
.Float => switch (ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .fmadd231 },
64 => .{ .v_sd, .fmadd231 },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
- .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
- 32 => switch (ty.vectorLen(mod)) {
+ .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ 32 => switch (ty.vectorLen(zcu)) {
1 => .{ .v_ss, .fmadd231 },
2...8 => .{ .v_ps, .fmadd231 },
else => null,
},
- 64 => switch (ty.vectorLen(mod)) {
+ 64 => switch (ty.vectorLen(zcu)) {
1 => .{ .v_sd, .fmadd231 },
2...4 => .{ .v_pd, .fmadd231 },
else => null,
@@ -18516,7 +18519,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
var mops: [3]MCValue = undefined;
for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv;
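// The fma suffixes encode operand roles: fmadd132 computes op1 * op3 + op2,
// fmadd213 computes op2 * op1 + op3, and fmadd231 computes op2 * op3 + op1,
// so the order chosen above selects the variant matching the mop layout.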
- const abi_size: u32 = @intCast(ty.abiSize(pt));
+ const abi_size: u32 = @intCast(ty.abiSize(zcu));
const mop1_reg = registerAlias(mops[0].getReg().?, abi_size);
const mop2_reg = registerAlias(mops[1].getReg().?, abi_size);
if (mops[2].isRegister()) try self.asmRegisterRegisterRegister(
@@ -18537,17 +18540,17 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const va_list_ty = self.air.instructions.items(.data)[@intFromEnum(inst)].ty;
const ptr_anyopaque_ty = try pt.singleMutPtrType(Type.anyopaque);
const result: MCValue = switch (abi.resolveCallingConvention(
- self.fn_type.fnCallingConvention(mod),
+ self.fn_type.fnCallingConvention(zcu),
self.target.*,
)) {
.SysV => result: {
const info = self.va_info.sysv;
- const dst_fi = try self.allocFrameIndex(FrameAlloc.initSpill(va_list_ty, pt));
+ const dst_fi = try self.allocFrameIndex(FrameAlloc.initSpill(va_list_ty, zcu));
var field_off: u31 = 0;
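// SysV va_list layout: { gp_offset: c_uint, fp_offset: c_uint,
// overflow_arg_area: *anyopaque, reg_save_area: *anyopaque }; field_off
// advances by each field's ABI size as the stores below fill it in.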
// gp_offset: c_uint,
try self.genSetMem(
@@ -18557,7 +18560,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
.{ .immediate = info.gp_count * 8 },
.{},
);
- field_off += @intCast(Type.c_uint.abiSize(pt));
+ field_off += @intCast(Type.c_uint.abiSize(zcu));
// fp_offset: c_uint,
try self.genSetMem(
.{ .frame = dst_fi },
@@ -18566,7 +18569,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
.{ .immediate = abi.SysV.c_abi_int_param_regs.len * 8 + info.fp_count * 16 },
.{},
);
- field_off += @intCast(Type.c_uint.abiSize(pt));
+ field_off += @intCast(Type.c_uint.abiSize(zcu));
// overflow_arg_area: *anyopaque,
try self.genSetMem(
.{ .frame = dst_fi },
@@ -18575,7 +18578,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
.{ .lea_frame = info.overflow_arg_area },
.{},
);
- field_off += @intCast(ptr_anyopaque_ty.abiSize(pt));
+ field_off += @intCast(ptr_anyopaque_ty.abiSize(zcu));
// reg_save_area: *anyopaque,
try self.genSetMem(
.{ .frame = dst_fi },
@@ -18584,7 +18587,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
.{ .lea_frame = info.reg_save_area },
.{},
);
- field_off += @intCast(ptr_anyopaque_ty.abiSize(pt));
+ field_off += @intCast(ptr_anyopaque_ty.abiSize(zcu));
break :result .{ .load_frame = .{ .index = dst_fi } };
},
.Win64 => return self.fail("TODO implement c_va_start for Win64", .{}),
@@ -18595,7 +18598,7 @@ fn airVaStart(self: *Self, inst: Air.Inst.Index) !void {
fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ty = self.typeOfIndex(inst);
const promote_ty = self.promoteVarArg(ty);
@@ -18603,7 +18606,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
const unused = self.liveness.isUnused(inst);
const result: MCValue = switch (abi.resolveCallingConvention(
- self.fn_type.fnCallingConvention(mod),
+ self.fn_type.fnCallingConvention(zcu),
self.target.*,
)) {
.SysV => result: {
@@ -18633,7 +18636,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
const overflow_arg_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 8 } };
const reg_save_area: MCValue = .{ .indirect = .{ .reg = ptr_arg_list_reg, .off = 16 } };
- const classes = mem.sliceTo(&abi.classifySystemV(promote_ty, pt, self.target.*, .arg), .none);
+ const classes = mem.sliceTo(&abi.classifySystemV(promote_ty, zcu, self.target.*, .arg), .none);
switch (classes[0]) {
.integer => {
assert(classes.len == 1);
@@ -18668,7 +18671,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
.base = .{ .reg = addr_reg },
.mod = .{ .rm = .{
.size = .qword,
- .disp = @intCast(@max(promote_ty.abiSize(pt), 8)),
+ .disp = @intCast(@max(promote_ty.abiSize(zcu), 8)),
} },
});
try self.genCopy(
@@ -18716,7 +18719,7 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
.base = .{ .reg = addr_reg },
.mod = .{ .rm = .{
.size = .qword,
- .disp = @intCast(@max(promote_ty.abiSize(pt), 8)),
+ .disp = @intCast(@max(promote_ty.abiSize(zcu), 8)),
} },
});
try self.genCopy(
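// For reference: both branches above advance overflow_arg_area by
// max(abiSize, 8) because SysV stack argument slots are 8-byte quantized.
// A minimal sketch of that bump (names hypothetical):
fn nextOverflowArg(overflow_arg_area: u64, promoted_abi_size: u64) u64 {
    // e.g. a promoted c_int (4 bytes) still consumes a full 8-byte slot
    return overflow_arg_area + @max(promoted_abi_size, 8);
}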
@@ -18806,11 +18809,11 @@ fn airVaEnd(self: *Self, inst: Air.Inst.Index) !void {
}
fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
- const pt = self.pt;
+ const zcu = self.pt.zcu;
const ty = self.typeOf(ref);
// If the type has no codegen bits, no need to store it.
- if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return .none;
+ if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;
const mcv = if (ref.toIndex()) |inst| mcv: {
break :mcv self.inst_tracking.getPtr(inst).?.short;
@@ -18927,8 +18930,8 @@ fn resolveCallingConventionValues(
stack_frame_base: FrameIndex,
) !CallMCValues {
const pt = self.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const cc = fn_info.cc;
const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len);
defer self.gpa.free(param_types);
@@ -18970,15 +18973,15 @@ fn resolveCallingConventionValues(
.SysV => {},
.Win64 => {
                // Align the stack to 16 bytes before allocating shadow stack space (if any).
- result.stack_byte_count += @intCast(4 * Type.usize.abiSize(pt));
+ result.stack_byte_count += @intCast(4 * Type.usize.abiSize(zcu));
},
else => unreachable,
}
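// For reference: the 4 * 8 bytes added above are the Win64 "shadow space":
// four 8-byte home slots the caller always reserves for the register
// parameters, regardless of how many the callee actually takes.
const win64_shadow_space: u64 = 4 * @sizeOf(u64); // == 32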
// Return values
- if (ret_ty.zigTypeTag(mod) == .NoReturn) {
+ if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
result.return_value = InstTracking.init(.unreach);
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
        // TODO: is this even possible for the C calling convention?
result.return_value = InstTracking.init(.none);
} else {
@@ -18986,15 +18989,15 @@ fn resolveCallingConventionValues(
var ret_tracking_i: usize = 0;
const classes = switch (resolved_cc) {
- .SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, pt, self.target.*, .ret), .none),
- .Win64 => &.{abi.classifyWindows(ret_ty, pt)},
+ .SysV => mem.sliceTo(&abi.classifySystemV(ret_ty, zcu, self.target.*, .ret), .none),
+ .Win64 => &.{abi.classifyWindows(ret_ty, zcu)},
else => unreachable,
};
for (classes) |class| switch (class) {
.integer => {
const ret_int_reg = registerAlias(
abi.getCAbiIntReturnRegs(resolved_cc)[ret_int_reg_i],
- @intCast(@min(ret_ty.abiSize(pt), 8)),
+ @intCast(@min(ret_ty.abiSize(zcu), 8)),
);
ret_int_reg_i += 1;
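// For reference: @min(abiSize, 8) caps the alias request at one GP register;
// a wider .integer result takes further return registers on later iterations.
// A sketch of the clamp (e.g. a u32 result aliases rax as eax):
fn gpReturnAliasBytes(ret_abi_size: u64) u32 {
    return @intCast(@min(ret_abi_size, 8));
}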
@@ -19004,7 +19007,7 @@ fn resolveCallingConventionValues(
.sse, .float, .float_combine, .win_i128 => {
const ret_sse_reg = registerAlias(
abi.getCAbiSseReturnRegs(resolved_cc)[ret_sse_reg_i],
- @intCast(ret_ty.abiSize(pt)),
+ @intCast(ret_ty.abiSize(zcu)),
);
ret_sse_reg_i += 1;
@@ -19047,7 +19050,7 @@ fn resolveCallingConventionValues(
// Input params
for (param_types, result.args) |ty, *arg| {
- assert(ty.hasRuntimeBitsIgnoreComptime(pt));
+ assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
switch (resolved_cc) {
.SysV => {},
.Win64 => {
@@ -19061,8 +19064,8 @@ fn resolveCallingConventionValues(
var arg_mcv_i: usize = 0;
const classes = switch (resolved_cc) {
- .SysV => mem.sliceTo(&abi.classifySystemV(ty, pt, self.target.*, .arg), .none),
- .Win64 => &.{abi.classifyWindows(ty, pt)},
+ .SysV => mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .arg), .none),
+ .Win64 => &.{abi.classifyWindows(ty, zcu)},
else => unreachable,
};
for (classes) |class| switch (class) {
@@ -19072,7 +19075,7 @@ fn resolveCallingConventionValues(
const param_int_reg = registerAlias(
abi.getCAbiIntParamRegs(resolved_cc)[param_int_reg_i],
- @intCast(@min(ty.abiSize(pt), 8)),
+ @intCast(@min(ty.abiSize(zcu), 8)),
);
param_int_reg_i += 1;
@@ -19085,7 +19088,7 @@ fn resolveCallingConventionValues(
const param_sse_reg = registerAlias(
abi.getCAbiSseParamRegs(resolved_cc)[param_sse_reg_i],
- @intCast(ty.abiSize(pt)),
+ @intCast(ty.abiSize(zcu)),
);
param_sse_reg_i += 1;
@@ -19098,7 +19101,7 @@ fn resolveCallingConventionValues(
.x87, .x87up, .complex_x87, .memory => break,
else => unreachable,
},
- .Win64 => if (ty.abiSize(pt) > 8) {
+ .Win64 => if (ty.abiSize(zcu) > 8) {
const param_int_reg =
abi.getCAbiIntParamRegs(resolved_cc)[param_int_reg_i].to64();
param_int_reg_i += 1;
@@ -19117,10 +19120,10 @@ fn resolveCallingConventionValues(
param_int_reg_i = param_int_regs_len;
const frame_elem_align = 8;
- const frame_elems_len = ty.vectorLen(mod) - remaining_param_int_regs;
+ const frame_elems_len = ty.vectorLen(zcu) - remaining_param_int_regs;
const frame_elem_size = mem.alignForward(
u64,
- ty.childType(mod).abiSize(pt),
+ ty.childType(zcu).abiSize(zcu),
frame_elem_align,
);
const frame_size: u31 = @intCast(frame_elems_len * frame_elem_size);
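// For reference: a sketch of the Win64 spill sizing above. Vector elements
// that no longer fit in integer parameter registers go to a frame whose
// element stride is rounded up to 8 bytes (names hypothetical):
const std = @import("std");
fn vectorSpillFrameSize(vector_len: u32, remaining_int_regs: u32, elem_abi_size: u64) u31 {
    const frame_elems_len: u64 = vector_len - remaining_int_regs;
    const frame_elem_size = std.mem.alignForward(u64, elem_abi_size, 8);
    return @intCast(frame_elems_len * frame_elem_size);
}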
@@ -19144,9 +19147,9 @@ fn resolveCallingConventionValues(
continue;
}
- const param_size: u31 = @intCast(ty.abiSize(pt));
+ const param_size: u31 = @intCast(ty.abiSize(zcu));
const param_align: u31 =
- @intCast(@max(ty.abiAlignment(pt).toByteUnits().?, 8));
+ @intCast(@max(ty.abiAlignment(zcu).toByteUnits().?, 8));
result.stack_byte_count =
mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -19164,13 +19167,13 @@ fn resolveCallingConventionValues(
result.stack_align = .@"16";
// Return values
- if (ret_ty.zigTypeTag(mod) == .NoReturn) {
+ if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
result.return_value = InstTracking.init(.unreach);
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
result.return_value = InstTracking.init(.none);
} else {
const ret_reg = abi.getCAbiIntReturnRegs(resolved_cc)[0];
- const ret_ty_size: u31 = @intCast(ret_ty.abiSize(pt));
+ const ret_ty_size: u31 = @intCast(ret_ty.abiSize(zcu));
if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) {
const aliased_reg = registerAlias(ret_reg, ret_ty_size);
result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none };
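// For reference: a result of at most 8 bytes that is not a float comes back
// in a width-matched alias of the first return register. Assuming that
// register is rax, the power-of-two aliases would be (a sketch, with the
// alias spelled as a string; other sizes are handled through memory instead):
fn raxAliasSketch(ret_ty_size: u31) []const u8 {
    return switch (ret_ty_size) {
        1 => "al",
        2 => "ax",
        4 => "eax",
        8 => "rax",
        else => unreachable,
    };
}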
@@ -19185,12 +19188,12 @@ fn resolveCallingConventionValues(
// Input params
for (param_types, result.args) |ty, *arg| {
- if (!ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
arg.* = .none;
continue;
}
- const param_size: u31 = @intCast(ty.abiSize(pt));
- const param_align: u31 = @intCast(ty.abiAlignment(pt).toByteUnits().?);
+ const param_size: u31 = @intCast(ty.abiSize(zcu));
+ const param_align: u31 = @intCast(ty.abiAlignment(zcu).toByteUnits().?);
result.stack_byte_count =
mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -19276,25 +19279,26 @@ fn registerAlias(reg: Register, size_bytes: u32) Register {
fn memSize(self: *Self, ty: Type) Memory.Size {
const pt = self.pt;
- const mod = pt.zcu;
- return switch (ty.zigTypeTag(mod)) {
+ const zcu = pt.zcu;
+ return switch (ty.zigTypeTag(zcu)) {
.Float => Memory.Size.fromBitSize(ty.floatBits(self.target.*)),
- else => Memory.Size.fromSize(@intCast(ty.abiSize(pt))),
+ else => Memory.Size.fromSize(@intCast(ty.abiSize(zcu))),
};
}
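// For reference: floats use their exact bit width rather than their ABI size
// so that, e.g., an f80 access stays 80 bits wide instead of the 16-byte ABI
// slot it occupies. A sketch of that mapping, with the operand sizes spelled
// as assembly-style names (the real code returns Memory.Size tags):
fn floatMemSizeName(float_bits: u16) []const u8 {
    return switch (float_bits) {
        32 => "dword", // 4 bytes
        64 => "qword", // 8 bytes
        80 => "tbyte", // the x87 ten-byte operand
        else => "(handled via libcall widths)",
    };
}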
fn splitType(self: *Self, ty: Type) ![2]Type {
const pt = self.pt;
- const classes = mem.sliceTo(&abi.classifySystemV(ty, pt, self.target.*, .other), .none);
+ const zcu = pt.zcu;
+ const classes = mem.sliceTo(&abi.classifySystemV(ty, zcu, self.target.*, .other), .none);
var parts: [2]Type = undefined;
if (classes.len == 2) for (&parts, classes, 0..) |*part, class, part_i| {
part.* = switch (class) {
.integer => switch (part_i) {
0 => Type.u64,
1 => part: {
- const elem_size = ty.abiAlignment(pt).minStrict(.@"8").toByteUnits().?;
+ const elem_size = ty.abiAlignment(zcu).minStrict(.@"8").toByteUnits().?;
const elem_ty = try pt.intType(.unsigned, @intCast(elem_size * 8));
- break :part switch (@divExact(ty.abiSize(pt) - 8, elem_size)) {
+ break :part switch (@divExact(ty.abiSize(zcu) - 8, elem_size)) {
1 => elem_ty,
else => |len| try pt.arrayType(.{ .len = len, .child = elem_ty.toIntern() }),
};
@@ -19306,7 +19310,7 @@ fn splitType(self: *Self, ty: Type) ![2]Type {
.sse => Type.f64,
else => break,
};
- } else if (parts[0].abiSize(pt) + parts[1].abiSize(pt) == ty.abiSize(pt)) return parts;
+ } else if (parts[0].abiSize(zcu) + parts[1].abiSize(zcu) == ty.abiSize(zcu)) return parts;
return self.fail("TODO implement splitType for {}", .{ty.fmt(pt)});
}
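// For reference: a worked example of the split above. A 12-byte, 4-byte
// aligned aggregate classifies as { .integer, .integer } under SysV:
const Rgb96Sketch = extern struct { r: u32, g: u32, b: u32 };
// part 0: u64 (the first eightbyte, covering r and g)
// part 1: elem_size = min(align 4, 8) = 4; (12 - 8) / 4 = 1 element -> u32 (b)
// accepted because 8 + 4 == abiSize == 12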
@@ -19314,10 +19318,10 @@ fn splitType(self: *Self, ty: Type) ![2]Type {
/// Clobbers any remaining bits.
fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
const pt = self.pt;
- const mod = pt.zcu;
- const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
+ const zcu = pt.zcu;
+ const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{
.signedness = .unsigned,
- .bits = @intCast(ty.bitSize(pt)),
+ .bits = @intCast(ty.bitSize(zcu)),
};
const shift = math.cast(u6, 64 - int_info.bits % 64) orelse return;
try self.spillEflagsIfOccupied();
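// For reference: the shift above is how many high bits of the 64-bit
// register are dead. math.cast returns null when bits is a multiple of 64,
// i.e. the register is fully used and there is nothing to truncate:
const std = @import("std");
test "truncation shift sketch" {
    const bits: u32 = 37; // e.g. a u37 value
    try std.testing.expectEqual(@as(?u6, 27), std.math.cast(u6, 64 - bits % 64));
    const full: u32 = 64; // a u64 needs no truncation
    try std.testing.expectEqual(@as(?u6, null), std.math.cast(u6, 64 - full % 64));
}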
@@ -19362,9 +19366,9 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
fn regBitSize(self: *Self, ty: Type) u64 {
const pt = self.pt;
- const mod = pt.zcu;
- const abi_size = ty.abiSize(pt);
- return switch (ty.zigTypeTag(mod)) {
+ const zcu = pt.zcu;
+ const abi_size = ty.abiSize(zcu);
+ return switch (ty.zigTypeTag(zcu)) {
else => switch (abi_size) {
1 => 8,
2 => 16,
@@ -19381,7 +19385,7 @@ fn regBitSize(self: *Self, ty: Type) u64 {
}
fn regExtraBits(self: *Self, ty: Type) u64 {
- return self.regBitSize(ty) - ty.bitSize(self.pt);
+ return self.regBitSize(ty) - ty.bitSize(self.pt.zcu);
}
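// For reference: regExtraBits is the gap between the register holding a value
// and the value's own bit width. E.g. a u37 has abiSize 8, so it occupies a
// 64-bit register, leaving high bits that truncateRegister above must clear:
const u37_extra_bits = 64 - 37; // == 27 dead high bits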
fn hasFeature(self: *Self, feature: Target.x86.Feature) bool {
@@ -19396,14 +19400,14 @@ fn hasAllFeatures(self: *Self, features: anytype) bool {
fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
const pt = self.pt;
- const mod = pt.zcu;
- return self.air.typeOf(inst, &mod.intern_pool);
+ const zcu = pt.zcu;
+ return self.air.typeOf(inst, &zcu.intern_pool);
}
fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
const pt = self.pt;
- const mod = pt.zcu;
- return self.air.typeOfIndex(inst, &mod.intern_pool);
+ const zcu = pt.zcu;
+ return self.air.typeOfIndex(inst, &zcu.intern_pool);
}
fn intCompilerRtAbiName(int_bits: u32) u8 {
@@ -19455,17 +19459,17 @@ fn floatLibcAbiSuffix(ty: Type) []const u8 {
fn promoteInt(self: *Self, ty: Type) Type {
const pt = self.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const int_info: InternPool.Key.IntType = switch (ty.toIntern()) {
.bool_type => .{ .signedness = .unsigned, .bits = 1 },
- else => if (ty.isAbiInt(mod)) ty.intInfo(mod) else return ty,
+ else => if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else return ty,
};
for ([_]Type{
Type.c_int, Type.c_uint,
Type.c_long, Type.c_ulong,
Type.c_longlong, Type.c_ulonglong,
}) |promote_ty| {
- const promote_info = promote_ty.intInfo(mod);
+ const promote_info = promote_ty.intInfo(zcu);
if (int_info.signedness == .signed and promote_info.signedness == .unsigned) continue;
if (int_info.bits + @intFromBool(int_info.signedness == .unsigned and
promote_info.signedness == .signed) <= promote_info.bits) return promote_ty;
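// For reference: this mirrors C integer promotion. Candidates are tried in
// rank order, a signed source never promotes to an unsigned candidate, and an
// unsigned source needs one spare bit to land in a signed candidate. The bit
// arithmetic, assuming 32-bit c_int and c_uint as on x86_64-linux:
const std = @import("std");
test "promotion bit arithmetic sketch" {
    // u31 -> c_int: 31 + 1 (unsigned into signed) <= 32 bits
    try std.testing.expect(31 + 1 <= 32);
    // u32 -> c_uint: 32 + 1 > 32 rules out c_int, but 32 <= 32 fits c_uint
    try std.testing.expect(32 + 1 > 32);
    try std.testing.expect(32 <= 32);
}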