Diffstat (limited to 'src/arch/wasm/CodeGen.zig')
 -rw-r--r--  src/arch/wasm/CodeGen.zig | 1081
 1 file changed, 542 insertions(+), 539 deletions(-)
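
The diff below is a mechanical re-plumbing pass over the wasm backend: the local alias for `pt.zcu` is renamed from `mod` to `zcu`, and pure type-layout queries (`hasRuntimeBitsIgnoreComptime`, `abiSize`, `abiAlignment`, `intInfo`, `abi.classifyType`, `abi.scalarType`, ...) are now passed the `Zcu` directly instead of the whole `Zcu.PerThread`; `determineSimdStoreStrategy` changes its parameter from `pt: Zcu.PerThread` to `zcu: *Zcu` accordingly. A minimal sketch of the before/after shape, using hypothetical stand-in types rather than the real compiler declarations:

const std = @import("std");

// Hypothetical stand-ins for `Zcu` and `Zcu.PerThread`; only the calling
// pattern mirrors the diff, not the real compiler API.
const Zcu = struct {
    fn abiSize(zcu: *const Zcu, bits: u16) u64 {
        _ = zcu;
        return (bits + 7) / 8; // placeholder layout query
    }
};

const PerThread = struct { zcu: *const Zcu };

// Before: layout helpers took `pt` and aliased `pt.zcu` as `mod`.
fn abiSizeOld(pt: PerThread, bits: u16) u64 {
    const mod = pt.zcu;
    return mod.abiSize(bits);
}

// After: the alias is named `zcu`, and helpers that only query layout
// receive the `Zcu` directly; `pt` is threaded through only where
// per-thread state (interning, formatting) is still needed.
fn abiSizeNew(zcu: *const Zcu, bits: u16) u64 {
    return zcu.abiSize(bits);
}

pub fn main() void {
    const zcu: Zcu = .{};
    const pt: PerThread = .{ .zcu = &zcu };
    std.debug.print("{d} {d}\n", .{ abiSizeOld(pt, 33), abiSizeNew(&zcu, 33) });
}

Calls that still need per-thread state, such as `pt.intType(...)` and `ty.fmt(pt)`, keep taking `pt`, which is why both names remain in scope in many of the hunks below.
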
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index d2e9db8062..f36df7c444 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -788,10 +788,10 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
assert(!gop.found_existing);
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const val = (try func.air.value(ref, pt)).?;
const ty = func.typeOf(ref);
- if (!ty.hasRuntimeBitsIgnoreComptime(pt) and !ty.isInt(mod) and !ty.isError(mod)) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(zcu) and !ty.isInt(zcu) and !ty.isError(zcu)) {
gop.value_ptr.* = .none;
return gop.value_ptr.*;
}
@@ -1001,9 +1001,9 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
/// Using a given `Type`, returns the corresponding valtype for .auto callconv
fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype {
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- return switch (ty.zigTypeTag(mod)) {
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ return switch (ty.zigTypeTag(zcu)) {
.Float => switch (ty.floatBits(target)) {
16 => .i32, // stored/loaded as u16
32 => .f32,
@@ -1011,26 +1011,26 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype {
80, 128 => .i32,
else => unreachable,
},
- .Int, .Enum => switch (ty.intInfo(pt.zcu).bits) {
+ .Int, .Enum => switch (ty.intInfo(zcu).bits) {
0...32 => .i32,
33...64 => .i64,
else => .i32,
},
.Struct => blk: {
- if (pt.zcu.typeToPackedStruct(ty)) |packed_struct| {
+ if (zcu.typeToPackedStruct(ty)) |packed_struct| {
const backing_int_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
break :blk typeToValtype(backing_int_ty, pt, target);
} else {
break :blk .i32;
}
},
- .Vector => switch (determineSimdStoreStrategy(ty, pt, target)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, zcu, target)) {
.direct => .v128,
.unrolled => .i32,
},
- .Union => switch (ty.containerLayout(pt.zcu)) {
+ .Union => switch (ty.containerLayout(zcu)) {
.@"packed" => blk: {
- const int_ty = pt.intType(.unsigned, @as(u16, @intCast(ty.bitSize(pt)))) catch @panic("out of memory");
+ const int_ty = pt.intType(.unsigned, @as(u16, @intCast(ty.bitSize(zcu)))) catch @panic("out of memory");
break :blk typeToValtype(int_ty, pt, target);
},
else => .i32,
@@ -1148,7 +1148,7 @@ fn genFunctype(
pt: Zcu.PerThread,
target: std.Target,
) !wasm.Type {
- const mod = pt.zcu;
+ const zcu = pt.zcu;
var temp_params = std.ArrayList(wasm.Valtype).init(gpa);
defer temp_params.deinit();
var returns = std.ArrayList(wasm.Valtype).init(gpa);
@@ -1156,30 +1156,30 @@ fn genFunctype(
if (firstParamSRet(cc, return_type, pt, target)) {
try temp_params.append(.i32); // memory address is always a 32-bit handle
- } else if (return_type.hasRuntimeBitsIgnoreComptime(pt)) {
+ } else if (return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
if (cc == .C) {
- const res_classes = abi.classifyType(return_type, pt);
+ const res_classes = abi.classifyType(return_type, zcu);
assert(res_classes[0] == .direct and res_classes[1] == .none);
- const scalar_type = abi.scalarType(return_type, pt);
+ const scalar_type = abi.scalarType(return_type, zcu);
try returns.append(typeToValtype(scalar_type, pt, target));
} else {
try returns.append(typeToValtype(return_type, pt, target));
}
- } else if (return_type.isError(mod)) {
+ } else if (return_type.isError(zcu)) {
try returns.append(.i32);
}
// param types
for (params) |param_type_ip| {
const param_type = Type.fromInterned(param_type_ip);
- if (!param_type.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!param_type.hasRuntimeBitsIgnoreComptime(zcu)) continue;
switch (cc) {
.C => {
- const param_classes = abi.classifyType(param_type, pt);
+ const param_classes = abi.classifyType(param_type, zcu);
if (param_classes[1] == .none) {
if (param_classes[0] == .direct) {
- const scalar_type = abi.scalarType(param_type, pt);
+ const scalar_type = abi.scalarType(param_type, zcu);
try temp_params.append(typeToValtype(scalar_type, pt, target));
} else {
try temp_params.append(typeToValtype(param_type, pt, target));
@@ -1242,10 +1242,10 @@ pub fn generate(
fn genFunc(func: *CodeGen) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- const fn_ty = mod.navValue(func.owner_nav).typeOf(mod);
- const fn_info = mod.typeToFunc(fn_ty).?;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const fn_ty = zcu.navValue(func.owner_nav).typeOf(zcu);
+ const fn_info = zcu.typeToFunc(fn_ty).?;
var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*);
defer func_type.deinit(func.gpa);
_ = try func.bin_file.storeNavType(func.owner_nav, func_type);
@@ -1273,7 +1273,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
if (func_type.returns.len != 0 and func.air.instructions.len > 0) {
const inst: Air.Inst.Index = @enumFromInt(func.air.instructions.len - 1);
const last_inst_ty = func.typeOfIndex(inst);
- if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(pt) or last_inst_ty.isNoReturn(mod)) {
+ if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(zcu) or last_inst_ty.isNoReturn(zcu)) {
try func.addTag(.@"unreachable");
}
}
@@ -1356,9 +1356,9 @@ const CallWValues = struct {
fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues {
const pt = func.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- const fn_info = mod.typeToFunc(fn_ty).?;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const fn_info = zcu.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
var result: CallWValues = .{
.args = &.{},
@@ -1381,7 +1381,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
switch (cc) {
.Unspecified => {
for (fn_info.param_types.get(ip)) |ty| {
- if (!Type.fromInterned(ty).hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!Type.fromInterned(ty).hasRuntimeBitsIgnoreComptime(zcu)) {
continue;
}
@@ -1391,7 +1391,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
},
.C => {
for (fn_info.param_types.get(ip)) |ty| {
- const ty_classes = abi.classifyType(Type.fromInterned(ty), pt);
+ const ty_classes = abi.classifyType(Type.fromInterned(ty), zcu);
for (ty_classes) |class| {
if (class == .none) continue;
try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
@@ -1409,7 +1409,7 @@ fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.
switch (cc) {
.Unspecified, .Inline => return isByRef(return_type, pt, target),
.C => {
- const ty_classes = abi.classifyType(return_type, pt);
+ const ty_classes = abi.classifyType(return_type, pt.zcu);
if (ty_classes[0] == .indirect) return true;
if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true;
return false;
@@ -1426,16 +1426,16 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
}
const pt = func.pt;
- const mod = pt.zcu;
- const ty_classes = abi.classifyType(ty, pt);
+ const zcu = pt.zcu;
+ const ty_classes = abi.classifyType(ty, zcu);
assert(ty_classes[0] != .none);
- switch (ty.zigTypeTag(mod)) {
+ switch (ty.zigTypeTag(zcu)) {
.Struct, .Union => {
if (ty_classes[0] == .indirect) {
return func.lowerToStack(value);
}
assert(ty_classes[0] == .direct);
- const scalar_type = abi.scalarType(ty, pt);
+ const scalar_type = abi.scalarType(ty, zcu);
switch (value) {
.memory,
.memory_offset,
@@ -1450,7 +1450,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
return func.lowerToStack(value);
}
assert(ty_classes[0] == .direct and ty_classes[1] == .direct);
- assert(ty.abiSize(pt) == 16);
+ assert(ty.abiSize(zcu) == 16);
// in this case we have an integer or float that must be lowered as 2 i64's.
try func.emitWValue(value);
try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 });
@@ -1517,18 +1517,18 @@ fn restoreStackPointer(func: *CodeGen) !void {
///
/// Asserts Type has codegenbits
fn allocStack(func: *CodeGen, ty: Type) !WValue {
- const pt = func.pt;
- assert(ty.hasRuntimeBitsIgnoreComptime(pt));
+ const zcu = func.pt.zcu;
+ assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
if (func.initial_stack_value == .none) {
try func.initializeStack();
}
- const abi_size = std.math.cast(u32, ty.abiSize(pt)) orelse {
+ const abi_size = std.math.cast(u32, ty.abiSize(zcu)) orelse {
return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
- ty.fmt(pt), ty.abiSize(pt),
+ ty.fmt(func.pt), ty.abiSize(zcu),
});
};
- const abi_align = ty.abiAlignment(pt);
+ const abi_align = ty.abiAlignment(zcu);
func.stack_alignment = func.stack_alignment.max(abi_align);
@@ -1544,22 +1544,22 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
/// if it is set, to ensure the stack alignment will be set correctly.
fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ptr_ty = func.typeOfIndex(inst);
- const pointee_ty = ptr_ty.childType(mod);
+ const pointee_ty = ptr_ty.childType(zcu);
if (func.initial_stack_value == .none) {
try func.initializeStack();
}
- if (!pointee_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!pointee_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return func.allocStack(Type.usize); // create a value containing just the stack pointer.
}
- const abi_alignment = ptr_ty.ptrAlignment(pt);
- const abi_size = std.math.cast(u32, pointee_ty.abiSize(pt)) orelse {
+ const abi_alignment = ptr_ty.ptrAlignment(zcu);
+ const abi_size = std.math.cast(u32, pointee_ty.abiSize(zcu)) orelse {
return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
- pointee_ty.fmt(pt), pointee_ty.abiSize(pt),
+ pointee_ty.fmt(pt), pointee_ty.abiSize(zcu),
});
};
func.stack_alignment = func.stack_alignment.max(abi_alignment);
@@ -1716,9 +1716,9 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
/// For a given `Type`, will return true when the type will be passed
/// by reference, rather than by value
fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- switch (ty.zigTypeTag(mod)) {
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ switch (ty.zigTypeTag(zcu)) {
.Type,
.ComptimeInt,
.ComptimeFloat,
@@ -1738,41 +1738,41 @@ fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
.Array,
.Frame,
- => return ty.hasRuntimeBitsIgnoreComptime(pt),
+ => return ty.hasRuntimeBitsIgnoreComptime(zcu),
.Union => {
- if (mod.typeToUnion(ty)) |union_obj| {
+ if (zcu.typeToUnion(ty)) |union_obj| {
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
- return ty.abiSize(pt) > 8;
+ return ty.abiSize(zcu) > 8;
}
}
- return ty.hasRuntimeBitsIgnoreComptime(pt);
+ return ty.hasRuntimeBitsIgnoreComptime(zcu);
},
.Struct => {
- if (mod.typeToPackedStruct(ty)) |packed_struct| {
+ if (zcu.typeToPackedStruct(ty)) |packed_struct| {
return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt, target);
}
- return ty.hasRuntimeBitsIgnoreComptime(pt);
+ return ty.hasRuntimeBitsIgnoreComptime(zcu);
},
- .Vector => return determineSimdStoreStrategy(ty, pt, target) == .unrolled,
- .Int => return ty.intInfo(mod).bits > 64,
- .Enum => return ty.intInfo(mod).bits > 64,
+ .Vector => return determineSimdStoreStrategy(ty, zcu, target) == .unrolled,
+ .Int => return ty.intInfo(zcu).bits > 64,
+ .Enum => return ty.intInfo(zcu).bits > 64,
.Float => return ty.floatBits(target) > 64,
.ErrorUnion => {
- const pl_ty = ty.errorUnionPayload(mod);
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const pl_ty = ty.errorUnionPayload(zcu);
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return false;
}
return true;
},
.Optional => {
- if (ty.isPtrLikeOptional(mod)) return false;
- const pl_type = ty.optionalChild(mod);
- if (pl_type.zigTypeTag(mod) == .ErrorSet) return false;
- return pl_type.hasRuntimeBitsIgnoreComptime(pt);
+ if (ty.isPtrLikeOptional(zcu)) return false;
+ const pl_type = ty.optionalChild(zcu);
+ if (pl_type.zigTypeTag(zcu) == .ErrorSet) return false;
+ return pl_type.hasRuntimeBitsIgnoreComptime(zcu);
},
.Pointer => {
// Slices act like structs and will be passed by reference
- if (ty.isSlice(mod)) return true;
+ if (ty.isSlice(zcu)) return true;
return false;
},
}
@@ -1787,9 +1787,9 @@ const SimdStoreStrategy = enum {
/// This means when a given type is 128 bits and either the simd128 or relaxed-simd
/// features are enabled, the function will return `.direct`. This allows storing
/// it with a single instruction, rather than an unrolled version.
-fn determineSimdStoreStrategy(ty: Type, pt: Zcu.PerThread, target: std.Target) SimdStoreStrategy {
- std.debug.assert(ty.zigTypeTag(pt.zcu) == .Vector);
- if (ty.bitSize(pt) != 128) return .unrolled;
+fn determineSimdStoreStrategy(ty: Type, zcu: *Zcu, target: std.Target) SimdStoreStrategy {
+ std.debug.assert(ty.zigTypeTag(zcu) == .Vector);
+ if (ty.bitSize(zcu) != 128) return .unrolled;
const hasFeature = std.Target.wasm.featureSetHas;
const features = target.cpu.features;
if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) {
@@ -2069,8 +2069,8 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
for (body) |inst| {
if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) {
@@ -2091,37 +2091,37 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
- const fn_info = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?;
+ const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
const ret_ty = Type.fromInterned(fn_info.return_type);
// result must be stored in the stack and we return a pointer
// to the stack instead
if (func.return_value != .none) {
try func.store(func.return_value, operand, ret_ty, 0);
- } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
- switch (ret_ty.zigTypeTag(mod)) {
+ } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ switch (ret_ty.zigTypeTag(zcu)) {
// Aggregate types can be lowered as a singular value
.Struct, .Union => {
- const scalar_type = abi.scalarType(ret_ty, pt);
+ const scalar_type = abi.scalarType(ret_ty, zcu);
try func.emitWValue(operand);
const opcode = buildOpcode(.{
.op = .load,
- .width = @as(u8, @intCast(scalar_type.abiSize(pt) * 8)),
- .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
+ .width = @as(u8, @intCast(scalar_type.abiSize(zcu) * 8)),
+ .signedness = if (scalar_type.isSignedInt(zcu)) .signed else .unsigned,
.valtype1 = typeToValtype(scalar_type, pt, func.target.*),
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = operand.offset(),
- .alignment = @intCast(scalar_type.abiAlignment(pt).toByteUnits().?),
+ .alignment = @intCast(scalar_type.abiAlignment(zcu).toByteUnits().?),
});
},
else => try func.emitWValue(operand),
}
} else {
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and ret_ty.isError(mod)) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and ret_ty.isError(zcu)) {
try func.addImm32(0);
} else {
try func.emitWValue(operand);
@@ -2135,15 +2135,15 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
- const child_type = func.typeOfIndex(inst).childType(mod);
+ const zcu = pt.zcu;
+ const child_type = func.typeOfIndex(inst).childType(zcu);
const result = result: {
- if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(pt)) {
+ if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
break :result try func.allocStack(Type.usize); // create pointer to void
}
- const fn_info = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?;
+ const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
break :result func.return_value;
}
@@ -2156,14 +2156,14 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
- const ret_ty = func.typeOf(un_op).childType(mod);
+ const ret_ty = func.typeOf(un_op).childType(zcu);
- const fn_info = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?;
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) {
- if (ret_ty.isError(mod)) {
+ const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ if (ret_ty.isError(zcu)) {
try func.addImm32(0);
}
} else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) {
@@ -2184,15 +2184,15 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const ty = func.typeOf(pl_op.operand);
const pt = func.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- const fn_ty = switch (ty.zigTypeTag(mod)) {
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const fn_ty = switch (ty.zigTypeTag(zcu)) {
.Fn => ty,
- .Pointer => ty.childType(mod),
+ .Pointer => ty.childType(zcu),
else => unreachable,
};
- const ret_ty = fn_ty.fnReturnType(mod);
- const fn_info = mod.typeToFunc(fn_ty).?;
+ const ret_ty = fn_ty.fnReturnType(zcu);
+ const fn_info = zcu.typeToFunc(fn_ty).?;
const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*);
const callee: ?InternPool.Nav.Index = blk: {
@@ -2205,7 +2205,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
},
.@"extern" => |@"extern"| {
const ext_nav = ip.getNav(@"extern".owner_nav);
- const ext_info = mod.typeToFunc(Type.fromInterned(@"extern".ty)).?;
+ const ext_info = zcu.typeToFunc(Type.fromInterned(@"extern".ty)).?;
var func_type = try genFunctype(
func.gpa,
ext_info.cc,
@@ -2248,9 +2248,9 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const arg_val = try func.resolveInst(arg);
const arg_ty = func.typeOf(arg);
- if (!arg_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!arg_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
- try func.lowerArg(mod.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
+ try func.lowerArg(zcu.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
}
if (callee) |direct| {
@@ -2259,7 +2259,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else {
// in this case we call a function pointer
// so load its value onto the stack
- std.debug.assert(ty.zigTypeTag(mod) == .Pointer);
+ std.debug.assert(ty.zigTypeTag(zcu) == .Pointer);
const operand = try func.resolveInst(pl_op.operand);
try func.emitWValue(operand);
@@ -2271,18 +2271,18 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
}
const result_value = result_value: {
- if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt) and !ret_ty.isError(mod)) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {
break :result_value .none;
- } else if (ret_ty.isNoReturn(mod)) {
+ } else if (ret_ty.isNoReturn(zcu)) {
try func.addTag(.@"unreachable");
break :result_value .none;
} else if (first_param_sret) {
break :result_value sret;
// TODO: Make this less fragile and optimize
- } else if (mod.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) {
+ } else if (zcu.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(zcu) == .Struct or ret_ty.zigTypeTag(zcu) == .Union) {
const result_local = try func.allocLocal(ret_ty);
try func.addLabel(.local_set, result_local.local.value);
- const scalar_type = abi.scalarType(ret_ty, pt);
+ const scalar_type = abi.scalarType(ret_ty, zcu);
const result = try func.allocStack(scalar_type);
try func.store(result, result_local, scalar_type, 0);
break :result_value result;
@@ -2306,7 +2306,7 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@@ -2317,8 +2317,8 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
const ptr_ty = func.typeOf(bin_op.lhs);
- const ptr_info = ptr_ty.ptrInfo(mod);
- const ty = ptr_ty.childType(mod);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
+ const ty = ptr_ty.childType(zcu);
if (ptr_info.packed_offset.host_size == 0) {
try func.store(lhs, rhs, ty, 0);
@@ -2331,7 +2331,7 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
}
- var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(pt)))) - 1));
+ var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(zcu)))) - 1));
mask <<= @as(u6, @intCast(ptr_info.packed_offset.bit_offset));
mask ^= ~@as(u64, 0);
const shift_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
@@ -2343,9 +2343,9 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
else
.{ .imm64 = mask };
const wrap_mask_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
- .{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(pt))) }
+ .{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu))) }
else
- .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(pt)) };
+ .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu)) };
try func.emitWValue(lhs);
const loaded = try func.load(lhs, int_elem_ty, 0);
@@ -2366,12 +2366,12 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
assert(!(lhs != .stack and rhs == .stack));
const pt = func.pt;
- const mod = pt.zcu;
- const abi_size = ty.abiSize(pt);
- switch (ty.zigTypeTag(mod)) {
+ const zcu = pt.zcu;
+ const abi_size = ty.abiSize(zcu);
+ switch (ty.zigTypeTag(zcu)) {
.ErrorUnion => {
- const pl_ty = ty.errorUnionPayload(mod);
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const pl_ty = ty.errorUnionPayload(zcu);
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return func.store(lhs, rhs, Type.anyerror, 0);
}
@@ -2379,14 +2379,14 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
.Optional => {
- if (ty.isPtrLikeOptional(mod)) {
+ if (ty.isPtrLikeOptional(zcu)) {
return func.store(lhs, rhs, Type.usize, 0);
}
- const pl_ty = ty.optionalChild(mod);
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const pl_ty = ty.optionalChild(zcu);
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return func.store(lhs, rhs, Type.u8, 0);
}
- if (pl_ty.zigTypeTag(mod) == .ErrorSet) {
+ if (pl_ty.zigTypeTag(zcu) == .ErrorSet) {
return func.store(lhs, rhs, Type.anyerror, 0);
}
@@ -2397,7 +2397,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Vector => switch (determineSimdStoreStrategy(ty, pt, func.target.*)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, zcu, func.target.*)) {
.unrolled => {
const len: u32 = @intCast(abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
@@ -2411,13 +2411,13 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_store),
offset + lhs.offset(),
- @intCast(ty.abiAlignment(pt).toByteUnits() orelse 0),
+ @intCast(ty.abiAlignment(zcu).toByteUnits() orelse 0),
});
return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
},
},
.Pointer => {
- if (ty.isSlice(mod)) {
+ if (ty.isSlice(zcu)) {
// store pointer first
// lower it to the stack so we do not have to store rhs into a local first
try func.emitWValue(lhs);
@@ -2441,7 +2441,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
try func.store(.stack, msb, Type.u64, 8 + lhs.offset());
return;
} else if (abi_size > 16) {
- try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(pt))) });
+ try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) });
},
else => if (abi_size > 8) {
return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
@@ -2467,21 +2467,21 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
Mir.Inst.Tag.fromOpcode(opcode),
.{
.offset = offset + lhs.offset(),
- .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
},
);
}
fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = ty_op.ty.toType();
const ptr_ty = func.typeOf(ty_op.operand);
- const ptr_info = ptr_ty.ptrInfo(mod);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
- if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return func.finishAir(inst, .none, &.{ty_op.operand});
+ if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{ty_op.operand});
const result = result: {
if (isByRef(ty, pt, func.target.*)) {
@@ -2515,36 +2515,36 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// NOTE: Leaves the value on the stack.
fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
// load local's value from memory by its stack position
try func.emitWValue(operand);
- if (ty.zigTypeTag(mod) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .Vector) {
// TODO: Add helper functions for simd opcodes
const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
// stores as := opcode, offset, alignment (opcode::memarg)
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_load),
offset + operand.offset(),
- @intCast(ty.abiAlignment(pt).toByteUnits().?),
+ @intCast(ty.abiAlignment(zcu).toByteUnits().?),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return .stack;
}
- const abi_size: u8 = @intCast(ty.abiSize(pt));
+ const abi_size: u8 = @intCast(ty.abiSize(zcu));
const opcode = buildOpcode(.{
.valtype1 = typeToValtype(ty, pt, func.target.*),
.width = abi_size * 8,
.op = .load,
- .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
+ .signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned,
});
try func.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
.{
.offset = offset + operand.offset(),
- .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
},
);
@@ -2553,13 +2553,13 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const arg_index = func.arg_index;
const arg = func.args[arg_index];
- const cc = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?.cc;
+ const cc = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?.cc;
const arg_ty = func.typeOfIndex(inst);
if (cc == .C) {
- const arg_classes = abi.classifyType(arg_ty, pt);
+ const arg_classes = abi.classifyType(arg_ty, zcu);
for (arg_classes) |class| {
if (class != .none) {
func.arg_index += 1;
@@ -2569,7 +2569,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When we have an argument that's passed using more than a single parameter,
// we combine them into a single stack value
if (arg_classes[0] == .direct and arg_classes[1] == .direct) {
- if (arg_ty.zigTypeTag(mod) != .Int and arg_ty.zigTypeTag(mod) != .Float) {
+ if (arg_ty.zigTypeTag(zcu) != .Int and arg_ty.zigTypeTag(zcu) != .Float) {
return func.fail(
"TODO: Implement C-ABI argument for type '{}'",
.{arg_ty.fmt(pt)},
@@ -2602,6 +2602,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const pt = func.pt;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
@@ -2615,10 +2616,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
// For big integers we can ignore this as we will call into compiler-rt which handles this.
const result = switch (op) {
.shr, .shl => result: {
- const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(pt))) orelse {
+ const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(zcu))) orelse {
return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
- const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(pt))).?;
+ const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(zcu))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128)
try (try func.intcast(rhs, rhs_ty, lhs_ty)).toLocal(func, lhs_ty)
else
@@ -2635,7 +2636,7 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
/// NOTE: This leaves the value on top of the stack.
fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
assert(!(lhs != .stack and rhs == .stack));
if (ty.isAnyFloat()) {
@@ -2644,7 +2645,7 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
}
if (isByRef(ty, pt, func.target.*)) {
- if (ty.zigTypeTag(mod) == .Int) {
+ if (ty.zigTypeTag(zcu) == .Int) {
return func.binOpBigInt(lhs, rhs, ty, op);
} else {
return func.fail(
@@ -2657,7 +2658,7 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
const opcode: wasm.Opcode = buildOpcode(.{
.op = op,
.valtype1 = typeToValtype(ty, pt, func.target.*),
- .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
+ .signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned,
});
try func.emitWValue(lhs);
try func.emitWValue(rhs);
@@ -2669,8 +2670,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
- const int_info = ty.intInfo(mod);
+ const zcu = pt.zcu;
+ const int_info = ty.intInfo(zcu);
if (int_info.bits > 128) {
return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{});
}
@@ -2812,17 +2813,17 @@ const FloatOp = enum {
fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = func.typeOf(ty_op.operand);
- const scalar_ty = ty.scalarType(mod);
+ const scalar_ty = ty.scalarType(zcu);
- switch (scalar_ty.zigTypeTag(mod)) {
- .Int => if (ty.zigTypeTag(mod) == .Vector) {
+ switch (scalar_ty.zigTypeTag(zcu)) {
+ .Int => if (ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
} else {
- const int_bits = ty.intInfo(mod).bits;
+ const int_bits = ty.intInfo(zcu).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: airAbs for signed integers larger than '{d}' bits", .{int_bits});
};
@@ -2903,8 +2904,8 @@ fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError
fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
- if (ty.zigTypeTag(mod) == .Vector) {
+ const zcu = pt.zcu;
+ if (ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: Implement floatOps for vectors", .{});
}
@@ -3010,7 +3011,7 @@ fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try func.resolveInst(bin_op.lhs);
@@ -3018,7 +3019,7 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const lhs_ty = func.typeOf(bin_op.lhs);
const rhs_ty = func.typeOf(bin_op.rhs);
- if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) {
+ if (lhs_ty.zigTypeTag(zcu) == .Vector or rhs_ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: Implement wrapping arithmetic for vectors", .{});
}
@@ -3029,10 +3030,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
// For big integers we can ignore this as we will call into compiler-rt which handles this.
const result = switch (op) {
.shr, .shl => result: {
- const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(pt))) orelse {
+ const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(zcu))) orelse {
return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
- const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(pt))).?;
+ const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(zcu))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128)
try (try func.intcast(rhs, rhs_ty, lhs_ty)).toLocal(func, lhs_ty)
else
@@ -3058,9 +3059,9 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
/// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack, if wrapping was needed.
fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
- assert(ty.abiSize(pt) <= 16);
- const int_bits: u16 = @intCast(ty.bitSize(pt)); // TODO use ty.intInfo(mod).bits
+ const zcu = pt.zcu;
+ assert(ty.abiSize(zcu) <= 16);
+ const int_bits: u16 = @intCast(ty.bitSize(zcu)); // TODO use ty.intInfo(zcu).bits
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{int_bits});
};
@@ -3070,7 +3071,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
switch (wasm_bits) {
32 => {
try func.emitWValue(operand);
- if (ty.isSignedInt(mod)) {
+ if (ty.isSignedInt(zcu)) {
try func.addImm32(32 - int_bits);
try func.addTag(.i32_shl);
try func.addImm32(32 - int_bits);
@@ -3083,7 +3084,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
},
64 => {
try func.emitWValue(operand);
- if (ty.isSignedInt(mod)) {
+ if (ty.isSignedInt(zcu)) {
try func.addImm64(64 - int_bits);
try func.addTag(.i64_shl);
try func.addImm64(64 - int_bits);
@@ -3104,7 +3105,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
try func.emitWValue(result);
_ = try func.load(operand, Type.u64, 8);
- if (ty.isSignedInt(mod)) {
+ if (ty.isSignedInt(zcu)) {
try func.addImm64(128 - int_bits);
try func.addTag(.i64_shl);
try func.addImm64(128 - int_bits);
@@ -3145,13 +3146,13 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr
};
},
.Struct => switch (base_ty.containerLayout(zcu)) {
- .auto => base_ty.structFieldOffset(@intCast(field.index), pt),
+ .auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
.@"extern", .@"packed" => unreachable,
},
.Union => switch (base_ty.containerLayout(zcu)) {
.auto => off: {
// Keep in sync with the `un` case of `generateSymbol`.
- const layout = base_ty.unionGetLayout(pt);
+ const layout = base_ty.unionGetLayout(zcu);
if (layout.payload_size == 0) break :off 0;
if (layout.tag_size == 0) break :off 0;
if (layout.tag_align.compare(.gte, layout.payload_align)) {
@@ -3178,15 +3179,15 @@ fn lowerUavRef(
offset: u32,
) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
- const ty = Type.fromInterned(mod.intern_pool.typeOf(uav.val));
+ const zcu = pt.zcu;
+ const ty = Type.fromInterned(zcu.intern_pool.typeOf(uav.val));
- const is_fn_body = ty.zigTypeTag(mod) == .Fn;
- if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const is_fn_body = ty.zigTypeTag(zcu) == .Fn;
+ if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return .{ .imm32 = 0xaaaaaaaa };
}
- const decl_align = mod.intern_pool.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
+ const decl_align = zcu.intern_pool.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
const res = try func.bin_file.lowerUav(pt, uav.val, decl_align, func.src_loc);
const target_sym_index = switch (res) {
.mcv => |mcv| mcv.load_symbol,
@@ -3204,19 +3205,19 @@ fn lowerUavRef(
fn lowerNavRef(func: *CodeGen, nav_index: InternPool.Nav.Index, offset: u32) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
// check if decl is an alias to a function, in which case we
// want to lower the actual decl, rather than the alias itself.
- const owner_nav = switch (ip.indexToKey(mod.navValue(nav_index).toIntern())) {
+ const owner_nav = switch (ip.indexToKey(zcu.navValue(nav_index).toIntern())) {
.func => |function| function.owner_nav,
.variable => |variable| variable.owner_nav,
.@"extern" => |@"extern"| @"extern".owner_nav,
else => nav_index,
};
const nav_ty = ip.getNav(owner_nav).typeOf(ip);
- if (!ip.isFunctionType(nav_ty) and !Type.fromInterned(nav_ty).hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!ip.isFunctionType(nav_ty) and !Type.fromInterned(nav_ty).hasRuntimeBitsIgnoreComptime(zcu)) {
return .{ .imm32 = 0xaaaaaaaa };
}
@@ -3234,10 +3235,10 @@ fn lowerNavRef(func: *CodeGen, nav_index: InternPool.Nav.Index, offset: u32) Inn
/// Asserts that `isByRef` returns `false` for `ty`.
fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
assert(!isByRef(ty, pt, func.target.*));
- const ip = &mod.intern_pool;
- if (val.isUndefDeep(mod)) return func.emitUndefined(ty);
+ const ip = &zcu.intern_pool;
+ if (val.isUndefDeep(zcu)) return func.emitUndefined(ty);
switch (ip.indexToKey(val.ip_index)) {
.int_type,
@@ -3280,16 +3281,16 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.empty_enum_value,
=> unreachable, // non-runtime values
.int => {
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
switch (int_info.signedness) {
.signed => switch (int_info.bits) {
- 0...32 => return .{ .imm32 = @bitCast(@as(i32, @intCast(val.toSignedInt(pt)))) },
- 33...64 => return .{ .imm64 = @bitCast(val.toSignedInt(pt)) },
+ 0...32 => return .{ .imm32 = @bitCast(@as(i32, @intCast(val.toSignedInt(zcu)))) },
+ 33...64 => return .{ .imm64 = @bitCast(val.toSignedInt(zcu)) },
else => unreachable,
},
.unsigned => switch (int_info.bits) {
- 0...32 => return .{ .imm32 = @intCast(val.toUnsignedInt(pt)) },
- 33...64 => return .{ .imm64 = val.toUnsignedInt(pt) },
+ 0...32 => return .{ .imm32 = @intCast(val.toUnsignedInt(zcu)) },
+ 33...64 => return .{ .imm64 = val.toUnsignedInt(zcu) },
else => unreachable,
},
}
@@ -3302,9 +3303,9 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
const err_int_ty = try pt.errorIntType();
const err_ty, const err_val = switch (error_union.val) {
.err_name => |err_name| .{
- ty.errorUnionSet(mod),
+ ty.errorUnionSet(zcu),
Value.fromInterned(try pt.intern(.{ .err = .{
- .ty = ty.errorUnionSet(mod).toIntern(),
+ .ty = ty.errorUnionSet(zcu).toIntern(),
.name = err_name,
} })),
},
@@ -3313,8 +3314,8 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
try pt.intValue(err_int_ty, 0),
},
};
- const payload_type = ty.errorUnionPayload(mod);
- if (!payload_type.hasRuntimeBitsIgnoreComptime(pt)) {
+ const payload_type = ty.errorUnionPayload(zcu);
+ if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
// We use the error type directly as the type.
return func.lowerConstant(err_val, err_ty);
}
@@ -3339,20 +3340,20 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
},
},
.ptr => return func.lowerPtr(val.toIntern(), 0),
- .opt => if (ty.optionalReprIsPayload(mod)) {
- const pl_ty = ty.optionalChild(mod);
- if (val.optionalValue(mod)) |payload| {
+ .opt => if (ty.optionalReprIsPayload(zcu)) {
+ const pl_ty = ty.optionalChild(zcu);
+ if (val.optionalValue(zcu)) |payload| {
return func.lowerConstant(payload, pl_ty);
} else {
return .{ .imm32 = 0 };
}
} else {
- return .{ .imm32 = @intFromBool(!val.isNull(mod)) };
+ return .{ .imm32 = @intFromBool(!val.isNull(zcu)) };
},
.aggregate => switch (ip.indexToKey(ty.ip_index)) {
.array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}),
.vector_type => {
- assert(determineSimdStoreStrategy(ty, pt, func.target.*) == .direct);
+ assert(determineSimdStoreStrategy(ty, zcu, func.target.*) == .direct);
var buf: [16]u8 = undefined;
val.writeToMemory(ty, pt, &buf) catch unreachable;
return func.storeSimdImmd(buf);
@@ -3378,8 +3379,8 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
const constant_ty = if (un.tag == .none)
try ty.unionBackingType(pt)
else field_ty: {
- const union_obj = mod.typeToUnion(ty).?;
- const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
+ const union_obj = zcu.typeToUnion(ty).?;
+ const field_index = zcu.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
break :field_ty Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
};
return func.lowerConstant(Value.fromInterned(un.val), constant_ty);
@@ -3398,11 +3399,11 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
- switch (ty.zigTypeTag(mod)) {
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ switch (ty.zigTypeTag(zcu)) {
.Bool, .ErrorSet => return .{ .imm32 = 0xaaaaaaaa },
- .Int, .Enum => switch (ty.intInfo(mod).bits) {
+ .Int, .Enum => switch (ty.intInfo(zcu).bits) {
0...32 => return .{ .imm32 = 0xaaaaaaaa },
33...64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
else => unreachable,
@@ -3419,8 +3420,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
else => unreachable,
},
.Optional => {
- const pl_ty = ty.optionalChild(mod);
- if (ty.optionalReprIsPayload(mod)) {
+ const pl_ty = ty.optionalChild(zcu);
+ if (ty.optionalReprIsPayload(zcu)) {
return func.emitUndefined(pl_ty);
}
return .{ .imm32 = 0xaaaaaaaa };
@@ -3429,10 +3430,10 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
return .{ .imm32 = 0xaaaaaaaa };
},
.Struct => {
- const packed_struct = mod.typeToPackedStruct(ty).?;
+ const packed_struct = zcu.typeToPackedStruct(ty).?;
return func.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)));
},
- else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
+ else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(zcu)}),
}
}
@@ -3441,8 +3442,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
/// as an integer value.
fn valueAsI32(func: *const CodeGen, val: Value) i32 {
const pt = func.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
switch (val.toIntern()) {
.bool_true => return 1,
@@ -3465,12 +3466,13 @@ fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, pt: Zcu.PerThread
}
fn intStorageAsI32(storage: InternPool.Key.Int.Storage, pt: Zcu.PerThread) i32 {
+ const zcu = pt.zcu;
return switch (storage) {
.i64 => |x| @as(i32, @intCast(x)),
.u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
.big_int => unreachable,
- .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(pt).toByteUnits() orelse 0)))),
- .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiSize(pt))))),
+ .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(zcu).toByteUnits() orelse 0)))),
+ .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiSize(zcu))))),
};
}
@@ -3599,10 +3601,10 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In
fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
assert(!(lhs != .stack and rhs == .stack));
const pt = func.pt;
- const mod = pt.zcu;
- if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) {
- const payload_ty = ty.optionalChild(mod);
- if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const zcu = pt.zcu;
+ if (ty.zigTypeTag(zcu) == .Optional and !ty.optionalReprIsPayload(zcu)) {
+ const payload_ty = ty.optionalChild(zcu);
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// When we hit this case, we must check the value of optionals
// that are not pointers. This means first checking against non-null for
// both lhs and rhs, as well as checking that the payloads of lhs and rhs match
@@ -3616,10 +3618,10 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
const signedness: std.builtin.Signedness = blk: {
// by default we treat the operand type as unsigned (i.e. bools and enum values)
- if (ty.zigTypeTag(mod) != .Int) break :blk .unsigned;
+ if (ty.zigTypeTag(zcu) != .Int) break :blk .unsigned;
// in case of an actual integer, we emit the correct signedness
- break :blk ty.intInfo(mod).signedness;
+ break :blk ty.intInfo(zcu).signedness;
};
// ensure that when we compare pointers, we emit
@@ -3708,12 +3710,12 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+ const zcu = func.pt.zcu;
const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br;
const block = func.blocks.get(br.block_inst).?;
// if operand has codegen bits we should break with a value
- if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(pt)) {
+ if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(zcu)) {
const operand = try func.resolveInst(br.operand);
try func.lowerToStack(operand);
@@ -3736,17 +3738,17 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const operand_ty = func.typeOf(ty_op.operand);
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const result = result: {
- if (operand_ty.zigTypeTag(mod) == .Bool) {
+ if (operand_ty.zigTypeTag(zcu) == .Bool) {
try func.emitWValue(operand);
try func.addTag(.i32_eqz);
const not_tmp = try func.allocLocal(operand_ty);
try func.addLabel(.local_set, not_tmp.local.value);
break :result not_tmp;
} else {
- const int_info = operand_ty.intInfo(mod);
+ const int_info = operand_ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)});
};
@@ -3816,14 +3818,14 @@ fn airUnreachable(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const wanted_ty = func.typeOfIndex(inst);
const given_ty = func.typeOf(ty_op.operand);
- const bit_size = given_ty.bitSize(pt);
- const needs_wrapping = (given_ty.isSignedInt(mod) != wanted_ty.isSignedInt(mod)) and
+ const bit_size = given_ty.bitSize(zcu);
+ const needs_wrapping = (given_ty.isSignedInt(zcu) != wanted_ty.isSignedInt(zcu)) and
bit_size != 32 and bit_size != 64 and bit_size != 128;
const result = result: {
@@ -3860,12 +3862,12 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
// if we bitcast a float to or from an integer we must use the 'reinterpret' instruction
if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand;
if (wanted_ty.ip_index == .f16_type or given_ty.ip_index == .f16_type) return operand;
- if (wanted_ty.bitSize(pt) > 64) return operand;
- assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod)));
+ if (wanted_ty.bitSize(zcu) > 64) return operand;
+ assert((wanted_ty.isInt(zcu) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(zcu)));
const opcode = buildOpcode(.{
.op = .reinterpret,
@@ -3879,24 +3881,24 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn
fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.StructField, ty_pl.payload);
const struct_ptr = try func.resolveInst(extra.data.struct_operand);
const struct_ptr_ty = func.typeOf(extra.data.struct_operand);
- const struct_ty = struct_ptr_ty.childType(mod);
+ const struct_ty = struct_ptr_ty.childType(zcu);
const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ptr_ty, struct_ty, extra.data.field_index);
return func.finishAir(inst, result, &.{extra.data.struct_operand});
}
fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const struct_ptr = try func.resolveInst(ty_op.operand);
const struct_ptr_ty = func.typeOf(ty_op.operand);
- const struct_ty = struct_ptr_ty.childType(mod);
+ const struct_ty = struct_ptr_ty.childType(zcu);
const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ptr_ty, struct_ty, index);
return func.finishAir(inst, result, &.{ty_op.operand});
@@ -3912,23 +3914,23 @@ fn structFieldPtr(
index: u32,
) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const result_ty = func.typeOfIndex(inst);
- const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
+ const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(zcu);
- const offset = switch (struct_ty.containerLayout(mod)) {
- .@"packed" => switch (struct_ty.zigTypeTag(mod)) {
+ const offset = switch (struct_ty.containerLayout(zcu)) {
+ .@"packed" => switch (struct_ty.zigTypeTag(zcu)) {
.Struct => offset: {
- if (result_ty.ptrInfo(mod).packed_offset.host_size != 0) {
+ if (result_ty.ptrInfo(zcu).packed_offset.host_size != 0) {
break :offset @as(u32, 0);
}
- const struct_type = mod.typeToStruct(struct_ty).?;
+ const struct_type = zcu.typeToStruct(struct_ty).?;
break :offset @divExact(pt.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
},
.Union => 0,
else => unreachable,
},
- else => struct_ty.structFieldOffset(index, pt),
+ else => struct_ty.structFieldOffset(index, zcu),
};
// save a load and store when we can simply reuse the operand
if (offset == 0) {
@@ -3944,24 +3946,24 @@ fn structFieldPtr(
fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ty = func.typeOf(struct_field.struct_operand);
const operand = try func.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
- const field_ty = struct_ty.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
+ const field_ty = struct_ty.fieldType(field_index, zcu);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
- const result: WValue = switch (struct_ty.containerLayout(mod)) {
- .@"packed" => switch (struct_ty.zigTypeTag(mod)) {
+ const result: WValue = switch (struct_ty.containerLayout(zcu)) {
+ .@"packed" => switch (struct_ty.zigTypeTag(zcu)) {
.Struct => result: {
- const packed_struct = mod.typeToPackedStruct(struct_ty).?;
+ const packed_struct = zcu.typeToPackedStruct(struct_ty).?;
const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
- const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
+ const wasm_bits = toWasmBits(backing_ty.intInfo(zcu).bits) orelse {
return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
};
const const_wvalue: WValue = if (wasm_bits == 32)
@@ -3977,16 +3979,16 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else
try func.binOp(operand, const_wvalue, backing_ty, .shr);
- if (field_ty.zigTypeTag(mod) == .Float) {
- const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt))));
+ if (field_ty.zigTypeTag(zcu) == .Float) {
+ const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
break :result try func.bitcast(field_ty, int_type, truncated);
- } else if (field_ty.isPtrAtRuntime(mod) and packed_struct.field_types.len == 1) {
+ } else if (field_ty.isPtrAtRuntime(zcu) and packed_struct.field_types.len == 1) {
// In this case we do not have to perform any transformations,
// we can simply reuse the operand.
break :result func.reuseOperand(struct_field.struct_operand, operand);
- } else if (field_ty.isPtrAtRuntime(mod)) {
- const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt))));
+ } else if (field_ty.isPtrAtRuntime(zcu)) {
+ const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
break :result try func.trunc(shifted_value, int_type, backing_ty);
}
break :result try func.trunc(shifted_value, field_ty, backing_ty);
@@ -4002,13 +4004,13 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
}
- const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(pt))));
- if (field_ty.zigTypeTag(mod) == .Float) {
- const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt))));
+ const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(zcu))));
+ if (field_ty.zigTypeTag(zcu) == .Float) {
+ const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
const truncated = try func.trunc(operand, int_type, union_int_type);
break :result try func.bitcast(field_ty, int_type, truncated);
- } else if (field_ty.isPtrAtRuntime(mod)) {
- const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(pt))));
+ } else if (field_ty.isPtrAtRuntime(zcu)) {
+ const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
break :result try func.trunc(operand, int_type, union_int_type);
}
break :result try func.trunc(operand, field_ty, union_int_type);
@@ -4016,7 +4018,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
},
else => result: {
- const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, pt)) orelse {
+ const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, zcu)) orelse {
return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)});
};
if (isByRef(field_ty, pt, func.target.*)) {
@@ -4036,7 +4038,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
// result type is always 'noreturn'
const blocktype = wasm.block_empty;
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
@@ -4093,7 +4095,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
            // When the target is an integer type larger than u32, we have no way to use the value
            // as an index, so we also use an if/else-chain for those cases.
// TODO: Benchmark this to find a proper value, LLVM seems to draw the line at '40~45'.
- const is_sparse = highest - lowest > 50 or target_ty.bitSize(pt) > 32;
+ const is_sparse = highest - lowest > 50 or target_ty.bitSize(zcu) > 32;
const else_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra_index..][0..switch_br.data.else_body_len]);
const has_else_body = else_body.len != 0;
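For reference, a minimal standalone sketch (illustrative only, not part of this diff) of the density heuristic used above; the cutoff of 50 and the 32-bit limit on br_table indices come from the preceding lines, while the function and test names are invented here:

const std = @import("std");

// Returns true when a br_table (jump table) lowering is chosen,
// false when an if/else chain is used instead.
fn useJumpTable(lowest: u64, highest: u64, target_bit_size: u16) bool {
    const is_sparse = highest - lowest > 50 or target_bit_size > 32;
    return !is_sparse;
}

test "useJumpTable" {
    try std.testing.expect(useJumpTable(0, 40, 16)); // dense range, small type
    try std.testing.expect(!useJumpTable(0, 1000, 16)); // sparse range: if/else chain
    try std.testing.expect(!useJumpTable(0, 40, 64)); // a u64 cannot index a br_table
}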
@@ -4138,7 +4140,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// for errors that are not present in any branch. This is fine as this default
// case will never be hit for those cases but we do save runtime cost and size
// by using a jump table for this instead of if-else chains.
- break :blk if (has_else_body or target_ty.zigTypeTag(mod) == .ErrorSet) case_i else unreachable;
+ break :blk if (has_else_body or target_ty.zigTypeTag(zcu) == .ErrorSet) case_i else unreachable;
};
func.mir_extra.appendAssumeCapacity(idx);
} else if (has_else_body) {
@@ -4149,10 +4151,10 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const signedness: std.builtin.Signedness = blk: {
        // by default we assume the operand type is unsigned (i.e. bools and enum values)
- if (target_ty.zigTypeTag(mod) != .Int) break :blk .unsigned;
+ if (target_ty.zigTypeTag(zcu) != .Int) break :blk .unsigned;
        // in case of an actual integer, we emit the correct signedness
- break :blk target_ty.intInfo(mod).signedness;
+ break :blk target_ty.intInfo(zcu).signedness;
};
try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @intFromBool(has_else_body));
@@ -4217,14 +4219,14 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
const err_union_ty = func.typeOf(un_op);
- const pl_ty = err_union_ty.errorUnionPayload(mod);
+ const pl_ty = err_union_ty.errorUnionPayload(zcu);
const result: WValue = result: {
- if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+ if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
switch (opcode) {
.i32_ne => break :result .{ .imm32 = 0 },
.i32_eq => break :result .{ .imm32 = 1 },
@@ -4233,10 +4235,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
}
try func.emitWValue(operand);
- if (pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try func.addMemArg(.i32_load16_u, .{
- .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, pt))),
- .alignment = @intCast(Type.anyerror.abiAlignment(pt).toByteUnits().?),
+ .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, zcu))),
+ .alignment = @intCast(Type.anyerror.abiAlignment(zcu).toByteUnits().?),
});
}
@@ -4250,23 +4252,23 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.typeOf(ty_op.operand);
- const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty;
- const payload_ty = err_ty.errorUnionPayload(mod);
+ const err_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty;
+ const payload_ty = err_ty.errorUnionPayload(zcu);
const result: WValue = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
if (op_is_ptr) {
break :result func.reuseOperand(ty_op.operand, operand);
}
break :result .none;
}
- const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt)));
+ const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu)));
if (op_is_ptr or isByRef(payload_ty, pt, func.target.*)) {
break :result try func.buildPointerOffset(operand, pl_offset, .new);
}
@@ -4278,30 +4280,30 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.typeOf(ty_op.operand);
- const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty;
- const payload_ty = err_ty.errorUnionPayload(mod);
+ const err_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty;
+ const payload_ty = err_ty.errorUnionPayload(zcu);
const result: WValue = result: {
- if (err_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+ if (err_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
break :result .{ .imm32 = 0 };
}
- if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- break :result try func.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, pt)));
+ break :result try func.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, zcu)));
};
return func.finishAir(inst, result, &.{ty_op.operand});
}
fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+ const zcu = func.pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4309,18 +4311,18 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
const pl_ty = func.typeOf(ty_op.operand);
const result = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
const err_union = try func.allocStack(err_ty);
- const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, pt))), .new);
+ const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new);
try func.store(payload_ptr, operand, pl_ty, 0);
        // ensure we also write '0' to the error part, so any stale stack value gets overwritten by it.
try func.emitWValue(err_union);
try func.addImm32(0);
- const err_val_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, pt));
+ const err_val_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
try func.addMemArg(.i32_store16, .{
.offset = err_union.offset() + err_val_offset,
.alignment = 2,
@@ -4332,25 +4334,25 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const err_ty = ty_op.ty.toType();
- const pl_ty = err_ty.errorUnionPayload(mod);
+ const pl_ty = err_ty.errorUnionPayload(zcu);
const result = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
const err_union = try func.allocStack(err_ty);
// store error value
- try func.store(err_union, operand, Type.anyerror, @intCast(errUnionErrorOffset(pl_ty, pt)));
+ try func.store(err_union, operand, Type.anyerror, @intCast(errUnionErrorOffset(pl_ty, zcu)));
// write 'undefined' to the payload
- const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, pt))), .new);
- const len = @as(u32, @intCast(err_ty.errorUnionPayload(mod).abiSize(pt)));
+ const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new);
+ const len = @as(u32, @intCast(err_ty.errorUnionPayload(zcu).abiSize(zcu)));
try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
break :result err_union;
@@ -4365,16 +4367,16 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const operand_ty = func.typeOf(ty_op.operand);
const pt = func.pt;
- const mod = pt.zcu;
- if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) {
+ const zcu = pt.zcu;
+ if (ty.zigTypeTag(zcu) == .Vector or operand_ty.zigTypeTag(zcu) == .Vector) {
return func.fail("todo Wasm intcast for vectors", .{});
}
- if (ty.abiSize(pt) > 16 or operand_ty.abiSize(pt) > 16) {
+ if (ty.abiSize(zcu) > 16 or operand_ty.abiSize(zcu) > 16) {
return func.fail("todo Wasm intcast for bitsize > 128", .{});
}
- const op_bits = toWasmBits(@intCast(operand_ty.bitSize(pt))).?;
- const wanted_bits = toWasmBits(@intCast(ty.bitSize(pt))).?;
+ const op_bits = toWasmBits(@intCast(operand_ty.bitSize(zcu))).?;
+ const wanted_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?;
const result = if (op_bits == wanted_bits)
func.reuseOperand(ty_op.operand, operand)
else
@@ -4389,9 +4391,9 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// NOTE: May leave the result on the top of the stack.
fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
- const given_bitsize = @as(u16, @intCast(given.bitSize(pt)));
- const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(pt)));
+ const zcu = pt.zcu;
+ const given_bitsize = @as(u16, @intCast(given.bitSize(zcu)));
+ const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(zcu)));
assert(given_bitsize <= 128);
assert(wanted_bitsize <= 128);
@@ -4407,7 +4409,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
return .stack;
} else if (op_bits == 32 and wanted_bits == 64) {
try func.emitWValue(operand);
- try func.addTag(if (wanted.isSignedInt(mod)) .i64_extend_i32_s else .i64_extend_i32_u);
+ try func.addTag(if (wanted.isSignedInt(zcu)) .i64_extend_i32_s else .i64_extend_i32_u);
return .stack;
} else if (wanted_bits == 128) {
// for 128bit integers we store the integer in the virtual stack, rather than a local
@@ -4417,7 +4419,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
// for 32 bit integers, we first coerce the value into a 64 bit integer before storing it
        // meaning fewer store operations are required.
const lhs = if (op_bits == 32) blk: {
- const sign_ty = if (wanted.isSignedInt(mod)) Type.i64 else Type.u64;
+ const sign_ty = if (wanted.isSignedInt(zcu)) Type.i64 else Type.u64;
break :blk try (try func.intcast(operand, given, sign_ty)).toLocal(func, sign_ty);
} else operand;
@@ -4425,7 +4427,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
try func.store(.stack, lhs, Type.u64, 0 + stack_ptr.offset());
// For signed integers we shift lsb by 63 (64bit integer - 1 sign bit) and store remaining value
- if (wanted.isSignedInt(mod)) {
+ if (wanted.isSignedInt(zcu)) {
try func.emitWValue(stack_ptr);
const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
try func.store(.stack, shr, Type.u64, 8 + stack_ptr.offset());
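As an aside, a small sketch (illustrative only, not part of this diff) of the sign-extension step described in the comment above: for signed operands the upper 64 bits of the widened 128-bit value are just the sign bit replicated, which is exactly what the arithmetic shift right by 63 produces. The helper and test names are invented.

const std = @import("std");

// Computes the high word written for a signed 64-to-128-bit widening.
fn highWord(lo: i64) u64 {
    return @bitCast(lo >> 63); // all ones for negative values, zero otherwise
}

test "highWord" {
    try std.testing.expectEqual(@as(u64, 0), highWord(42));
    try std.testing.expectEqual(~@as(u64, 0), highWord(-1));
    try std.testing.expectEqual(~@as(u64, 0), highWord(std.math.minInt(i64)));
}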
@@ -4439,12 +4441,12 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
const op_ty = func.typeOf(un_op);
- const optional_ty = if (op_kind == .ptr) op_ty.childType(mod) else op_ty;
+ const optional_ty = if (op_kind == .ptr) op_ty.childType(zcu) else op_ty;
const result = try func.isNull(operand, optional_ty, opcode);
return func.finishAir(inst, result, &.{un_op});
}
@@ -4453,19 +4455,19 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
/// NOTE: Leaves the result on the stack
fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
try func.emitWValue(operand);
- const payload_ty = optional_ty.optionalChild(mod);
- if (!optional_ty.optionalReprIsPayload(mod)) {
+ const payload_ty = optional_ty.optionalChild(zcu);
+ if (!optional_ty.optionalReprIsPayload(zcu)) {
// When payload is zero-bits, we can treat operand as a value, rather than
// a pointer to the stack value
- if (payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
- const offset = std.math.cast(u32, payload_ty.abiSize(pt)) orelse {
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)});
};
try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
}
- } else if (payload_ty.isSlice(mod)) {
+ } else if (payload_ty.isSlice(zcu)) {
switch (func.arch()) {
.wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }),
.wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }),
@@ -4482,17 +4484,17 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod
fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const opt_ty = func.typeOf(ty_op.operand);
const payload_ty = func.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return func.finishAir(inst, .none, &.{ty_op.operand});
}
const result = result: {
const operand = try func.resolveInst(ty_op.operand);
- if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand);
+ if (opt_ty.optionalReprIsPayload(zcu)) break :result func.reuseOperand(ty_op.operand, operand);
if (isByRef(payload_ty, pt, func.target.*)) {
break :result try func.buildPointerOffset(operand, 0, .new);
@@ -4505,14 +4507,14 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const opt_ty = func.typeOf(ty_op.operand).childType(mod);
+ const opt_ty = func.typeOf(ty_op.operand).childType(zcu);
const result = result: {
- const payload_ty = opt_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt) or opt_ty.optionalReprIsPayload(mod)) {
+ const payload_ty = opt_ty.optionalChild(zcu);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu) or opt_ty.optionalReprIsPayload(zcu)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
@@ -4523,20 +4525,20 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const opt_ty = func.typeOf(ty_op.operand).childType(mod);
- const payload_ty = opt_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ const opt_ty = func.typeOf(ty_op.operand).childType(zcu);
+ const payload_ty = opt_ty.optionalChild(zcu);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
}
- if (opt_ty.optionalReprIsPayload(mod)) {
+ if (opt_ty.optionalReprIsPayload(zcu)) {
return func.finishAir(inst, operand, &.{ty_op.operand});
}
- const offset = std.math.cast(u32, payload_ty.abiSize(pt)) orelse {
+ const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)});
};
@@ -4552,10 +4554,10 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const payload_ty = func.typeOf(ty_op.operand);
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const non_null_bit = try func.allocStack(Type.u1);
try func.emitWValue(non_null_bit);
try func.addImm32(1);
@@ -4565,10 +4567,10 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.typeOfIndex(inst);
- if (op_ty.optionalReprIsPayload(mod)) {
+ if (op_ty.optionalReprIsPayload(zcu)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- const offset = std.math.cast(u32, payload_ty.abiSize(pt)) orelse {
+ const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)});
};
@@ -4610,14 +4612,14 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const slice_ty = func.typeOf(bin_op.lhs);
const slice = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
- const elem_ty = slice_ty.childType(mod);
- const elem_size = elem_ty.abiSize(pt);
+ const elem_ty = slice_ty.childType(zcu);
+ const elem_size = elem_ty.abiSize(zcu);
// load pointer onto stack
_ = try func.load(slice, Type.usize, 0);
@@ -4638,12 +4640,12 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
- const elem_ty = ty_pl.ty.toType().childType(mod);
- const elem_size = elem_ty.abiSize(pt);
+ const elem_ty = ty_pl.ty.toType().childType(zcu);
+ const elem_size = elem_ty.abiSize(zcu);
const slice = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
@@ -4682,13 +4684,13 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const wanted_ty: Type = ty_op.ty.toType();
const op_ty = func.typeOf(ty_op.operand);
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
- if (wanted_ty.zigTypeTag(mod) == .Vector or op_ty.zigTypeTag(mod) == .Vector) {
+ if (wanted_ty.zigTypeTag(zcu) == .Vector or op_ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: trunc for vectors", .{});
}
- const result = if (op_ty.bitSize(pt) == wanted_ty.bitSize(pt))
+ const result = if (op_ty.bitSize(zcu) == wanted_ty.bitSize(zcu))
func.reuseOperand(ty_op.operand, operand)
else
try func.trunc(operand, wanted_ty, op_ty);
@@ -4700,13 +4702,14 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// NOTE: Resulting value is left on the stack.
fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
const pt = func.pt;
- const given_bits = @as(u16, @intCast(given_ty.bitSize(pt)));
+ const zcu = pt.zcu;
+ const given_bits = @as(u16, @intCast(given_ty.bitSize(zcu)));
if (toWasmBits(given_bits) == null) {
return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
}
var result = try func.intcast(operand, given_ty, wanted_ty);
- const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(pt)));
+ const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(zcu)));
const wasm_bits = toWasmBits(wanted_bits).?;
if (wasm_bits != wanted_bits) {
result = try func.wrapOperand(result, wanted_ty);
@@ -4724,23 +4727,23 @@ fn airIntFromBool(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const array_ty = func.typeOf(ty_op.operand).childType(mod);
+ const array_ty = func.typeOf(ty_op.operand).childType(zcu);
const slice_ty = ty_op.ty.toType();
// create a slice on the stack
const slice_local = try func.allocStack(slice_ty);
// store the array ptr in the slice
- if (array_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (array_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try func.store(slice_local, operand, Type.usize, 0);
}
// store the length of the array in the slice
- const array_len: u32 = @intCast(array_ty.arrayLen(mod));
+ const array_len: u32 = @intCast(array_ty.arrayLen(zcu));
try func.store(slice_local, .{ .imm32 = array_len }, Type.usize, func.ptrSize());
return func.finishAir(inst, slice_local, &.{ty_op.operand});
@@ -4748,11 +4751,11 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airIntFromPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
const ptr_ty = func.typeOf(un_op);
- const result = if (ptr_ty.isSlice(mod))
+ const result = if (ptr_ty.isSlice(zcu))
try func.slicePtr(operand)
else switch (operand) {
// for stack offset, return a pointer to this offset.
@@ -4764,17 +4767,17 @@ fn airIntFromPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = func.typeOf(bin_op.lhs);
const ptr = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
- const elem_ty = ptr_ty.childType(mod);
- const elem_size = elem_ty.abiSize(pt);
+ const elem_ty = ptr_ty.childType(zcu);
+ const elem_size = elem_ty.abiSize(zcu);
// load pointer onto the stack
- if (ptr_ty.isSlice(mod)) {
+ if (ptr_ty.isSlice(zcu)) {
_ = try func.load(ptr, Type.usize, 0);
} else {
try func.lowerToStack(ptr);
@@ -4796,19 +4799,19 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = func.typeOf(bin_op.lhs);
- const elem_ty = ty_pl.ty.toType().childType(mod);
- const elem_size = elem_ty.abiSize(pt);
+ const elem_ty = ty_pl.ty.toType().childType(zcu);
+ const elem_size = elem_ty.abiSize(zcu);
const ptr = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
// load pointer onto the stack
- if (ptr_ty.isSlice(mod)) {
+ if (ptr_ty.isSlice(zcu)) {
_ = try func.load(ptr, Type.usize, 0);
} else {
try func.lowerToStack(ptr);
@@ -4825,16 +4828,16 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try func.resolveInst(bin_op.lhs);
const offset = try func.resolveInst(bin_op.rhs);
const ptr_ty = func.typeOf(bin_op.lhs);
- const pointee_ty = switch (ptr_ty.ptrSize(mod)) {
- .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
- else => ptr_ty.childType(mod),
+ const pointee_ty = switch (ptr_ty.ptrSize(zcu)) {
+ .One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type
+ else => ptr_ty.childType(zcu),
};
const valtype = typeToValtype(Type.usize, pt, func.target.*);
@@ -4843,7 +4846,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
try func.lowerToStack(ptr);
try func.emitWValue(offset);
- try func.addImm32(@intCast(pointee_ty.abiSize(pt)));
+ try func.addImm32(@intCast(pointee_ty.abiSize(zcu)));
try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
@@ -4852,7 +4855,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@@ -4863,16 +4866,16 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
const ptr = try func.resolveInst(bin_op.lhs);
const ptr_ty = func.typeOf(bin_op.lhs);
const value = try func.resolveInst(bin_op.rhs);
- const len = switch (ptr_ty.ptrSize(mod)) {
+ const len = switch (ptr_ty.ptrSize(zcu)) {
.Slice => try func.sliceLen(ptr),
- .One => @as(WValue, .{ .imm32 = @as(u32, @intCast(ptr_ty.childType(mod).arrayLen(mod))) }),
+ .One => @as(WValue, .{ .imm32 = @as(u32, @intCast(ptr_ty.childType(zcu).arrayLen(zcu))) }),
.C, .Many => unreachable,
};
- const elem_ty = if (ptr_ty.ptrSize(mod) == .One)
- ptr_ty.childType(mod).childType(mod)
+ const elem_ty = if (ptr_ty.ptrSize(zcu) == .One)
+ ptr_ty.childType(zcu).childType(zcu)
else
- ptr_ty.childType(mod);
+ ptr_ty.childType(zcu);
const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty);
try func.memset(elem_ty, dst_ptr, len, value);
@@ -4886,7 +4889,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
/// we implement it manually.
fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
const pt = func.pt;
- const abi_size = @as(u32, @intCast(elem_ty.abiSize(pt)));
+ const abi_size = @as(u32, @intCast(elem_ty.abiSize(pt.zcu)));
// When bulk_memory is enabled, we lower it to wasm's memset instruction.
// If not, we lower it ourselves.
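As a rough picture (illustrative only, not part of this diff) of what the manual lowering amounts to when the bulk-memory feature, and with it memory.fill, is unavailable: the destination is filled by a plain store loop, sketched here at byte granularity.

// Fills `dest` one byte at a time, the way the non-bulk-memory path has to.
fn memsetFallback(dest: []u8, value: u8) void {
    var i: usize = 0;
    while (i < dest.len) : (i += 1) {
        dest[i] = value;
    }
}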
@@ -4975,14 +4978,14 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue
fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const array_ty = func.typeOf(bin_op.lhs);
const array = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
- const elem_ty = array_ty.childType(mod);
- const elem_size = elem_ty.abiSize(pt);
+ const elem_ty = array_ty.childType(zcu);
+ const elem_size = elem_ty.abiSize(zcu);
if (isByRef(array_ty, pt, func.target.*)) {
try func.lowerToStack(array);
@@ -4991,15 +4994,15 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
} else {
- std.debug.assert(array_ty.zigTypeTag(mod) == .Vector);
+ std.debug.assert(array_ty.zigTypeTag(zcu) == .Vector);
switch (index) {
inline .imm32, .imm64 => |lane| {
- const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(pt)) {
- 8 => if (elem_ty.isSignedInt(mod)) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
- 16 => if (elem_ty.isSignedInt(mod)) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
- 32 => if (elem_ty.isInt(mod)) .i32x4_extract_lane else .f32x4_extract_lane,
- 64 => if (elem_ty.isInt(mod)) .i64x2_extract_lane else .f64x2_extract_lane,
+ const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(zcu)) {
+ 8 => if (elem_ty.isSignedInt(zcu)) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
+ 16 => if (elem_ty.isSignedInt(zcu)) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
+ 32 => if (elem_ty.isInt(zcu)) .i32x4_extract_lane else .f32x4_extract_lane,
+ 64 => if (elem_ty.isInt(zcu)) .i64x2_extract_lane else .f64x2_extract_lane,
else => unreachable,
};
@@ -5037,7 +5040,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -5045,7 +5048,7 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const op_bits = op_ty.floatBits(func.target.*);
const dest_ty = func.typeOfIndex(inst);
- const dest_info = dest_ty.intInfo(mod);
+ const dest_info = dest_ty.intInfo(zcu);
if (dest_info.bits > 128) {
return func.fail("TODO: intFromFloat for integers/floats with bitsize {}", .{dest_info.bits});
@@ -5082,12 +5085,12 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.typeOf(ty_op.operand);
- const op_info = op_ty.intInfo(mod);
+ const op_info = op_ty.intInfo(zcu);
const dest_ty = func.typeOfIndex(inst);
const dest_bits = dest_ty.floatBits(func.target.*);
@@ -5127,19 +5130,19 @@ fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = func.typeOfIndex(inst);
- const elem_ty = ty.childType(mod);
+ const elem_ty = ty.childType(zcu);
- if (determineSimdStoreStrategy(ty, pt, func.target.*) == .direct) blk: {
+ if (determineSimdStoreStrategy(ty, zcu, func.target.*) == .direct) blk: {
switch (operand) {
// when the operand lives in the linear memory section, we can directly
                // load and splat the value at once, meaning we do not first have to load
                // the scalar value onto the stack.
.stack_offset, .memory, .memory_offset => {
- const opcode = switch (elem_ty.bitSize(pt)) {
+ const opcode = switch (elem_ty.bitSize(zcu)) {
8 => std.wasm.simdOpcode(.v128_load8_splat),
16 => std.wasm.simdOpcode(.v128_load16_splat),
32 => std.wasm.simdOpcode(.v128_load32_splat),
@@ -5153,17 +5156,17 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
opcode,
operand.offset(),
- @intCast(elem_ty.abiAlignment(pt).toByteUnits().?),
+ @intCast(elem_ty.abiAlignment(zcu).toByteUnits().?),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return func.finishAir(inst, .stack, &.{ty_op.operand});
},
.local => {
- const opcode = switch (elem_ty.bitSize(pt)) {
+ const opcode = switch (elem_ty.bitSize(zcu)) {
8 => std.wasm.simdOpcode(.i8x16_splat),
16 => std.wasm.simdOpcode(.i16x8_splat),
- 32 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
- 64 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
+ 32 => if (elem_ty.isInt(zcu)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
+ 64 => if (elem_ty.isInt(zcu)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
else => break :blk, // Cannot make use of simd-instructions
};
try func.emitWValue(operand);
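A small sketch (illustrative only, not part of this diff) of the load-and-splat opcode selection used further above when the operand already lives in linear memory; the std.wasm.simdOpcode calls mirror the ones in the switch, while the helper itself is invented:

const std = @import("std");

// Picks the wasm "load and splat" opcode for a given element bit size, or
// returns null when the element size has no direct SIMD load form.
fn splatLoadOpcode(elem_bits: u64) ?u32 {
    return switch (elem_bits) {
        8 => std.wasm.simdOpcode(.v128_load8_splat),
        16 => std.wasm.simdOpcode(.v128_load16_splat),
        32 => std.wasm.simdOpcode(.v128_load32_splat),
        64 => std.wasm.simdOpcode(.v128_load64_splat),
        else => null,
    };
}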
@@ -5175,14 +5178,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
}
}
- const elem_size = elem_ty.bitSize(pt);
- const vector_len = @as(usize, @intCast(ty.vectorLen(mod)));
+ const elem_size = elem_ty.bitSize(zcu);
+ const vector_len = @as(usize, @intCast(ty.vectorLen(zcu)));
if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
}
const result = try func.allocStack(ty);
- const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(pt)));
+ const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
var index: usize = 0;
var offset: u32 = 0;
while (index < vector_len) : (index += 1) {
@@ -5203,7 +5206,7 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const inst_ty = func.typeOfIndex(inst);
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data;
@@ -5213,15 +5216,15 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mask = Value.fromInterned(extra.mask);
const mask_len = extra.mask_len;
- const child_ty = inst_ty.childType(mod);
- const elem_size = child_ty.abiSize(pt);
+ const child_ty = inst_ty.childType(zcu);
+ const elem_size = child_ty.abiSize(zcu);
// TODO: One of them could be by ref; handle in loop
if (isByRef(func.typeOf(extra.a), pt, func.target.*) or isByRef(inst_ty, pt, func.target.*)) {
const result = try func.allocStack(inst_ty);
for (0..mask_len) |index| {
- const value = (try mask.elemValue(pt, index)).toSignedInt(pt);
+ const value = (try mask.elemValue(pt, index)).toSignedInt(zcu);
try func.emitWValue(result);
@@ -5241,7 +5244,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var lanes = mem.asBytes(operands[1..]);
for (0..@as(usize, @intCast(mask_len))) |index| {
- const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(pt);
+ const mask_elem = (try mask.elemValue(pt, index)).toSignedInt(zcu);
const base_index = if (mask_elem >= 0)
@as(u8, @intCast(@as(i64, @intCast(elem_size)) * mask_elem))
else
@@ -5273,20 +5276,20 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const result_ty = func.typeOfIndex(inst);
- const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
+ const len = @as(usize, @intCast(result_ty.arrayLen(zcu)));
const elements = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[ty_pl.payload..][0..len]));
const result: WValue = result_value: {
- switch (result_ty.zigTypeTag(mod)) {
+ switch (result_ty.zigTypeTag(zcu)) {
.Array => {
const result = try func.allocStack(result_ty);
- const elem_ty = result_ty.childType(mod);
- const elem_size = @as(u32, @intCast(elem_ty.abiSize(pt)));
- const sentinel = if (result_ty.sentinel(mod)) |sent| blk: {
+ const elem_ty = result_ty.childType(zcu);
+ const elem_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
+ const sentinel = if (result_ty.sentinel(zcu)) |sent| blk: {
break :blk try func.lowerConstant(sent, elem_ty);
} else null;
@@ -5321,18 +5324,18 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
break :result_value result;
},
- .Struct => switch (result_ty.containerLayout(mod)) {
+ .Struct => switch (result_ty.containerLayout(zcu)) {
.@"packed" => {
if (isByRef(result_ty, pt, func.target.*)) {
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
}
- const packed_struct = mod.typeToPackedStruct(result_ty).?;
+ const packed_struct = zcu.typeToPackedStruct(result_ty).?;
const field_types = packed_struct.field_types;
const backing_type = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
// ensure the result is zero'd
const result = try func.allocLocal(backing_type);
- if (backing_type.bitSize(pt) <= 32)
+ if (backing_type.bitSize(zcu) <= 32)
try func.addImm32(0)
else
try func.addImm64(0);
@@ -5341,15 +5344,15 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var current_bit: u16 = 0;
for (elements, 0..) |elem, elem_index| {
const field_ty = Type.fromInterned(field_types.get(ip)[elem_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(pt)) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
- const shift_val: WValue = if (backing_type.bitSize(pt) <= 32)
+ const shift_val: WValue = if (backing_type.bitSize(zcu) <= 32)
.{ .imm32 = current_bit }
else
.{ .imm64 = current_bit };
const value = try func.resolveInst(elem);
- const value_bit_size: u16 = @intCast(field_ty.bitSize(pt));
+ const value_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
const int_ty = try pt.intType(.unsigned, value_bit_size);
// load our current result on stack so we can perform all transformations
@@ -5375,8 +5378,8 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
for (elements, 0..) |elem, elem_index| {
if (try result_ty.structFieldValueComptime(pt, elem_index) != null) continue;
- const elem_ty = result_ty.structFieldType(elem_index, mod);
- const field_offset = result_ty.structFieldOffset(elem_index, pt);
+ const elem_ty = result_ty.fieldType(elem_index, zcu);
+ const field_offset = result_ty.structFieldOffset(elem_index, zcu);
_ = try func.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify);
prev_field_offset = field_offset;
@@ -5404,21 +5407,21 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
const result = result: {
const union_ty = func.typeOfIndex(inst);
- const layout = union_ty.unionGetLayout(pt);
- const union_obj = mod.typeToUnion(union_ty).?;
+ const layout = union_ty.unionGetLayout(zcu);
+ const union_obj = zcu.typeToUnion(union_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const tag_int = blk: {
- const tag_ty = union_ty.unionTagTypeHypothetical(mod);
- const enum_field_index = tag_ty.enumFieldIndex(field_name, mod).?;
+ const tag_ty = union_ty.unionTagTypeHypothetical(zcu);
+ const enum_field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index);
break :blk try func.lowerConstant(tag_val, tag_ty);
};
@@ -5458,13 +5461,13 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result result_ptr;
} else {
const operand = try func.resolveInst(extra.init);
- const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(pt))));
- if (field_ty.zigTypeTag(mod) == .Float) {
- const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(pt)));
+ const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(zcu))));
+ if (field_ty.zigTypeTag(zcu) == .Float) {
+ const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(zcu)));
const bitcasted = try func.bitcast(field_ty, int_type, operand);
break :result try func.trunc(bitcasted, int_type, union_int_type);
- } else if (field_ty.isPtrAtRuntime(mod)) {
- const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(pt)));
+ } else if (field_ty.isPtrAtRuntime(zcu)) {
+ const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(zcu)));
break :result try func.intcast(operand, int_type, union_int_type);
}
break :result try func.intcast(operand, field_ty, union_int_type);
@@ -5497,10 +5500,10 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
- assert(operand_ty.hasRuntimeBitsIgnoreComptime(pt));
+ const zcu = pt.zcu;
+ assert(operand_ty.hasRuntimeBitsIgnoreComptime(zcu));
assert(op == .eq or op == .neq);
- const payload_ty = operand_ty.optionalChild(mod);
+ const payload_ty = operand_ty.optionalChild(zcu);
// We store the final result in here that will be validated
// if the optional is truly equal.
@@ -5534,11 +5537,11 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
/// TODO: Lower this to compiler_rt call when bitsize > 128
fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
- assert(operand_ty.abiSize(pt) >= 16);
+ const zcu = pt.zcu;
+ assert(operand_ty.abiSize(zcu) >= 16);
assert(!(lhs != .stack and rhs == .stack));
- if (operand_ty.bitSize(pt) > 128) {
- return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(pt)});
+ if (operand_ty.bitSize(zcu) > 128) {
+ return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(zcu)});
}
var lhs_msb = try (try func.load(lhs, Type.u64, 8)).toLocal(func, Type.u64);
@@ -5561,7 +5564,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
}
},
else => {
- const ty = if (operand_ty.isSignedInt(mod)) Type.i64 else Type.u64;
+ const ty = if (operand_ty.isSignedInt(zcu)) Type.i64 else Type.u64;
            // leave those values on top of the stack for '.select'
const lhs_lsb = try func.load(lhs, Type.u64, 0);
const rhs_lsb = try func.load(rhs, Type.u64, 0);
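A compact sketch (illustrative only, not part of this diff) of the comparison strategy cmpBigInt implements, written here for unsigned operands: order by the high 64-bit words and fall back to the low words only when the high words are equal. Names are invented.

const std = @import("std");

// Orders two unsigned 128-bit values given as (high, low) word pairs.
fn orderU128(lhs_hi: u64, lhs_lo: u64, rhs_hi: u64, rhs_lo: u64) std.math.Order {
    const hi = std.math.order(lhs_hi, rhs_hi);
    return if (hi == .eq) std.math.order(lhs_lo, rhs_lo) else hi;
}

test "orderU128" {
    try std.testing.expectEqual(std.math.Order.lt, orderU128(0, 5, 1, 0));
    try std.testing.expectEqual(std.math.Order.gt, orderU128(1, 0, 0, 5));
    try std.testing.expectEqual(std.math.Order.eq, orderU128(2, 3, 2, 3));
}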
@@ -5577,11 +5580,11 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const un_ty = func.typeOf(bin_op.lhs).childType(mod);
+ const un_ty = func.typeOf(bin_op.lhs).childType(zcu);
const tag_ty = func.typeOf(bin_op.rhs);
- const layout = un_ty.unionGetLayout(pt);
+ const layout = un_ty.unionGetLayout(zcu);
if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const union_ptr = try func.resolveInst(bin_op.lhs);
@@ -5601,12 +5604,12 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const pt = func.pt;
+ const zcu = func.pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const un_ty = func.typeOf(ty_op.operand);
const tag_ty = func.typeOfIndex(inst);
- const layout = un_ty.unionGetLayout(pt);
+ const layout = un_ty.unionGetLayout(zcu);
if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
@@ -5705,11 +5708,11 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const err_set_ty = func.typeOf(ty_op.operand).childType(mod);
- const payload_ty = err_set_ty.errorUnionPayload(mod);
+ const err_set_ty = func.typeOf(ty_op.operand).childType(zcu);
+ const payload_ty = err_set_ty.errorUnionPayload(zcu);
const operand = try func.resolveInst(ty_op.operand);
    // set the error tag to '0' to mark the error union as non-error
@@ -5717,28 +5720,28 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
operand,
.{ .imm32 = 0 },
Type.anyerror,
- @intCast(errUnionErrorOffset(payload_ty, pt)),
+ @intCast(errUnionErrorOffset(payload_ty, zcu)),
);
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt))), .new);
+ break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu))), .new);
};
return func.finishAir(inst, result, &.{ty_op.operand});
}
fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const field_ptr = try func.resolveInst(extra.field_ptr);
- const parent_ty = ty_pl.ty.toType().childType(mod);
- const field_offset = parent_ty.structFieldOffset(extra.field_index, pt);
+ const parent_ty = ty_pl.ty.toType().childType(zcu);
+ const field_offset = parent_ty.structFieldOffset(extra.field_index, zcu);
const result = if (field_offset != 0) result: {
const base = try func.buildPointerOffset(field_ptr, 0, .new);
@@ -5754,8 +5757,8 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
- if (ptr_ty.isSlice(mod)) {
+ const zcu = pt.zcu;
+ if (ptr_ty.isSlice(zcu)) {
return func.slicePtr(ptr);
} else {
return ptr;
@@ -5764,26 +5767,26 @@ fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue
fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dst = try func.resolveInst(bin_op.lhs);
const dst_ty = func.typeOf(bin_op.lhs);
- const ptr_elem_ty = dst_ty.childType(mod);
+ const ptr_elem_ty = dst_ty.childType(zcu);
const src = try func.resolveInst(bin_op.rhs);
const src_ty = func.typeOf(bin_op.rhs);
- const len = switch (dst_ty.ptrSize(mod)) {
+ const len = switch (dst_ty.ptrSize(zcu)) {
.Slice => blk: {
const slice_len = try func.sliceLen(dst);
- if (ptr_elem_ty.abiSize(pt) != 1) {
+ if (ptr_elem_ty.abiSize(zcu) != 1) {
try func.emitWValue(slice_len);
- try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(pt))) });
+ try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(zcu))) });
try func.addTag(.i32_mul);
try func.addLabel(.local_set, slice_len.local.value);
}
break :blk slice_len;
},
.One => @as(WValue, .{
- .imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(pt))),
+ .imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(zcu) * ptr_elem_ty.childType(zcu).abiSize(zcu))),
}),
.C, .Many => unreachable,
};
@@ -5805,17 +5808,17 @@ fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.typeOf(ty_op.operand);
- if (op_ty.zigTypeTag(mod) == .Vector) {
+ if (op_ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: Implement @popCount for vectors", .{});
}
- const int_info = op_ty.intInfo(mod);
+ const int_info = op_ty.intInfo(zcu);
const bits = int_info.bits;
const wasm_bits = toWasmBits(bits) orelse {
return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
@@ -5824,14 +5827,14 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
switch (wasm_bits) {
32 => {
try func.emitWValue(operand);
- if (op_ty.isSignedInt(mod) and bits != wasm_bits) {
+ if (op_ty.isSignedInt(zcu) and bits != wasm_bits) {
_ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits));
}
try func.addTag(.i32_popcnt);
},
64 => {
try func.emitWValue(operand);
- if (op_ty.isSignedInt(mod) and bits != wasm_bits) {
+ if (op_ty.isSignedInt(zcu) and bits != wasm_bits) {
_ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits));
}
try func.addTag(.i64_popcnt);
@@ -5842,7 +5845,7 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
_ = try func.load(operand, Type.u64, 0);
try func.addTag(.i64_popcnt);
_ = try func.load(operand, Type.u64, 8);
- if (op_ty.isSignedInt(mod) and bits != wasm_bits) {
+ if (op_ty.isSignedInt(zcu) and bits != wasm_bits) {
_ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits - 64));
}
try func.addTag(.i64_popcnt);
@@ -5857,17 +5860,17 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = func.typeOf(ty_op.operand);
- if (ty.zigTypeTag(mod) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: Implement @bitReverse for vectors", .{});
}
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
const bits = int_info.bits;
const wasm_bits = toWasmBits(bits) orelse {
return func.fail("TODO: Implement @bitReverse for integers with bitsize '{d}'", .{bits});
@@ -5933,7 +5936,7 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
defer tmp.free(func);
try func.addLabel(.local_tee, tmp.local.value);
try func.emitWValue(.{ .imm64 = 128 - bits });
- if (ty.isSignedInt(mod)) {
+ if (ty.isSignedInt(zcu)) {
try func.addTag(.i64_shr_s);
} else {
try func.addTag(.i64_shr_u);
@@ -5969,7 +5972,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
const error_table_symbol = try func.bin_file.getErrorTableSymbol(pt);
const name_ty = Type.slice_const_u8_sentinel_0;
- const abi_size = name_ty.abiSize(pt);
+ const abi_size = name_ty.abiSize(pt.zcu);
const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
try func.emitWValue(error_name_value);
@@ -6000,8 +6003,8 @@ fn airPtrSliceFieldPtr(func: *CodeGen, inst: Air.Inst.Index, offset: u32) InnerE
/// NOTE: Allocates place for result on virtual stack, when integer size > 64 bits
fn intZeroValue(func: *CodeGen, ty: Type) InnerError!WValue {
- const mod = func.bin_file.base.comp.module.?;
- const int_info = ty.intInfo(mod);
+ const zcu = func.bin_file.base.comp.zcu.?;
+ const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement intZeroValue for integer bitsize: {d}", .{int_info.bits});
};
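For orientation, a sketch (illustrative only, not part of this diff) of the width bucketing that these toWasmBits calls rely on: Zig integer widths are rounded up to a wasm-representable width of 32, 64, or 128 bits, and anything wider is currently rejected with a TODO error. The helper name is invented.

// Rounds an integer bit width up to the nearest width the backend can
// represent directly, mirroring how the result is used above.
fn toWasmBitsSketch(bits: u16) ?u16 {
    return switch (bits) {
        0...32 => 32,
        33...64 => 64,
        65...128 => 128,
        else => null,
    };
}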
@@ -6027,13 +6030,13 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
const rhs = try func.resolveInst(extra.rhs);
const ty = func.typeOf(extra.lhs);
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
- if (ty.zigTypeTag(mod) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
const is_signed = int_info.signedness == .signed;
if (int_info.bits > 128) {
return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
@@ -6058,7 +6061,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
defer bit_tmp.free(func);
const result = try func.allocStack(func.typeOfIndex(inst));
- const offset: u32 = @intCast(ty.abiSize(pt));
+ const offset: u32 = @intCast(ty.abiSize(zcu));
try func.store(result, op_tmp, ty, 0);
try func.store(result, bit_tmp, Type.u1, offset);
@@ -6067,7 +6070,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -6076,18 +6079,18 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.typeOf(extra.lhs);
const rhs_ty = func.typeOf(extra.rhs);
- if (ty.zigTypeTag(mod) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
};
// Ensure rhs is coerced to lhs as they must have the same WebAssembly types
// before we can perform any binary operation.
- const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(mod).bits).?;
+ const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(zcu).bits).?;
// If wasm_bits == 128, compiler-rt expects i32 for shift
const rhs_final = if (wasm_bits != rhs_wasm_bits and wasm_bits == 64) blk: {
const rhs_casted = try func.intcast(rhs, rhs_ty, ty);
@@ -6105,7 +6108,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
defer overflow_local.free(func);
const result = try func.allocStack(func.typeOfIndex(inst));
- const offset: u32 = @intCast(ty.abiSize(pt));
+ const offset: u32 = @intCast(ty.abiSize(zcu));
try func.store(result, shl, ty, 0);
try func.store(result, overflow_local, Type.u1, offset);
@@ -6120,9 +6123,9 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const rhs = try func.resolveInst(extra.rhs);
const ty = func.typeOf(extra.lhs);
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
- if (ty.zigTypeTag(mod) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
@@ -6131,7 +6134,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var overflow_bit = try func.ensureAllocLocal(Type.u1);
defer overflow_bit.free(func);
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
};
@@ -6238,7 +6241,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
defer bin_op_local.free(func);
const result = try func.allocStack(func.typeOfIndex(inst));
- const offset: u32 = @intCast(ty.abiSize(pt));
+ const offset: u32 = @intCast(ty.abiSize(zcu));
try func.store(result, bin_op_local, ty, 0);
try func.store(result, overflow_bit, Type.u1, offset);
@@ -6248,22 +6251,22 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
assert(op == .max or op == .min);
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ty = func.typeOfIndex(inst);
- if (ty.zigTypeTag(mod) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
}
- if (ty.abiSize(pt) > 16) {
+ if (ty.abiSize(zcu) > 16) {
return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
}
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- if (ty.zigTypeTag(mod) == .Float) {
+ if (ty.zigTypeTag(zcu) == .Float) {
var fn_name_buf: [64]u8 = undefined;
const float_bits = ty.floatBits(func.target.*);
const fn_name = std.fmt.bufPrint(&fn_name_buf, "{s}f{s}{s}", .{
@@ -6288,12 +6291,12 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
const ty = func.typeOfIndex(inst);
- if (ty.zigTypeTag(mod) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: `@mulAdd` for vectors", .{});
}
@@ -6323,16 +6326,16 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ty = func.typeOf(ty_op.operand);
- if (ty.zigTypeTag(mod) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: `@clz` for vectors", .{});
}
const operand = try func.resolveInst(ty_op.operand);
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
};
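// (Illustrative note: for widths narrower than 32/64 bits, the raw wasm clz also counts the
// padding bits of the widened value, so the count still needs adjusting for the narrow type.)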
@@ -6374,17 +6377,17 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ty = func.typeOf(ty_op.operand);
- if (ty.zigTypeTag(mod) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: `@ctz` for vectors", .{});
}
const operand = try func.resolveInst(ty_op.operand);
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
};
@@ -6497,12 +6500,12 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try func.resolveInst(extra.data.ptr);
const body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]);
- const err_union_ty = func.typeOf(extra.data.ptr).childType(mod);
+ const err_union_ty = func.typeOf(extra.data.ptr).childType(zcu);
const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true);
return func.finishAir(inst, result, &.{extra.data.ptr});
}
@@ -6516,25 +6519,25 @@ fn lowerTry(
operand_is_ptr: bool,
) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
if (operand_is_ptr) {
return func.fail("TODO: lowerTry for pointers", .{});
}
- const pl_ty = err_union_ty.errorUnionPayload(mod);
- const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(pt);
+ const pl_ty = err_union_ty.errorUnionPayload(zcu);
+ const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(zcu);
- if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
+ if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
// Block we can jump out of when error is not set
try func.startBlock(.block, wasm.block_empty);
// check if the error tag is set for the error union.
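// (Illustrative note: error values fit in 16 bits, hence the zero-extending 16-bit load;
// a tag of 0 means "no error", which the i32_eqz below tests for.)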
try func.emitWValue(err_union);
if (pl_has_bits) {
- const err_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, pt));
+ const err_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
try func.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
- .alignment = @intCast(Type.anyerror.abiAlignment(pt).toByteUnits().?),
+ .alignment = @intCast(Type.anyerror.abiAlignment(zcu).toByteUnits().?),
});
}
try func.addTag(.i32_eqz);
@@ -6556,7 +6559,7 @@ fn lowerTry(
return .none;
}
- const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, pt));
+ const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
if (isByRef(pl_ty, pt, func.target.*)) {
return buildPointerOffset(func, err_union, pl_offset, .new);
}
@@ -6566,16 +6569,16 @@ fn lowerTry(
fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ty = func.typeOfIndex(inst);
const operand = try func.resolveInst(ty_op.operand);
- if (ty.zigTypeTag(mod) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .Vector) {
return func.fail("TODO: @byteSwap for vectors", .{});
}
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits});
};
@@ -6649,15 +6652,15 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty = func.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- if (ty.isUnsignedInt(mod)) {
+ if (ty.isUnsignedInt(zcu)) {
_ = try func.binOp(lhs, rhs, ty, .div);
- } else if (ty.isSignedInt(mod)) {
- const int_bits = ty.intInfo(mod).bits;
+ } else if (ty.isSignedInt(zcu)) {
+ const int_bits = ty.intInfo(zcu).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
};
@@ -6767,19 +6770,19 @@ fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty = func.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- if (ty.isUnsignedInt(mod)) {
+ if (ty.isUnsignedInt(zcu)) {
_ = try func.binOp(lhs, rhs, ty, .rem);
- } else if (ty.isSignedInt(mod)) {
+ } else if (ty.isSignedInt(zcu)) {
// The wasm rem instruction gives the remainder after truncating division (rounding towards
// 0), equivalent to @rem.
// We make use of the fact that:
// @mod(a, b) = @rem(@rem(a, b) + b, b)
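// (Illustrative check: for a = -7, b = 3: @rem(-7, 3) = -1, and @rem(-1 + 3, 3) = 2,
// which matches @mod(-7, 3) = 2.)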
- const int_bits = ty.intInfo(mod).bits;
+ const int_bits = ty.intInfo(zcu).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
};
@@ -6802,9 +6805,9 @@ fn airSatMul(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty = func.typeOfIndex(inst);
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
const is_signed = int_info.signedness == .signed;
const lhs = try func.resolveInst(bin_op.lhs);
@@ -6903,12 +6906,12 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty = func.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
const is_signed = int_info.signedness == .signed;
if (int_info.bits > 64) {
@@ -6950,8 +6953,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
const pt = func.pt;
- const mod = pt.zcu;
- const int_info = ty.intInfo(mod);
+ const zcu = pt.zcu;
+ const int_info = ty.intInfo(zcu);
const wasm_bits = toWasmBits(int_info.bits).?;
const is_wasm_bits = wasm_bits == int_info.bits;
const ext_ty = if (!is_wasm_bits) try pt.intType(int_info.signedness, wasm_bits) else ty;
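// (Illustrative note: when the bit width is not a native wasm width, operands are first widened
// to ext_ty so overflow past the original type's bounds can be detected before clamping.)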
@@ -7009,9 +7012,9 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty = func.typeOfIndex(inst);
- const int_info = ty.intInfo(mod);
+ const int_info = ty.intInfo(zcu);
const is_signed = int_info.signedness == .signed;
if (int_info.bits > 64) {
return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
@@ -7130,7 +7133,7 @@ fn callIntrinsic(
// Intrinsic calls always use the C ABI.
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
var func_type = try genFunctype(func.gpa, .C, param_types, return_type, pt, func.target.*);
defer func_type.deinit(func.gpa);
const func_type_index = try func.bin_file.zigObjectPtr().?.putOrGetFuncType(func.gpa, func_type);
@@ -7148,16 +7151,16 @@ fn callIntrinsic(
// Lower all arguments to the stack before we call our function
for (args, 0..) |arg, arg_i| {
assert(!(want_sret_param and arg == .stack));
- assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(pt));
+ assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(zcu));
try func.lowerArg(.C, Type.fromInterned(param_types[arg_i]), arg);
}
// Actually call our intrinsic
try func.addLabel(.call, @intFromEnum(symbol_index));
- if (!return_type.hasRuntimeBitsIgnoreComptime(pt)) {
+ if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
return .none;
- } else if (return_type.isNoReturn(mod)) {
+ } else if (return_type.isNoReturn(zcu)) {
try func.addTag(.@"unreachable");
return .none;
} else if (want_sret_param) {
@@ -7184,8 +7187,8 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
const pt = func.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
var arena_allocator = std.heap.ArenaAllocator.init(func.gpa);
defer arena_allocator.deinit();
@@ -7198,9 +7201,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
return @intFromEnum(loc.index);
}
- const int_tag_ty = enum_ty.intTagType(mod);
+ const int_tag_ty = enum_ty.intTagType(zcu);
- if (int_tag_ty.bitSize(pt) > 64) {
+ if (int_tag_ty.bitSize(zcu) > 64) {
return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
}
@@ -7220,7 +7223,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
// TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse.
// Generate an if-else chain for each tag value, as well as a constant for each tag's name.
- const tag_names = enum_ty.enumFields(mod);
+ const tag_names = enum_ty.enumFields(zcu);
for (0..tag_names.len) |tag_index| {
const tag_name = tag_names.get(ip)[tag_index];
const tag_name_len = tag_name.length(ip);
@@ -7345,15 +7348,15 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
- const ip = &mod.intern_pool;
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const error_set_ty = ty_op.ty.toType();
const result = try func.allocLocal(Type.bool);
- const names = error_set_ty.errorSetNames(mod);
+ const names = error_set_ty.errorSetNames(zcu);
var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len);
defer values.deinit();
@@ -7432,12 +7435,12 @@ inline fn useAtomicFeature(func: *const CodeGen) bool {
fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const ptr_ty = func.typeOf(extra.ptr);
- const ty = ptr_ty.childType(mod);
+ const ty = ptr_ty.childType(zcu);
const result_ty = func.typeOfIndex(inst);
const ptr_operand = try func.resolveInst(extra.ptr);
@@ -7451,7 +7454,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr_operand);
try func.lowerToStack(expected_val);
try func.lowerToStack(new_val);
- try func.addAtomicMemArg(switch (ty.abiSize(pt)) {
+ try func.addAtomicMemArg(switch (ty.abiSize(zcu)) {
1 => .i32_atomic_rmw8_cmpxchg_u,
2 => .i32_atomic_rmw16_cmpxchg_u,
4 => .i32_atomic_rmw_cmpxchg,
@@ -7459,14 +7462,14 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
}, .{
.offset = ptr_operand.offset(),
- .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
});
try func.addLabel(.local_tee, val_local.local.value);
_ = try func.cmp(.stack, expected_val, ty, .eq);
try func.addLabel(.local_set, cmp_result.local.value);
break :val val_local;
} else val: {
- if (ty.abiSize(pt) > 8) {
+ if (ty.abiSize(zcu) > 8) {
return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
}
const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty);
@@ -7490,7 +7493,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_and);
const and_result = try WValue.toLocal(.stack, func, Type.bool);
const result_ptr = try func.allocStack(result_ty);
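// (Illustrative note: the optional result is laid out with the loaded value at offset 0 and the
// flag byte stored immediately after it, at the payload type's ABI size.)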
- try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(pt))));
+ try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(zcu))));
try func.store(result_ptr, ptr_val, ty, 0);
break :val result_ptr;
} else val: {
@@ -7511,7 +7514,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.typeOfIndex(inst);
if (func.useAtomicFeature()) {
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt.zcu)) {
1 => .i32_atomic_load8_u,
2 => .i32_atomic_load16_u,
4 => .i32_atomic_load,
@@ -7521,7 +7524,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(pt.zcu).toByteUnits().?),
});
} else {
_ = try func.load(ptr, ty, 0);
@@ -7532,7 +7535,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data;
@@ -7556,7 +7559,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.emitWValue(value);
if (op == .Nand) {
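// (Illustrative note: wasm has no nand opcode; nand(a, b) == ~(a & b), so the `and` is
// computed first and then inverted.)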
- const wasm_bits = toWasmBits(@intCast(ty.bitSize(pt))).?;
+ const wasm_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?;
const and_res = try func.binOp(value, operand, ty, .@"and");
if (wasm_bits == 32)
@@ -7573,7 +7576,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.select);
}
try func.addAtomicMemArg(
- switch (ty.abiSize(pt)) {
+ switch (ty.abiSize(zcu)) {
1 => .i32_atomic_rmw8_cmpxchg_u,
2 => .i32_atomic_rmw16_cmpxchg_u,
4 => .i32_atomic_rmw_cmpxchg,
@@ -7582,7 +7585,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
},
);
const select_res = try func.allocLocal(ty);
@@ -7601,7 +7604,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => {
try func.emitWValue(ptr);
try func.emitWValue(operand);
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
1 => switch (op) {
.Xchg => .i32_atomic_rmw8_xchg_u,
.Add => .i32_atomic_rmw8_add_u,
@@ -7642,7 +7645,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
});
return func.finishAir(inst, .stack, &.{ pl_op.operand, extra.operand });
},
@@ -7670,7 +7673,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.Xor => .xor,
else => unreachable,
});
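// (Illustrative note: add/sub results for integers narrower than their 32/64-bit wasm
// representation can exceed the type's range, so they are wrapped back to its bit width.)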
- if (ty.isInt(mod) and (op == .Add or op == .Sub)) {
+ if (ty.isInt(zcu) and (op == .Add or op == .Sub)) {
_ = try func.wrapOperand(.stack, ty);
}
try func.store(.stack, .stack, ty, ptr.offset());
@@ -7686,7 +7689,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.store(.stack, .stack, ty, ptr.offset());
},
.Nand => {
- const wasm_bits = toWasmBits(@intCast(ty.bitSize(pt))).?;
+ const wasm_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?;
try func.emitWValue(ptr);
const and_res = try func.binOp(result, operand, ty, .@"and");
@@ -7721,16 +7724,16 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
- const mod = pt.zcu;
+ const zcu = pt.zcu;
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr = try func.resolveInst(bin_op.lhs);
const operand = try func.resolveInst(bin_op.rhs);
const ptr_ty = func.typeOf(bin_op.lhs);
- const ty = ptr_ty.childType(mod);
+ const ty = ptr_ty.childType(zcu);
if (func.useAtomicFeature()) {
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(pt)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
1 => .i32_atomic_store8,
2 => .i32_atomic_store16,
4 => .i32_atomic_store,
@@ -7741,7 +7744,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.lowerToStack(operand);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = @intCast(ty.abiAlignment(pt).toByteUnits().?),
+ .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
});
} else {
try func.store(ptr, operand, ty, 0);
@@ -7760,12 +7763,12 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type {
const pt = func.pt;
- const mod = pt.zcu;
- return func.air.typeOf(inst, &mod.intern_pool);
+ const zcu = pt.zcu;
+ return func.air.typeOf(inst, &zcu.intern_pool);
}
fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type {
const pt = func.pt;
- const mod = pt.zcu;
- return func.air.typeOfIndex(inst, &mod.intern_pool);
+ const zcu = pt.zcu;
+ return func.air.typeOfIndex(inst, &zcu.intern_pool);
}