author     Andrew Kelley <andrew@ziglang.org>  2023-05-02 15:01:45 -0700
committer  Andrew Kelley <andrew@ziglang.org>  2023-06-10 20:40:03 -0700
commit     9aec2758cc29d27c31dcb0b4bb040484a885ef23 (patch)
tree       c171c40656f3b8f70375b4afca94a87784bb2dda /src/arch/wasm/CodeGen.zig
parent     1e7dcaa3ae57294ab5998b44a8c13ccc5019e7ea (diff)
stage2: start the InternPool transition
Instead of doing everything at once which is a hopelessly large task, this introduces a piecemeal transition that can be done in small increments at a time.

This is a minimal changeset that keeps the compiler compiling. It only uses the InternPool for a small set of types. Behavior tests are not passing.

Air.Inst.Ref and Zir.Inst.Ref are separated into different enums but compile-time verified to have the same fields in the same order.

The large set of changes is mainly to deal with the fact that most Type and Value methods now require a Module to be passed in, so that the InternPool object can be accessed.
Diffstat (limited to 'src/arch/wasm/CodeGen.zig')
-rw-r--r--  src/arch/wasm/CodeGen.zig  966
1 file changed, 504 insertions, 462 deletions
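The diff below repeats one mechanical pattern: helpers that used to take `target: std.Target` (for example `isByRef`, `typeToValtype`, `genFunctype`) now take `mod: *Module` and fetch the target via `mod.getTarget()`, and on-stack `Type.Payload.Bits` constructions are replaced by calls like `mod.intType(...)` so the resulting type can live in the InternPool. What follows is a minimal, self-contained sketch of that before/after shape, using simplified stand-ins rather than the compiler's real `Module`, `InternPool`, or `Type` definitions:

const std = @import("std");

// Illustrative stand-ins only; the real definitions live in src/Module.zig,
// src/InternPool.zig, and src/type.zig and are far richer.
const InternPool = struct {};

const Module = struct {
    intern_pool: InternPool = .{},
};

const Type = struct {
    bits: u16,

    // Old shape (before this commit):  fn bitSize(ty: Type, target: std.Target) u64
    // New shape (after this commit):   fn bitSize(ty: Type, mod: *Module) u64
    fn bitSize(ty: Type, mod: *Module) u64 {
        _ = &mod.intern_pool; // the Module is what gives access to the InternPool
        return ty.bits;
    }
};

// Callers such as isByRef/typeToValtype now accept `mod: *Module` instead of
// `target: std.Target` and forward it to every Type/Value query they make.
fn isByRef(ty: Type, mod: *Module) bool {
    return ty.bitSize(mod) > 64;
}

test "module is threaded through type queries" {
    var mod = Module{};
    try std.testing.expect(isByRef(.{ .bits = 128 }, &mod));
    try std.testing.expect(!isByRef(.{ .bits = 32 }, &mod));
}

This is why most hunks in the diff are one-line signature or call-site changes rather than behavioral changes.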
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index d4be9bf139..b592ffcb2a 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -788,9 +788,10 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref);
assert(!gop.found_existing);
- const val = func.air.value(ref).?;
+ const mod = func.bin_file.base.options.module.?;
+ const val = func.air.value(ref, mod).?;
const ty = func.air.typeOf(ref);
- if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt() and !ty.isError()) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) {
gop.value_ptr.* = WValue{ .none = {} };
return gop.value_ptr.*;
}
@@ -801,7 +802,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
//
// In the other cases, we will simply lower the constant to a value that fits
// into a single local (such as a pointer, integer, bool, etc).
- const result = if (isByRef(ty, func.target)) blk: {
+ const result = if (isByRef(ty, mod)) blk: {
const sym_index = try func.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, func.decl_index);
break :blk WValue{ .memory = sym_index };
} else try func.lowerConstant(val, ty);
@@ -987,8 +988,9 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
}
/// Using a given `Type`, returns the corresponding type
-fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
- return switch (ty.zigTypeTag()) {
+fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
+ const target = mod.getTarget();
+ return switch (ty.zigTypeTag(mod)) {
.Float => blk: {
const bits = ty.floatBits(target);
if (bits == 16) return wasm.Valtype.i32; // stored/loaded as u16
@@ -998,7 +1000,7 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
return wasm.Valtype.i32; // represented as pointer to stack
},
.Int, .Enum => blk: {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
if (info.bits <= 32) break :blk wasm.Valtype.i32;
if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64;
break :blk wasm.Valtype.i32; // represented as pointer to stack
@@ -1006,22 +1008,18 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
.Struct => switch (ty.containerLayout()) {
.Packed => {
const struct_obj = ty.castTag(.@"struct").?.data;
- return typeToValtype(struct_obj.backing_int_ty, target);
+ return typeToValtype(struct_obj.backing_int_ty, mod);
},
else => wasm.Valtype.i32,
},
- .Vector => switch (determineSimdStoreStrategy(ty, target)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, mod)) {
.direct => wasm.Valtype.v128,
.unrolled => wasm.Valtype.i32,
},
.Union => switch (ty.containerLayout()) {
.Packed => {
- var int_ty_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.bitSize(target)),
- };
- const int_ty = Type.initPayload(&int_ty_payload.base);
- return typeToValtype(int_ty, target);
+ const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory");
+ return typeToValtype(int_ty, mod);
},
else => wasm.Valtype.i32,
},
@@ -1030,17 +1028,17 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
}
/// Using a given `Type`, returns the byte representation of its wasm value type
-fn genValtype(ty: Type, target: std.Target) u8 {
- return wasm.valtype(typeToValtype(ty, target));
+fn genValtype(ty: Type, mod: *Module) u8 {
+ return wasm.valtype(typeToValtype(ty, mod));
}
/// Using a given `Type`, returns the corresponding wasm value type
/// Differently from `genValtype` this also allows `void` to create a block
/// with no return type
-fn genBlockType(ty: Type, target: std.Target) u8 {
+fn genBlockType(ty: Type, mod: *Module) u8 {
return switch (ty.tag()) {
.void, .noreturn => wasm.block_empty,
- else => genValtype(ty, target),
+ else => genValtype(ty, mod),
};
}
@@ -1101,7 +1099,8 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
/// Creates one locals for a given `Type`.
/// Returns a corresponding `Wvalue` with `local` as active tag
fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
- const valtype = typeToValtype(ty, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const valtype = typeToValtype(ty, mod);
switch (valtype) {
.i32 => if (func.free_locals_i32.popOrNull()) |index| {
log.debug("reusing local ({d}) of type {}", .{ index, valtype });
@@ -1132,7 +1131,8 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
/// Ensures a new local will be created. This is useful when it's useful
/// to use a zero-initialized local.
fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
- try func.locals.append(func.gpa, genValtype(ty, func.target));
+ const mod = func.bin_file.base.options.module.?;
+ try func.locals.append(func.gpa, genValtype(ty, mod));
const initial_index = func.local_index;
func.local_index += 1;
return WValue{ .local = .{ .value = initial_index, .references = 1 } };
@@ -1140,48 +1140,54 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
/// Generates a `wasm.Type` from a given function type.
/// Memory is owned by the caller.
-fn genFunctype(gpa: Allocator, cc: std.builtin.CallingConvention, params: []const Type, return_type: Type, target: std.Target) !wasm.Type {
+fn genFunctype(
+ gpa: Allocator,
+ cc: std.builtin.CallingConvention,
+ params: []const Type,
+ return_type: Type,
+ mod: *Module,
+) !wasm.Type {
var temp_params = std.ArrayList(wasm.Valtype).init(gpa);
defer temp_params.deinit();
var returns = std.ArrayList(wasm.Valtype).init(gpa);
defer returns.deinit();
- if (firstParamSRet(cc, return_type, target)) {
+ if (firstParamSRet(cc, return_type, mod)) {
try temp_params.append(.i32); // memory address is always a 32-bit handle
- } else if (return_type.hasRuntimeBitsIgnoreComptime()) {
+ } else if (return_type.hasRuntimeBitsIgnoreComptime(mod)) {
if (cc == .C) {
- const res_classes = abi.classifyType(return_type, target);
+ const res_classes = abi.classifyType(return_type, mod);
assert(res_classes[0] == .direct and res_classes[1] == .none);
- const scalar_type = abi.scalarType(return_type, target);
- try returns.append(typeToValtype(scalar_type, target));
+ const scalar_type = abi.scalarType(return_type, mod);
+ try returns.append(typeToValtype(scalar_type, mod));
} else {
- try returns.append(typeToValtype(return_type, target));
+ try returns.append(typeToValtype(return_type, mod));
}
- } else if (return_type.isError()) {
+ } else if (return_type.isError(mod)) {
try returns.append(.i32);
}
// param types
for (params) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
switch (cc) {
.C => {
- const param_classes = abi.classifyType(param_type, target);
+ const param_classes = abi.classifyType(param_type, mod);
for (param_classes) |class| {
if (class == .none) continue;
if (class == .direct) {
- const scalar_type = abi.scalarType(param_type, target);
- try temp_params.append(typeToValtype(scalar_type, target));
+ const scalar_type = abi.scalarType(param_type, mod);
+ try temp_params.append(typeToValtype(scalar_type, mod));
} else {
- try temp_params.append(typeToValtype(param_type, target));
+ try temp_params.append(typeToValtype(param_type, mod));
}
}
},
- else => if (isByRef(param_type, target))
+ else => if (isByRef(param_type, mod))
try temp_params.append(.i32)
else
- try temp_params.append(typeToValtype(param_type, target)),
+ try temp_params.append(typeToValtype(param_type, mod)),
}
}
@@ -1227,7 +1233,8 @@ pub fn generate(
fn genFunc(func: *CodeGen) InnerError!void {
const fn_info = func.decl.ty.fnInfo();
- var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod);
defer func_type.deinit(func.gpa);
_ = try func.bin_file.storeDeclType(func.decl_index, func_type);
@@ -1254,7 +1261,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
if (func_type.returns.len != 0 and func.air.instructions.len > 0) {
const inst = @intCast(u32, func.air.instructions.len - 1);
const last_inst_ty = func.air.typeOfIndex(inst);
- if (!last_inst_ty.hasRuntimeBitsIgnoreComptime() or last_inst_ty.isNoReturn()) {
+ if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn()) {
try func.addTag(.@"unreachable");
}
}
@@ -1335,6 +1342,7 @@ const CallWValues = struct {
};
fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues {
+ const mod = func.bin_file.base.options.module.?;
const cc = fn_ty.fnCallingConvention();
const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen());
defer func.gpa.free(param_types);
@@ -1351,7 +1359,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
// Check if we store the result as a pointer to the stack rather than
// by value
const fn_info = fn_ty.fnInfo();
- if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
+ if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) {
// the sret arg will be passed as first argument, therefore we
// set the `return_value` before allocating locals for regular args.
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } };
@@ -1361,7 +1369,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
switch (cc) {
.Unspecified => {
for (param_types) |ty| {
- if (!ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
continue;
}
@@ -1371,7 +1379,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
},
.C => {
for (param_types) |ty| {
- const ty_classes = abi.classifyType(ty, func.target);
+ const ty_classes = abi.classifyType(ty, mod);
for (ty_classes) |class| {
if (class == .none) continue;
try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
@@ -1385,11 +1393,11 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
return result;
}
-fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, target: std.Target) bool {
+fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *const Module) bool {
switch (cc) {
- .Unspecified, .Inline => return isByRef(return_type, target),
+ .Unspecified, .Inline => return isByRef(return_type, mod),
.C => {
- const ty_classes = abi.classifyType(return_type, target);
+ const ty_classes = abi.classifyType(return_type, mod);
if (ty_classes[0] == .indirect) return true;
if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true;
return false;
@@ -1405,16 +1413,17 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
return func.lowerToStack(value);
}
- const ty_classes = abi.classifyType(ty, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const ty_classes = abi.classifyType(ty, mod);
assert(ty_classes[0] != .none);
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Struct, .Union => {
if (ty_classes[0] == .indirect) {
return func.lowerToStack(value);
}
assert(ty_classes[0] == .direct);
- const scalar_type = abi.scalarType(ty, func.target);
- const abi_size = scalar_type.abiSize(func.target);
+ const scalar_type = abi.scalarType(ty, mod);
+ const abi_size = scalar_type.abiSize(mod);
try func.emitWValue(value);
// When the value lives in the virtual stack, we must load it onto the actual stack
@@ -1422,12 +1431,12 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
const opcode = buildOpcode(.{
.op = .load,
.width = @intCast(u8, abi_size),
- .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned,
- .valtype1 = typeToValtype(scalar_type, func.target),
+ .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
+ .valtype1 = typeToValtype(scalar_type, mod),
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = value.offset(),
- .alignment = scalar_type.abiAlignment(func.target),
+ .alignment = scalar_type.abiAlignment(mod),
});
}
},
@@ -1436,7 +1445,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
return func.lowerToStack(value);
}
assert(ty_classes[0] == .direct and ty_classes[1] == .direct);
- assert(ty.abiSize(func.target) == 16);
+ assert(ty.abiSize(mod) == 16);
// in this case we have an integer or float that must be lowered as 2 i64's.
try func.emitWValue(value);
try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 });
@@ -1503,18 +1512,18 @@ fn restoreStackPointer(func: *CodeGen) !void {
///
/// Asserts Type has codegenbits
fn allocStack(func: *CodeGen, ty: Type) !WValue {
- assert(ty.hasRuntimeBitsIgnoreComptime());
+ const mod = func.bin_file.base.options.module.?;
+ assert(ty.hasRuntimeBitsIgnoreComptime(mod));
if (func.initial_stack_value == .none) {
try func.initializeStack();
}
- const abi_size = std.math.cast(u32, ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
+ const abi_size = std.math.cast(u32, ty.abiSize(mod)) orelse {
return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
- ty.fmt(module), ty.abiSize(func.target),
+ ty.fmt(mod), ty.abiSize(mod),
});
};
- const abi_align = ty.abiAlignment(func.target);
+ const abi_align = ty.abiAlignment(mod);
if (abi_align > func.stack_alignment) {
func.stack_alignment = abi_align;
@@ -1531,6 +1540,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
/// This is different from allocStack where this will use the pointer's alignment
/// if it is set, to ensure the stack alignment will be set correctly.
fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
+ const mod = func.bin_file.base.options.module.?;
const ptr_ty = func.air.typeOfIndex(inst);
const pointee_ty = ptr_ty.childType();
@@ -1538,15 +1548,14 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
try func.initializeStack();
}
- if (!pointee_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pointee_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.allocStack(Type.usize); // create a value containing just the stack pointer.
}
- const abi_alignment = ptr_ty.ptrAlignment(func.target);
- const abi_size = std.math.cast(u32, pointee_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
+ const abi_alignment = ptr_ty.ptrAlignment(mod);
+ const abi_size = std.math.cast(u32, pointee_ty.abiSize(mod)) orelse {
return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
- pointee_ty.fmt(module), pointee_ty.abiSize(func.target),
+ pointee_ty.fmt(mod), pointee_ty.abiSize(mod),
});
};
if (abi_alignment > func.stack_alignment) {
@@ -1704,8 +1713,9 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
/// For a given `Type`, will return true when the type will be passed
/// by reference, rather than by value
-fn isByRef(ty: Type, target: std.Target) bool {
- switch (ty.zigTypeTag()) {
+fn isByRef(ty: Type, mod: *const Module) bool {
+ const target = mod.getTarget();
+ switch (ty.zigTypeTag(mod)) {
.Type,
.ComptimeInt,
.ComptimeFloat,
@@ -1726,40 +1736,40 @@ fn isByRef(ty: Type, target: std.Target) bool {
.Array,
.Frame,
- => return ty.hasRuntimeBitsIgnoreComptime(),
+ => return ty.hasRuntimeBitsIgnoreComptime(mod),
.Union => {
if (ty.castTag(.@"union")) |union_ty| {
if (union_ty.data.layout == .Packed) {
- return ty.abiSize(target) > 8;
+ return ty.abiSize(mod) > 8;
}
}
- return ty.hasRuntimeBitsIgnoreComptime();
+ return ty.hasRuntimeBitsIgnoreComptime(mod);
},
.Struct => {
if (ty.castTag(.@"struct")) |struct_ty| {
const struct_obj = struct_ty.data;
if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
- return isByRef(struct_obj.backing_int_ty, target);
+ return isByRef(struct_obj.backing_int_ty, mod);
}
}
- return ty.hasRuntimeBitsIgnoreComptime();
+ return ty.hasRuntimeBitsIgnoreComptime(mod);
},
- .Vector => return determineSimdStoreStrategy(ty, target) == .unrolled,
- .Int => return ty.intInfo(target).bits > 64,
+ .Vector => return determineSimdStoreStrategy(ty, mod) == .unrolled,
+ .Int => return ty.intInfo(mod).bits > 64,
.Float => return ty.floatBits(target) > 64,
.ErrorUnion => {
const pl_ty = ty.errorUnionPayload();
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return false;
}
return true;
},
.Optional => {
- if (ty.isPtrLikeOptional()) return false;
+ if (ty.isPtrLikeOptional(mod)) return false;
var buf: Type.Payload.ElemType = undefined;
const pl_type = ty.optionalChild(&buf);
- if (pl_type.zigTypeTag() == .ErrorSet) return false;
- return pl_type.hasRuntimeBitsIgnoreComptime();
+ if (pl_type.zigTypeTag(mod) == .ErrorSet) return false;
+ return pl_type.hasRuntimeBitsIgnoreComptime(mod);
},
.Pointer => {
// Slices act like struct and will be passed by reference
@@ -1778,10 +1788,11 @@ const SimdStoreStrategy = enum {
/// This means when a given type is 128 bits and either the simd128 or relaxed-simd
/// features are enabled, the function will return `.direct`. This would allow to store
/// it using a instruction, rather than an unrolled version.
-fn determineSimdStoreStrategy(ty: Type, target: std.Target) SimdStoreStrategy {
- std.debug.assert(ty.zigTypeTag() == .Vector);
- if (ty.bitSize(target) != 128) return .unrolled;
+fn determineSimdStoreStrategy(ty: Type, mod: *const Module) SimdStoreStrategy {
+ std.debug.assert(ty.zigTypeTag(mod) == .Vector);
+ if (ty.bitSize(mod) != 128) return .unrolled;
const hasFeature = std.Target.wasm.featureSetHas;
+ const target = mod.getTarget();
const features = target.cpu.features;
if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) {
return .direct;
@@ -2084,32 +2095,33 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(un_op);
const fn_info = func.decl.ty.fnInfo();
const ret_ty = fn_info.return_type;
+ const mod = func.bin_file.base.options.module.?;
// result must be stored in the stack and we return a pointer
// to the stack instead
if (func.return_value != .none) {
try func.store(func.return_value, operand, ret_ty, 0);
- } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) {
- switch (ret_ty.zigTypeTag()) {
+ } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ switch (ret_ty.zigTypeTag(mod)) {
// Aggregate types can be lowered as a singular value
.Struct, .Union => {
- const scalar_type = abi.scalarType(ret_ty, func.target);
+ const scalar_type = abi.scalarType(ret_ty, mod);
try func.emitWValue(operand);
const opcode = buildOpcode(.{
.op = .load,
- .width = @intCast(u8, scalar_type.abiSize(func.target) * 8),
- .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned,
- .valtype1 = typeToValtype(scalar_type, func.target),
+ .width = @intCast(u8, scalar_type.abiSize(mod) * 8),
+ .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
+ .valtype1 = typeToValtype(scalar_type, mod),
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = operand.offset(),
- .alignment = scalar_type.abiAlignment(func.target),
+ .alignment = scalar_type.abiAlignment(mod),
});
},
else => try func.emitWValue(operand),
}
} else {
- if (!ret_ty.hasRuntimeBitsIgnoreComptime() and ret_ty.isError()) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and ret_ty.isError(mod)) {
try func.addImm32(0);
} else {
try func.emitWValue(operand);
@@ -2123,14 +2135,15 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const child_type = func.air.typeOfIndex(inst).childType();
+ const mod = func.bin_file.base.options.module.?;
var result = result: {
- if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime()) {
+ if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
break :result try func.allocStack(Type.usize); // create pointer to void
}
const fn_info = func.decl.ty.fnInfo();
- if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
+ if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) {
break :result func.return_value;
}
@@ -2141,16 +2154,17 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
const ret_ty = func.air.typeOf(un_op).childType();
const fn_info = func.decl.ty.fnInfo();
- if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
- if (ret_ty.isError()) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (ret_ty.isError(mod)) {
try func.addImm32(0);
}
- } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
+ } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, mod)) {
// leave on the stack
_ = try func.load(operand, ret_ty, 0);
}
@@ -2167,26 +2181,26 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]);
const ty = func.air.typeOf(pl_op.operand);
- const fn_ty = switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
.Pointer => ty.childType(),
else => unreachable,
};
const ret_ty = fn_ty.fnReturnType();
const fn_info = fn_ty.fnInfo();
- const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target);
+ const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod);
const callee: ?Decl.Index = blk: {
- const func_val = func.air.value(pl_op.operand) orelse break :blk null;
- const module = func.bin_file.base.options.module.?;
+ const func_val = func.air.value(pl_op.operand, mod) orelse break :blk null;
if (func_val.castTag(.function)) |function| {
_ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl);
break :blk function.data.owner_decl;
} else if (func_val.castTag(.extern_fn)) |extern_fn| {
- const ext_decl = module.declPtr(extern_fn.data.owner_decl);
+ const ext_decl = mod.declPtr(extern_fn.data.owner_decl);
const ext_info = ext_decl.ty.fnInfo();
- var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target);
+ var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, mod);
defer func_type.deinit(func.gpa);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
const atom = func.bin_file.getAtomPtr(atom_index);
@@ -2215,7 +2229,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const arg_val = try func.resolveInst(arg);
const arg_ty = func.air.typeOf(arg);
- if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val);
}
@@ -2226,11 +2240,11 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else {
// in this case we call a function pointer
// so load its value onto the stack
- std.debug.assert(ty.zigTypeTag() == .Pointer);
+ std.debug.assert(ty.zigTypeTag(mod) == .Pointer);
const operand = try func.resolveInst(pl_op.operand);
try func.emitWValue(operand);
- var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
+ var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod);
defer fn_type.deinit(func.gpa);
const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type);
@@ -2238,7 +2252,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
}
const result_value = result_value: {
- if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
break :result_value WValue{ .none = {} };
} else if (ret_ty.isNoReturn()) {
try func.addTag(.@"unreachable");
@@ -2246,10 +2260,10 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else if (first_param_sret) {
break :result_value sret;
// TODO: Make this less fragile and optimize
- } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag() == .Struct or ret_ty.zigTypeTag() == .Union) {
+ } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) {
const result_local = try func.allocLocal(ret_ty);
try func.addLabel(.local_set, result_local.local.value);
- const scalar_type = abi.scalarType(ret_ty, func.target);
+ const scalar_type = abi.scalarType(ret_ty, mod);
const result = try func.allocStack(scalar_type);
try func.store(result, result_local, scalar_type, 0);
break :result_value result;
@@ -2272,6 +2286,7 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@@ -2290,17 +2305,13 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
} else {
// at this point we have a non-natural alignment, we must
// load the value, and then shift+or the rhs into the result location.
- var int_ty_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = ptr_info.host_size * 8,
- };
- const int_elem_ty = Type.initPayload(&int_ty_payload.base);
+ const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8);
- if (isByRef(int_elem_ty, func.target)) {
+ if (isByRef(int_elem_ty, mod)) {
return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
}
- var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(func.target))) - 1);
+ var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1);
mask <<= @intCast(u6, ptr_info.bit_offset);
mask ^= ~@as(u64, 0);
const shift_val = if (ptr_info.host_size <= 4)
@@ -2329,11 +2340,12 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
assert(!(lhs != .stack and rhs == .stack));
- const abi_size = ty.abiSize(func.target);
- switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ const abi_size = ty.abiSize(mod);
+ switch (ty.zigTypeTag(mod)) {
.ErrorUnion => {
const pl_ty = ty.errorUnionPayload();
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.store(lhs, rhs, Type.anyerror, 0);
}
@@ -2341,26 +2353,26 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
.Optional => {
- if (ty.isPtrLikeOptional()) {
+ if (ty.isPtrLikeOptional(mod)) {
return func.store(lhs, rhs, Type.usize, 0);
}
var buf: Type.Payload.ElemType = undefined;
const pl_ty = ty.optionalChild(&buf);
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.store(lhs, rhs, Type.u8, 0);
}
- if (pl_ty.zigTypeTag() == .ErrorSet) {
+ if (pl_ty.zigTypeTag(mod) == .ErrorSet) {
return func.store(lhs, rhs, Type.anyerror, 0);
}
const len = @intCast(u32, abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Struct, .Array, .Union => if (isByRef(ty, func.target)) {
+ .Struct, .Array, .Union => if (isByRef(ty, mod)) {
const len = @intCast(u32, abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Vector => switch (determineSimdStoreStrategy(ty, func.target)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, mod)) {
.unrolled => {
const len = @intCast(u32, abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
@@ -2374,7 +2386,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_store),
offset + lhs.offset(),
- ty.abiAlignment(func.target),
+ ty.abiAlignment(mod),
});
return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
},
@@ -2404,7 +2416,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset());
return;
} else if (abi_size > 16) {
- try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(func.target)) });
+ try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(mod)) });
},
else => if (abi_size > 8) {
return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
@@ -2418,7 +2430,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
// into lhs, so we calculate that and emit that instead
try func.lowerToStack(rhs);
- const valtype = typeToValtype(ty, func.target);
+ const valtype = typeToValtype(ty, mod);
const opcode = buildOpcode(.{
.valtype1 = valtype,
.width = @intCast(u8, abi_size * 8),
@@ -2428,21 +2440,22 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
// store rhs value at stack pointer's location in memory
try func.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
- .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(func.target) },
+ .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(mod) },
);
}
fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = func.air.getRefType(ty_op.ty);
const ptr_ty = func.air.typeOf(ty_op.operand);
const ptr_info = ptr_ty.ptrInfo().data;
- if (!ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{ty_op.operand});
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand});
const result = result: {
- if (isByRef(ty, func.target)) {
+ if (isByRef(ty, mod)) {
const new_local = try func.allocStack(ty);
try func.store(new_local, operand, ty, 0);
break :result new_local;
@@ -2455,11 +2468,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// at this point we have a non-natural alignment, we must
// shift the value to obtain the correct bit.
- var int_ty_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = ptr_info.host_size * 8,
- };
- const int_elem_ty = Type.initPayload(&int_ty_payload.base);
+ const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8);
const shift_val = if (ptr_info.host_size <= 4)
WValue{ .imm32 = ptr_info.bit_offset }
else if (ptr_info.host_size <= 8)
@@ -2479,25 +2488,26 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Loads an operand from the linear memory section.
/// NOTE: Leaves the value on the stack.
fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
// load local's value from memory by its stack position
try func.emitWValue(operand);
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
// TODO: Add helper functions for simd opcodes
const extra_index = @intCast(u32, func.mir_extra.items.len);
// stores as := opcode, offset, alignment (opcode::memarg)
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_load),
offset + operand.offset(),
- ty.abiAlignment(func.target),
+ ty.abiAlignment(mod),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return WValue{ .stack = {} };
}
- const abi_size = @intCast(u8, ty.abiSize(func.target));
+ const abi_size = @intCast(u8, ty.abiSize(mod));
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, func.target),
+ .valtype1 = typeToValtype(ty, mod),
.width = abi_size * 8,
.op = .load,
.signedness = .unsigned,
@@ -2505,7 +2515,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
try func.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
- .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(func.target) },
+ .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(mod) },
);
return WValue{ .stack = {} };
@@ -2516,8 +2526,9 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const arg = func.args[arg_index];
const cc = func.decl.ty.fnInfo().cc;
const arg_ty = func.air.typeOfIndex(inst);
+ const mod = func.bin_file.base.options.module.?;
if (cc == .C) {
- const arg_classes = abi.classifyType(arg_ty, func.target);
+ const arg_classes = abi.classifyType(arg_ty, mod);
for (arg_classes) |class| {
if (class != .none) {
func.arg_index += 1;
@@ -2527,7 +2538,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When we have an argument that's passed using more than a single parameter,
// we combine them into a single stack value
if (arg_classes[0] == .direct and arg_classes[1] == .direct) {
- if (arg_ty.zigTypeTag() != .Int and arg_ty.zigTypeTag() != .Float) {
+ if (arg_ty.zigTypeTag(mod) != .Int and arg_ty.zigTypeTag(mod) != .Float) {
return func.fail(
"TODO: Implement C-ABI argument for type '{}'",
.{arg_ty.fmt(func.bin_file.base.options.module.?)},
@@ -2557,6 +2568,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
@@ -2570,10 +2582,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
// For big integers we can ignore this as we will call into compiler-rt which handles this.
const result = switch (op) {
.shr, .shl => res: {
- const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse {
+ const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse {
return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
- const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?;
+ const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
break :blk try tmp.toLocal(func, lhs_ty);
@@ -2593,6 +2605,7 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
/// Performs a binary operation on the given `WValue`'s
/// NOTE: THis leaves the value on top of the stack.
fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
assert(!(lhs != .stack and rhs == .stack));
if (ty.isAnyFloat()) {
@@ -2600,8 +2613,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
return func.floatOp(float_op, ty, &.{ lhs, rhs });
}
- if (isByRef(ty, func.target)) {
- if (ty.zigTypeTag() == .Int) {
+ if (isByRef(ty, mod)) {
+ if (ty.zigTypeTag(mod) == .Int) {
return func.binOpBigInt(lhs, rhs, ty, op);
} else {
return func.fail(
@@ -2613,8 +2626,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
const opcode: wasm.Opcode = buildOpcode(.{
.op = op,
- .valtype1 = typeToValtype(ty, func.target),
- .signedness = if (ty.isSignedInt()) .signed else .unsigned,
+ .valtype1 = typeToValtype(ty, mod),
+ .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
});
try func.emitWValue(lhs);
try func.emitWValue(rhs);
@@ -2625,7 +2638,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
}
fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
- if (ty.intInfo(func.target).bits > 128) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.intInfo(mod).bits > 128) {
return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{});
}
@@ -2763,7 +2777,8 @@ fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError
}
fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue {
- if (ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement floatOps for vectors", .{});
}
@@ -2773,7 +2788,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
for (args) |operand| {
try func.emitWValue(operand);
}
- const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, func.target) });
+ const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, mod) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
}
@@ -2827,6 +2842,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
}
fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const lhs = try func.resolveInst(bin_op.lhs);
@@ -2834,7 +2850,7 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const lhs_ty = func.air.typeOf(bin_op.lhs);
const rhs_ty = func.air.typeOf(bin_op.rhs);
- if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement wrapping arithmetic for vectors", .{});
}
@@ -2845,10 +2861,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
// For big integers we can ignore this as we will call into compiler-rt which handles this.
const result = switch (op) {
.shr, .shl => res: {
- const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse {
+ const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse {
return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
- const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?;
+ const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
break :blk try tmp.toLocal(func, lhs_ty);
@@ -2877,8 +2893,9 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
/// Asserts `Type` is <= 128 bits.
/// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack.
fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
- assert(ty.abiSize(func.target) <= 16);
- const bitsize = @intCast(u16, ty.bitSize(func.target));
+ const mod = func.bin_file.base.options.module.?;
+ assert(ty.abiSize(mod) <= 16);
+ const bitsize = @intCast(u16, ty.bitSize(mod));
const wasm_bits = toWasmBits(bitsize) orelse {
return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize});
};
@@ -2915,6 +2932,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
}
fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
switch (ptr_val.tag()) {
.decl_ref_mut => {
const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
@@ -2932,15 +2950,15 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
const parent_ty = field_ptr.container_ty;
- const field_offset = switch (parent_ty.zigTypeTag()) {
+ const field_offset = switch (parent_ty.zigTypeTag(mod)) {
.Struct => switch (parent_ty.containerLayout()) {
- .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, func.target),
- else => parent_ty.structFieldOffset(field_ptr.field_index, func.target),
+ .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod),
+ else => parent_ty.structFieldOffset(field_ptr.field_index, mod),
},
.Union => switch (parent_ty.containerLayout()) {
.Packed => 0,
else => blk: {
- const layout: Module.Union.Layout = parent_ty.unionGetLayout(func.target);
+ const layout: Module.Union.Layout = parent_ty.unionGetLayout(mod);
if (layout.payload_size == 0) break :blk 0;
if (layout.payload_align > layout.tag_align) break :blk 0;
@@ -2964,7 +2982,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
.elem_ptr => {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
const index = elem_ptr.index;
- const elem_offset = index * elem_ptr.elem_ty.abiSize(func.target);
+ const elem_offset = index * elem_ptr.elem_ty.abiSize(mod);
return func.lowerParentPtr(elem_ptr.array_ptr, offset + @intCast(u32, elem_offset));
},
.opt_payload_ptr => {
@@ -2976,9 +2994,9 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
}
fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue {
- const module = func.bin_file.base.options.module.?;
- const decl = module.declPtr(decl_index);
- module.markDeclAlive(decl);
+ const mod = func.bin_file.base.options.module.?;
+ const decl = mod.declPtr(decl_index);
+ mod.markDeclAlive(decl);
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
.data = decl.ty,
@@ -2992,18 +3010,18 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind
return WValue{ .memory = try func.bin_file.lowerUnnamedConst(tv, decl_index) };
}
- const module = func.bin_file.base.options.module.?;
- const decl = module.declPtr(decl_index);
- if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) {
+ const mod = func.bin_file.base.options.module.?;
+ const decl = mod.declPtr(decl_index);
+ if (decl.ty.zigTypeTag(mod) != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime(mod)) {
return WValue{ .imm32 = 0xaaaaaaaa };
}
- module.markDeclAlive(decl);
+ mod.markDeclAlive(decl);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index);
const atom = func.bin_file.getAtom(atom_index);
const target_sym_index = atom.sym_index;
- if (decl.ty.zigTypeTag() == .Fn) {
+ if (decl.ty.zigTypeTag(mod) == .Fn) {
try func.bin_file.addTableFunction(target_sym_index);
return WValue{ .function_index = target_sym_index };
} else if (offset == 0) {
@@ -3041,31 +3059,31 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
const decl_index = decl_ref_mut.data.decl_index;
return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0);
}
- const target = func.target;
- switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ switch (ty.zigTypeTag(mod)) {
.Void => return WValue{ .none = {} },
.Int => {
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
switch (int_info.signedness) {
.signed => switch (int_info.bits) {
0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement(
- val.toSignedInt(target),
+ val.toSignedInt(mod),
@intCast(u6, int_info.bits),
)) },
33...64 => return WValue{ .imm64 = toTwosComplement(
- val.toSignedInt(target),
+ val.toSignedInt(mod),
@intCast(u7, int_info.bits),
) },
else => unreachable,
},
.unsigned => switch (int_info.bits) {
- 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
- 33...64 => return WValue{ .imm64 = val.toUnsignedInt(target) },
+ 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) },
+ 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) },
else => unreachable,
},
}
},
- .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
+ .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) },
.Float => switch (ty.floatBits(func.target)) {
16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16)) },
32 => return WValue{ .float32 = val.toFloat(f32) },
@@ -3074,7 +3092,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
},
.Pointer => switch (val.tag()) {
.field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0),
- .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
+ .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) },
.zero, .null_value => return WValue{ .imm32 = 0 },
else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}),
},
@@ -3100,8 +3118,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}),
}
} else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&int_tag_buffer);
+ const int_tag_ty = ty.intTagType();
return func.lowerConstant(val, int_tag_ty);
}
},
@@ -3115,7 +3132,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
.ErrorUnion => {
const error_type = ty.errorUnionSet();
const payload_type = ty.errorUnionPayload();
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
// We use the error type directly as the type.
const is_pl = val.errorUnionIsPayload();
const err_val = if (!is_pl) val else Value.initTag(.zero);
@@ -3123,12 +3140,12 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
}
return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
},
- .Optional => if (ty.optionalReprIsPayload()) {
+ .Optional => if (ty.optionalReprIsPayload(mod)) {
var buf: Type.Payload.ElemType = undefined;
const pl_ty = ty.optionalChild(&buf);
if (val.castTag(.opt_payload)) |payload| {
return func.lowerConstant(payload.data, pl_ty);
- } else if (val.isNull()) {
+ } else if (val.isNull(mod)) {
return WValue{ .imm32 = 0 };
} else {
return func.lowerConstant(val, pl_ty);
@@ -3150,7 +3167,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
return func.lowerConstant(int_val, struct_obj.backing_int_ty);
},
.Vector => {
- assert(determineSimdStoreStrategy(ty, target) == .direct);
+ assert(determineSimdStoreStrategy(ty, mod) == .direct);
var buf: [16]u8 = undefined;
val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable;
return func.storeSimdImmd(buf);
@@ -3176,9 +3193,10 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
}
fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
- switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ switch (ty.zigTypeTag(mod)) {
.Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa },
- .Int, .Enum => switch (ty.intInfo(func.target).bits) {
+ .Int, .Enum => switch (ty.intInfo(mod).bits) {
0...32 => return WValue{ .imm32 = 0xaaaaaaaa },
33...64 => return WValue{ .imm64 = 0xaaaaaaaaaaaaaaaa },
else => unreachable,
@@ -3197,7 +3215,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const pl_ty = ty.optionalChild(&buf);
- if (ty.optionalReprIsPayload()) {
+ if (ty.optionalReprIsPayload(mod)) {
return func.emitUndefined(pl_ty);
}
return WValue{ .imm32 = 0xaaaaaaaa };
@@ -3210,7 +3228,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
assert(struct_obj.layout == .Packed);
return func.emitUndefined(struct_obj.backing_int_ty);
},
- else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag()}),
+ else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
}
}
@@ -3218,8 +3236,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
/// It's illegal to provide a value with a type that cannot be represented
/// as an integer value.
fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
- const target = func.target;
- switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ switch (ty.zigTypeTag(mod)) {
.Enum => {
if (val.castTag(.enum_field_index)) |field_index| {
switch (ty.tag()) {
@@ -3239,35 +3257,35 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
else => unreachable,
}
} else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&int_tag_buffer);
+ const int_tag_ty = ty.intTagType();
return func.valueAsI32(val, int_tag_ty);
}
},
- .Int => switch (ty.intInfo(func.target).signedness) {
- .signed => return @truncate(i32, val.toSignedInt(target)),
- .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(target))),
+ .Int => switch (ty.intInfo(mod).signedness) {
+ .signed => return @truncate(i32, val.toSignedInt(mod)),
+ .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(mod))),
},
.ErrorSet => {
const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function
return @bitCast(i32, kv.value);
},
- .Bool => return @intCast(i32, val.toSignedInt(target)),
- .Pointer => return @intCast(i32, val.toSignedInt(target)),
+ .Bool => return @intCast(i32, val.toSignedInt(mod)),
+ .Pointer => return @intCast(i32, val.toSignedInt(mod)),
else => unreachable, // Programmer called this function for an illegal type
}
}
fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const block_ty = func.air.getRefType(ty_pl.ty);
- const wasm_block_ty = genBlockType(block_ty, func.target);
+ const wasm_block_ty = genBlockType(block_ty, mod);
const extra = func.air.extraData(Air.Block, ty_pl.payload);
const body = func.air.extra[extra.end..][0..extra.data.body_len];
// if wasm_block_ty is non-empty, we create a register to store the temporary value
const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: {
- const ty: Type = if (isByRef(block_ty, func.target)) Type.u32 else block_ty;
+ const ty: Type = if (isByRef(block_ty, mod)) Type.u32 else block_ty;
break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
} else WValue.none;
@@ -3379,16 +3397,17 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In
/// NOTE: This leaves the result on top of the stack, rather than a new local.
fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
assert(!(lhs != .stack and rhs == .stack));
- if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
- if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// When we hit this case, we must check the value of optionals
// that are not pointers. This means first checking against non-null for
// both lhs and rhs, as well as checking the payload are matching of lhs and rhs
return func.cmpOptionals(lhs, rhs, ty, op);
}
- } else if (isByRef(ty, func.target)) {
+ } else if (isByRef(ty, mod)) {
return func.cmpBigInt(lhs, rhs, ty, op);
} else if (ty.isAnyFloat() and ty.floatBits(func.target) == 16) {
return func.cmpFloat16(lhs, rhs, op);
@@ -3401,13 +3420,13 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
const signedness: std.builtin.Signedness = blk: {
// by default we tell the operand type is unsigned (i.e. bools and enum values)
- if (ty.zigTypeTag() != .Int) break :blk .unsigned;
+ if (ty.zigTypeTag(mod) != .Int) break :blk .unsigned;
// incase of an actual integer, we emit the correct signedness
- break :blk ty.intInfo(func.target).signedness;
+ break :blk ty.intInfo(mod).signedness;
};
const opcode: wasm.Opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, func.target),
+ .valtype1 = typeToValtype(ty, mod),
.op = switch (op) {
.lt => .lt,
.lte => .le,
@@ -3464,11 +3483,12 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const br = func.air.instructions.items(.data)[inst].br;
const block = func.blocks.get(br.block_inst).?;
// if operand has codegen bits we should break with a value
- if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime()) {
+ if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) {
const operand = try func.resolveInst(br.operand);
try func.lowerToStack(operand);
@@ -3490,16 +3510,17 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const operand_ty = func.air.typeOf(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
const result = result: {
- if (operand_ty.zigTypeTag() == .Bool) {
+ if (operand_ty.zigTypeTag(mod) == .Bool) {
try func.emitWValue(operand);
try func.addTag(.i32_eqz);
const not_tmp = try func.allocLocal(operand_ty);
try func.addLabel(.local_set, not_tmp.local.value);
break :result not_tmp;
} else {
- const operand_bits = operand_ty.intInfo(func.target).bits;
+ const operand_bits = operand_ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(operand_bits) orelse {
return func.fail("TODO: Implement binary NOT for integer with bitsize '{d}'", .{operand_bits});
};
@@ -3566,16 +3587,17 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
// if we bitcast a float to or from an integer we must use the 'reinterpret' instruction
if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand;
if (wanted_ty.tag() == .f16 or given_ty.tag() == .f16) return operand;
- if (wanted_ty.bitSize(func.target) > 64) return operand;
- assert((wanted_ty.isInt() and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt()));
+ if (wanted_ty.bitSize(mod) > 64) return operand;
+ assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod)));
const opcode = buildOpcode(.{
.op = .reinterpret,
- .valtype1 = typeToValtype(wanted_ty, func.target),
- .valtype2 = typeToValtype(given_ty, func.target),
+ .valtype1 = typeToValtype(wanted_ty, mod),
+ .valtype2 = typeToValtype(given_ty, mod),
});
try func.emitWValue(operand);
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
@@ -3609,19 +3631,20 @@ fn structFieldPtr(
struct_ty: Type,
index: u32,
) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
const result_ty = func.air.typeOfIndex(inst);
const offset = switch (struct_ty.containerLayout()) {
- .Packed => switch (struct_ty.zigTypeTag()) {
+ .Packed => switch (struct_ty.zigTypeTag(mod)) {
.Struct => offset: {
if (result_ty.ptrInfo().data.host_size != 0) {
break :offset @as(u32, 0);
}
- break :offset struct_ty.packedStructFieldByteOffset(index, func.target);
+ break :offset struct_ty.packedStructFieldByteOffset(index, mod);
},
.Union => 0,
else => unreachable,
},
- else => struct_ty.structFieldOffset(index, func.target),
+ else => struct_ty.structFieldOffset(index, mod),
};
// save a load and store when we can simply reuse the operand
if (offset == 0) {
@@ -3636,6 +3659,7 @@ fn structFieldPtr(
}
fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -3643,15 +3667,15 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
const result = switch (struct_ty.containerLayout()) {
- .Packed => switch (struct_ty.zigTypeTag()) {
+ .Packed => switch (struct_ty.zigTypeTag(mod)) {
.Struct => result: {
const struct_obj = struct_ty.castTag(.@"struct").?.data;
- const offset = struct_obj.packedFieldBitOffset(func.target, field_index);
+ const offset = struct_obj.packedFieldBitOffset(mod, field_index);
const backing_ty = struct_obj.backing_int_ty;
- const wasm_bits = toWasmBits(backing_ty.intInfo(func.target).bits) orelse {
+ const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
};
const const_wvalue = if (wasm_bits == 32)
@@ -3667,25 +3691,17 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else
try func.binOp(operand, const_wvalue, backing_ty, .shr);
- if (field_ty.zigTypeTag() == .Float) {
- var payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field_ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&payload.base);
+ if (field_ty.zigTypeTag(mod) == .Float) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
const bitcasted = try func.bitcast(field_ty, int_type, truncated);
break :result try bitcasted.toLocal(func, field_ty);
- } else if (field_ty.isPtrAtRuntime() and struct_obj.fields.count() == 1) {
+ } else if (field_ty.isPtrAtRuntime(mod) and struct_obj.fields.count() == 1) {
// In this case we do not have to perform any transformations,
// we can simply reuse the operand.
break :result func.reuseOperand(struct_field.struct_operand, operand);
- } else if (field_ty.isPtrAtRuntime()) {
- var payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field_ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&payload.base);
+ } else if (field_ty.isPtrAtRuntime(mod)) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
break :result try truncated.toLocal(func, field_ty);
}
@@ -3693,8 +3709,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try truncated.toLocal(func, field_ty);
},
.Union => result: {
- if (isByRef(struct_ty, func.target)) {
- if (!isByRef(field_ty, func.target)) {
+ if (isByRef(struct_ty, mod)) {
+ if (!isByRef(field_ty, mod)) {
const val = try func.load(operand, field_ty, 0);
break :result try val.toLocal(func, field_ty);
} else {
@@ -3704,26 +3720,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
}
- var payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, struct_ty.bitSize(func.target)),
- };
- const union_int_type = Type.initPayload(&payload.base);
- if (field_ty.zigTypeTag() == .Float) {
- var int_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field_ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&int_payload.base);
+ const union_int_type = try mod.intType(.unsigned, @intCast(u16, struct_ty.bitSize(mod)));
+ if (field_ty.zigTypeTag(mod) == .Float) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
const truncated = try func.trunc(operand, int_type, union_int_type);
const bitcasted = try func.bitcast(field_ty, int_type, truncated);
break :result try bitcasted.toLocal(func, field_ty);
- } else if (field_ty.isPtrAtRuntime()) {
- var int_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field_ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&int_payload.base);
+ } else if (field_ty.isPtrAtRuntime(mod)) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
const truncated = try func.trunc(operand, int_type, union_int_type);
break :result try truncated.toLocal(func, field_ty);
}
@@ -3733,11 +3737,10 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
},
else => result: {
- const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
- return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)});
+ const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, mod)) orelse {
+ return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(mod)});
};
- if (isByRef(field_ty, func.target)) {
+ if (isByRef(field_ty, mod)) {
switch (operand) {
.stack_offset => |stack_offset| {
break :result WValue{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } };
@@ -3754,6 +3757,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
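Not part of the patch: a self-contained sketch of the shift-then-truncate extraction that the packed-struct branch of airStructFieldVal performs above, including the extra bitcast for float fields. The function and test names are illustrative, not identifiers from the codebase.

const std = @import("std");

// Illustrative sketch only; not from the patch.
// Extract the field starting at `bit_offset` from a packed backing integer:
// shift it down, truncate to the field's bit width, then reinterpret the bits
// when the field is a float.
fn extractPackedField(comptime Field: type, backing: u32, bit_offset: u5) Field {
    const FieldInt = std.meta.Int(.unsigned, @bitSizeOf(Field));
    const truncated = @truncate(FieldInt, backing >> bit_offset);
    return switch (@typeInfo(Field)) {
        .Int, .Float => @bitCast(Field, truncated),
        else => @compileError("unsupported field type"),
    };
}

test "extract an f16 field stored at bit offset 8" {
    const backing = @as(u32, @bitCast(u16, @as(f16, 1.5))) << 8;
    try std.testing.expectEqual(@as(f16, 1.5), extractPackedField(f16, backing, 8));
}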
fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
// result type is always 'noreturn'
const blocktype = wasm.block_empty;
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
@@ -3787,7 +3791,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
errdefer func.gpa.free(values);
for (items, 0..) |ref, i| {
- const item_val = func.air.value(ref).?;
+ const item_val = func.air.value(ref, mod).?;
const int_val = func.valueAsI32(item_val, target_ty);
if (lowest_maybe == null or int_val < lowest_maybe.?) {
lowest_maybe = int_val;
@@ -3810,7 +3814,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 // When the target is an integer type larger than u32, we have no way to use the value
// as an index, therefore we also use an if/else-chain for those cases.
// TODO: Benchmark this to find a proper value, LLVM seems to draw the line at '40~45'.
- const is_sparse = highest - lowest > 50 or target_ty.bitSize(func.target) > 32;
+ const is_sparse = highest - lowest > 50 or target_ty.bitSize(mod) > 32;
const else_body = func.air.extra[extra_index..][0..switch_br.data.else_body_len];
const has_else_body = else_body.len != 0;
@@ -3855,7 +3859,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// for errors that are not present in any branch. This is fine as this default
// case will never be hit for those cases but we do save runtime cost and size
// by using a jump table for this instead of if-else chains.
- break :blk if (has_else_body or target_ty.zigTypeTag() == .ErrorSet) case_i else unreachable;
+ break :blk if (has_else_body or target_ty.zigTypeTag(mod) == .ErrorSet) case_i else unreachable;
};
func.mir_extra.appendAssumeCapacity(idx);
} else if (has_else_body) {
@@ -3866,10 +3870,10 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const signedness: std.builtin.Signedness = blk: {
 // by default we assume the operand type is unsigned (i.e. bools and enum values)
- if (target_ty.zigTypeTag() != .Int) break :blk .unsigned;
+ if (target_ty.zigTypeTag(mod) != .Int) break :blk .unsigned;
 // in case of an actual integer, we emit the correct signedness
- break :blk target_ty.intInfo(func.target).signedness;
+ break :blk target_ty.intInfo(mod).signedness;
};
try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @boolToInt(has_else_body));
@@ -3882,7 +3886,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const val = try func.lowerConstant(case.values[0].value, target_ty);
try func.emitWValue(val);
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(target_ty, func.target),
+ .valtype1 = typeToValtype(target_ty, mod),
.op = .ne, // not equal, because we want to jump out of this block if it does not match the condition.
.signedness = signedness,
});
@@ -3896,7 +3900,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const val = try func.lowerConstant(value.value, target_ty);
try func.emitWValue(val);
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(target_ty, func.target),
+ .valtype1 = typeToValtype(target_ty, mod),
.op = .eq,
.signedness = signedness,
});
@@ -3933,6 +3937,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
const err_union_ty = func.air.typeOf(un_op);
@@ -3948,10 +3953,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
}
try func.emitWValue(operand);
- if (pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try func.addMemArg(.i32_load16_u, .{
- .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, func.target)),
- .alignment = Type.anyerror.abiAlignment(func.target),
+ .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, mod)),
+ .alignment = Type.anyerror.abiAlignment(mod),
});
}
@@ -3967,6 +3972,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
}
fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -3975,15 +3981,15 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
const payload_ty = err_ty.errorUnionPayload();
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (op_is_ptr) {
break :result func.reuseOperand(ty_op.operand, operand);
}
break :result WValue{ .none = {} };
}
- const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, func.target));
- if (op_is_ptr or isByRef(payload_ty, func.target)) {
+ const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
+ if (op_is_ptr or isByRef(payload_ty, mod)) {
break :result try func.buildPointerOffset(operand, pl_offset, .new);
}
@@ -3994,6 +4000,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
}
fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4006,17 +4013,18 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool)
break :result WValue{ .imm32 = 0 };
}
- if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, func.target)));
+ const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, mod)));
break :result try error_val.toLocal(func, Type.anyerror);
};
func.finishAir(inst, result, &.{ty_op.operand});
}
fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4024,18 +4032,18 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
const pl_ty = func.air.typeOf(ty_op.operand);
const result = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
const err_union = try func.allocStack(err_ty);
- const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new);
+ const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new);
try func.store(payload_ptr, operand, pl_ty, 0);
// ensure we also write '0' to the error part, so any present stack value gets overwritten by it.
try func.emitWValue(err_union);
try func.addImm32(0);
- const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target));
+ const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 });
break :result err_union;
};
@@ -4043,6 +4051,7 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
}
fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4050,17 +4059,17 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pl_ty = err_ty.errorUnionPayload();
const result = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
const err_union = try func.allocStack(err_ty);
// store error value
- try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, func.target)));
+ try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, mod)));
// write 'undefined' to the payload
- const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new);
- const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(func.target));
+ const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new);
+ const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(mod));
try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
break :result err_union;
@@ -4074,15 +4083,16 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.air.getRefType(ty_op.ty);
const operand = try func.resolveInst(ty_op.operand);
const operand_ty = func.air.typeOf(ty_op.operand);
- if (ty.zigTypeTag() == .Vector or operand_ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) {
return func.fail("todo Wasm intcast for vectors", .{});
}
- if (ty.abiSize(func.target) > 16 or operand_ty.abiSize(func.target) > 16) {
+ if (ty.abiSize(mod) > 16 or operand_ty.abiSize(mod) > 16) {
return func.fail("todo Wasm intcast for bitsize > 128", .{});
}
- const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(func.target))).?;
- const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+ const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(mod))).?;
+ const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
const result = if (op_bits == wanted_bits)
func.reuseOperand(ty_op.operand, operand)
else
@@ -4096,8 +4106,9 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Asserts type's bitsize <= 128
/// NOTE: May leave the result on the top of the stack.
fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
- const given_bitsize = @intCast(u16, given.bitSize(func.target));
- const wanted_bitsize = @intCast(u16, wanted.bitSize(func.target));
+ const mod = func.bin_file.base.options.module.?;
+ const given_bitsize = @intCast(u16, given.bitSize(mod));
+ const wanted_bitsize = @intCast(u16, wanted.bitSize(mod));
assert(given_bitsize <= 128);
assert(wanted_bitsize <= 128);
@@ -4110,7 +4121,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
try func.addTag(.i32_wrap_i64);
} else if (op_bits == 32 and wanted_bits > 32 and wanted_bits <= 64) {
try func.emitWValue(operand);
- try func.addTag(if (wanted.isSignedInt()) .i64_extend_i32_s else .i64_extend_i32_u);
+ try func.addTag(if (wanted.isSignedInt(mod)) .i64_extend_i32_s else .i64_extend_i32_u);
} else if (wanted_bits == 128) {
// for 128bit integers we store the integer in the virtual stack, rather than a local
const stack_ptr = try func.allocStack(wanted);
@@ -4119,14 +4130,14 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
// for 32 bit integers, we first coerce the value into a 64 bit integer before storing it
 // meaning fewer store operations are required.
const lhs = if (op_bits == 32) blk: {
- break :blk try func.intcast(operand, given, if (wanted.isSignedInt()) Type.i64 else Type.u64);
+ break :blk try func.intcast(operand, given, if (wanted.isSignedInt(mod)) Type.i64 else Type.u64);
} else operand;
// store msb first
try func.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset());
 // For signed integers we shift the msb down by 63 (64-bit integer - 1 sign bit) and store the remaining value
- if (wanted.isSignedInt()) {
+ if (wanted.isSignedInt(mod)) {
try func.emitWValue(stack_ptr);
const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
try func.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset());
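Not part of the patch: a standalone sketch of the shift-by-63 sign extension described above, producing the two 64-bit words that fill the 128-bit stack slot. The function name is illustrative only.

const std = @import("std");

// Illustrative sketch only; not from the patch.
// The low word is the value itself; the high word is the sign bit smeared
// across all 64 bits by an arithmetic shift right of 63.
fn splitSignExtend(value: i64) [2]u64 {
    const low = @bitCast(u64, value);
    const high = @bitCast(u64, value >> 63);
    return .{ low, high };
}

test "sign extension into the high word" {
    try std.testing.expectEqual([2]u64{ 5, 0 }, splitSignExtend(5));
    try std.testing.expectEqual([2]u64{ ~@as(u64, 0), ~@as(u64, 0) }, splitSignExtend(-1));
}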
@@ -4154,16 +4165,16 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
/// For a given type and operand, checks if it's considered `null`.
/// NOTE: Leaves the result on the stack
fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
try func.emitWValue(operand);
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
- if (!optional_ty.optionalReprIsPayload()) {
+ if (!optional_ty.optionalReprIsPayload(mod)) {
// When payload is zero-bits, we can treat operand as a value, rather than
// a pointer to the stack value
- if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
- const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
- return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(module)});
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse {
+ return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(mod)});
};
try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
}
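Not part of the patch: a sketch assuming the layout that isNull and airWrapOptional (further below) imply, where the payload bytes are followed by a single non-null flag byte at the payload's ABI size and a value of 1 marks the payload as present. The function name and byte values are illustrative.

const std = @import("std");

// Illustrative sketch only; not from the patch.
// The non-null flag lives at `payload_abi_size`; zero means the optional is null.
fn optionalIsNull(bytes: []const u8, payload_abi_size: usize) bool {
    return bytes[payload_abi_size] == 0;
}

test "flag byte after a 4-byte payload" {
    const some = [_]u8{ 0xEF, 0xBE, 0xAD, 0xDE, 1 };
    const none = [_]u8{ 0, 0, 0, 0, 0 };
    try std.testing.expect(!optionalIsNull(&some, 4));
    try std.testing.expect(optionalIsNull(&none, 4));
}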
@@ -4183,18 +4194,19 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod
}
fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const opt_ty = func.air.typeOf(ty_op.operand);
const payload_ty = func.air.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.finishAir(inst, .none, &.{ty_op.operand});
}
const result = result: {
const operand = try func.resolveInst(ty_op.operand);
- if (opt_ty.optionalReprIsPayload()) break :result func.reuseOperand(ty_op.operand, operand);
+ if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand);
- if (isByRef(payload_ty, func.target)) {
+ if (isByRef(payload_ty, mod)) {
break :result try func.buildPointerOffset(operand, 0, .new);
}
@@ -4209,10 +4221,11 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const opt_ty = func.air.typeOf(ty_op.operand).childType();
+ const mod = func.bin_file.base.options.module.?;
const result = result: {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.optionalReprIsPayload()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
@@ -4222,22 +4235,22 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const opt_ty = func.air.typeOf(ty_op.operand).childType();
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
}
- if (opt_ty.optionalReprIsPayload()) {
+ if (opt_ty.optionalReprIsPayload(mod)) {
return func.finishAir(inst, operand, &.{ty_op.operand});
}
- const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
- return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)});
+ const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse {
+ return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(mod)});
};
try func.emitWValue(operand);
@@ -4251,9 +4264,10 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const payload_ty = func.air.typeOf(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const non_null_bit = try func.allocStack(Type.initTag(.u1));
try func.emitWValue(non_null_bit);
try func.addImm32(1);
@@ -4263,12 +4277,11 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.air.typeOfIndex(inst);
- if (op_ty.optionalReprIsPayload()) {
+ if (op_ty.optionalReprIsPayload(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
- return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)});
+ const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse {
+ return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(mod)});
};
// Create optional type, set the non-null bit, and store the operand inside the optional type
@@ -4314,7 +4327,8 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const slice = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
const elem_ty = slice_ty.childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
// load pointer onto stack
_ = try func.load(slice, Type.usize, 0);
@@ -4328,7 +4342,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocLocal(Type.usize);
try func.addLabel(.local_set, result_ptr.local.value);
- const result = if (!isByRef(elem_ty, func.target)) result: {
+ const result = if (!isByRef(elem_ty, mod)) result: {
const elem_val = try func.load(result_ptr, elem_ty, 0);
break :result try elem_val.toLocal(func, elem_ty);
} else result_ptr;
@@ -4341,7 +4355,8 @@ fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
const elem_ty = func.air.getRefType(ty_pl.ty).childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
const slice = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
@@ -4389,13 +4404,14 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 /// Truncates a given operand to a given type, discarding any overflowed bits.
/// NOTE: Resulting value is left on the stack.
fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
- const given_bits = @intCast(u16, given_ty.bitSize(func.target));
+ const mod = func.bin_file.base.options.module.?;
+ const given_bits = @intCast(u16, given_ty.bitSize(mod));
if (toWasmBits(given_bits) == null) {
return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
}
var result = try func.intcast(operand, given_ty, wanted_ty);
- const wanted_bits = @intCast(u16, wanted_ty.bitSize(func.target));
+ const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod));
const wasm_bits = toWasmBits(wanted_bits).?;
if (wasm_bits != wanted_bits) {
result = try func.wrapOperand(result, wanted_ty);
@@ -4412,6 +4428,7 @@ fn airBoolToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4422,7 +4439,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const slice_local = try func.allocStack(slice_ty);
// store the array ptr in the slice
- if (array_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try func.store(slice_local, operand, Type.usize, 0);
}
@@ -4454,7 +4471,8 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ptr = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
const elem_ty = ptr_ty.childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
// load pointer onto the stack
if (ptr_ty.isSlice()) {
@@ -4472,7 +4490,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const elem_result = val: {
var result = try func.allocLocal(Type.usize);
try func.addLabel(.local_set, result.local.value);
- if (isByRef(elem_ty, func.target)) {
+ if (isByRef(elem_ty, mod)) {
break :val result;
}
defer result.free(func); // only free if it's not returned like above
@@ -4489,7 +4507,8 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ptr_ty = func.air.typeOf(bin_op.lhs);
const elem_ty = func.air.getRefType(ty_pl.ty).childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
const ptr = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
@@ -4513,6 +4532,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -4524,13 +4544,13 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
else => ptr_ty.childType(),
};
- const valtype = typeToValtype(Type.usize, func.target);
+ const valtype = typeToValtype(Type.usize, mod);
const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul });
const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op });
try func.lowerToStack(ptr);
try func.emitWValue(offset);
- try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(func.target))));
+ try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(mod))));
try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
@@ -4572,7 +4592,8 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
/// this to wasm's memset instruction. When the feature is not present,
/// we implement it manually.
fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
- const abi_size = @intCast(u32, elem_ty.abiSize(func.target));
+ const mod = func.bin_file.base.options.module.?;
+ const abi_size = @intCast(u32, elem_ty.abiSize(mod));
// When bulk_memory is enabled, we lower it to wasm's memset instruction.
// If not, we lower it ourselves.
@@ -4666,24 +4687,25 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const array = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
const elem_ty = array_ty.childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
- if (isByRef(array_ty, func.target)) {
+ if (isByRef(array_ty, mod)) {
try func.lowerToStack(array);
try func.emitWValue(index);
try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
} else {
- std.debug.assert(array_ty.zigTypeTag() == .Vector);
+ std.debug.assert(array_ty.zigTypeTag(mod) == .Vector);
switch (index) {
inline .imm32, .imm64 => |lane| {
- const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(func.target)) {
- 8 => if (elem_ty.isSignedInt()) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
- 16 => if (elem_ty.isSignedInt()) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
- 32 => if (elem_ty.isInt()) .i32x4_extract_lane else .f32x4_extract_lane,
- 64 => if (elem_ty.isInt()) .i64x2_extract_lane else .f64x2_extract_lane,
+ const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(mod)) {
+ 8 => if (elem_ty.isSignedInt(mod)) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
+ 16 => if (elem_ty.isSignedInt(mod)) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
+ 32 => if (elem_ty.isInt(mod)) .i32x4_extract_lane else .f32x4_extract_lane,
+ 64 => if (elem_ty.isInt(mod)) .i64x2_extract_lane else .f64x2_extract_lane,
else => unreachable,
};
@@ -4715,7 +4737,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var result = try func.allocLocal(Type.usize);
try func.addLabel(.local_set, result.local.value);
- if (isByRef(elem_ty, func.target)) {
+ if (isByRef(elem_ty, mod)) {
break :val result;
}
defer result.free(func); // only free if no longer needed and not returned like above
@@ -4733,17 +4755,18 @@ fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const dest_ty = func.air.typeOfIndex(inst);
const op_ty = func.air.typeOf(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
- if (op_ty.abiSize(func.target) > 8) {
+ if (op_ty.abiSize(mod) > 8) {
return func.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{});
}
try func.emitWValue(operand);
const op = buildOpcode(.{
.op = .trunc,
- .valtype1 = typeToValtype(dest_ty, func.target),
- .valtype2 = typeToValtype(op_ty, func.target),
- .signedness = if (dest_ty.isSignedInt()) .signed else .unsigned,
+ .valtype1 = typeToValtype(dest_ty, mod),
+ .valtype2 = typeToValtype(op_ty, mod),
+ .signedness = if (dest_ty.isSignedInt(mod)) .signed else .unsigned,
});
try func.addTag(Mir.Inst.Tag.fromOpcode(op));
const wrapped = try func.wrapOperand(.{ .stack = {} }, dest_ty);
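Not part of the patch: the .trunc opcode selected above rounds toward zero, which at the language level is the behavior of @floatToInt, as this small illustrative test shows.

const std = @import("std");

test "float to int conversion truncates toward zero" {
    // Illustrative sketch only; not from the patch.
    try std.testing.expectEqual(@as(i32, 3), @floatToInt(i32, @as(f32, 3.9)));
    try std.testing.expectEqual(@as(i32, -3), @floatToInt(i32, @as(f32, -3.9)));
}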
@@ -4757,17 +4780,18 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const dest_ty = func.air.typeOfIndex(inst);
const op_ty = func.air.typeOf(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
- if (op_ty.abiSize(func.target) > 8) {
+ if (op_ty.abiSize(mod) > 8) {
return func.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{});
}
try func.emitWValue(operand);
const op = buildOpcode(.{
.op = .convert,
- .valtype1 = typeToValtype(dest_ty, func.target),
- .valtype2 = typeToValtype(op_ty, func.target),
- .signedness = if (op_ty.isSignedInt()) .signed else .unsigned,
+ .valtype1 = typeToValtype(dest_ty, mod),
+ .valtype2 = typeToValtype(op_ty, mod),
+ .signedness = if (op_ty.isSignedInt(mod)) .signed else .unsigned,
});
try func.addTag(Mir.Inst.Tag.fromOpcode(op));
@@ -4777,18 +4801,19 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = func.air.typeOfIndex(inst);
const elem_ty = ty.childType();
- if (determineSimdStoreStrategy(ty, func.target) == .direct) blk: {
+ if (determineSimdStoreStrategy(ty, mod) == .direct) blk: {
switch (operand) {
// when the operand lives in the linear memory section, we can directly
 // load and splat the value at once, meaning we do not first have to load
// the scalar value onto the stack.
.stack_offset, .memory, .memory_offset => {
- const opcode = switch (elem_ty.bitSize(func.target)) {
+ const opcode = switch (elem_ty.bitSize(mod)) {
8 => std.wasm.simdOpcode(.v128_load8_splat),
16 => std.wasm.simdOpcode(.v128_load16_splat),
32 => std.wasm.simdOpcode(.v128_load32_splat),
@@ -4803,18 +4828,18 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
opcode,
operand.offset(),
- elem_ty.abiAlignment(func.target),
+ elem_ty.abiAlignment(mod),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
try func.addLabel(.local_set, result.local.value);
return func.finishAir(inst, result, &.{ty_op.operand});
},
.local => {
- const opcode = switch (elem_ty.bitSize(func.target)) {
+ const opcode = switch (elem_ty.bitSize(mod)) {
8 => std.wasm.simdOpcode(.i8x16_splat),
16 => std.wasm.simdOpcode(.i16x8_splat),
- 32 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
- 64 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
+ 32 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
+ 64 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
else => break :blk, // Cannot make use of simd-instructions
};
const result = try func.allocLocal(ty);
@@ -4828,14 +4853,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
}
}
- const elem_size = elem_ty.bitSize(func.target);
+ const elem_size = elem_ty.bitSize(mod);
const vector_len = @intCast(usize, ty.vectorLen());
if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
}
const result = try func.allocStack(ty);
- const elem_byte_size = @intCast(u32, elem_ty.abiSize(func.target));
+ const elem_byte_size = @intCast(u32, elem_ty.abiSize(mod));
var index: usize = 0;
var offset: u32 = 0;
while (index < vector_len) : (index += 1) {
@@ -4855,6 +4880,7 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const inst_ty = func.air.typeOfIndex(inst);
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data;
@@ -4865,16 +4891,15 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mask_len = extra.mask_len;
const child_ty = inst_ty.childType();
- const elem_size = child_ty.abiSize(func.target);
+ const elem_size = child_ty.abiSize(mod);
- const module = func.bin_file.base.options.module.?;
// TODO: One of them could be by ref; handle in loop
- if (isByRef(func.air.typeOf(extra.a), func.target) or isByRef(inst_ty, func.target)) {
+ if (isByRef(func.air.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) {
const result = try func.allocStack(inst_ty);
for (0..mask_len) |index| {
var buf: Value.ElemValueBuffer = undefined;
- const value = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target);
+ const value = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod);
try func.emitWValue(result);
@@ -4895,7 +4920,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var lanes = std.mem.asBytes(operands[1..]);
for (0..@intCast(usize, mask_len)) |index| {
var buf: Value.ElemValueBuffer = undefined;
- const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target);
+ const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod);
const base_index = if (mask_elem >= 0)
@intCast(u8, @intCast(i64, elem_size) * mask_elem)
else
@@ -4930,13 +4955,14 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ty = func.air.typeOfIndex(inst);
const len = @intCast(usize, result_ty.arrayLen());
const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
+ const mod = func.bin_file.base.options.module.?;
const result: WValue = result_value: {
- switch (result_ty.zigTypeTag()) {
+ switch (result_ty.zigTypeTag(mod)) {
.Array => {
const result = try func.allocStack(result_ty);
const elem_ty = result_ty.childType();
- const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
+ const elem_size = @intCast(u32, elem_ty.abiSize(mod));
const sentinel = if (result_ty.sentinel()) |sent| blk: {
break :blk try func.lowerConstant(sent, elem_ty);
} else null;
@@ -4944,7 +4970,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When the element type is by reference, we must copy the entire
// value. It is therefore safer to move the offset pointer and store
// each value individually, instead of using store offsets.
- if (isByRef(elem_ty, func.target)) {
+ if (isByRef(elem_ty, mod)) {
// copy stack pointer into a temporary local, which is
// moved for each element to store each value in the right position.
const offset = try func.buildPointerOffset(result, 0, .new);
@@ -4974,7 +5000,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.Struct => switch (result_ty.containerLayout()) {
.Packed => {
- if (isByRef(result_ty, func.target)) {
+ if (isByRef(result_ty, mod)) {
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
}
const struct_obj = result_ty.castTag(.@"struct").?.data;
@@ -4983,7 +5009,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 // ensure the result is zeroed
const result = try func.allocLocal(backing_type);
- if (struct_obj.backing_int_ty.bitSize(func.target) <= 32)
+ if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
try func.addImm32(0)
else
try func.addImm64(0);
@@ -4992,20 +5018,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var current_bit: u16 = 0;
for (elements, 0..) |elem, elem_index| {
const field = fields[elem_index];
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- const shift_val = if (struct_obj.backing_int_ty.bitSize(func.target) <= 32)
+ const shift_val = if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
WValue{ .imm32 = current_bit }
else
WValue{ .imm64 = current_bit };
const value = try func.resolveInst(elem);
- const value_bit_size = @intCast(u16, field.ty.bitSize(func.target));
- var int_ty_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = value_bit_size,
- };
- const int_ty = Type.initPayload(&int_ty_payload.base);
+ const value_bit_size = @intCast(u16, field.ty.bitSize(mod));
+ const int_ty = try mod.intType(.unsigned, value_bit_size);
// load our current result on stack so we can perform all transformations
 // using only stack values, saving the cost of loads and stores.
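Not part of the patch: a sketch assuming the usual shift-and-or packing that the shift_val and current_bit bookkeeping above is building toward (the remainder of the loop falls outside this hunk). The function name and values are illustrative, fixed to a u32 backing integer for simplicity.

const std = @import("std");

// Illustrative sketch only; not from the patch.
// Pack field values into an unsigned backing integer by shifting each value to
// its bit offset and OR-ing it into the accumulated result.
fn packIntoBacking(values: []const u32, bit_sizes: []const u16) u32 {
    var result: u32 = 0;
    var current_bit: u16 = 0;
    for (values, bit_sizes) |value, bits| {
        result |= value << @intCast(u5, current_bit);
        current_bit += bits;
    }
    return result;
}

test "pack a u8 field and a u4 field into a u32 backing integer" {
    try std.testing.expectEqual(@as(u32, 0x5AB), packIntoBacking(&.{ 0xAB, 0x5 }, &.{ 8, 4 }));
}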
@@ -5027,10 +5049,10 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = try func.allocStack(result_ty);
const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
for (elements, 0..) |elem, elem_index| {
- if (result_ty.structFieldValueComptime(elem_index) != null) continue;
+ if (result_ty.structFieldValueComptime(mod, elem_index) != null) continue;
const elem_ty = result_ty.structFieldType(elem_index);
- const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
+ const elem_size = @intCast(u32, elem_ty.abiSize(mod));
const value = try func.resolveInst(elem);
try func.store(offset, value, elem_ty, 0);
@@ -5058,12 +5080,13 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
const result = result: {
const union_ty = func.air.typeOfIndex(inst);
- const layout = union_ty.unionGetLayout(func.target);
+ const layout = union_ty.unionGetLayout(mod);
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const field = union_obj.fields.values()[extra.field_index];
const field_name = union_obj.fields.keys()[extra.field_index];
@@ -5082,15 +5105,15 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (layout.tag_size == 0) {
break :result WValue{ .none = {} };
}
- assert(!isByRef(union_ty, func.target));
+ assert(!isByRef(union_ty, mod));
break :result tag_int;
}
- if (isByRef(union_ty, func.target)) {
+ if (isByRef(union_ty, mod)) {
const result_ptr = try func.allocStack(union_ty);
const payload = try func.resolveInst(extra.init);
if (layout.tag_align >= layout.payload_align) {
- if (isByRef(field.ty, func.target)) {
+ if (isByRef(field.ty, mod)) {
const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
try func.store(payload_ptr, payload, field.ty, 0);
} else {
@@ -5114,26 +5137,14 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result result_ptr;
} else {
const operand = try func.resolveInst(extra.init);
- var payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, union_ty.bitSize(func.target)),
- };
- const union_int_type = Type.initPayload(&payload.base);
- if (field.ty.zigTypeTag() == .Float) {
- var int_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field.ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&int_payload.base);
+ const union_int_type = try mod.intType(.unsigned, @intCast(u16, union_ty.bitSize(mod)));
+ if (field.ty.zigTypeTag(mod) == .Float) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
const bitcasted = try func.bitcast(field.ty, int_type, operand);
const casted = try func.trunc(bitcasted, int_type, union_int_type);
break :result try casted.toLocal(func, field.ty);
- } else if (field.ty.isPtrAtRuntime()) {
- var int_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field.ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&int_payload.base);
+ } else if (field.ty.isPtrAtRuntime(mod)) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
const casted = try func.intcast(operand, int_type, union_int_type);
break :result try casted.toLocal(func, field.ty);
}
@@ -5171,7 +5182,8 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
}
fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
- assert(operand_ty.hasRuntimeBitsIgnoreComptime());
+ const mod = func.bin_file.base.options.module.?;
+ assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod));
assert(op == .eq or op == .neq);
var buf: Type.Payload.ElemType = undefined;
const payload_ty = operand_ty.optionalChild(&buf);
@@ -5189,7 +5201,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
_ = try func.load(lhs, payload_ty, 0);
_ = try func.load(rhs, payload_ty, 0);
- const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, func.target) });
+ const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, mod) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
try func.addLabel(.br_if, 0);
@@ -5207,10 +5219,11 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
/// NOTE: Leaves the result of the comparison on top of the stack.
/// TODO: Lower this to compiler_rt call when bitsize > 128
fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
- assert(operand_ty.abiSize(func.target) >= 16);
+ const mod = func.bin_file.base.options.module.?;
+ assert(operand_ty.abiSize(mod) >= 16);
assert(!(lhs != .stack and rhs == .stack));
- if (operand_ty.bitSize(func.target) > 128) {
- return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(func.target)});
+ if (operand_ty.bitSize(mod) > 128) {
+ return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(mod)});
}
var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
@@ -5233,7 +5246,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
}
},
else => {
- const ty = if (operand_ty.isSignedInt()) Type.i64 else Type.u64;
+ const ty = if (operand_ty.isSignedInt(mod)) Type.i64 else Type.u64;
 // leave those values on top of the stack for '.select'
const lhs_low_bit = try func.load(lhs, Type.u64, 8);
const rhs_low_bit = try func.load(rhs, Type.u64, 8);
@@ -5248,10 +5261,11 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
}
fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const un_ty = func.air.typeOf(bin_op.lhs).childType();
const tag_ty = func.air.typeOf(bin_op.rhs);
- const layout = un_ty.unionGetLayout(func.target);
+ const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const union_ptr = try func.resolveInst(bin_op.lhs);
@@ -5271,11 +5285,12 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const un_ty = func.air.typeOf(ty_op.operand);
const tag_ty = func.air.typeOfIndex(inst);
- const layout = un_ty.unionGetLayout(func.target);
+ const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
@@ -5375,6 +5390,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
}
fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const err_set_ty = func.air.typeOf(ty_op.operand).childType();
@@ -5386,26 +5402,27 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
operand,
.{ .imm32 = 0 },
Type.anyerror,
- @intCast(u32, errUnionErrorOffset(payload_ty, func.target)),
+ @intCast(u32, errUnionErrorOffset(payload_ty, mod)),
);
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)), .new);
+ break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, mod)), .new);
};
func.finishAir(inst, result, &.{ty_op.operand});
}
fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const field_ptr = try func.resolveInst(extra.field_ptr);
const parent_ty = func.air.getRefType(ty_pl.ty).childType();
- const field_offset = parent_ty.structFieldOffset(extra.field_index, func.target);
+ const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
const result = if (field_offset != 0) result: {
const base = try func.buildPointerOffset(field_ptr, 0, .new);
@@ -5428,6 +5445,7 @@ fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue
}
fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const dst = try func.resolveInst(bin_op.lhs);
const dst_ty = func.air.typeOf(bin_op.lhs);
@@ -5437,16 +5455,16 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const len = switch (dst_ty.ptrSize()) {
.Slice => blk: {
const slice_len = try func.sliceLen(dst);
- if (ptr_elem_ty.abiSize(func.target) != 1) {
+ if (ptr_elem_ty.abiSize(mod) != 1) {
try func.emitWValue(slice_len);
- try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(func.target)) });
+ try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(mod)) });
try func.addTag(.i32_mul);
try func.addLabel(.local_set, slice_len.local.value);
}
break :blk slice_len;
},
.One => @as(WValue, .{
- .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(func.target)),
+ .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(mod)),
}),
.C, .Many => unreachable,
};
@@ -5472,12 +5490,13 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.air.typeOf(ty_op.operand);
const result_ty = func.air.typeOfIndex(inst);
+ const mod = func.bin_file.base.options.module.?;
- if (op_ty.zigTypeTag() == .Vector) {
+ if (op_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement @popCount for vectors", .{});
}
- const int_info = op_ty.intInfo(func.target);
+ const int_info = op_ty.intInfo(mod);
const bits = int_info.bits;
const wasm_bits = toWasmBits(bits) orelse {
return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
@@ -5527,7 +5546,8 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// to make a copy of the ptr+value but can point towards them directly.
const error_table_symbol = try func.bin_file.getErrorTableSymbol();
const name_ty = Type.initTag(.const_slice_u8_sentinel_0);
- const abi_size = name_ty.abiSize(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const abi_size = name_ty.abiSize(mod);
const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
try func.emitWValue(error_name_value);
@@ -5566,12 +5586,13 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
const lhs_op = try func.resolveInst(extra.lhs);
const rhs_op = try func.resolveInst(extra.rhs);
const lhs_ty = func.air.typeOf(extra.lhs);
+ const mod = func.bin_file.base.options.module.?;
- if (lhs_ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
- const int_info = lhs_ty.intInfo(func.target);
+ const int_info = lhs_ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
@@ -5630,15 +5651,16 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
try func.store(result_ptr, result, lhs_ty, 0);
- const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+ const offset = @intCast(u32, lhs_ty.abiSize(mod));
try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
}
fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, result_ty: Type, op: Op) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
assert(op == .add or op == .sub);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
if (int_info.bits != 128) {
return func.fail("TODO: Implement @{{add/sub}}WithOverflow for integer bitsize '{d}'", .{int_info.bits});
@@ -5701,6 +5723,7 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type,
}
fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -5709,11 +5732,11 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const lhs_ty = func.air.typeOf(extra.lhs);
const rhs_ty = func.air.typeOf(extra.rhs);
- if (lhs_ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
- const int_info = lhs_ty.intInfo(func.target);
+ const int_info = lhs_ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
@@ -5721,7 +5744,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// Ensure rhs is coerced to lhs as they must have the same WebAssembly types
// before we can perform any binary operation.
- const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(func.target).bits).?;
+ const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(mod).bits).?;
const rhs_final = if (wasm_bits != rhs_wasm_bits) blk: {
const rhs_casted = try func.intcast(rhs, rhs_ty, lhs_ty);
break :blk try rhs_casted.toLocal(func, lhs_ty);
@@ -5750,7 +5773,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
try func.store(result_ptr, result, lhs_ty, 0);
- const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+ const offset = @intCast(u32, lhs_ty.abiSize(mod));
try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -5763,8 +5786,9 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const lhs = try func.resolveInst(extra.lhs);
const rhs = try func.resolveInst(extra.rhs);
const lhs_ty = func.air.typeOf(extra.lhs);
+ const mod = func.bin_file.base.options.module.?;
- if (lhs_ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
@@ -5773,7 +5797,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var overflow_bit = try func.ensureAllocLocal(Type.initTag(.u1));
defer overflow_bit.free(func);
- const int_info = lhs_ty.intInfo(func.target);
+ const int_info = lhs_ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
};
@@ -5924,7 +5948,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
try func.store(result_ptr, bin_op_local, lhs_ty, 0);
- const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+ const offset = @intCast(u32, lhs_ty.abiSize(mod));
try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -5934,11 +5958,12 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const ty = func.air.typeOfIndex(inst);
- if (ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
}
- if (ty.abiSize(func.target) > 16) {
+ if (ty.abiSize(mod) > 16) {
return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
}
@@ -5954,7 +5979,7 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE
try func.addTag(.select);
// store result in local
- const result_ty = if (isByRef(ty, func.target)) Type.u32 else ty;
+ const result_ty = if (isByRef(ty, mod)) Type.u32 else ty;
const result = try func.allocLocal(result_ty);
try func.addLabel(.local_set, result.local.value);
func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
@@ -5965,7 +5990,8 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
const ty = func.air.typeOfIndex(inst);
- if (ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: `@mulAdd` for vectors", .{});
}
@@ -5998,12 +6024,13 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.air.typeOf(ty_op.operand);
const result_ty = func.air.typeOfIndex(inst);
- if (ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: `@clz` for vectors", .{});
}
const operand = try func.resolveInst(ty_op.operand);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
};
@@ -6051,12 +6078,13 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.air.typeOf(ty_op.operand);
const result_ty = func.air.typeOfIndex(inst);
- if (ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: `@ctz` for vectors", .{});
}
const operand = try func.resolveInst(ty_op.operand);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
};
@@ -6174,12 +6202,13 @@ fn lowerTry(
err_union_ty: Type,
operand_is_ptr: bool,
) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
if (operand_is_ptr) {
return func.fail("TODO: lowerTry for pointers", .{});
}
const pl_ty = err_union_ty.errorUnionPayload();
- const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime();
+ const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod);
if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
// Block we can jump out of when error is not set
@@ -6188,10 +6217,10 @@ fn lowerTry(
// check if the error tag is set for the error union.
try func.emitWValue(err_union);
if (pl_has_bits) {
- const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target));
+ const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
try func.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
- .alignment = Type.anyerror.abiAlignment(func.target),
+ .alignment = Type.anyerror.abiAlignment(mod),
});
}
try func.addTag(.i32_eqz);
@@ -6213,8 +6242,8 @@ fn lowerTry(
return WValue{ .none = {} };
}
- const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, func.target));
- if (isByRef(pl_ty, func.target)) {
+ const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, mod));
+ if (isByRef(pl_ty, mod)) {
return buildPointerOffset(func, err_union, pl_offset, .new);
}
const payload = try func.load(err_union, pl_ty, pl_offset);
@@ -6226,11 +6255,12 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.air.typeOfIndex(inst);
const operand = try func.resolveInst(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: @byteSwap for vectors", .{});
}
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
// bytes are no-op
if (int_info.bits == 8) {
@@ -6292,13 +6322,14 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const ty = func.air.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const result = if (ty.isSignedInt())
+ const result = if (ty.isSignedInt(mod))
try func.divSigned(lhs, rhs, ty)
else
try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
@@ -6306,13 +6337,14 @@ fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const ty = func.air.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const div_result = if (ty.isSignedInt())
+ const div_result = if (ty.isSignedInt(mod))
try func.divSigned(lhs, rhs, ty)
else
try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
@@ -6328,15 +6360,16 @@ fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+ const mod = func.bin_file.base.options.module.?;
const ty = func.air.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- if (ty.isUnsignedInt()) {
+ if (ty.isUnsignedInt(mod)) {
const result = try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
- } else if (ty.isSignedInt()) {
- const int_bits = ty.intInfo(func.target).bits;
+ } else if (ty.isSignedInt(mod)) {
+ const int_bits = ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: `@divFloor` for signed integers larger than '{d}' bits", .{int_bits});
};
@@ -6414,7 +6447,8 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue {
- const int_bits = ty.intInfo(func.target).bits;
+ const mod = func.bin_file.base.options.module.?;
+ const int_bits = ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: Implement signed division for integers with bitsize '{d}'", .{int_bits});
};
@@ -6441,7 +6475,8 @@ fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WVal
/// Retrieves the absolute value of a signed integer
/// NOTE: Leaves the result value on the stack.
fn signAbsValue(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
- const int_bits = ty.intInfo(func.target).bits;
+ const mod = func.bin_file.base.options.module.?;
+ const int_bits = ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: signAbsValue for signed integers larger than '{d}' bits", .{int_bits});
};
@@ -6476,11 +6511,12 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
assert(op == .add or op == .sub);
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+ const mod = func.bin_file.base.options.module.?;
const ty = func.air.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
if (int_info.bits > 64) {
@@ -6523,7 +6559,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
}
fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op: Op) InnerError!WValue {
- const int_info = ty.intInfo(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const int_info = ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits).?;
const is_wasm_bits = wasm_bits == int_info.bits;
@@ -6588,8 +6625,9 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type,
fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+ const mod = func.bin_file.base.options.module.?;
const ty = func.air.typeOfIndex(inst);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
if (int_info.bits > 64) {
return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
@@ -6707,12 +6745,13 @@ fn callIntrinsic(
};
// Always pass over C-ABI
- var func_type = try genFunctype(func.gpa, .C, param_types, return_type, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ var func_type = try genFunctype(func.gpa, .C, param_types, return_type, mod);
defer func_type.deinit(func.gpa);
const func_type_index = try func.bin_file.putOrGetFuncType(func_type);
try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
- const want_sret_param = firstParamSRet(.C, return_type, func.target);
+ const want_sret_param = firstParamSRet(.C, return_type, mod);
// if we want return as first param, we allocate a pointer to stack,
// and emit it as our first argument
const sret = if (want_sret_param) blk: {
@@ -6724,14 +6763,14 @@ fn callIntrinsic(
// Lower all arguments to the stack before we call our function
for (args, 0..) |arg, arg_i| {
assert(!(want_sret_param and arg == .stack));
- assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime());
+ assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime(mod));
try func.lowerArg(.C, param_types[arg_i], arg);
}
// Actually call our intrinsic
try func.addLabel(.call, symbol_index);
- if (!return_type.hasRuntimeBitsIgnoreComptime()) {
+ if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
return WValue.none;
} else if (return_type.isNoReturn()) {
try func.addTag(.@"unreachable");
@@ -6759,15 +6798,15 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
+ const mod = func.bin_file.base.options.module.?;
const enum_decl_index = enum_ty.getOwnerDecl();
- const module = func.bin_file.base.options.module.?;
var arena_allocator = std.heap.ArenaAllocator.init(func.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const fqn = try module.declPtr(enum_decl_index).getFullyQualifiedName(module);
- defer module.gpa.free(fqn);
+ const fqn = try mod.declPtr(enum_decl_index).getFullyQualifiedName(mod);
+ defer mod.gpa.free(fqn);
const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn});
// check if we already generated code for this.
@@ -6775,10 +6814,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
return loc.index;
}
- var int_tag_type_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
+ const int_tag_ty = enum_ty.intTagType();
- if (int_tag_ty.bitSize(func.target) > 64) {
+ if (int_tag_ty.bitSize(mod) > 64) {
return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
}
@@ -6806,9 +6844,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
.data = @intCast(u64, tag_name.len),
};
const name_ty = Type.initPayload(&name_ty_payload.base);
- const string_bytes = &module.string_literal_bytes;
- try string_bytes.ensureUnusedCapacity(module.gpa, tag_name.len);
- const gop = try module.string_literal_table.getOrPutContextAdapted(module.gpa, tag_name, Module.StringLiteralAdapter{
+ const string_bytes = &mod.string_literal_bytes;
+ try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len);
+ const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, tag_name, Module.StringLiteralAdapter{
.bytes = string_bytes,
}, Module.StringLiteralContext{
.bytes = string_bytes,
@@ -6929,7 +6967,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
try writer.writeByte(std.wasm.opcode(.end));
const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
- const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, func.target);
+ const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod);
return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
}
@@ -6944,11 +6982,11 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len);
defer values.deinit();
- const module = func.bin_file.base.options.module.?;
+ const mod = func.bin_file.base.options.module.?;
var lowest: ?u32 = null;
var highest: ?u32 = null;
for (names) |name| {
- const err_int = module.global_error_set.get(name).?;
+ const err_int = mod.global_error_set.get(name).?;
if (lowest) |*l| {
if (err_int < l.*) {
l.* = err_int;
@@ -7019,6 +7057,7 @@ inline fn useAtomicFeature(func: *const CodeGen) bool {
}
fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
@@ -7037,7 +7076,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr_operand);
try func.lowerToStack(expected_val);
try func.lowerToStack(new_val);
- try func.addAtomicMemArg(switch (ty.abiSize(func.target)) {
+ try func.addAtomicMemArg(switch (ty.abiSize(mod)) {
1 => .i32_atomic_rmw8_cmpxchg_u,
2 => .i32_atomic_rmw16_cmpxchg_u,
4 => .i32_atomic_rmw_cmpxchg,
@@ -7045,14 +7084,14 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
}, .{
.offset = ptr_operand.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
});
try func.addLabel(.local_tee, val_local.local.value);
_ = try func.cmp(.stack, expected_val, ty, .eq);
try func.addLabel(.local_set, cmp_result.local.value);
break :val val_local;
} else val: {
- if (ty.abiSize(func.target) > 8) {
+ if (ty.abiSize(mod) > 8) {
return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
}
const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty);
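Editorial note, not part of the patch: in the cmpxchg hunks above, the atomic opcode is chosen by switching on ty.abiSize(mod), and the memory-arg alignment now comes from ty.abiAlignment(mod); sizes other than 1, 2, and 4 are routed to func.fail. A self-contained sketch of that size dispatch, reusing only the opcode tag names visible in the diff:

const std = @import("std");

// Sketch only: size-directed selection of the wasm atomic cmpxchg opcode,
// mirroring `switch (ty.abiSize(mod))` in the hunk above. This enum is a
// stand-in for the real opcode set in the standard library.
const AtomicsOp = enum {
    i32_atomic_rmw8_cmpxchg_u,
    i32_atomic_rmw16_cmpxchg_u,
    i32_atomic_rmw_cmpxchg,
};

fn cmpxchgOpcode(abi_size: u64) !AtomicsOp {
    return switch (abi_size) {
        1 => .i32_atomic_rmw8_cmpxchg_u,
        2 => .i32_atomic_rmw16_cmpxchg_u,
        4 => .i32_atomic_rmw_cmpxchg,
        else => error.Unsupported, // the diff reports these via func.fail(...)
    };
}

pub fn main() !void {
    std.debug.print("4-byte cmpxchg lowers to {s}\n", .{@tagName(try cmpxchgOpcode(4))});
}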
@@ -7068,7 +7107,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :val ptr_val;
};
- const result_ptr = if (isByRef(result_ty, func.target)) val: {
+ const result_ptr = if (isByRef(result_ty, mod)) val: {
try func.emitWValue(cmp_result);
try func.addImm32(-1);
try func.addTag(.i32_xor);
@@ -7076,7 +7115,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_and);
const and_result = try WValue.toLocal(.stack, func, Type.bool);
const result_ptr = try func.allocStack(result_ty);
- try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(func.target)));
+ try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(mod)));
try func.store(result_ptr, ptr_val, ty, 0);
break :val result_ptr;
} else val: {
@@ -7091,12 +7130,13 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const atomic_load = func.air.instructions.items(.data)[inst].atomic_load;
const ptr = try func.resolveInst(atomic_load.ptr);
const ty = func.air.typeOfIndex(inst);
if (func.useAtomicFeature()) {
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
1 => .i32_atomic_load8_u,
2 => .i32_atomic_load16_u,
4 => .i32_atomic_load,
@@ -7106,7 +7146,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
});
} else {
_ = try func.load(ptr, ty, 0);
@@ -7117,6 +7157,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data;
@@ -7140,7 +7181,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.emitWValue(value);
if (op == .Nand) {
- const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+ const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
const and_res = try func.binOp(value, operand, ty, .@"and");
if (wasm_bits == 32)
@@ -7157,7 +7198,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.select);
}
try func.addAtomicMemArg(
- switch (ty.abiSize(func.target)) {
+ switch (ty.abiSize(mod)) {
1 => .i32_atomic_rmw8_cmpxchg_u,
2 => .i32_atomic_rmw16_cmpxchg_u,
4 => .i32_atomic_rmw_cmpxchg,
@@ -7166,7 +7207,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
},
);
const select_res = try func.allocLocal(ty);
@@ -7185,7 +7226,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => {
try func.emitWValue(ptr);
try func.emitWValue(operand);
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
1 => switch (op) {
.Xchg => .i32_atomic_rmw8_xchg_u,
.Add => .i32_atomic_rmw8_add_u,
@@ -7226,7 +7267,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
});
const result = try WValue.toLocal(.stack, func, ty);
return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -7255,7 +7296,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.Xor => .xor,
else => unreachable,
});
- if (ty.isInt() and (op == .Add or op == .Sub)) {
+ if (ty.isInt(mod) and (op == .Add or op == .Sub)) {
_ = try func.wrapOperand(.stack, ty);
}
try func.store(.stack, .stack, ty, ptr.offset());
@@ -7271,7 +7312,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.store(.stack, .stack, ty, ptr.offset());
},
.Nand => {
- const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+ const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
try func.emitWValue(ptr);
const and_res = try func.binOp(result, operand, ty, .@"and");
@@ -7302,6 +7343,7 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const ptr = try func.resolveInst(bin_op.lhs);
@@ -7310,7 +7352,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = ptr_ty.childType();
if (func.useAtomicFeature()) {
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
1 => .i32_atomic_store8,
2 => .i32_atomic_store16,
4 => .i32_atomic_store,
@@ -7321,7 +7363,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.lowerToStack(operand);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
});
} else {
try func.store(ptr, operand, ty, 0);
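Editorial note, not part of the patch: the atomic store hunks above show the same pattern as cmpxchg: with the atomics feature enabled, the opcode is picked from ty.abiSize(mod) and the mem-arg alignment from ty.abiAlignment(mod); without it, the value is written with an ordinary func.store. A self-contained sketch of that branch, using only the 1/2/4-byte opcode tags visible in the hunk (larger sizes are not shown here and are treated as unsupported):

const std = @import("std");

// Sketch only: two lowering strategies for an atomic store, keyed off whether
// the wasm atomics feature is available. Opcode names mirror the tags in the
// hunk; anything else is rejected as unsupported.
fn lowerAtomicStore(writer: anytype, use_atomics: bool, abi_size: u64) !void {
    if (use_atomics) {
        const opcode = switch (abi_size) {
            1 => "i32_atomic_store8",
            2 => "i32_atomic_store16",
            4 => "i32_atomic_store",
            else => return error.Unsupported,
        };
        try writer.print("atomic mem-arg opcode: {s}\n", .{opcode});
    } else {
        // Fallback path: a plain (non-atomic) store of the operand.
        try writer.print("plain store\n", .{});
    }
}

pub fn main() !void {
    const out = std.io.getStdOut().writer();
    try lowerAtomicStore(out, true, 4);
    try lowerAtomicStore(out, false, 4);
}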